wrestlers.tex: Use `strayman''s `indentpar' option.
[doc/wrestlers] / wrestlers.tex
CommitLineData
a6e375a6
MW
1%%% -*-latex-*-
2%%%
3%%% The Wrestlers Protocol: secure, deniable key-exchange
4%%%
5%%% (c) 2006 Mark Wooding
6%%%
7
b12a993f
MW
%% Set up the document-configuration switches.  A wrapper file may define
%% \iffancystyle and/or \ifshort before \input-ing this file; each \ifx
%% test below expands to \@doit (execute the following braced group) when
%% the switch is still undefined, or to \@gobble (discard the group) when
%% the wrapper has already provided it.
\makeatletter
%% \@doit{<stuff>} simply expands to <stuff> (counterpart to \@gobble).
\def\@doit#1{#1}
%% Default: fancy (strayman) style on.
\ifx\iffancystyle\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
{\newif\iffancystyle\fancystyletrue}
%% Default: full version rather than the short extended abstract.
\ifx\ifshort\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
{\newif\ifshort\shortfalse}
%% Define \ifpdfing only when fancy style is off.  NOTE(review): \ifpdfing
%% is tested inside the fancy branch below, so presumably a fancy-style
%% wrapper supplies its own definition -- confirm against the wrapper files.
\iffancystyle\expandafter\@gobble\else\expandafter\@doit\fi{\newif\ifpdfing}
\makeatother
32609df3
MW
16
17\typeout{Options:}
18\typeout{ Fancy style: \iffancystyle ON\else OFF\fi}
19\typeout{ Short version: \ifshort ON\else OFF\fi}
20
a6e375a6
MW
21\errorcontextlines=\maxdimen
22\showboxdepth=\maxdimen
23\showboxbreadth=\maxdimen
24
25\iffancystyle
d8eb72b5 26 \documentclass[indentpar]{strayman}
a6e375a6
MW
27 \usepackage[T1]{fontenc}
28 \usepackage[palatino, helvetica, courier, maths=cmr]{mdwfonts}
29 \usepackage[within = subsection, mdwmargin]{mdwthm}
30 \usepackage{mdwlist}
31 \usepackage{sverb}
32609df3 32 \ifpdfing\else
d8139f2c
MW
33 \PassOptionsToPackage{dvips}{xy}
34 \fi
a6e375a6 35\else
32609df3
MW
36 \PassOptionsToClass{runningheads}{llncs}
37 \documentclass{llncs}
a6e375a6
MW
38\fi
39
40\PassOptionsToPackage{show}{slowbox}
41%\PassOptionsToPackage{hide}{slowbox}
32609df3 42\usepackage{mdwtab, mdwmath, crypto}
a6e375a6
MW
43\usepackage{slowbox}
44\usepackage{amssymb, amstext}
45\usepackage{url, multicol}
46\usepackage{tabularx}
47\DeclareUrlCommand\email{\urlstyle{tt}}
48\ifslowboxshow
49 \usepackage[all]{xy}
50 \turnradius{4pt}
51\fi
32609df3 52\usepackage{mathenv}
a6e375a6 53
32609df3 54\newcommand{\Nupto}[1]{\{0, 1, \ldots, #1 - 1\}}
a6e375a6 55\iffancystyle
32609df3 56 \let\next\title
a6e375a6 57\else
32609df3 58 \def\next[#1]{\titlerunning{#1}\title}
a6e375a6
MW
59\fi
60\next
32609df3
MW
61 [The Wrestlers Protocol]
62 {The Wrestlers Protocol%
63 \ifshort\thanks{This is an extended abstract; the full version
081da1b1 64 \cite{wooding-2006:wrestlers} is available from
32609df3 65 \texttt{http://eprint.iacr.org/2006/386/}.}\fi \\
a6e375a6
MW
66 A simple, practical, secure, deniable protocol for key-exchange}
67\iffancystyle
68 \author{Mark Wooding \\ \email{mdw@distorted.org.uk}}
69\else
70 \author{Mark Wooding}
71 \institute{\email{mdw@distorted.org.uk}}
72\fi
73
74\iffancystyle
75 \bibliographystyle{mdwalpha}
76 \let\epsilon\varepsilon
77 \let\emptyset\varnothing
78 \let\le\leqslant\let\leq\le
79 \let\ge\geqslant\let\geq\ge
80 \numberwithin{table}{section}
81 \numberwithin{figure}{section}
82 \numberwithin{equation}{subsection}
83 \let\random\$
84\else
32609df3 85 \bibliographystyle{splncs}
a6e375a6
MW
86 \expandafter\let\csname claim*\endcsname\claim
87 \expandafter\let\csname endclaim*\endcsname\endclaim
88\fi
89
a6e375a6
MW
%% Notation: the binary alphabet and the empty string (introduced formally
%% in the `Bit strings and encodings' section).
\let\Bin\Sigma
\let\emptystring\lambda
%% Typeset \Pr with inline-style (non-limits) subscripts even in displays.
\edef\Pr{\expandafter\noexpand\Pr\nolimits}
%% x \bitsto y: a range of bit positions, typeset as `x .. y'.
\newcommand{\bitsto}{\mathbin{..}}
%% Calligraphic letters; from their uses, \E is an encryption scheme and
%% \M a message space -- confirm at first use.
\newcommand{\E}{{\mathcal{E}}}
\newcommand{\M}{{\mathcal{M}}}
%% In fancy style, restyle the description environment via mdwlist's
%% \basedescript: italic labels, multi-line label style, 1in label width.
\iffancystyle
  \def\description{%
    \basedescript{%
      \let\makelabel\textit%
      \desclabelstyle\multilinelabel%
      \desclabelwidth{1in}%
    }%
  }
\fi
%% \fixme{...}: mark unfinished text, with a marginal FIXME flag.
\def\fixme#1{\marginpar{FIXME}[FIXME: #1]}
%% \hex{...}: a hexadecimal constant; math mode only (the trailing
%% subscript `x' marks the base).
\def\hex#1{\texttt{#1}_{x}}
107
32609df3
MW
%% longproof: in the full version, behaves exactly like `proof'; in the
%% short version, the proof body is discarded (via \ignore) and replaced
%% by the alternative text given as the argument (e.g., a pointer to the
%% full paper).
\newenvironment{longproof}[1]{%
  \ifshort#1\expandafter\ignore
  \else\proof\fi
}{%
  \ifshort\else\endproof\fi
}
114
a6e375a6
MW
%% \dbox{...}: a top-aligned stack of left-aligned lines.  Within the
%% argument, \\ ends the current line and starts a new one; each line is
%% its own \hbox (with a \strut for even spacing), so no automatic line
%% breaking occurs inside the box.
\def\dbox#1{%
  \vtop{%
    \def\\{\unskip\egroup\hbox\bgroup\strut\ignorespaces}%
    \hbox{\strut#1}%
  }%
}
121
%% Names for the protocols and common notation.
%% \Wident, \Wkx, \Wdkx: the Wrestlers identification, key-exchange and
%% deniable key-exchange protocols, respectively.
\def\Wident{\Xid{W}{ident}}
\def\Wkx{\Xid{W}{kx}}
\def\Wdkx{\Xid{W}{dkx}}
%% \Func{D}{R}: the set of all functions with domain D and range R.
\def\Func#1#2{\mathcal{F}[#1\to#2]}
%% \diff{a}{b}: a Delta quantity indexed by a and b.  NOTE(review): exact
%% semantics are fixed where first used -- confirm there.
\def\diff#1#2{\Delta_{#1, #2}}
%% \Hid: the hash function (random oracle) H_ID.
\def\Hid{H_{\textit{ID}}}
128
%% Protocol-run diagrams, typeset as xy-pic matrices via \protocolrun.
%% Each \PR... macro typesets one event of a run; most append \ar[r], an
%% arrow to the next column.
\def\PRaction#1{#1\ar[r]}% generic action with a rightward arrow
\def\PRcreatex#1{\PRaction{\textsf{Create session\space}#1}}
\def\PRcreate#1#2#3{\PRcreatex{(\text{#1},\text{#2},#3)}}
\def\PRreceivex#1{\PRaction{\textsf{Receive\space}#1}}
\def\PRreceive#1#2{\PRreceivex{\msg{#1}{#2}}}
\def\PRsession#1{\relax\mathstrut#1\ar[r]}% session-identifier column
\def\msg#1#2{(\cookie{#1},#2)}% a message: (type cookie, payload)
\def\PRresult#1{#1}% a final result (no arrow)
\def\PRsendx#1{\PRresult{\textsf{Send\space}#1}}
\def\PRsend#1#2{\PRsendx{\msg{#1}{#2}}}
\def\PRcomplete#1{\textsf{Complete:\space}#1}
\def\PRstate#1{\textsf{State:\space}#1}
\def\PRignore{\textsf{(ignored)}}
\def\PRreveal{\textsf{Session-state reveal}\ar[r]}
%% \protocolrun{...}: assemble the events into a tight xy matrix display.
\def\protocolrun#1{\[\xymatrix @R=0pt @C=2em {#1}\]}
145
32609df3
MW
%% protocol ... endprotocol: a full-width two-column layout for protocol
%% descriptions; the \extracolsep fill pushes the two columns apart, left
%% column left-aligned and right column right-aligned.  \send{#1}{#2}
%% draws a full-width labelled arrow between rows via xy-pic (#1 is the
%% xy arrow-style spec, e.g. `->'; #2 is the label).
\def\protocol{%
  \unskip\bigskip
  \begin{tabular*}{\linewidth}%
    {@{\qquad}l@{\extracolsep{0ptplus1fil}}r@{\qquad}}}
\def\endprotocol{\end{tabular*}}
\def\send#1#2{\noalign{%
    \centerline{\xy\ar @{#1}|*+{\mathstrut#2}<.5\linewidth, 0pt>\endxy}}}
153
a6e375a6
MW
%% Class-ids for the proof of the extractor lemma.  Semantic names are
%% \let-bound to short mathematical symbols so the symbols can be changed
%% in one place: \Cid (class identifier), \Csession (session), \Cindex
%% (index), \Cquery (query), \Chash (hash), \Ccheck (check value),
%% \Ccheckset (set of check values), \Ccount (counter).
\let\Cid=\Lambda
\let\Csession=S
\let\Cindex=r
\let\Cquery=Q
\let\Chash=H
\let\Ccheck=c
\let\Ccheckset=V
\let\Ccount=\nu
163
dff0fad2
MW
%% \HG{i}: an indexed bold-H quantity.  NOTE(review): presumably a hash
%% family or hybrid-game label -- confirm at first use.
\def\HG#1{\mathbf{H}_{#1}}
165
32609df3
MW
%% In the non-fancy (llncs) style, make \subsubsection titles end with a
%% full stop, while keeping the plain title text for the optional
%% (running-head/contents) argument.
\iffancystyle\else
  \let\xsssec\subsubsection\def\subsubsection#1{\xsssec[#1]{#1.}}
\fi
169
a6e375a6
MW
170\begin{document}
171
172%%%--------------------------------------------------------------------------
173
174\maketitle
d8139f2c 175\iffancystyle \thispagestyle{empty} \fi
a6e375a6
MW
176
177\begin{abstract}
178 We describe and prove (in the random-oracle model) the security of a simple
179 but efficient zero-knowledge identification scheme, whose security is based
180 on the computational Diffie-Hellman problem. Unlike other recent proposals
181 for efficient identification protocols, we don't need any additional
182 assumptions, such as the Knowledge of Exponent assumption.
183
184 From this beginning, we build a simple key-exchange protocol, and prove
185 that it achieves `SK-security' -- and hence security in Canetti's Universal
186 Composability framework.
187
188 Finally, we show how to turn the simple key-exchange protocol into a
189 slightly more complex one which provides a number of valuable `real-life'
190 properties, without damaging its security.
191\end{abstract}
192
193\iffancystyle
194\newpage
d8139f2c 195\thispagestyle{empty}
a6e375a6
MW
196\columnsep=2em \columnseprule=0pt
197\tableofcontents[\begin{multicols}{2}\raggedright][\end{multicols}]
198%%\listoffigures[\begin{multicols}{2}\raggedright][\end{multicols}]
199%% \listoftables[\begin{multicols}{2}\raggedright][\end{multicols}]
200\newpage
201\fi
202
203%%%--------------------------------------------------------------------------
204
205\section{Introduction}
206
dff0fad2
MW
207This paper proposes protocols for \emph{identification} and
208\emph{authenticated key-exchange}.
a6e375a6 209
dff0fad2
MW
210An identification protocol allows one party, say Bob, to be sure that he's
211really talking to another party, say Alice. It assumes that Bob has some way
212of recognising Alice; for instance, he might know her public key. Our
213protocol requires only two messages -- a challenge and a response -- and has
214a number of useful properties. It is very similar to, though designed
32609df3 215independently of, a recent protocol by Stinson and Wu
081da1b1
MW
216\cite{stinson-wu-2006:two-flow-zero-knowledge}; we discuss their protocol and
217compare it to ours in \ifshort the full version of this paper. \else
32609df3 218section~\ref{sec:stinson-ident}. \fi
a6e375a6 219
dff0fad2 220Identification protocols are typically less useful than they sound. As Shoup
081da1b1
MW
\cite{shoup-1999:formal-model-key-exchange} points out, such a protocol
provides a `secure ping', by which Bob can know that Alice is `there', but
provides no guarantee that any other communication was sent to or reached
her.  However, there are
4331bf84
MW
situations where there is an authentic channel between two entities -- e.g.,
a device and a smartcard -- in which a simple identification protocol can
still be useful.
a6e375a6 227
dff0fad2
MW
228An authenticated key-exchange protocol lets Alice and Bob agree on a shared
229secret, known to them alone, even if there is an enemy who can read and
230intercept all of their messages, and substitute messages of her own. Once
231they have agreed on their shared secret, of course, they can use standard
232symmetric cryptography techniques to ensure the privacy and authenticity of
233their messages.
a6e375a6
MW
234
235
236\subsection{Desirable properties of our protocols}
237
238Our identification protocol has a number of desirable properties.
239\begin{itemize}
240\item It is \emph{simple} to understand and implement. In particular, it
241 requires only two messages.
242\item It is fairly \emph{efficient}, requiring two scalar multiplications by
243 each of the prover and verifier.
244\item It is provably \emph{secure} (in the random oracle model), assuming the
245 intractability of the computational Diffie-Hellman problem.
246\end{itemize}
247
dff0fad2 248Our key-exchange protocol also has a number of desirable
d8139f2c 249properties.
a6e375a6
MW
250\begin{itemize}
251\item It is fairly \emph{simple} to understand and implement, though there
252 are a few subtleties. In particular, it is \emph{symmetrical}. We have
253 implemented a virtual private network system based on this protocol.
254\item It is \emph{efficient}, requiring four scalar multiplications by each
255 participant. The communication can be reduced to three messages by
256 breaking the protocol's symmetry.
257\item It is provably \emph{secure} (again, in the random oracle model),
258 assuming the intractability of the computational Diffie-Hellman problem,
259 and the security of a symmetric encryption scheme.
260\item It provides \emph{perfect forward secrecy}. That is, even if a user's
261 long-term secrets are compromised, past sessions remain secure.
262\item It is \emph{deniable}. It is possible to construct simulated
263 transcripts of protocol executions between any number of parties without
264 knowing any of their private keys. The simulated transcripts are (almost)
265 indistinguishable from real protocol transcripts. Hence, a transcript
266 does not provide useful evidence that a given party was really involved in
267 a given protocol execution.
268\end{itemize}
269
32609df3 270\ifshort\else
dff0fad2 271\subsection{Asymptotic and concrete security results}
a6e375a6 272
dff0fad2
MW
273Most security definitions for identification (particularly zero-knowledge)
274and key-exchange in the literature are \emph{asymptotic}. That is, they
consider a family of related protocols, indexed by a \emph{security
parameter}~$k$; they require that any \emph{polynomially-bounded} adversary
has only \emph{negligible} advantage.  A polynomially-bounded adversary is
one whose running time is bounded by some polynomial~$t(k)$.  The security
definition requires that, for any such polynomially-bounded adversary, and
any polynomial $p(k)$, the adversary's advantage is less than $1/p(k)$ for
all sufficiently large values of $k$.
282
283Such asymptotic notions are theoretically interesting, and have obvious
284connections to complexity theory. Unfortunately, such an asymptotic result
285tells us nothing at all about the security of a particular instance of a
286protocol, or what parameter sizes one needs to choose for a given level of
287security against a particular kind of adversary. Koblitz and Menezes
081da1b1
MW
288\cite{koblitz-menezes-2006:another-look-provable-security-ii} (among other
289useful observations) give examples of protocols, proven to meet asymptotic
290notions of security, whose security proofs guarantee nothing at all for the
291kinds of parameters typically used in practice.
dff0fad2
MW
292
293Since, above all, we're interested in analysing a practical and implemented
294protocol, we follow here the `practice-oriented provable security' approach
295largely inspired by Bellare and Rogaway, and exemplified by
081da1b1
MW
296\cite{bellare-1994:security-cbc,bellare-1995:xor-macs,%
297bellare-rogaway-1995:oaep,bellare-rogaway-1996:exact-security-sigs,%
298bellare-1996:hmac,bellare-1997:concrete-symmetric}; see also
299\cite{bellare-1999:practice-oriented-provable-security}.
dff0fad2
MW
300Rather than attempting to say, formally, whether or not a protocol is
301`secure', we associate with each protocol an `insecurity function' which
302gives an upper bound on the advantage of any adversary attacking the protocol
303within given resource bounds.
32609df3 304\fi
a6e375a6 305
dff0fad2 306\subsection{Formal models for key-exchange}
a6e375a6 307
32609df3
MW
308\ifshort
309
310The first model for studying the \emph{computational} security of
311key-exchange protocols (rather than using protocol-analysis logics like that
081da1b1
MW
312of \cite{burrows-1989:logic-authn}) was given by Bellare and Rogaway
313\cite{bellare-rogaway-1994:entity-authn-key-distrib}; the model has since
314been enhanced, both by the original authors and others, in
315\cite{bellare-rogaway-1995:session-key-distrib,%
316 blake-wilson-1997:key-agreement,%
317 blake-wilson-menezes-1998:asymm-key-transport}.
318The model defines security in terms of a game: key-exchange protocols are
319secure if an adversary can't distinguish the key agreed by a chosen
320`challenge session' from a key chosen independently at random. Other models
321for key-exchange have been proposed in
322\cite{bellare-1998:modular-key-exchange} and
323\cite{shoup-1999:formal-model-key-exchange}; these use a different notion of
324security, involving implementation of an ideal functionality.
32609df3
MW
325
326\else
327
dff0fad2
MW
328Many proposed key-exchange protocols have turned out to have subtle security
329flaws. The idea of using formal methods to analyse key-exchange protocols
081da1b1
MW
330begins with the logic of Burrows, Abadi and Needham
331\cite{burrows-1989:logic-authn}. Their approach requires a `formalising'
332step, in which one expresses in the logic the contents of the message flows,
333and the \emph{beliefs} of the participants.
334
335Bellare and Rogaway \cite{bellare-rogaway-1994:entity-authn-key-distrib}
336describe a model for studying the computational security of authentication
337and key-exchange protocols in a concurrent setting, i.e., where multiple
338parties are running several instances of a protocol simultaneously. They
339define a notion of security in this setting, and show that several simple
340protocols achieve this notion. Their original paper dealt with pairs of
341parties using symmetric cryptography; they extended their definitions in
342\cite{bellare-rogaway-1995:session-key-distrib} to study three-party
343protocols involving a trusted key-distribution centre.
344
345Blake-Wilson, Johnson and Menezes \cite{blake-wilson-1997:key-agreement}
346applied the model of \cite{bellare-rogaway-1994:entity-authn-key-distrib} to
347key-exchange protocols using asymmetric cryptography, and Blake-Wilson and
348Menezes \cite{blake-wilson-menezes-1998:asymm-key-transport} applied it to
349protocols based on the Diffie-Hellman protocol.
350
351The security notion of \cite{bellare-rogaway-1994:entity-authn-key-distrib}
352is based on a \emph{game}, in which an adversary nominates a \emph{challenge
353session}, and is given either the key agreed by the participants of the
354challenge session, or a random value independently sampled from an
355appropriate distribution. The adversary's advantage -- and hence the
356insecurity of the protocol -- is measured by its success probability in
357guessing whether the value it was given is really the challenge key. This
358challenge-session notion was also used by the subsequent papers described
359above.
360
361Bellare, Canetti and Krawczyk \cite{bellare-1998:modular-key-exchange}
362described a pair of models which they called the \textsc{am} (for
363`authenticated links model') and \textsc{um} (`unauthenticated links model').
364They propose a modular approach to the design of key-exchange protocols,
365whereby one first designs a protocol and proves its security in the
\textsc{am}, and then applies an authenticating `compiler' to the protocol
367which they prove yields a protocol secure in the realistic \textsc{um}.
368Their security notion is new. They define an `ideal model', in which an
369adversary is limited to assigning sessions fresh, random and unknown keys, or
370matching up one session with another, so that both have the same key. They
371define a protocol to be secure if, for any adversary~$A$ in the \textsc{am}
372or \textsc{um}, there is an ideal adversary~$I$, such that the outputs of $A$
373and $I$ are computationally indistinguishable.
374
375In \cite{shoup-1999:formal-model-key-exchange}, Shoup presents a new model
376for key-exchange, also based on the idea of simulation. He analyses the
377previous models, particularly
378\cite{bellare-rogaway-1994:entity-authn-key-distrib} and
379\cite{bellare-1998:modular-key-exchange}, and highlights some of their inadequacies.
dff0fad2 380
32609df3
MW
381\fi
382
081da1b1
MW
383Canetti and Krawczyk
384\cite{canetti-krawczyk-2001:secure-channels-eprint,%
385 canetti-krawczyk-2001:secure-channels}
386describe a new notion of security in the model of
387\cite{bellare-1998:modular-key-exchange}, based on the challenge-session
388notion of \cite{bellare-rogaway-1994:entity-authn-key-distrib}. The security
389notion, called `SK-security', seems weaker in various ways than those of
390earlier works such as \cite{bellare-rogaway-1994:entity-authn-key-distrib} or
391\cite{shoup-1999:formal-model-key-exchange}. However, the authors show that
392their notion suffices for constructing `secure channel' protocols, which they
393also define.
394
395\ifshort\else In \cite{canetti-2001:uc-security-eprint,%
396 canetti-2001:uc-security},
397Canetti describes the `universal composition' framework. Here, security
398notions are simulation-based: one defines security notions by presenting an
399`ideal functionality'. A protocol securely implements a particular
400functionality if, for any adversary interacting with parties who use the
401protocol, there is an adversary which interacts with parties using the ideal
402functionality such that no `environment' can distinguish the two. The
403environment is allowed to interact freely with the adversary throughout,
404differentiating this approach from that of
405\cite{bellare-1998:modular-key-exchange} and
406\cite{shoup-1999:formal-model-key-exchange}, where the distinguisher was
407given only transcripts of the adversary's interaction with the parties. With
408security defined in this way, it's possible to prove a `universal composition
409theorem': one can construct a protocol, based upon various ideal
410functionalities, and then `plug in' secure implementations of the ideal
411functionalities and appeal to the theorem to prove the security of the entire
412protocol. The UC framework gives rise to very strong notions of security,
413due to the interactive nature of the `environment' distinguisher. \fi
414
415Canetti and Krawczyk \cite{canetti-krawczyk-2002:uc-key-exchange} show that
416the SK-security notion of \cite{canetti-krawczyk-2001:secure-channels} is
417\emph{equivalent} to a `relaxed' notion of key-exchange security in the UC
418framework\ifshort\space of \cite{canetti-2001:uc-security}\fi, and suffices
419for the construction of UC secure channels.
420
421The result of \cite{canetti-krawczyk-2002:uc-key-exchange} gives us
422confidence that SK-security is the `right' notion of security for
423key-exchange protocols. Accordingly, SK-security is the standard against
424which we analyse our key-exchange protocol.
a6e375a6
MW
425
426
427\subsection{Outline of the paper}
428
429The remaining sections of this paper are as follows.
430\begin{itemize}
431\item Section \ref{sec:prelim} provides the essential groundwork for the rest
432 of the paper. It introduces important notation, and describes security
433 notions and intractability assumptions.
434\item Section \ref{sec:zk-ident} describes our zero-knowledge identification
435 protocol and proves its security.
436\item Section \ref{sec:kx} describes the simple version of our key-exchange
437 protocol, and proves its security and deniability. It also describes some
438 minor modifications which bring practical benefits without damaging
32609df3 439 security.
dff0fad2 440\item Finally, section \ref{sec:conc} presents our conclusions.
a6e375a6
MW
441\end{itemize}
442
32609df3
MW
443\ifshort
444The full version of this paper describes how to make our protocols
445identity-based by using bilinear pairings using the techniques introduced in
081da1b1
MW
446\cite{boneh-franklin-2003:ibe-weil-pairing}. It also contains proofs of the
447various theorems stated here.
32609df3
MW
448\fi
449
a6e375a6
MW
450%%%--------------------------------------------------------------------------
451
452\section{Preliminaries}
453\label{sec:prelim}
454
32609df3
MW
455\ifshort
456\subsection{Basics}
457\let\prelimsec\subsubsection
458\else
459\let\prelimsec\subsection
460\fi
461
462\prelimsec{Miscellaneous notation}
a6e375a6
MW
463
464We write $\Func{D}{R}$ for the set of all functions with domain $D$ and range
465$R$.
a6e375a6 466
32609df3 467\prelimsec{Groups}
a6e375a6
MW
468
469Let $(G, +)$ be a cyclic group\footnote{
470 We find that additive group notation is easier to read. In particular, in
471 multiplicative groups, one ends up with many interesting things tucked away
472 in little superscripts.}%
473of prime order $q$, and generated by an element $P$. We shall write the
474identity of $G$ as $0_G$, or simply as $0$ when no ambiguity is likely to
475arise. Thus, we have $\langle P \rangle = G$ and $q P = 0$. Any $X \in G$
32609df3 476can be written as $X = x P$ for some $x \in \{0, 1, \ldots, q - 1\}$.
a6e375a6 477
32609df3
MW
478We consider a cyclic group of order $n$ as a $\Z/n\Z$-module, and in
479particular our group $G$ can be seen as a vector space over $\gf{q}$. This
480makes the notation slightly more convenient.
a6e375a6 481
32609df3 482\prelimsec{Bit strings and encodings}
a6e375a6
MW
483\label{sec:bitenc}
484
485Let $\Bin = \{0, 1\}$ be the set of binary digits. Then $\Bin^n$ is the set
486of $n$-bit strings, and $\Bin^*$ the set of all (finite) bit strings. If $x
487\in \Bin^n$ is a bit string, we write its length as $|x| = n$. For a bit
488string $x \in \Bin^n$, and for $0 \le i < n$, we write $x[i]$ as the $i$th
489bit of $x$. The empty string is denoted $\emptystring$.
490
491Let $x$ and $y$ be two bit strings. If $|x| = |y| = n$, we write $x \xor y$
32609df3
MW
492to mean the bitwise exclusive-or of $x$ and $y$\ifshort\else: if $z = x \xor
493y$ then $|z| = n$, and $z[i] = (x[i] + y[i]) \bmod 2$ for $0 \le i < n$\fi.
494We write $x \cat y$ to mean the concatenation of $x$ and $y$\ifshort\else: if
495$z = x \cat y$ then $|z| = |x| + |y|$ and $z[i] = x[i]$ if $0 \le i < |x|$
and $z[i] = y[i - |x|]$ if $|x| \le i < |x| + |y|$\fi.
a6e375a6
MW
497
498Finally, we let $\bot$ be a value distinct from any bit string.
499
500We shall want to encode group elements $X \in G$ and indices $x \in I =
32609df3
MW
501\gf{q}$ as bit strings.
502\ifshort
503To this end, we shall assume the existence of efficient, unambiguous
504encodings of group elements as $\ell_G$-bit strings, and indices as
505$\ell_I$-bit strings. To reduce clutter, we shall leave encoding and
506decoding as implicit operations.
507\else
508To this end, we shall assume the existence of
a6e375a6
MW
509integers $\ell_G, \ell_I > 0$ and functions
510\[
511 e_S\colon S \to \Bin^{\ell_S}
512 \quad \textrm{and} \quad
513 d_S\colon \Bin^{\ell_S} \to S \cup \{ \bot \}
514 \qquad
32609df3 515 \textrm{for } S \in \{ G, \F \}.
a6e375a6
MW
516\]
517with the following properties.
518\begin{itemize}
519\item The functions are \emph{unique} and \emph{unambiguous}, i.e., for any
520 $t \in \Bin^{\ell_S}$, we have
521 \[ d_S(t) = \begin{cases}
522 s & if there is some $s \in S$ such that $t = e_S(s)$, or \\
523 \bot & if no such $s$ exists.
524 \end{cases}
525 \]
526\item The functions should be \emph{efficient} to compute. Indeed, we shall
527 be assuming that the time taken for encoding and decoding is essentially
528 trivial.
529\end{itemize}
530Note that, as we have defined them, all encodings of group elements are the
531same length, and similarly for encodings of indices. This is necessary for
532the security of our protocols.
533
534We shall frequently abuse notation by omitting the encoding and decoding
535functions where it is obvious that they are required.
32609df3 536\fi
a6e375a6 537
32609df3
MW
538\ifshort\else
539\prelimsec{Games, adversaries, and oracles}
a6e375a6
MW
540\label{sec:games}
541
542Many of the security definitions and results given here make use of
543\emph{games}, played with an \emph{adversary}. An adversary is a
544probabilistic algorithm. In some games, the adversary is additionally
545equipped with \emph{oracles}, which perform computations with values chosen
546by the adversary and secrets chosen by the game but not revealed to the
547adversary. We impose limits on the adversary's resource usage: in
548particular, the total time it takes, and the number of queries it makes to
549its various oracles. Throughout, we include the size of the adversary's
550program as part of its `time', in order to model adversaries which contain
551large precomputed tables.
552
553The games provide models of someone trying to attack a construction or
554protocol. For security, we will either define a notion of `winning' the
555game, and require that all adversaries have only a very small probability of
556winning, or we consider two different games and require that no adversary can
32609df3 557distinguish between the two except with very small probability.
a6e375a6
MW
558
559Our proofs make frequent use of sequences of games; see
081da1b1
MW
560\cite{bellare-rogaway-2006:triple-enc,shoup-2004:sequences-of-games}.
561The presentation owes much to Shoup \cite{shoup-2004:sequences-of-games}. We
562begin with a game $\G0$ based directly on a relevant security definition, and
563construct a sequence of games $\G1$, $\G2$, \dots, each slightly different
564from the last. We define all of the games in a sequence over the same
565underlying probability space -- the random coins tossed by the algorithms
566involved -- though different games may have slightly differently-defined
567events and random variables. Our goal in doing this is to bound the
568probability of the adversary winning the initial game $\G0$ by simultaneously
569(a) relating the probability of this event to that of corresponding events in
570subsequent games, and (b) simplifying the game until the probability of the
571corresponding event can be computed directly.
572
573The following simple lemma from \cite{shoup-2001:oaep-reconsidered} will be
574frequently useful.
a6e375a6
MW
575\begin{lemma}[Difference Lemma]
576 \label{lem:shoup}
32609df3
MW
577 Let $S$, $T$, $F$ be events. Suppose $\Pr[S \mid \bar F] =
578 \Pr[T \mid \bar F]$. Then $|\Pr[S] - \Pr[T]| \le \Pr[F]$.
a6e375a6
MW
579\end{lemma}
580\begin{proof}
581 A simple calculation:
582 \begin{eqnarray*}[rl]
583 |\Pr[S] - \Pr[T]|
32609df3
MW
584 & = |(\Pr[S \mid F]\Pr[F] + \Pr[S \mid \bar F]\Pr[\bar F]) -
585 (\Pr[T \mid F]\Pr[F] + \Pr[T \mid \bar F]\Pr[\bar F])| \\
586 & = \Pr[F] \cdot |\Pr[S \mid F] - \Pr[T \mid F]| \\
a6e375a6
MW
587 & \le \Pr[F]
588 \end{eqnarray*}
589 and we're done!
590\end{proof}
32609df3 591\fi
a6e375a6
MW
592
593
32609df3 594\prelimsec{The random oracle model}
a6e375a6
MW
595\label{sec:ro}
596
32609df3 597\ifshort\else
a6e375a6 598In particular, most of our results will make use of the \emph{random oracle}
081da1b1
MW
599model \cite{bellare-rogaway-1993:random-oracles}, in which all the
600participants, including the adversary, have access to a number of `random
601oracles'. A random oracle with domain $D$ and range $R$ is an oracle which
602computes a function chosen uniformly at random from the set of all such
603functions. (In the original paper
604\cite{bellare-rogaway-1993:random-oracles}, random oracles are considered
605having domain $\Bin^*$ and range $\Bin^\omega$; we use finite random oracles
606here, because they're easier to work with.)
a6e375a6
MW
607
608Given a protocol proven secure in the random oracle model, we can instantiate
609each random oracle by a supposedly-secure hash function and be fairly
610confident that either our protocol will be similarly secure, or one of the
611hash functions we chose has some unfortunate property.
612
613Proofs in the random oracle must be interpreted carefully. For example,
081da1b1
MW
614Canetti, Goldreich and Halevi \cite{canetti-2004:rand-oracle-revisit} show
615that there are schemes which can be proven secure in the random oracle model
616but provably have no secure instantiation in the standard model.
32609df3 617\fi
a6e375a6 618
081da1b1
MW
619The random oracle model \ifshort\cite{bellare-rogaway-1993:random-oracles}
620\fi is useful for constructing reductions and simulators for two main
621reasons.
a6e375a6
MW
622\begin{enumerate}
623\item One can use the transcript of an adversary's queries to random oracles
624 in order to extract knowledge from it.
625\item One can `program' a random oracle so as to avoid being bound by prior
626 `commitments', or to guide an adversary towards solving a selected instance
627 of some problem.
628\end{enumerate}
629Our proofs only make use of the first feature. This becomes particularly
630important when we consider issues of zero-knowledge and deniability in a
631concurrent setting, because we want to be able to claim that we retain these
632features when the random oracle is instantiated using a cryptographic hash
633function, and hash functions definitely aren't `programmable' in this way!
634The former property seems rather more defensible -- one would indeed hope
635that the only sensible way of working out (anything about) the hash of a
636particular string is to actually compute the hash function, and the random
637oracle model is, we hope, just giving us a `hook' into this process.
638
32609df3 639\ifshort\else
a6e375a6
MW
640(Our protocols can be modified to make use of bilinear pairings so as to
641provide identity-based identification and key-exchange, using the techniques
081da1b1
MW
642of \cite{boneh-franklin-2003:ibe-weil-pairing}. Proving the security of the
643modifications we discuss would involve `programming' random oracles, but this
644doesn't affect the zero-knowledge or deniability of the resulting protocols.)
32609df3 645\fi
a6e375a6
MW
646
647
32609df3
MW
648\ifshort\else
649\prelimsec{Notation for algorithms}
a6e375a6
MW
650
651We shall have occasion to describe algorithms by means of a pseudocode. Our
652choice of pseudocode is unlikely to be particularly controversial. We let $x
653\gets y$ denote the action of setting $x$ to the value $y$; similarly, $x
654\getsr Y$ denotes the action of sampling $x$ from the set $Y$ uniformly at
655random.
656
657The expression $a \gets A^{O(\cdot, x)}(y)$ means `assign to $a$ the value
658output by algorithm $A$ on input $y$, and with oracle access to the algorithm
659which, given input $z$, computes $O(z, x)$'.
660
661We make use of conditional (\IF-\ELSE) and looping (\FOR-\DO and \WHILE-\DO)
662constructions; in order to reduce the amount of space taken up, the bodies of
663such constructions are shown by indentation only.
664
665We don't declare the types of our variables explicitly, assuming that these
666will be obvious by inspection; also, we don't describe our variables' scopes
667explicitly, leaving the reader to determine these from context.
668
669Finally, the notation $\Pr[\textit{algorithm} : \textit{condition}]$ denotes
670the probability that \textit{condition} is true after running the given
671\textit{algorithm}.
32609df3 672\fi
a6e375a6 673
32609df3 674\prelimsec{Diffie-Hellman problems}
a6e375a6
MW
675\label{sec:dhp}
676
677The security of our protocols is related to the hardness of the
678computational, decisional, and gap Diffie-Hellman problems in the group $G$.
679We define these problems and what it means for them to be `hard' here.
680
681The \emph{computational} Diffie-Hellman problem (CDH) is as follows: given
682two group elements $X = x P$ and $Y = y P$, find $Z = x y P$.
32609df3 683\ifshort\else
a6e375a6
MW
684\begin{definition}[The computational Diffie-Hellman problem]
685 Let $(G, +)$ be a cyclic group generated by $P$. For any adversary $A$, we
686 say that $A$'s \emph{success probability} at solving the computational
687 Diffie-Hellman problem in $G$ is
688 \[ \Succ{cdh}{G}(A) =
32609df3 689 \Pr[ x \getsr \Z/\#G\Z; y \getsr \Z/\#G\Z : A(x P, y P) = x y P ]
a6e375a6
MW
690 \]
691 where the probability is taken over the random choices of $x$ and $y$ and
692 any random decisions made by $A$. We say that the \emph{CDH insecurity
693 function} of $G$ is
694 \[ \InSec{cdh}(G; t) = \max_A \Succ{cdh}{G}(A) \]
695 where the maximum is taken over adversaries which complete in time $t$.
696\end{definition}
697Certainly, if one can compute discrete logarithms in the group $G$ (i.e.,
698given $x P$, find $x$), then one can solve the computational Diffie-Hellman
081da1b1
MW
699problem. The converse is not clear, though. Shoup
700\cite{shoup-1997:dh-lower-bounds} gives us some confidence in the difficulty
701of the problem by showing that a \emph{generic} adversary -- i.e., one which
702makes no use of the specific structure of a group -- has success probability
703no greater than $q^2/\#G$.
a6e375a6
MW
704
705This isn't quite sufficient for our purposes. Our proofs will be able to
706come up with (possibly) a large number of guesses for the correct answer, and
707at most one of them will be correct. Unfortunately, working out which one is
708right seems, in general, to be difficult. This is the \emph{decision}
081da1b1
MW
709Diffie-Hellman problem (DDH), which \cite{shoup-1997:dh-lower-bounds} shows,
710in the generic group model, is about as hard as CDH. (See
711\cite{boneh-1998:ddh} for a survey of the decision Diffie-Hellman problem.)
32609df3 712\par\fi
a6e375a6
MW
713Our reference problem will be a `multiple-guess computational Diffie-Hellman
714problem' (MCDH), which is captured by a game as follows. An adversary is
715given a pair of group elements $(x P, y P)$, and an oracle $V(\cdot)$ which
716accepts group elements as input. The adversary wins the game if it queries
717$V(x y P)$.
a6e375a6
MW
718
719\begin{figure}
720 \begin{program}
721 $\Game{mcdh}{G}(A)$: \+ \\
722 $w \gets 0$; \\
32609df3 723 $x \getsr \Z/\#G\Z$; $y \getsr \Z/\#G\Z$; \\
a6e375a6
MW
724 $A^{V(\cdot)}(x P, y P)$; \\
725 \RETURN $w$;
726 \next
727 Function $V(Z)$: \+ \\
728 \IF $Z = x y P$ \THEN \\ \ind
729 $w \gets 1$; \\
730 \RETURN $1$; \- \\
731 \RETURN $0$;
732 \end{program}
733
734 \caption{The multiple-guess computational Diffie-Hellman problem:
735 $\Game{mcdh}{G}(A)$}
736 \label{fig:mcdh}
737\end{figure}
738
32609df3
MW
739\begin{definition}[The multiple-guess computational Diffie-Hellman problem]
740 \label{def:mcdh}
741 Let $(G, +)$ be a cyclic group generated by $P$. For some adversary $A$,
742 we say that $A$'s \emph{success probability} at solving the multiple-guess
743 computational Diffie-Hellman problem in $G$ is
744 \[ \Succ{mcdh}{G}(A) = \Pr[\Game{mcdh}{G}(A) = 1] \]
745 where $\Game{mcdh}{G}(A)$ is shown in figure~\ref{fig:mcdh}. We say that
746 the \emph{MCDH insecurity function of $G$} is
747 \[ \InSec{mcdh}(G; t, q_V) = \max_A \Succ{mcdh}{G}(A) \]
748 where the maximum is taken over adversaries which complete in time $t$ and
 749 make at most $q_V$ oracle queries.
750\end{definition}
751\ifshort
752We can (loosely) relate the difficulty of MCDH to the difficulty of
753the standard CDH problem, in which the adversary is allowed only a single
754guess.
755\else
a6e375a6
MW
756Note that our MCDH problem is not quite the `gap Diffie-Hellman problem'
 757(GDH). The gap problem measures the intractability of solving CDH even with
758the assistance of an oracle for solving (restricted) decision Diffie-Hellman
759problems in the group. Specifically, the adversary is given $(X, Y) = (x P,
760y P)$ and tries to find $Z = x y P$, as for CDH, but now it has access to an
761oracle $D(R, S)$ which answers $1$ if $S = x R$ and $0$ otherwise.
762
763Clearly MCDH is at least as hard as GDH, since our simple verification oracle
764$V(Z)$ can be simulated with the gap problem's DDH oracle, as $D(Y, Z)$.
765However, we can (loosely) relate the difficulty of MCDH to the difficulty of
766CDH.
32609df3 767\fi
a6e375a6
MW
768\begin{proposition}[Comparison of MCDH and CDH security]
769 For any cyclic group $(G, +)$,
32609df3
MW
770 \[ \InSec{mcdh}(G; t, q_V) \le
771 \ifshort q_V\,\InSec{mcdh}(G; t + O(q_V), 1) \else
772 q_V\,\InSec{cdh}(G; t + O(q_V)) \fi.
773 \]
a6e375a6 774\end{proposition}
32609df3
MW
775\begin{longproof}{The proof of this proposition may be found in the full
776 version of this paper.}
a6e375a6
MW
777 Let $A$ be an adversary attacking the multiple-guess computational
778 Diffie-Hellman problem in $G$, and suppose that it runs in time $t$ and
779 issues $q_V$ queries to its verification oracle.
780
781 We use a sequence of games. Game $\G0$ is the original MCDH attack game.
782 In each game $\G{i}$, we let the event $S_i$ be the probability that the
783 adversary wins the game.
784
785 Game $\G1$ is the same as $\G0$, except that we change the behaviour of the
786 verification oracle. Specifically, we make the oracle always return $0$.
787 We claim that this doesn't affect the adversary's probability of winning,
788 i.e., $\Pr[S_1] = \Pr[S_0]$. To see this, note that if none of the
789 adversary's $V(\cdot)$ queries was correct, then there is no change in the
790 game; conversely, if any query was correct, then the adversary will have
791 won regardless of its subsequent behaviour (which may differ arbitrarily
792 between the two games).
793
794 We are now ready to construct from $A$ an adversary $B$ attacking the
795 standard computational Diffie-Hellman problem.
796 \begin{program}
797 Adversary $B(X, Y)$: \+ \\
798 $n \gets 0$; \\
799 \FOR $i \in \Nupto{q_V}$ \DO $Q_i \gets 0$; \\
 800 $A^{D(\cdot)}(X, Y)$; \\
801 $r \getsr \Nupto{n}$; \\
802 \RETURN $Q_r$;
803 \next
804 Function $D(Z')$: \+ \\
805 $Q_n \gets Z'$; \\
806 $n \gets n + 1$; \\
807 \RETURN $0$;
808 \end{program}
809 Observe that $B$ provides $A$ with an accurate simulation of game $\G1$.
32609df3
MW
810 Moreover, at the end of the algorithm, we have $0 < n \le q_V$, and the
811 values $Q_0$, $Q_1$, \dots, $Q_{n-1}$ are the values of $A$'s oracle
 812 queries. Hence, with probability $\Pr[S_1]$, at least one of the $Q_i$
813 is the correct answer to the CDH problem. Let $\epsilon = \Pr[S_1] =
814 \Pr[S_0]$; we claim that $B$'s probability of success is at least
815 $\epsilon/q_V$. The proposition follows directly from this claim and that,
816 because $A$ was chosen arbitrarily, we can maximize and count resources.
a6e375a6
MW
817
818 We now prove the above claim. For $0 \le i < q_V$, let $W_i$ be the
819 event that $Q_i = x y P$, i.e., that $Q_i$ is the correct response. A
820 simple union bound shows that
821 \[ \sum_{0\le i<j} \Pr[W_i \mid n = j] \ge \epsilon. \]
822 We now perform a calculation:
823 \begin{eqnarray*}[rl]
824 \Succ{cdh}{G}(B)
825 & = \sum_{0\le i<q_V} \Pr[W_i \land r = i] \\
826 & = \sum_{0<j\le q_V} \Pr[n = j]
827 \biggl( \sum_{0\le i<j} \Pr[W_i \land r = i \mid n = j] \biggr) \\
828 & = \sum_{0<j\le q_V} \Pr[n = j]
829 \biggl( \frac{1}{j} \sum_{0\le i<j} \Pr[W_i \mid n = j] \biggr) \\
830 &\ge \sum_{0<j\le q_V} \Pr[n = j] \frac{\epsilon}{j} \\
831 &\ge \frac{\epsilon}{q_V} \sum_{0<j\le q_V} \Pr[n = j] \\
832 & = \frac{\epsilon}{q_V}.
833 \end{eqnarray*}
 834 This completes the proof.
32609df3 835\end{longproof}
a6e375a6 836
32609df3
MW
837\ifshort\else
838\prelimsec{Example groups and encodings}
a6e375a6
MW
839
840For nonnegative integers $0 \le n < 2^\ell$, there is a natural binary
841encoding $N_\ell\colon \Nupto{2^\ell} \to \Bin^\ell$ which we can define
842recursively as follows.
843\[ N_0(0) = \emptystring \qquad
844 N_\ell(n) = \begin{cases}
845 N_{\ell-1}(n) \cat 0 & if $0 \le n < 2^{\ell-1}$ \\
846 N_{\ell-1}(n - 2^{\ell-1}) \cat 1 & if $2^{\ell-1} \le n < 2^\ell$.
847 \end{cases}
848\]
849Given an encoding $a = N_\ell(n)$ we can recover $n$ as
850\[ n = \sum_{0\le i<\ell} a[i] 2^i. \]
851Hence, given some limit $L \le 2^\ell$, we can encode elements of $\Nupto{L}$
852using the functions $(e, d)$:
853\[ e(L, \ell, n) = N_\ell(n) \qquad
854 d(L, \ell, a) = \begin{cases}
 855 N_\ell^{-1}(a) & if $N_\ell^{-1}(a) < L$ \\
856 \bot & otherwise
857 \end{cases}
858\]
859The reader can verify that the functions $e(L, \ell, \cdot)$ and $d(L, \ell,
860\cdot)$ satisfy the requirements of section~\ref{sec:bitenc}.
861
32609df3
MW
862Given some $q$ with $q < 2^{\ell_I}$, then, we can define an encoding
863$(e_\F, d_\F)$ by $e_\F(n) = e(q, \ell_I, n)$ and $d_\F(a) = d(q, \ell_I,
864a)$.
865
866Let $p$ and $q$ be primes, with $q \mid (p - 1)$. Then there is an order-$q$
867subgroup of $\F_p^*$. In practice, an order-$q$ element can be found easily
 868by taking elements $h \in \F_p^*$ at random and computing $g = h^{(p-1)/q}$
869until $g \ne 1$; then $G = \langle g \rangle$ is a group of $q$ elements.
870Assuming that $p$ and $q$ are sufficiently large, the Diffie-Hellman problems
871seem to be difficult in $G$. Some texts recommend additional restrictions on
872$p$, in particular that $(p - 1)/2q$ be either prime or the product of large
873primes. Primes of this form protect against small-subgroup attacks; but our
874protocols are naturally immune to these attacks, so such precautions are
875unnecessary here. Elements of $G$ can be encoded readily, since each element
876$n + p\Z$ of $\F_p = \Z/p\Z$ has an obvious `representative' integer $n$ such
877that $0 \le n < p$, and given $2^{\ell_G} > p$, we can encode $n$ as $e(p,
878\ell_G, n)$, as above.
a6e375a6
MW
879
880Alternatively, let $\F = \gf{p^f}$ be a finite field, and $E$ be an elliptic
881curve defined over $\F$ such that the group $E(\F)$ of $\F$-rational points
882of $E$ has a prime-order cyclic subgroup $G$. Elements of $G$ can be
883represented as pairs of elements of $\F$. If $f = 1$, i.e., $\F = \Z/p\Z$
884then field elements can be encoded as above. If $p = 2$, we can represent
 885the field as $\F_2[x]/(p(x))$ for some irreducible polynomial $p(x) \in \F_2[x]$
886of degree $f$. An element $r \in \F$ can then be represented by a polynomial
887$r(x)$ with degree less than $f$, and coefficients $c_i \in \{0, 1\}$, i.e.,
888\[ r(x) = \sum_{0\le i<f} c_i x^i \]
889and hence we can uniquely encode $r$ as an $f$-bit string $a$ such that $a[i]
890= c_i$.
32609df3 891\fi
a6e375a6
MW
892
893
32609df3 894\prelimsec{Symmetric encryption}
a6e375a6
MW
895\label{sec:sym-enc}
896
897Our key-exchange protocol requires a symmetric encryption scheme. Our
898definition is fairly standard, except that, rather than specifying a
899key-generation algorithm, we assume that key generation simply involves
900selecting a string of a given length uniformly at random.
901\begin{definition}[Symmetric encryption schemes]
902 A \emph{symmetric encryption scheme} $\E = (\kappa, E, D)$ consists of:
903 \begin{itemize}
904 \item an integer $\kappa \ge 0$,
905 \item a randomized \emph{encryption algorithm} $E$ which, on input $K \in
906 \Bin^\kappa$ and $p \in \Bin^*$ outputs some $c \in \Bin^*$, written $c
907 \gets E_K(p)$;
908 \item a \emph{decryption algorithm} $D$ which, on input $K \in \Bin^\kappa$
909 and $c \in \Bin^*$ outputs some $p' \in \Bin^* \cup \{\bot\}$, written
910 $p' \gets D_K(c)$.
911 \end{itemize}
912 Furthermore, a symmetric encryption scheme must be \emph{sound}: that is,
913 if $c \gets E_K(p)$ for some $K \in \Bin^\kappa$ and $p \in \Bin^*$, and
914 $p' \gets D_K(c)$ then $p = p'$.
915\end{definition}
916Our security notion for symmetric encryption is the standard notion of
917left-or-right indistinguishability of ciphertexts under chosen-ciphertext
918attack.
32609df3 919\begin{definition}[IND-CCA]
dff0fad2 920 \label{def:ind-cca}
a6e375a6
MW
921 Let $\E = (\kappa, E, D)$ be a symmetric encryption scheme, and $A$ be an
922 adversary. Let $\id{lr}_b(x_0, x_1) = x_b$ for $b \in \{0, 1\}$. Let
923 \[ P_b =
dff0fad2
MW
924 \Pr[K \getsr \Bin^\kappa;
925 b \gets A^{E_K(\id{lr}_b(\cdot, \cdot)), D_K(\cdot)}() :
926 b = 1]
a6e375a6
MW
927 \]
928 An adversary is \emph{valid} if
929 \begin{itemize}
930 \item for any query to its encryption oracle $E_K(\id{lr}_b(x_0, x_1))$ we
931 have $|x_0| = |x_1|$, and
932 \item no query to the decryption oracle $D_K(\cdot)$ is equal to any reply
933 from an encryption query.
934 \end{itemize}
935 If $A$ is valid, then we define its \emph{advantage} in attacking the
936 security of $\E$ as follows
 937 \[ \Adv{ind-cca}{\E}(A) = P_1 - P_0. \]
938 Further, we define the \emph{IND-CCA insecurity function of $\E$} to be
939 \[ \InSec{ind-cca}(\E; t, q_E, q_D) = \max_A \Adv{ind-cca}{\E}(A) \]
940 where the maximum is taken over all valid adversaries $A$ which run in time
941 $t$, and issue at most $q_E$ encryption and $q_D$ decryption queries.
942\end{definition}
943
944
945\subsection{Simulations}
946\label{sec:sim}
947
948In section~\ref{sec:zk-ident}, we shall prove that our identification
949protocol is zero-knowledge; in section~\ref{sec:denial}, we show that our
950key-exchange protocol is deniable. In both of these proofs, we shall need to
32609df3
MW
951demonstrate \emph{simulatability}.
952
953\ifshort
954
955We consider an adversary~$A$ interacting with a `world'~$W$; we model both as
956probabilistic algorithms. Both $A$ and~$W$ are given a common input~$c$; the
957world is additionally given a private input~$w$; these are chosen by a
958randomized initialization function $I$. The adversary is additionally given
959an auxiliary input~$u$ computed from $w$ by a randomized algorithm~$U$. All
960these algorithms -- the adversary and the world, but also the initialization
961and auxiliary-input algorithms $I$ and~$U$ -- have access to a number of
962random oracles $\mathcal{H} = (H_0, H_1, \ldots, H_{n-1})$. The adversary
963eventually decides to stop interacting, and produces an output~$a$.
964
965A \emph{simulator} for $A$'s interaction with $W$ is an algorithm $S^A$ which
966attempts to produce a similar output distribution, but without interacting
967with $W$. The simulator is given the same inputs $(c, u)$ as we gave
968to~$A$, and $S$ is also allowed to query the random oracles~$\mathcal{H}$.
969
970To measure the effectiveness of a simulator, we consider a distinguisher~$D$
971which is given $(c, u, a)$, and access to $\mathcal{H}$, and returns a bit
972$b$ representing its verdict as to whether the output $a$ was produced by the
973adversary or the simulator.
974
975\else
a6e375a6
MW
976
977\subsubsection{General framework}
978Consider a game in which an adversary~$A$ interacts with some `world'~$W$,
979which we shall represent as a probabilistic algorithm. The world may in fact
980represent a number of honest parties communicating in a concurrent fashion,
981but we can consider them as a single algorithm for our present purposes.
982
983Initially the world and the adversary are both given the same \emph{common
984input}~$c$; in addition, the world is given a \emph{private input}~$w$.
985Both $c$ and~$w$ are computed by an \emph{initialization function}~$I$, which
986is considered to be part of the definition of the game. Finally, the
987adversary decides somehow that it has finished interacting, and outputs a
988value~$a$. All this we notate as
989\[ (w, c) \gets I(); a \gets A^{W(w, c)}(c). \]
990This game is \emph{simulatable} if there is an algorithm~$S$ -- the
991\emph{simulator} -- which can compute the same things as~$A$, but all by
992itself without interacting with the world. That is, we run the simulator on
993the common input~$c$, allowing it to interact in some way with the
994adversary~$A$, and finally giving us an output~$s$.
995\[ (w, c) \gets I(); s \gets S^A(c). \]
996We shall say that the simulator is \emph{effective} if it's difficult to tell
997whether a given string was output by the adversary after interacting with the
998world, or by the simulator running by itself. That is, for any algorithm~$D$
999-- a \emph{distinguisher} -- running in some bounded amount of time, its
1000advantage
1001\begin{spliteqn*}
1002 \Pr[(w, c) \gets I(); a \gets A^{W(w, c)}(c);
1003 b \gets D(c, a) : b = 1] - {} \\
1004 \Pr[(w, c) \gets I(); s \gets S^A(c); b \gets D(c, s) : b = 1]
1005\end{spliteqn*}
1006is small. (Note that we gave the distinguisher the common input as well as
1007the output of the adversary or the simulator.)
1008
1009It's usual to study \emph{transcripts} of interactions in these kinds of
1010settings. We are considering arbitrary adversarial outputs here, so this
1011certainly includes adversaries which output a transcript of their
1012interactions. Indeed, for any adversary~$A$, we could construct an
1013adversary~$A_T$ which performs the same computation, and outputs the same
1014result, but also includes a complete transcript of $A$'s interaction with the
1015world. Therefore we're just providing additional generality.
1016
1017\subsubsection{Random oracles}
1018We shall be considering interactions in which all the parties have access to
1019several random oracles. We could simply say that the random oracles are part
1020of the world~$W$. In the setting described above, only the adversary
1021actually interacts with the world (and therefore would be able to query
1022random oracles). The simulator would be forced to `make up' its own random
1023oracle, and the distinguisher would have to study the distributions of the
1024random-oracle queries and their responses to make up its mind about which it
1025was given.
1026
1027However, this would be a poor model for the real world, since once we
1028instantiate the random oracle with a hash function, we know that everyone
 1029 would actually be able to compute the hash function for themselves. Thus
1030a distinguisher in the real world would be able to tell the difference
1031immediately between a real interaction and the simulated transcript, since
1032the `random oracle' queries recorded in the latter would be wrong!
1033
1034Therefore we decide not to include the random oracles as part of the world,
1035but instead allow all the participants -- adversary, simulator and
1036distinguisher -- access to them. If we denote by~$\mathcal{H} = (H_0, H_1,
1037\ldots, H_{n-1})$ the collection of random oracles under consideration, the
1038expression for the distinguisher's advantage becomes
1039\begin{spliteqn*}
1040 \Pr[(w, c) \gets I(); a \gets A^{W(w, c), \mathcal{H}}(c);
1041 b \gets D^{\mathcal{H}}(c, a) : b = 1] - {} \\
1042 \Pr[(w, c) \gets I(); s \gets S^{A, \mathcal{H}}(c);
1043 b \gets D^{\mathcal{H}}(c, s) : b = 1].
1044\end{spliteqn*}
1045
1046\subsubsection{Auxiliary inputs}
1047If an adversary's output can be effectively simulated, then we can
1048confidently state that the adversary `learnt' very little of substance from
1049its interaction, and certainly very little it can \emph{prove} to anyone
1050else. However, as we have described the setting so far, we fix an adversary
1051before we choose inputs to the world, so our model says little about what an
1052adversary which has already acquired some knowledge might learn beyond that.
1053For example, an adversary might overhear some other conversation between
1054honest parties and be able to use this to its advantage.
1055
1056To this end, we give the adversary an \emph{auxiliary input}~$u$, computed by
1057an algorithm~$U$. We give $U$ both $c$ and $w$, in order to allow the
1058adversary to gain some (possibly partial) knowledge of the secrets of the
1059other parties. We also allow $U$ access to the random oracles~$\mathcal{H}$,
1060because clearly in the `real world' it would be ridiculous to forbid such an
1061algorithm from computing a publicly-known hash function.
1062
1063The simulator and distinguisher are also given the auxiliary input. The
1064simulator is meant to represent the adversary's ability to compute things on
1065its own, without interacting with the world, and since the adversary is given
1066the auxiliary input, the simulator must be too. The distinguisher must be
32609df3
MW
1067given the auxiliary input because otherwise the simulator could just `make
1068up' plausible-looking inputs.
1069
1070\fi
1071
1072\ifshort
1073Because we're interested in a concrete, quantitative analysis, we must
1074constrain the resource usage of the various algorithms described above.
1075Specifically, we shall be interested in
1076\else
a6e375a6
MW
1077
1078\subsubsection{Resource limits}
1079We shall not allow our algorithms to perform completely arbitrary
1080computations and interactions. Instead, we impose limits on the amount of
1081time they are allowed to take, the number of random-oracle queries they make,
1082and so on. Specifically, we are interested in
32609df3 1083\fi
a6e375a6
MW
1084\begin{itemize}
1085\item the time $t_A$ taken by the adversary and $t_D$ taken by the
1086 distinguisher,
1087\item the number of oracle queries $\mathcal{Q}_A = (q_{A,0}, q_{A,1},
1088 \ldots, q_{A,n-1})$ made by the adversary, and $\mathcal{Q}_D$ made by the
1089 distinguisher,
1090\item a number of resource bounds $\mathcal{R}$ on the adversary's
1091 interaction with the world (e.g., number of messages of various kinds sent
1092 and received), and
1093\item a number of bounds $\mathcal{U}$ on the contents of the adversary's
1094 auxiliary input~$u$.
1095\end{itemize}
1096Sometimes we shall not be interested in proving simulatability of adversaries
1097with auxiliary inputs. We write $\mathcal{U} = 0$ to indicate that auxiliary
32609df3
MW
1098input is not allowed.
1099
1100\ifshort\else
a6e375a6
MW
1101
1102\subsubsection{World syntax}
1103It will be worth our while being more precise about what a `world' actually
1104is, syntactically. We define a world to be a single, randomized algorithm
1105taking inputs $(\iota, \sigma, \tau, \mu) \in (\Bin^*)^4$; the algorithm's
1106output is a pair $(\sigma', \rho) \in (\Bin^*)^2$. We show how the
1107adversary's interaction is mapped on to this world algorithm in
1108figure~\ref{fig:sim-world}.
1109\begin{itemize}
1110\item The `input' $\iota$ is the result of the initialization function~$I$.
1111 That is, it is the pair $(w, c)$ of the world's private input and the
1112 common input.
1113\item The `state' $\sigma$ is empty on the world's first invocation; on each
1114 subsequent call, the value of the world's output $\sigma'$ is passed back.
1115 In this way, the world can maintain state.
32609df3 1116\item The `type' $\tau$ is a token giving the type of invocation this is.
a6e375a6
MW
1117\item The `message' $\mu$ is any other information passed in; its form will
1118 typically depend on the type~$\tau$ of the invocation.
1119\item The `new state' $\sigma'$ is the value of $\sigma$ to pass to the next
1120 invocation of the world.
 1121\item The `reply' $\rho$ is the actual output of the invocation.
1122\end{itemize}
1123There are two special invocation types. The adversary is \emph{forbidden}
1124from making special invocations.
1125\begin{itemize}
1126\item The special invocation type $\cookie{init}$ is used to allow the world to
1127 prepare an initial state. The world is invoked as
1128 \[ W^{\mathcal{H}}(\iota, \emptystring, \cookie{init}, \emptystring) \]
1129 and should output an initial state $\sigma'$. The world's reply $\rho$ is
1130 ignored. (Recall that $\emptystring$ represents the empty string.)
1131\item The special invocation type $\cookie{random}$ is used to inform the
1132 world that the adversary has issued a random oracle query. The world is
1133 invoked as
1134 \[ W^{\mathcal{H}}(\iota, \sigma, \cookie{random}, (i, x, h)) \]
1135 to indicate that the adversary has queried its random oracle $H_i(\cdot)$
1136 on the input $x$, giving output~$h$. The world may output an updated state
1137 $\sigma'$; its reply $\rho$ is ignored.
1138\end{itemize}
1139The latter special query is a technical device used to allow the `fake-world'
1140simulators we define below to be aware of the adversary's random oracle
1141queries without being able to `program' the random oracle. Including it here
1142does little harm, and simplifies the overall exposition.
1143
1144\begin{figure}
1145 \begin{program}
1146 Interaction $A^{W(w, c), \mathcal{H}}(c, u)$: \+ \\
1147 $(\sigma, \rho) \gets
1148 W((w, c), \emptystring, \cookie{init}, \emptystring)$; \\
1149 $a \gets A^{\id{world}(\cdot, \cdot),
1150 \id{random}(\cdot, \cdot)}(c, u)$; \\
1151 \RETURN $a$;
1152 \newline
1153 Function $\id{world}(\tau, \mu)$: \+ \\
1154 \IF $\tau \in \{\cookie{init}, \cookie{random}\}$ \THEN
1155 \RETURN $\bot$; \\
1156 $(\sigma, \rho) \gets W((w, c), \sigma, \tau, \mu)$; \\
1157 \RETURN $\rho$; \-
1158 \next
1159 Function $\id{random}(i, x)$: \+ \\
1160 $h \gets H_i(x)$; \\
1161 $(\sigma, \rho) \gets
1162 W((w, c), \sigma, \cookie{random}, (i, x, h))$; \\
1163 \RETURN $h$;
1164 \end{program}
1165
1166 \caption{Interacting with a world: Interaction $A^{W, \mathcal{H}}$}
1167 \label{fig:sim-world}
1168\end{figure}
1169
1170\subsubsection{Definitions}
1171We are now ready to begin making definitions.
32609df3
MW
1172\fi
1173
a6e375a6
MW
1174\begin{definition}[Simulation security]
1175 \label{def:sim}
1176 Consider the game described above, with the initialization function~$I$,
1177 and the world~$W$: let $A$ be an adversary, and let~$U$ be an
1178 auxiliary-input function; let $S$ be a simulator, and let $D$ be a
1179 distinguisher. We define $D$'s \emph{advantage against $S$'s simulation of
1180 $A$'s interaction with~$W$ with auxiliary inputs provided by~$U$} to be
1181 \[ \Adv{sim}{W, I, S}(A, U, D) =
1182 \Pr[\Game{real}{W, I, S}(A, U, D) = 1] -
1183 \Pr[\Game{sim}{W, I, S}(A, U, D)= 1]
1184 \]
1185 where the games are as shown in figure~\ref{fig:sim}.
1186 Furthermore, we define the \emph{simulator's insecurity function} to be
1187 \[ \InSec{sim}(W, I, S;
1188 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U}) =
1189 \max_{D, A, U} \Adv{sim}{W, I, S}(A, U, D)
1190 \]
1191 where the maximum is taken over all distinguishers~$D$ running in
1192 time~$t_D$ and making at most $\mathcal{Q}_D$ random-oracle queries, and
1193 all adversaries~$A$ running in time~$t_A$, making at most $\mathcal{Q}_A$
1194 random-oracle queries, not exceeding the other stated resource
1195 bounds~$\mathcal{R}$ on its interaction with~$W$, and auxiliary-input
1196 functions producing output not exceeding the stated bounds~$\mathcal{U}$.
1197\end{definition}
1198\begin{remark}
1199 The usual definitions of zero-knowledge, for example, require the simulator
1200 to work for all choices of inputs (common, private and auxiliary), rather
1201 than for random choices. Our definition therefore looks weaker. Our proof
1202 of zero-knowledge actually carries through to the traditional
1203 stronger-looking definition. Critically, however, the standard
1204 universal quantification over inputs fails to capture deniability in the
1205 random oracle model, since the inputs can't therefore depend on the random
1206 oracle. Our formulation therefore actually gives \emph{stronger}
32609df3 1207 deniability than the usual one.
a6e375a6
MW
1208\end{remark}
1209
1210\begin{figure}
1211 \begin{program}
1212 $\Game{real}{W, I, S}(A, U, D)$: \+ \\
1213 $(w, c) \gets I()$; \\
1214 $u \gets U^{\mathcal{H}}(w, c)$; \\
1215 $a \gets A^{W(w, c), \mathcal{H}}(c, u)$; \\
1216 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1217 \RETURN $b$;
1218 \next
1219 $\Game{sim}{W, I, S}(A, U, D)$: \+ \\
1220 $(w, c) \gets I()$; \\
1221 $u \gets U^{\mathcal{H}}(w, c)$; \\
1222 $s \gets S^{A, \mathcal{H}}(c, u)$; \\
1223 $b \gets D^{\mathcal{H}}(c, u, s)$; \\
1224 \RETURN $b$;
1225 \end{program}
1226
1227 \caption{Games for simulation: $\Game{real}{W, I}$ and $\Game{sim}{W, I}$}
1228 \label{fig:sim}
1229\end{figure}
1230
32609df3 1231\ifshort\else
a6e375a6
MW
1232\subsubsection{Fake-world simulators}
1233The simulators we shall be considering in the present paper are of a specific
1234type which we call `fake-world simulators'. They work by running the
1235adversary in a fake `cardboard cut-out' world, and attempting to extract
1236enough information from the adversary's previous interactions and random
1237oracle queries to maintain a convincing illusion.
1238
1239That is, the behaviour of a fake-world simulator~$S$ is simply to allow the
1240adversary to interact with a `fake world'~$W'$, which was not given the world
1241private input. That is, there is some world $W'$ such that
1242\[ S^{A, \mathcal{H}}(c, u) \equiv A^{W'(u, c), \mathcal{H}}(c, u) \]
1243Fake-world simulators are convenient because they allow us to remove from
1244consideration the distinguisher~$D$ as the following definition shows.
1245\begin{definition}[Fake-world simulation security]
1246 \label{def:fakesim}
1247 Let $I$, $W$ and $U$ be as in definition~\ref{def:sim}. Let $A$ be an
1248 adversary which outputs a single bit. Let $S$ be a fake-world simulator.
1249 We define $A$'s \emph{advantage against $S$'s fake-world simulation of $W$
1250 with auxiliary inputs provided by~$U$} to be
1251 \begin{spliteqn*}
1252 \Adv{fw}{W, I, S}(A, U) =
1253 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1254 b \gets A^{W(w, c), \mathcal{H}}(c, u) : b = 1] - {} \\
1255 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1256 b \gets S^{A, \mathcal{H}}(c, u) : b = 1]
1257 \end{spliteqn*}
1258 Furthermore, we define the \emph{simulator's insecurity function} to be
1259 \[ \InSec{fw}(W, I, S;
 1260 t, \mathcal{Q}, \mathcal{R}, \mathcal{U}) =
1261 \max_{A, U} \Adv{fw}{W, I, S}(A, U)
1262 \]
1263 where the maximum is taken over all adversaries~$A$ running in time~$t$,
1264 making at most $\mathcal{Q}$ random-oracle queries, not exceeding the other
1265 stated resource bounds~$\mathcal{R}$ on its interaction with~$W$, and
1266 auxiliary-input functions producing output not exceeding the stated
1267 bounds~$\mathcal{U}$.
1268\end{definition}
1269It remains for us to demonstrate that this is a valid way of analysing
1270simulators; the following simple proposition shows that this is indeed the
1271case.
1272\begin{proposition}[Fake-world simulation]
1273 \label{prop:fakesim}
1274 Let $I$ be an initialization function and let $W$ be a world. Then, for
1275 any fake-world simulator~$S$,
1276 \[ \InSec{sim}(W, I, S; t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A,
1277 \mathcal{R}, \mathcal{U}) \le
1278 \InSec{fw}(W, I, S; t_A + t_D, \mathcal{Q}_D + \mathcal{Q}_A,
1279 \mathcal{R}, \mathcal{U})
1280 \]
1281 (where addition of query bounds $\mathcal{Q}$ is done elementwise).
1282\end{proposition}
1283\begin{proof}
1284 Let $W$ and $I$ as in the proposition statement be given; also let a
1285 distinguisher~$D$ running in time~$t_D$ and making $\mathcal{Q}_D$
1286 random-oracle queries, an adversary~$A$ running in time~$t_A$ and making
1287 $\mathcal{Q}_A$ random-oracle queries and interacting with its world within
1288 the stated bounds~$\mathcal{R}$, an auxiliary-input function~$U$ satisfying
1289 the constraints~$\mathcal{U}$ on its output, and a fake-world simulator~$S$
1290 all be given.
1291
1292 We construct an adversary~$B$ outputting a single bit as follows
1293 \begin{program}
1294 Adversary $B^{W, \mathcal{H}}(c, u)$: \+ \\
1295 $a \gets A^{W, \mathcal{H}}(c, u)$; \\
1296 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1297 \RETURN $b$;
1298 \end{program}
1299 A glance at definitions \ref{def:sim} and~\ref{def:fakesim} and the
1300 resources used by $B$ shows that
1301 \[ \Adv{sim}{W, I, S}(A, U) = \Adv{fw}{W, I, S}(B, U)
1302 \le \InSec{fw}(W, I, S; t_D + t_A, \mathcal{Q}_D + \mathcal{Q}_A,
1303 \mathcal{R}, \mathcal{U})
1304 \]
1305 as required.
1306\end{proof}
32609df3 1307\fi
a6e375a6
MW
1308
1309%%%--------------------------------------------------------------------------
1310
1311\section{A zero-knowledge identification scheme}
1312\label{sec:zk-ident}
1313
1314
1315\subsection{Description}
1316
1317Here we present a simple zero-knowledge identification scheme. Fix some
32609df3
MW
1318group $G$ with prime order $q = \#G$. Suppose Alice chooses a private key $x
1319\inr \gf{q}$, and publishes the corresponding public key $X = x P$. Let
1320$H_I\colon G^2 \to \Bin^{\ell_I}$ be a secure hash function. Here's a simple
1321protocol which lets her prove her identity to Bob.
a6e375a6 1322\begin{enumerate}
32609df3
MW
1323\item Bob selects a random $r \inr \gf{q}$, and computes $R = r P$, $Y = r X$,
1324 and $c = r \xor H_I(R, Y)$. He sends the pair $(R, c)$ to Alice as his
1325 \emph{challenge}.
a6e375a6
MW
1326\item Alice receives $(R, c)$. She computes $Y' = x R$ and $r' = c \xor
1327 H_I(R, Y')$, and checks that $R = r' P$. If so, she sends $Y'$ as her
1328 \emph{response}; otherwise she sends $\bot$.
1329\item Bob receives $Y'$ from Alice. He checks that $Y' = Y$. If so, he
1330 accepts that he's talking to Alice; otherwise he becomes suspicious.
1331\end{enumerate}
1332We name this the Wrestlers Identification Protocol in~$G$, $\Wident^G$ (we
1333drop the superscript to refer to the protocol in general, or when no
1334ambiguity is likely to result). A summary is shown in
1335figure~\ref{fig:wident}.
1336
1337\begin{figure}
1338 \begin{description}
32609df3 1339 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6 1340 $H_I(\cdot, \cdot)$ is a secure hash.
32609df3 1341 \item[Private key] $x \inr \gf{q}$.
a6e375a6 1342 \item[Public key] $X = x P$.
32609df3 1343 \item[Challenge] $(R, c)$ where $r \inr \gf{q}$, $R = r P$, $c = r \xor
a6e375a6
MW
1344 H_I(R, r X)$.
1345 \item[Response] $x R = r X$ if $R = (c \xor H_I(R, x R)) P$; otherwise
1346 $\bot$.
1347 \end{description}
1348
1349 \caption{Summary of the Wrestlers Identification Protocol, $\Wident$}
1350 \label{fig:wident}
1351\end{figure}
1352
1353
1354\subsection{Security}
1355
1356In order to evaluate the security of our protocol, we present a formal
1357description of the algorithms involved in figure~\ref{fig:wident}. Here, the
1358hash function $H_I(\cdot, \cdot)$ is modelled as a random oracle.
1359
1360\begin{figure}
1361 \begin{program}
1362 Function $\id{setup}()$: \+ \\
32609df3 1363 $x \getsr \gf{q}$; \\
a6e375a6
MW
1364 $X \gets x P$; \\
1365 \RETURN $(x, X)$;
32609df3 1366 \ifshort\newline\else\next\fi
a6e375a6 1367 Function $\id{challenge}^{H_I(\cdot, \cdot)}(R, c, X)$: \+ \\
32609df3 1368 $r \getsr \gf{q}$; \\
a6e375a6
MW
1369 $R \gets r P$; $Y \gets r X$; \\
1370 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1371 \RETURN $(Y, R, c)$; \- \\[\medskipamount]
1372 Function $\id{verify}(Y, Y')$: \+ \\
1373 \IF $Y' = Y$ \THEN \RETURN $1$; \\
1374 \RETURN $0$;
1375 \next
1376 Function $\id{response}^{H_I(\cdot, \cdot)}(R, c, x)$: \+ \\
1377 $Y' \gets x R$; \\
1378 $h \gets H_I(R, Y')$; $r' \gets c \xor h$; \\
1379 \IF $R \ne r' P$ \THEN \RETURN $\bot$; \\
1380 \RETURN $Y'$;
1381 \end{program}
1382
1383 \caption{Functions implementing $\Wident$ in the random oracle model}
1384 \label{fig:wident-ro}
1385\end{figure}
1386
1387\subsubsection{Completeness}
1388Suppose that Bob really is talking to Alice. Note that $Y' = x R = x (r P) =
1389 r (x P) = r X = Y$. Hence $r' = c \xor H_I(R, Y') = c \xor H_I(R, Y) = r$,
1390so $r' P = r P = R$, so Alice returns $Y' = Y$ to Bob. Therefore $\Wident$
1391is \emph{complete}: if Bob really is communicating with Alice then he
1392accepts.
1393
1394\subsubsection{Soundness}
1395We next show that impersonating Alice is difficult. The natural way to prove
1396this would be to give an adversary a challenge and prove that its probability
1397of giving a correct response is very small. However, we prove a stronger
1398result: we show that if the adversary can respond correctly to any of a large
1399collection of challenges then it can solve the MCDH problem.
1400
1401Consider the game $\Game{imp}{\Wident}$ shown in
1402figure~\ref{fig:wident-sound}. An adversary's probability of successfully
1403impersonating Alice in our protocol, by correctly responding to any one of
1404$n$ challenges, is exactly its probability of winning the game (i.e., causing
1405it to return $1$).
1406
1407\begin{figure}
1408 \begin{program}
1409 $\Game{imp-$n$}{\Wident}(A)$: \+ \\
1410 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$; \\
1411 $(x, X) \gets \id{setup}()$; \\
1412 $\id{win} \gets 0$; \\
1413 $\Xid{R}{map} \gets \emptyset$; \\
1414 $\mathbf{c} \gets \id{challenges}(n)$; \\
1415 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1416 (X, \mathbf{c})$; \\
1417 \RETURN $\id{win}$;
1418 \newline
1419 Function $\id{challenges}(n)$: \+ \\
1420 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1421 $(Y, R, c) \gets \id{challenge}^{H_I(\cdot, \cdot)}$; \\
1422 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto Y \}$; \\
1423 $\mathbf{c}[i] \gets (R, c)$; \- \\
1424 \RETURN $\mathbf{c}$; \next
1425 Function $\id{check}(R', Y')$: \\
1426 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1427 $Y \gets \Xid{R}{map}(R')$; \\
1428 \IF $\id{verify}(Y, Y')$ \THEN \\ \ind
1429 $\id{win} \gets 1$; \\
1430 \RETURN $1$; \- \\
1431 \RETURN $0$;
1432 \end{program}
1433
1434 \caption{Soundness of $\Wident$: $\Game{imp-$n$}{\Wident}(A)$}
1435 \label{fig:wident-sound}
1436\end{figure}
1437
1438\begin{theorem}[Soundness of $\Wident$]
1439 \label{thm:wident-sound}
1440 Let $A$ be any adversary running in time $t$ and making $q_I$ queries to
1441 its random oracle, and $q_V$ queries to its verification oracle. Let $G$
1442 be a cyclic group. Then
1443 \[ \Pr[\Game{imp-$n$}{\Wident^G}(A) = 1] \le
1444 \InSec{mcdh}(G; t', q_I + q_V)
1445 \]
1446 where $t' = t + O(q_I) + O(q_V)$.
1447\end{theorem}
1448\begin{remark}
1449 Note that the security bound here is \emph{independent} of the value of
1450 $n$.
1451\end{remark}
32609df3
MW
1452\begin{longproof}{The proof of this theorem can be found in the full version
1453 of the paper.}
a6e375a6
MW
1454 We prove this by defining a sequence of games $\G{i}$. The first will be
1455 the same as the attack game $\Game{imp-$n$}{\Wident}(A)$ and the others
1456 will differ from it in minor ways. In each game $\G{i}$, let $S_i$ be the
1457 event that $A$ wins the game -- i.e., that it successfully impersonates the
1458 holder of the private key~$x$.
32609df3 1459
a6e375a6
MW
1460 Let game $\G0$ be the attack game $\Game{imp}{\Wident}(A)$, and let $(R',
1461 Y')$ be the output of $A$ in the game.
1462
1463 We define a new game $\G1$ which is the same as $\G0$, except that we query
1464 the random oracle $H_I$ at $(R', Y')$ whenever the adversary queries
1465 $\id{check}(R', Y')$. (We don't charge the adversary for this.) This
1466 obviously doesn't affect the adversary's probability of winning, so
1467 $\Pr[S_1] = \Pr[S_0]$.
1468
1469 Game $\G2$ is like $\G1$, except that we change the way we generate
1470 challenges and check their responses. Specifically, we define new functions
1471 $\id{challenges}_2$ and $\id{check}_2$, as shown in
1472 figure~\ref{fig:wident-sound-2}.
1473
1474 \begin{figure}
1475 \begin{program}
1476 Function $\id{challenges}_2(n)$: \+ \\
1477 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1478 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1479 $r \getsr I$; $R \gets r R^*$; $Y \gets r Y^*$; \\
1480 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1481 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1482 $\mathbf{c}[i] \gets (R, c)$; \- \\
1483 \RETURN $\mathbf{c}$;
1484 \next
1485 Function $\id{check}_2(R', Y')$: \+ \\
1486 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1487 $r \gets \Xid{R}{map}(R')$; \\
1488 \IF $\id{verify}(Y^*, Y'/r)$ \THEN \\ \ind
1489 $\id{win} \gets 1$; \\
1490 \RETURN $1$; \- \\
1491 \RETURN $0$;
1492 \end{program}
1493
1494 \caption{Soundness of $\Wident$: $\id{challenges}_2$ and $\id{check}_2$}
1495 \label{fig:wident-sound-2}
1496 \end{figure}
1497
1498 While we're generating and checking challenges in a more complicated way
1499 here, we're not actually changing the distribution of the challenges, or
1500 changing the winning condition. Hence $\Pr[S_2] = \Pr[S_1]$.
1501
1502 Now we change the rules again. Let $\G3$ be the same as $\G2$ except that
1503 we change the winning condition. Instead, we say that the adversary wins
1504 if any of the queries to its random oracle $H_I(R', Y')$ would be a correct
1505 response -- i.e., $\id{check}_2(R', Y')$ would return $1$. Since we query
1506 the oracle on $(R', Y')$ on its behalf at the end of the game, no adversary
1507 can do worse in this game than it does in $\G2$, so we have $\Pr[S_3] \ge
1508 \Pr[S_2]$. (It's not hard to see that this only helps quite stupid
1509 adversaries. We can transform any adversary into one for which equality
1510 holds here.)
1511
1512 Finally, let $\G4$ be the same as $\G3$ except that we change the way we
1513 generate challenges again: rather than computing $h$ and setting $c \gets h
1514 \xor r$, we just choose $c$ at random. Specifically, we use the new
1515 function, $\id{challenges}_4$, shown in figure~\ref{fig:wident-sound-4}.
1516
1517 \begin{figure}
1518 \begin{program}
1519 Function $\id{challenges}_4(n)$: \+ \\
1520 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1521 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1522 $r \getsr I$; $R \gets r R^*$; \\
1523 $c \getsr \Bin^{\ell_I}$; \\
1524 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1525 $\mathbf{c}[i] \gets (R, c)$; \- \\
1526 \RETURN $\mathbf{c}$;
1527 \end{program}
1528
1529 \caption{Soundness of $\Wident$: $\id{challenges}_4$}
1530 \label{fig:wident-sound-4}
1531 \end{figure}
1532
1533 Since $H_I(\cdot, \cdot)$ is a random function, the adversary can only
1534 distinguish $\G4$ from $\G3$ if it queries its random oracle at some $(R, r
1535 Y^*)$. But if it does this, then by the rule introduced in $\G3$ it has
1536 already won. Therefore we must have $\Pr[S_4] = \Pr[S_3]$.
1537
1538 Our $\id{challenges}_4$ function is interesting, since it doesn't actually
1539 make use of $r^*$ or $Y^*$ when generating its challenges. This gives us
1540 the clue we need to bound $\Pr[S_4]$: we can use adversary $A$ to solve the
1541 multiple-guess Diffie-Hellman problem in $G$ by simulating the game $\G4$.
1542 Specifically, we define the adversary $B$ as shown in
1543 figure~\ref{fig:wident-sound-cdh}. That is, for each query $A$ makes to
1544 its random oracle at a new pair $(R', Y')$, we see whether this gives us
1545 the answer we're looking for. We have $\Pr[S_0] \le \Pr[S_4] =
1546 \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_I + q_V)$ as required.
1547
1548 \begin{figure}
1549 \begin{program}
1550 Adversary $B^{V(\cdot)}(X, R^*)$: \+ \\
1551 $F \gets \emptyset$; $\Xid{R}{map} \gets \emptyset$; \\
1552 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1553 $r \getsr I$; $R \gets r R^*$; $c \getsr \Bin^{\ell_I}$; \\
1554 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1555 $\mathbf{c}[i] \gets (R, c)$; \- \\
1556 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1557 (X, \mathbf{c})$; \\
1558 \IF $Y' \neq \bot$ \THEN $H_I(R', Y')$;
1559 \next
1560 Oracle $H_I(R', Y')$: \+ \\
1561 \IF $(R', Y') \in \dom F$ \THEN \\ \quad
1562 $h \gets F(R', Y')$; \\
1563 \ELSE \\ \ind
1564 $\id{check}(R', Y')$; \\
1565 $h \getsr \Bin^{\ell_I}$; \\
1566 $F \gets F \cup \{ (R', Y') \mapsto h \}$; \- \\
1567 \RETURN $h$;
1568 \- \\[\medskipamount]
1569 Oracle $\id{check}(R', Y')$: \+ \\
1570 \IF $R' \in \dom \Xid{R}{map}$ \THEN
1571 $V(Y'/\Xid{R}{map}(R'))$;
1572 \end{program}
1573
1574 \caption{Soundness of $\Wident$: reduction from MCDH}
1575 \label{fig:wident-sound-cdh}
1576 \end{figure}
32609df3 1577\end{longproof}
a6e375a6
MW
1578
1579\subsubsection{Zero-knowledge}
1580Finally we must prove that $\Wident$ is (statistical) zero-knowledge -- i.e.,
1581that, except with very small probability, Bob learns nothing of use to him
1582except that he's interacting with Alice. To do this, we show that, for any
1583algorithm $B$ which Bob might use to produce his challenge to the real Alice,
1584there exists a simulator $S$ which produces transcripts distributed very
1585similarly to transcripts of real conversations between $B$ and Alice, the
1586difference being that $S$ doesn't know Alice's key. We shall show that the
1587statistical difference between the two distributions is $2^{-\ell_I}$.
1588
1589The intuition here is that Bob ought to know what answer Alice is going to
1590give him when he constructs his challenge. This is certainly true if he's
1591honest: his challenge is $R = r P$ for some $r$ he knows, so he won't learn
1592anything useful when Alice responds with $x R = r X$. However, if Bob sends
1593a challenge $R$ when he doesn't know the corresponding $r$, he learns
1594something potentially useful. The accompanying check value $c = r \xor
1595H_I(R, r X)$ keeps him honest.
1596
1597To show this, we present an \emph{extractor} which, given any challenge $(R,
1598c)$ Bob can construct, and his history of random-oracle queries, either
1599returns a pair $(r, Y)$ such that $R = r P$ and $Y = r X$, or $\bot$;
1600moreover, the probability that Alice returns a response $Y' \ne \bot$ given
1601 the challenge $(R, c)$ is $2^{-\ell_I}$. We can, of course, readily convert
1602this extractor into a simulator to prove the zero-knowledge property of our
1603protocol.
1604
1605We shall actually consider a slightly more complex setting. We grant Bob
1606access to an oracle which produces random, correctly-formed challenges. We
1607require this to model the legitimate challenges of other parties when we
32609df3 1608analyse the security of our key exchange protocol.
a6e375a6
MW
1609
1610\begin{definition}[Discrete-log extractors]
1611 Let $T$, $B$ be randomized algorithms. Define the game
1612 $\Game{dl-ext}{G}(T, B)$ as shown in figure~\ref{fig:dlext}. The
1613 \emph{success probability of $T$ as a discrete-log extractor against $B$}
1614 is defined as
1615 \[ \Succ{dl-ext}{G}(T, B) = \Pr[\Game{dl-ext}{G}(T, B) = 1]. \]
1616\end{definition}
a6e375a6
MW
1617
1618\begin{figure}
1619 \begin{program}
1620 $\Game{dl-ext}{G}(T, B):$ \+ \\
1621 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$;
1622 $Q_H \gets \emptyset$; $Q_C \gets \emptyset$; \\
1623 $(x, X) \gets \id{setup}()$; \\
1624 $(R, c) \gets B^{\Xid{H_I}{trap}(\cdot, \cdot), C()}(x, X)$; \\
1625 $(r, Y) \gets T(R, c, Q_H)$; \\
1626 $Y' \gets x R$; $h' \gets H_I(R, Y')$; $r' \gets c \xor h'$; \\
1627 \IF $r \ne \bot$ \THEN \\ \quad
1628 \IF $Y = \bot \lor R \ne r P \lor Y \ne Y'$ \THEN \RETURN $0$; \\
32609df3 1629 \IF $R = r' P$ \THEN $(r^*, Y^*) \gets (r', Y')$; \\
a6e375a6
MW
1630 \ELSE $(r^*, Y^*) \gets (\bot, \bot)$; \\
1631 \IF $(R, c) \in Q_C$ \THEN \RETURN $1$; \\
1632 \IF $(r, Y) = (r', Y')$ \THEN \RETURN $1$; \\
1633 \RETURN $0$;
1634 \next
1635 Oracle $\Xid{H_I}{trap}(R', Y')$: \+ \\
1636 $h \gets H_I(R', Y')$; \\
1637 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1638 \RETURN $h$; \- \\[\medskipamount]
1639 Oracle $C()$: \+ \\
32609df3 1640 $r \getsr \gf{q}$; \\
a6e375a6
MW
1641 $R \gets r P$; $c \gets r \xor H_I(R, r X)$; \\
1642 $Q_C \gets Q_C \cup \{(R, c)\}$; \\
32609df3 1643 \RETURN $(R, c)$
a6e375a6
MW
1644 \end{program}
1645
1646 \caption{Discrete log extraction game: $\Game{dl-ext}{G}(T, B)$}
1647 \label{fig:dlext}
1648\end{figure}
1649
32609df3
MW
1650Let's unpack this definition slightly. We make the following demands of our
1651extractor.
1652\begin{itemize}
1653\item It is given a bare `transcript' of $B$'s execution. In particular, it
1654 is given only its output and a list of $B$'s random-oracle queries in no
1655 particular order.
1656\item While the extractor is not given the private key~$x$, the adversary~$B$
1657 is given the private key.
1658\item We require that, if the extractor produces values $r, Y \ne \bot$ then
1659 $r$ and $Y$ are \emph{correct}; i.e., that $R = r P$ and $Y = x R$.
1660\item The extractor is explicitly \emph{not} given the outputs of the
1661 challenge-generation oracle $C()$, nor of the random-oracle queries issued
1662 by $C()$. However, we allow the extractor to fail (i.e., return $\bot$) if
1663 $B$ simply parrots one of its $C$-outputs.
1664\item The extractor is allowed -- indeed \emph{required} -- to fail if the
1665 challenge $(R, c)$ is \emph{invalid} (i.e., Alice would return $\bot$ given
1666 the challenge).
1667\end{itemize}
1668The resulting definition bears a striking similarity to the concept of
081da1b1 1669\emph{plaintext awareness} in \cite{bellare-1998:pub-enc-notions}.
32609df3 1670
a6e375a6
MW
1671Such an extractor indeed exists, as the following lemma states.
1672\begin{lemma}[Effectiveness of extractor $T_\Wident$]
1673 \label{lem:dlext}
32609df3
MW
1674 There exists a \emph{universal discrete-log extractor} $T_\Wident$, shown
1675 in figure~\ref{fig:twident}, such that, for any algorithm $B$,
a6e375a6
MW
1676 \[ \Succ{dl-ext}{G}(T_\Wident, B) \ge 1 - \frac{1}{2^{\ell_I}}. \]
1677 Moreover, if $B$ issues at most $q_H$ random-oracle queries, then the
1678 running time of $T_\Wident$ is $O(q_H)$.
1679\end{lemma}
32609df3
MW
1680\ifshort
1681The proof of this lemma is given in the full version of this paper.
1682\else
a6e375a6
MW
1683We prove this result at the end of the section. For now, let us see how to
1684prove that $\Wident$ is zero-knowledge.
32609df3
MW
1685\fi
1686
1687\begin{figure}
1688 \begin{program}
1689 Extractor $T_\Wident(R, c, Q_H)$: \+ \\
1690 \FOR $(R', Y', h)$ \IN $Q_H$ \DO \\ \ind
1691 $r \gets h \xor c$; \\
1692 \IF $R = R' = r P \land Y' = r X$ \THEN \RETURN $(r, Y')$; \- \\
1693 \RETURN $(\bot, \bot)$;
1694 \end{program}
1695
1696 \caption{The discrete-log extractor $T_\Wident$}
1697 \label{fig:twident}
1698\end{figure}
a6e375a6
MW
1699
1700We use the set-up described in section~\ref{sec:sim}. Our initialization
32609df3 1701function~$I_\Wident$ just chooses a random $x \in \gf{q}$ as the world
a6e375a6
MW
1702private input and sets $X = x P$ as the common input. In the `real world',
1703the adversary is allowed to submit a (single) challenge to the prover; it is
1704given the prover's response, and must then compute its output. This is shown
1705on the left hand side of figure~\ref{fig:wident-sim}.
1706
1707The zero-knowledge property of the scheme is described by the following
1708theorem.
1709\begin{theorem}[Statistical zero-knowledge of $\Wident$]
1710 \label{thm:wident-zk}
1711 Let $I_\Wident$, $W_\Wident$ and $S_\Wident$ be the real-prover world and
1712 simulator shown in figure~\ref{fig:wident-sim}. Then, for any~$t$,
1713 $q_I$ and $q_C$,
1714 \[ \InSec{sim}(W_\Wident, I_\Wident, S_\Wident; t, q_I, q_C, 0) \le
1715 \frac{q_C}{2^{\ell_I}}.
1716 \]
1717 where $q_C$ is the maximum number of challenges allowed by the adversary.
1718\end{theorem}
32609df3 1719\begin{longproof}{}
a6e375a6
MW
1720 The simulator simply uses the extractor~$T_\Wident$ to extract the answer
1721 from the adversary's history of random oracle queries. Observe that
1722 $S_\Wident$ is a fake-world simulator. By lemma~\ref{lem:dlext}, the
1723 extractor fails with probability only $2^{-\ell_I}$. The theorem follows
1724 by a simple union bound and proposition~\ref{prop:fakesim}.
32609df3 1725\end{longproof}
a6e375a6 1726
32609df3 1727%\ifshort\else
a6e375a6
MW
1728\begin{figure}
1729 \begin{program}
1730 Initialization function $I_\Wident()$: \+ \\
32609df3 1731 $x \getsr \gf{q}$; \\
a6e375a6
MW
1732 $X \gets x P$; \\
1733 \RETURN $(x, X)$;
1734 \- \\[\medskipamount]
1735 Real-prover world $W_\Wident^{H_I(\cdot, \cdot)}
1736 ((x, X), \sigma, \tau, \mu)$: \+ \\
1737 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1738 $(R, c) \gets \mu$; \\
1739 $Y \gets \id{response}^{H_I(\cdot, \cdot)}(R, c, x)$; \\
1740 \RETURN $(1, Y)$; \- \\
1741 \ELSE \\ \ind
1742 \RETURN $(\sigma, \bot)$;
1743 \next
1744 Simulator $S_\Wident$'s fake world \\
1745 \hspace{1in} $W_{\text{sim}}^{H_I(\cdot, \cdot)}
1746 ((X, u), \sigma, \tau, \mu)$: \+ \\
1747 \IF $\tau = \cookie{init}$ \THEN \\ \ind
1748 \RETURN $(\emptyset, \emptystring)$; \- \\
1749 $Q_H \gets \sigma$; \\
1750 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1751 $(R, c) \gets \mu$; \\
1752 $(r, Y) \gets T_\Wident(R, c, Q_H)$; \\
1753 \RETURN $(Q_H, Y)$; \- \\
1754 \ELSE \IF $\tau = \cookie{random}$ \THEN \\ \ind
1755 $(i, (R', Y'), h) \gets \mu$; \\
1756 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1757 \RETURN $(Q_H, \emptystring)$; \- \\
1758 \ELSE \\ \ind
1759 \RETURN $(\sigma, \bot)$;
1760 \end{program}
1761
1762 \caption{Real-prover and simulator for zero-knowledge of $\Wident$}
1763 \label{fig:wident-sim}
1764\end{figure}
32609df3 1765%\fi
a6e375a6 1766
32609df3
MW
1767\ifshort\else
1768We now return to proving that the extractor $T_\Wident$ functions as claimed.
dff0fad2 1769The following two trivial lemmata will be useful, both now and later.
a6e375a6
MW
1770\begin{lemma}[Uniqueness of discrete-logs]
1771 \label{lem:unique-dl}
1772 Let $G = \langle P \rangle$ be a cyclic group. For any $X \in G$ there is
32609df3 1773 a unique $x \in \gf{q}$ where $X = x P$.
a6e375a6
MW
1774\end{lemma}
1775\begin{proof}
1776 Certainly such an $x$ exists, since $G$ is cyclic and finite. Suppose $X =
1777 x P = x' P$: then $0 = x P - x' P = (x - x') P$. Hence $(x - x')$ is a
32609df3 1778 multiple of $q$, i.e., $x = x'$.
a6e375a6
MW
1779\end{proof}
1780\begin{lemma}[Uniqueness of check values]
1781 \label{lem:unique-c}
32609df3
MW
1782 Let $G = \langle P \rangle$ be a cyclic group of prime order $q$; let $H_I$
1783 be a function $H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$. Fix some $x
1784 \in \gf{q}$ and define the set
a6e375a6
MW
1785 \[ V_x = \bigl\{\, (R, c) \in G \times \Bin^{\ell_I} \bigm|
1786 R = \bigl( c \xor H_I(R, x R) \bigr) P \,\bigr\}.
1787 \]
1788 Then, for any $R$, $c$, $c'$, if $(R, c) \in V_x$ and $(R, c') \in V_x$
32609df3 1789 then $c = c'$.
a6e375a6
MW
1790\end{lemma}
1791\begin{proof}
32609df3
MW
1792 From lemma~\ref{lem:unique-dl}, we see that there is a unique $r \in \gf{q}$
1793 for which $R = r P$. Now, if $(R, c) \in V_x$, we must have $r = c \xor
1794 H_I(R, x R)$. It follows that $c = r \xor H_I(R, x R)$.
a6e375a6
MW
1795\end{proof}
1796
1797\begin{proof}[Proof of lemma~\ref{lem:dlext}]
a6e375a6
MW
1798 Let $B$ be any randomized algorithm, and let $(R, c, Q_H)$ be as given to
1799 the extractor by $\Game{dl-ext}{G}(T_\Wident, B)$. Let the quantities
1800 $H_I$, $Q_C$, $r$, $r'$, $x$ and $X$ be as in that game.
1801
1802 Suppose that the extractor returns values $(r, Y) \ne (\bot, \bot)$. Let
1803 $h = r \xor c$; then there must be a query $(R, Y, h) \in Q_H$, and we have
1804 $R = r P$ and $Y = r X = r (x P) = x (r P) = x R = Y'$, so the extractor's
1805 output must be correct unless it fails.
1806
1807 Furthermore, in the case where the extractor did not fail, we have $h =
1808 H_I(R, Y) = H_I(R, Y')$ and $c = r \xor h$, so the challenge was valid.
1809 Therefore, if the challenge was invalid, the extractor will fail.
1810
1811 We now deal with the challenge-generation oracle. Suppose that $(R, c')
1812 \in Q_C$ for some $c'$. Now, if $c = c'$ then $(R, c')$ is a repeat of
1813 some challenge from the challenge-generation oracle, and the extractor is
1814 permitted to fail. On the other hand, suppose $c \ne c'$; then, the
1815 challenge $(R, c)$ must be invalid by lemma~\ref{lem:unique-c}, so the
1816 extractor is required to fail, and we have established that indeed it will.
1817 From now on, suppose that $R$ is distinct from all the $R$-values returned
1818 by $C()$.
1819
1820 Let $Y = x R$. Suppose that $B$ queried its random oracle at $(R, Y)$.
1821 Let $h = H_I(R, Y)$, so $r' = c \xor h$. If the challenge is valid then $R =
1822 r' P$; therefore $Y = x R = x r' P = r' X$, so we have $(R, Y, h) \in Q_H$
1823 with $R = r P$ and $Y = r X$. Hence the extractor returns $r = r'$ as
1824 required.
1825
1826 It remains to deal with the case where there is no random-oracle query at
1827 $(R, Y)$. But then $h = H_I(R, Y)$ is uniformly distributed, and
1828 independent of the entire game up to this point. Let $r$ be the correct
1829 discrete log of $R$; by lemma~\ref{lem:unique-dl} there is only one
1830 possible value. The extractor always fails under these circumstances, but
1831 a correct responder would reply with probability
1832 \[ \Pr[h = c \xor r] = \frac{1}{2^{\ell_I}}. \]
1833 This concludes the proof.
1834\end{proof}
1835\begin{remark}
1836 Note that the fact that the algorithm~$B$ was given the private key is
1837 irrelevant to the above argument. However, we shall need this property
1838 when we come to prove deniability for the key-exchange protocol.
1839\end{remark}
1840\begin{remark}
1841 It's easy to see from the above proof that the extractor works flawlessly
1842 on the `honest verifier' algorithm $\id{challenge}$ shown in
1843 figure~\ref{fig:wident-ro}. This shows that $\Wident$ is perfect
1844 zero-knowledge against honest verifiers. We're much more interested in
1845 dishonest verifiers, though.
1846\end{remark}
32609df3 1847\fi
a6e375a6
MW
1848
1849
32609df3 1850\ifshort\else
a6e375a6
MW
1851\subsection{An identity-based identification scheme}
1852\label{sec:wident-id}
1853
081da1b1
MW
1854Boneh and Franklin \cite{boneh-franklin-2003:ibe-weil-pairing} showed how to
1855construct an identity-based encryption scheme using bilinear pairings. The
1856resulting encryption scheme looks somewhat like a pairing-based version of
1857ElGamal's encryption scheme \cite{elgamal-1985:dlog-enc-sign}. We can
1858easily apply their techniques to our identification protocol, and thereby
1859obtain an identity-based identification scheme. Providing the necessary
1860formalisms to prove theorems analogous to our theorems~\ref{thm:wident-sound}
a6e375a6
MW
1861and~\ref{thm:wident-zk} would take us too far from our objectives; but given
1862appropriate security notions, we can readily adapt our existing proofs to the
1863new setting.
1864
1865\subsubsection{Bilinear pairings}
1866Before we describe the necessary modifications to the protocol, we first give
1867a (very brief!) summary of cryptographic pairings. (The Boneh-Franklin paper
081da1b1
MW
1868\cite{boneh-franklin-2003:ibe-weil-pairing} gives more detail; also
1869\cite{menezes-2005:intro-pairing-crypto} provides a useful introduction to
1870the topic.)
a6e375a6
MW
1871
1872Let $(G, +)$, $(G', +)$ and $(G_T, \times)$ be cyclic groups with prime order
1873$q$; let $P \in G$ and $P' \in G'$ be elements of order $q$ in $G$ and $G'$
1874respectively. We say that a mapping $\hat{e}\colon G \times G' \to G_T$ is a
1875\emph{non-degenerate bilinear pairing} if it satisfies the following
1876properties.
1877\begin{description}
1878\item[Bilinearity] For all $R \in G$ and $S', T' \in G'$, we have $\hat{e}(R,
dff0fad2
MW
1879 S' + T') = \hat{e}(R, S')\,\hat{e}(R, T')$; and for all $R, S \in G$ and $T'
1880 \in G'$ we have $\hat{e}(R + S, T') = \hat{e}(R, T')\,\hat{e}(S, T')$.
a6e375a6
MW
1881\item[Non-degeneracy] $\hat{e}(P, P') \ne 1$.
1882\end{description}
1883For practical use, we also want $\hat{e}(\cdot, \cdot)$ to be efficient to
1884compute. The reader can verify that $\hat{e}(a P, b P') = \hat{e}(P,
1885P')^{ab}$. It is permitted for the two groups $G$ and $G'$ to be equal.
1886
1887We require a different intractability assumption, specifically that the
1888\emph{bilinear} Diffie-Hellman problem (BDH) -- given $(a P, b P, a P', c P')
1889\in G^2 \times G'^2$, find $\hat{e}(P, P')^{abc} \in G_T$ -- is difficult.
1890This implies the difficulty of the computational Diffie-Hellman problem in
1891all three of $G$, $G'$ and~$G_T$.
1892
1893\subsubsection{The identity-based scheme}
081da1b1
MW
1894We need a trusted authority; following \cite{schneier-1996:applied-crypto} we
1895shall call him Trent. Trent's private key is $t \in \gf{q}$; his public key
1896is $T = t P$.
a6e375a6
MW
1897
1898Finally, we need cryptographic hash functions $H_I\colon G \times G_T \to
1899\Bin^{\ell_I}$ and $\Hid\colon \Bin^* \to G'$; a formal security analysis
1900would model these as random oracles.
1901
1902Alice's public key is $A = \Hid(\texttt{Alice}) \in G'$. Her private key is
1903$K_A = t A \in G'$ -- she needs Trent to give this to her. Bob can interact
1904with Alice in order to verify her identity as follows.
1905\begin{enumerate}
1906\item Bob computes $\gamma_A = \hat{e}(T, A) \in G_T$. (He can do this once
1907 and store the result if he wants, but it's not that onerous to work it out
1908 each time.)
32609df3 1909\item Bob chooses $r \inr \gf{q}$, and sets $R = r P$. He also computes
a6e375a6
MW
1910 $\psi = \gamma_A^r$, $h = H_I(R, \psi)$ and $c = r \xor h$. He sends his
1911 challenge $(R, c)$ to Alice.
1912 \item Alice receives $(R', c')$. She computes $\psi' = \hat{e}(R', K_A)$, $h'
1913 = H_I(R', \psi')$, and $r' = c' \xor h'$. She checks that $R' = r' P$; if
1914 so, she sends $\psi'$ back to Bob; otherwise she refuses to talk to him.
1915\item Bob receives $\psi'$. If $\psi = \psi'$ then he accepts that he's
1916 talking to Alice.
1917\end{enumerate}
1918This works because $\psi = \gamma_A^r = \hat{e}(T, A)^r = \hat{e}(t P, A)^r =
1919\hat{e}(r P, A)^t = \hat{e}(R, t A) = \psi'$.
1920
1921\subsubsection{Informal analysis}
1922An analogue to lemma~\ref{lem:dlext} can be proven to show how to extract $r$
1923from a verifier's random-oracle queries; statistical zero knowledge would
1924then follow easily, as in theorem~\ref{thm:wident-zk}. Soundness is
1925intuitively clear, since an adversary must compute $\psi = \hat{e}(P,
1926P')^{art}$ given $A = a P'$, $R = r P$ and $T = t P$, which is an instance of
1927the BDH problem. An analogue of theorem~\ref{thm:wident-sound} would have to
1928prove this for an adversary capable of making identity requests as well as
1929obtaining challenges. Finally, our key-exchange protocol can be constructed
1930out of this identity-based identification scheme, yielding an identity-based
1931authenticated key-exchange protocol. We leave it to the reader to work
1932through the details.
32609df3 1933\fi
a6e375a6
MW
1934
1935
32609df3 1936\ifshort\else
a6e375a6
MW
1937\subsection{Comparison with the protocol of Stinson and Wu}
1938\label{sec:stinson-ident}
1939
1940Our protocol is similar to a recent proposal by Stinson and Wu
081da1b1
MW
1941\cite{stinson-wu-2006:two-flow-zero-knowledge}. They restrict their
1942attention to Schnorr groups $G \subset \F_p^*$. Let $\gamma$ be an element
1943of order $q = \#G$. The prover's private key is $a \inr \gf{q}$ and her
1944public key is $\alpha = \gamma^a$. In their protocol, the challenger chooses
4331bf84
MW
1945$r \inr \gf{q}$, computes $\rho = \gamma^r$ and $\psi = \alpha^r$, and sends
1946 a challenge $(\rho, H(\psi))$. The prover checks that $\rho^q = 1$,
1947computes $\psi = \rho^a$, checks the hash, and sends $\psi$ back by way of
1948response. They prove their protocol's security in the random-oracle model.
a6e375a6
MW
1949
1950Both the Wrestlers protocol and Stinson-Wu require both prover and verifier
1951to compute two exponentiations (or scalar multiplications) each. The
32609df3 1952sizes of the messages used by the two protocols are also identical.
a6e375a6
MW
1953
1954(An earlier version of the Stinson-Wu protocol used a cofactor
1955 exponentiation: if we set $f = (p - 1)/q$, then we use $\psi = \alpha^{rf} =
1956\rho^{af} = \gamma^{afr}$. This is more efficient in typical elliptic curve
1957subgroups, since the cofactor of such subgroups is usually small: indeed,
081da1b1
MW
1958\cite{certicom-2000:sec1} recommends rejecting groups with cofactor $f > 4$.
1959However, in the Schnorr groups used by Stinson and Wu, the cofactor is much
1960larger than $q$, and their new variant is more efficient.)
a6e375a6
MW
1961
1962We note that the zero-knowledge property of the Stinson-Wu protocol requires
1963the Diffie-Hellman knowledge of exponent assumption (KEA). Very briefly:
1964suppose $A$ is a randomized algorithm which takes as input $X \in G$ and
1965outputs a pair $(r P, r X)$; intuitively, the KEA asserts $A$ must have done
1966this by choosing $r$ somehow and then computing its output from it.
1967Formally, it asserts the existence of an `extractor' algorithm which takes as
1968input the element $X$ and the random coins used by $A$ and outputs $r$ with
1969high probability. This is a very strong assumption, and one which is
1970unnecessary for our protocol, since we can present an \emph{explicit}
1971extractor.
1972
081da1b1
MW
1973The KEA assumption as stated in
1974\cite{stinson-wu-2006:two-flow-zero-knowledge} allows the extractor to fail
1975with some negligible probability, over and above the probability that a
1976dishonest verifier managed to guess the correct $h = H(\psi)$ without making
1977this random-oracle query.  Not only does our protocol achieve zero-knowledge
1978without the KEA, our extractor is, in this sense, `perfect'.
a6e375a6
MW
1979
1980Our protocol is just as strong as Stinson-Wu under attack from active
1981intruders: see table~\ref{tab:wident-active} for a very brief sketch of the
1982case-analysis which would be the basis of a proof of this.
1983
1984\begin{table}
1985 \begin{tabular}[C]{|*{3}{c|}p{8cm}|}
1986 \hlx{hv[1]}
1987 \multicolumn{2}{|c|}{\textbf{Challenge}} &
1988 \textbf{Response} &
1989 \textbf{Security}
1990 \\ \hlx{v[1]hv}
1991 %% unpleasant hacking to make the R and c columns the same width :-(
1992 \settowidth{\dimen0}{\textbf{Challenge}}%
32609df3 1993 \dimen0=.5\dimen0
a6e375a6
MW
1994 \advance\dimen0by-\tabcolsep
1995 \advance\dimen0by-.5\arrayrulewidth
1996 \hbox to\dimen0{\hfil$R$\hfil}
1997 & $c$ & $Y$ & Nothing to prove. \\ \hlx{v}
1998 $R$ & $c'$ & --- & Prover rejects by lemma~\ref{lem:unique-c};
1999 $Y'$ probably wrong by
2000 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
2001 $R$ & $c$ & $Y'$ & Response is incorrect. \\ \hlx{v}
2002 $R'$ & --- & $Y$ & Response is incorrect. \\ \hlx{v}
2003 $R'$ & $c$ & $Y'$ & Prover rejects with probability $1 - 2^{-\ell_I}$;
2004 $Y'$ probably wrong by
2005 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
2006 $R'$ & $c'$ & $Y'$ & Simulate prover using extractor
2007 (lemma~\ref{lem:dlext}); $Y'$ probably wrong by
2008 theorem~\ref{thm:wident-sound}. \\ \hlx{vh}
2009 \end{tabular}
32609df3 2010
a6e375a6
MW
2011 \caption{Security of $\Wident$ against active intruders (summary)}
2012 \label{tab:wident-active}
2013\end{table}
2014
2015An identity-based analogue of Stinson-Wu can be defined using a bilinear
2016pairing, just as we did in section~\ref{sec:wident-id}. However, to prove
2017the zero-knowledge property, one needs to make a bilinear analogue of the
2018knowledge of exponent assumption.
2019
2020We suspect that a key-exchange protocol like ours can be constructed using
2021Stinson-Wu rather than the Wrestlers identification scheme. We haven't,
2022however, gone through all the details, since we believe our protocol is just
2023as efficient and is based on much more conservative assumptions.
32609df3 2024\fi
a6e375a6
MW
2025
2026%%%--------------------------------------------------------------------------
2027
2028\section{A simple key-exchange protocol}
2029\label{sec:kx}
2030
2031In this section, we describe a simple key-exchange protocol built out of the
2032identification protocol shown previously.
2033
2034The key-exchange protocol arises from the following observation. If Bob
2035sends a challenge, presumably to Alice, and gets a correct response, then not
2036only did he really send the challenge to Alice but he knows that she received
2037it correctly.
2038
2039So, if Alice and Bob authenticate each other, by the end of it, they should
2040each have chosen a random private value, sent the corresponding public value
2041to the other, and been convinced that it arrived safely.
2042
2043Unfortunately, life isn't quite this kind, and we have to do some more work
2044to make this scheme secure.
2045
2046
2047Our key exchange protocol essentially consists of two parallel instances of
2048the identification protocol. If Alice receives a correct response to her
2049challenge, she will know that Bob received her challenge correctly, and
2050\emph{vice versa}. If we let Alice's challenge be $R_0 = r_0 P$ and Bob's
2051challenge be $R_1 = r_1 P$ then each can compute a shared secret $Z = r_0 R_1
2052= r_0 r_1 P = r_1 R_0$ unknown to an adversary. There are, unfortunately, a
2053few subtleties involved in turning this intuition into a secure key-exchange
2054protocol, which we now describe.
2055
2056
2057\subsection{Overview}
2058\label{sec:kx-overview}
2059
2060We present a quick, informal description of our basic key-exchange protocol.
2061In addition to our group $G$, we shall also need a secure symmetric
2062encryption scheme $\E = (\kappa, E, D)$, and two secure hash functions
2063$H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$ and $H_K\colon \Bin^{\ell_G+1}
2064\to \Bin^\kappa$.
2065
2066Suppose that Alice's and Bob's private keys are $a$ and $b$ respectively, and
2067their public keys are $A = a P$ and $B = b P$.
2068\begin{enumerate}
32609df3
MW
2069\item Alice chooses a random index $r \inr \gf{q}$. She computes $R = r P$ and
2070 $c = r \xor H_I(R, r B)$. She sends the pair $(R, c)$ to Bob.
2071\item Similarly, Bob chooses a random $s \inr \gf{q}$. He computes $S = s P$
2072 and $d = s \xor H_I(S, s A)$. He sends $(S, d)$ to Alice.
a6e375a6
MW
2073\item Alice receives $(S', d')$ from Bob. She computes $s' = d' \xor H_I(S',
2074 a S')$, and verifies that $S' = s' P$. If so, she computes $K_A = H_K(0
2075 \cat r S')$, and sends $R, E_{K_A}(a S')$ to Bob.
2076\item Similarly, Bob receives $(R', c')$ from Alice. He verifies that $R' =
2077 \bigl( c' \xor H_I(R', b R') \bigr) P$. If so, he computes $K_B = H_K(0
2078 \cat s R')$ and sends $S, E_{K_B}(b R')$ to Alice.
2079\item Alice receives a ciphertext $(S'', \chi_B)$ from Bob. She checks that
2080 $S'' = S'$, decrypts $\chi_B$, and checks that $D_{K_A}(\chi_B) = r B$. If
2081 so, she uses $H_K(1 \cat r S')$ as her shared secret.
2082\item Similarly, Bob receives $(R'', \chi_A)$ from Alice, and checks $R'' =
2083 R'$ and $D_{K_B}(\chi_A) = s A$. If so, he uses $H_K(1 \cat s R')$ as his
2084 shared secret.
2085\end{enumerate}
2086This is the Wrestlers Key Exchange protocol, $\Wkx^{G, \E}$ (again, we omit
2087the superscripts when referring to the general protocol, or when confusion is
2088unlikely). A diagrammatic summary of the protocol is shown in
2089figure~\ref{fig:wkx}.
2090
2091\begin{figure}
2092 \begin{description}
32609df3 2093 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2094 $H_I(\cdot, \cdot)$ and $H_K(\cdot)$ are secure hashes. $\E = (\kappa,
2095 E, D)$ is an IND-CCA2 symmetric encryption scheme.
2096 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2097 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2098 \item[Public keys] $X_i = x_i P$.
2099 \end{description}
32609df3
MW
2100 \begin{protocol}
2101 $r_i \getsr I$; $R_i \gets r_i P$; &
2102 $r_j \getsr I$; $R_j \gets r_j P$; \\
2103 $c_i \gets r_i \xor H_I(R_i, r_i X_j)$; &
2104 $c_j \gets r_j \xor H_I(R_j, r_j X_i)$; \\
2105 \send{->}{(R_i, c_i)}
2106 \send{<-}{(R_j, c_j)}
2107 Check $R_j = \bigl(c_j \xor H_I(R_j, x_i R_j)\bigr) P$; &
2108 Check $R_i = \bigl(c_i \xor H_I(R_i, x_j R_i)\bigr) P$; \\
2109 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2110 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2111 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2112 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2113 \send{->}{(R_i, \chi_i)}
2114 \send{<-}{(R_j, \chi_j)}
2115 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2116 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2117 Shared key is $K_1$. & Shared key is $K_1$.
2118 \end{protocol}
a6e375a6
MW
2119
2120 \caption{Summary of the Wrestlers Key Exchange protocol, $\Wkx$}
2121 \label{fig:wkx}
2122\end{figure}
2123
2124Assume, for the moment, that Alice and Bob's messages are relayed honestly.
2125Then:
2126\begin{itemize}
2127\item $a S' = a S = a (s P) = s (a P) = s A$, so $s' = d' \xor H_I(S', a S') =
2128 d \xor H_I(S, s A) = s$, and $S' = S = s P = s' P$, and therefore Alice
2129 responds to Bob's message;
2130\item similarly $b R' = r B$, so $r' = r$ and $R' = r' P$, and therefore Bob
2131 responds to Alice's message;
2132\item $b R' = b R = b (r P) = r (b P) = r B$, and $a S' = a S = a (s P) = s
2133 (a P) = s A$, and therefore both parties compute their responses correctly;
32609df3 2134 and
a6e375a6
MW
2135\item $r S' = r S = r (s P) = s (r P) = s R = s R'$, so $K_A = K_B$, and
2136 therefore they can decrypt each others' responses, and agree the same
2137 shared secret.
2138\end{itemize}
2139This shows that the protocol is basically valid, but says little about its
2140security. The remainder of this section will describe our protocol in more
2141formal detail, and prove its security in a model with multiple parties and an
2142adversary who controls the network.
2143
2144Observe that the protocol as we've presented here is \emph{symmetrical}.
2145There's no notion of `initiator' or `responder'. There are a total of four
2146messages which must be sent before both parties accept. However, this can be
2147reduced to three by breaking the symmetry of the protocol and combining one
2148or other party's challenge and response messages. We choose to analyse the
2149symmetrical version, since to do so, it suffices to consider only the two
2150different kinds of messages. Since our security model allows the messages to
2151be adversarially delayed and reordered, it is easy to show that the security
2152of an optimized, asymmetrical protocol is no worse than the symmetrical
2153version we present here.
2154
2155
2156\subsection{Security model and security definition}
2157\label{sec:um}
2158
2159Our model is very similar to that of Canetti and Krawczyk
081da1b1
MW
2160\cite{canetti-krawczyk-2001:secure-channels}, though we have modified it in
2161two ways.
a6e375a6
MW
2162\begin{enumerate}
2163\item We allow all the participants (including the adversary) in the protocol
2164 access to the various random oracles required to implement it.
2165\item Since we want to analyse a specific, practical scheme, asymptotic
2166 results are useless. We measure the adversary's resource usage carefully,
2167 and produce a quantitative bound on the adversary's advantage in the
2168 SK-security game.
2169\end{enumerate}
2170
32609df3
MW
2171\ifshort
2172
2173Readers interested in the details of the model should see Canetti and
081da1b1
MW
2174Krawczyk's paper \cite{canetti-krawczyk-2001:secure-channels}, or the full
2175version of this paper.
32609df3
MW
2176
2177\else
2178
a6e375a6
MW
2179\subsubsection{Overview}
2180We briefly describe our modified model, pointing out the changes we have
2181made, and how they apply to our protocol. Much of Canetti and Krawczyk's
2182model (for example, the local and global outputs) is useful for proving more
2183general security properties such as demonstrating that SK-security suffices
2184for constructing secure channels, and we shall not concern ourselves with
2185such details. Other parts deal with issues such as security parameters and
2186ensuring that all the computation is polynomially bounded, which are
2187irrelevant since we are dealing with a single concrete protocol rather than a
2188family of them.
2189
2190The entities in the model are the \emph{adversary}~$A$, and a (fixed) number
2191of \emph{parties}~$P_i$, for $0 \le i < n$. If the protocol under
2192consideration makes use of random oracles, then all the participants -- the
2193adversary and the parties -- are all allowed access to the random oracles.
2194
2195The parties and the adversary play a `game'. At the beginning of the game,
2196the participants are given some inputs computed by a randomized
2197\emph{initialization procedure}~$\id{init}$. This produces as output a pair
2198$(i_U, \mathbf{i})$; the value $i_U$ is the \emph{global input}, and is given
2199to all the participants including the adversary. The vector $\mathbf{i}$ has
2200$n$ components, and party $P_i$ is given $(i_U, \mathbf{i}[i])$ as input.
2201
2202\subsubsection{Sessions}
2203Parties don't act directly. Instead, each party runs a number of
32609df3
MW
2204\emph{sessions}. A session is represented by a triple $S = (P_i, P_j, s)$,
2205where $i, j \in \Nupto{n}$ identify the owning party and a \emph{partner},
2206and $s \in \Bin^{\ell_S}$ is a \emph{session-id}. (The original model
2207includes a r\^ole, for distinguishing between initiators and responders. Our
2208protocol is symmetrical, so this distinction isn't useful.) If $P_i$ runs a
2209session $S = (P_i, P_j, s)$ and $P_j$ runs a session $S' = (P_j, P_i, s)$
2210then we say that $S$ and $S'$ are \emph{matching}, and that $P_j$ is $P_i$'s
a6e375a6
MW
2211\emph{partner} for the session.
2212
2213At most one participant in the game is \emph{active} at any given time.
2214Initially the adversary is active. The adversary may \emph{activate} a
2215session in one of two ways.
2216\begin{enumerate}
2217\item It may \emph{create a session} of a party~$P_i$, by selecting a
2218 session-id~$s \in \Bin^{\ell_S}$ and a partner $j$. There is no
2219 requirement that $P_j$ ever have a matching session. However, all sessions
2220 of a party must be distinct, i.e., sessions with the same partner must have
2221 different session-ids.
2222\item It may \emph{deliver a message}~$\mu \in \Bin^*$, from party~$P_j$, to
2223 an existing session~$S = (P_i, P_j, s)$. There is no requirement that any
2224 party previously sent $\mu$: the adversary is free to make up messages as
2225 it sees fit.
2226\end{enumerate}
2227The adversary becomes inactive, and the session becomes active. The session
2228performs some computation, according to its protocol, and may request a
2229message~$\mu$ be delivered to the matching session running in its partner
2230(which may not exist). The session may also \emph{terminate}. In the case
2231we are interested in, of key-exchange protocols, a session~$S = (P_i, P_j,
2232s)$ may terminate in one of two ways:
2233\begin{enumerate}
2234\item it may \emph{complete}, outputting $(i, j, s, K)$, for some
2235 \emph{session key}~$K$, or
2236\item it may \emph{abort}, outputting $(i, j, s, \bot)$.
2237\end{enumerate}
2238Once it has performed these actions, the session deactivates and the
2239adversary becomes active again. The adversary is given the message~$\mu$, if
2240any, and informed of whether the session completed or aborted, but, in the
2241case of completion, not of the value of the key~$K$. A session is
2242\emph{running} if it has been created and has not yet terminated.
2243
2244\subsubsection{Other adversarial actions}
2245As well as activating sessions, the adversary has other capabilities, as
2246follows.
2247\begin{itemize}
2248\item It may \emph{expire} any session~$S$, causing the owning party to
2249 `forget' the session key output by that session.
2250\item It may \emph{corrupt} any party~$P_i$, at will: the adversary learns
2251 the entire state of the corrupted party, including its initial
2252 input~$\mathbf{i}[i]$, the state of any sessions it was running at the
2253 time, and the session keys of any completed but unexpired sessions. Once
2254 corrupted, a party can no longer be activated. Of course, the adversary
2255 can continue to send messages allegedly from the corrupted party.
2256\item It may \emph{reveal the state} of a running session~$S$, learning any
2257 interesting values specific to that session, but \emph{not} the owning
2258 party's long-term secrets.
2259\item It may \emph{reveal the session-key} of a completed session~$S$.
2260\item It may elect to be \emph{challenged} with a completed session~$S$,
2261 provided it is unexposed.  Challenge sessions form part of the security notion for
2262 key-exchange protocols. See below for more details.
2263\end{itemize}
2264We say that a session $S = (P_i, P_j, s)$ is \emph{locally exposed} if
2265\begin{itemize}
2266\item it has had its state revealed,
2267\item it has had its session-key revealed, or
2268\item $P_i$ has been corrupted, and $S$ had not been expired when this
2269 happened.
2270\end{itemize}
2271A session is \emph{exposed} if it is locally exposed, or if its matching
2272session exists and has been locally exposed.
2273
2274At the beginning of the game, a bit $b^*$ is chosen at random. The adversary
2275may choose to be \emph{challenged} with any completed, unexposed
2276session;\footnote{%
2277 The original Canetti-Krawczyk definition restricts the adversary to a
2278 single challenge session, but our proof works independent of the number of
2279 challenge sessions, so we get a stronger result by relaxing the requirement
2280 here.}
2281the adversary is then given either the session's key -- if $b^* = 1$ -- or a
2282string chosen at random and independently of the game so far from a
2283protocol-specific distribution -- if $b^* = 0$. At the end of the game, the
2284adversary outputs a single bit~$b$.
2285
2286\subsubsection{SK-security}
2287We've now described the game; it is time to explain the adversary's goal in
2288it. The adversary \emph{wins} the game if either
2289\begin{enumerate}
2290\item two unexposed, matching sessions complete, but output different
2291 keys,\footnote{%
2292 The original Canetti-Krawczyk definition differs slightly here. It
2293 requires that `if two \emph{uncorrupted} parties complete matching
2294 sessions then they both output the same key' [original emphasis]. This
2295 can't be taken at face value, since none of the protocols they claim to
2296 be secure actually meet this requirement: they meet only the weaker
2297 requirement that parties completing matching sessions output different
2298 keys with negligible probability. We assume here that this is what they
2299 meant.}
2300 or
2301\item the adversary correctly guesses the hidden bit~$b^*$.
2302\end{enumerate}
2303More formally, we make the following definition.
32609df3 2304\fi
a6e375a6
MW
2305\begin{definition}[SK-security]
2306 \label{def:sk}
2307 Let $\Pi^{H_0(\cdot), H_1(\cdot), \ldots}$ be a key-exchange protocol
2308 which makes use of random oracles $H_0(\cdot)$, $H_1(\cdot)$, \dots, and
32609df3 2309 let $A$ be an adversary playing the game described \ifshort in
081da1b1 2310 \cite{canetti-krawczyk-2001:secure-channels}\else previously\fi, where $n$
a6e375a6
MW
2311 parties run the protocol~$\Pi$. Let $V$ be the event that any pair of
2312 matching, unexposed sessions completed, but output different session keys.
2313 Let $W$ be the event that the adversary's output bit matches the game's
2314 hidden bit~$b^*$. We define the adversary's \emph{advantage against the
2315 SK-security of the protocol~$\Pi$} to be
2316 \[ \Adv{sk}{\Pi}(A, n) = \max(\Pr[V], 2\Pr[W] - 1). \]
2317 Furthermore, we define the \emph{SK insecurity function of the
2318 protocol~$\Pi$} to be
2319 \[ \InSec{sk}(\Pi; t, n, q_S, q_M, q_{H_0}, q_{H_1}, \dots) =
2320 \max_A \Adv{sk}{\Pi}(A, n)
2321 \]
2322 where the maximum is taken over all adversaries~$A$ with total running
2323 time~$t$ (not including time taken by the parties), create at most $q_S$
2324 sessions, deliver at most $q_M$~messages, and (if applicable) make at most
2325 $q_{H_i}$ random-oracle queries to each random oracle $H_i(\cdot)$.
2326\end{definition}
2327
2328
2329\subsection{Security}
2330
2331In order to analyse our protocol $\Wkx^{G, \E}$ properly, we must describe
32609df3 2332exactly how it fits into our formal model.
a6e375a6
MW
2333
2334\subsubsection{Sessions and session-ids}
2335Our formal model introduced the concept of sessions, which the informal
2336description of section~\ref{sec:kx-overview} neglected to do. (One could
2337argue that we described a single session only.) As we shall show in
2338section~\ref{sec:kx-insecure}, our protocol is \emph{insecure} unless we
2339carefully modify it to distinguish between multiple sessions.
2340
2341In the model, distinct key-exchange sessions are given distinct partners and
2342session-ids. In order to prevent sessions interfering with each other, we
2343shall make explicit use of the session-ids.
2344
2345Suppose the session-ids are $\ell_S$-bit strings. We expand the domain of
2346the random oracle $H_I$ so that it's now
2347\[ H_I\colon G \times \Bin^{\ell_S} \times G \times G \to \Bin^{\ell_I}. \]
2348
2349\subsubsection{Messages}
2350We split the messages of our protocols into two parts: a \emph{type}~$\tau$ and
2351a \emph{body}~$\mu$. We assume some convenient, unambiguous encoding of
2352pairs $(\tau, \mu)$ as bit-strings. For readability, we present message
2353types as text strings, e.g., `\cookie{challenge}', though in practice one
2354could use numerical codes instead.
2355
2356The message body itself may be a tuple of values, which, again, we assume are
2357encoded as bit-strings in some convenient and unambiguous fashion. We shall
2358abuse the notation for the sake of readability by dropping a layer of nesting
2359in this case: for example, we write $(\cookie{hello}, x, y, z)$ rather than
2360$\bigl(\cookie{hello}, (x, y, z)\bigr)$.
2361
2362\subsubsection{The protocol}
2363Our protocol is represented by three functions, shown in
2364figure~\ref{fig:wkx-formal}.
2365\begin{itemize}
2366\item $\id{init}(n)$ is the initialization function, as described in
2367 section~\ref{sec:um}. It outputs a pair $(\mathbf{p}, \mathbf{i})$, where
2368 $\mathbf{i}[i]$ is the private key of party~$P_i$ and $\mathbf{p}[i]$ is
2369 the corresponding public key. Only $P_i$ is given $\mathbf{i}[i]$, whereas
2370 all parties and the adversary are given $\mathbf{p}$.
2371\item $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2372 (\mathbf{p}, x, i, j, s)$ is the new-session function. This is executed by
2373 party~$P_i$ when the adversary decides to create a new session~$S = (P_i,
2374 P_j, s)$. It is also given the relevant outputs of $\id{init}$, and
2375 allowed access to the random oracles $H_I$ and $H_K$.
2376\item $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)} (\tau,
2377 \mu)$ is the incoming-message function. This is executed by a session when
2378 the adversary decides to deliver a message $(\tau, \mu)$ to it. It makes
2379 use of the subsidiary functions $\id{msg-challenge}$ and
2380 $\id{msg-response}$ to handle the messages.
2381\end{itemize}
2382We observe that the protocol never aborts. We could make it do so if it
2383receives an invalid message, but we prefer to ignore invalid messages for the
2384sake of robustness.\footnote{%
2385 Note that this protocol would therefore require modification before it was
081da1b1
MW
2386 acceptable in the simulation-based model of
2387 \cite{shoup-1999:formal-model-key-exchange}. There it is required that a
2388 key-exchange protocol terminate after a polynomially-bounded number of
2389 messages are delivered to it.}
a6e375a6
MW
2390
2391\begin{figure}
2392 \begin{program}
2393 Function $\id{init}(n)$: \+ \\
2394 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2395 $x \getsr \gf{q}$; \\
a6e375a6
MW
2396 $\mathbf{i}[i] \gets x$; \\
2397 $\mathbf{p}[i] \gets x P$; \- \\
2398 \RETURN $(\mathbf{p}, \mathbf{i})$;
2399 \- \\[\medskipamount]
2400 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2401 (\mathbf{p}, x, i, j, s)$: \+ \\
2402 $X \gets \mathbf{p}[i]$;
2403 $X' \gets \mathbf{p}[j]$;
2404 $C \gets \emptyset$; \\
32609df3 2405 $r \getsr \gf{q}$;
a6e375a6
MW
2406 $R \gets r P$;
2407 $Y \gets r X'$; \\
2408 $h \gets H_I(X, s, R, Y)$;
2409 $c \gets r \xor h$; \\
2410 \SEND $(\cookie{challenge}, R, c)$;
2411 \- \\[\medskipamount]
2412 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2413 (\tau, \mu)$: \+ \\
2414 \IF $\tau = \cookie{challenge}$ \THEN $\id{msg-challenge}(\mu)$; \\
2415 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2416 \next
2417 Function $\id{msg-challenge}(\mu)$: \+ \\
2418 $(R', c') \gets \mu$; \\
2419 $Y' \gets x R'$; \\
2420 $h' \gets H_I(X', s, R', Y')$; \\
2421 $r' \gets c' \xor h'$; \\
2422 \IF $R' \ne r' P$ \THEN \RETURN; \\
2423 $C \gets C \cup \{R\}$; \\
2424 $Z \gets r R'$; \\
2425 $(K_0, K_1) \gets H_K(Z)$; \\
2426 $\chi \gets E_{K_0}(Y')$; \\
2427 \SEND $(\cookie{response}, R, \chi)$;
2428 \- \\[\medskipamount]
2429 Function $\id{msg-response}(\mu)$: \+ \\
2430 $(R', \chi') \gets \mu$; \\
2431 \IF $R' \notin C$ \THEN \RETURN; \\
2432 $Z \gets r R'$; \\
2433 $(K_0, K_1) \gets H_K(Z)$; \\
2434 $Y' \gets D_{K_0}(\chi')$; \\
2435 \IF $Y' \ne Y$ \THEN \RETURN; \\
2436 \OUTPUT $K_1$;
2437 \STOP;
2438 \end{program}
32609df3 2439
a6e375a6
MW
2440 \caption{Formalization of $\Wkx$}
2441 \label{fig:wkx-formal}
2442\end{figure}
2443
2444\subsubsection{Session states}
2445We must specify what the adversary obtains when it chooses to reveal a
2446session's state. Given the program in figure~\ref{fig:wkx-formal}, we can
2447see that the session state consists of the variables $(x, X, X', r, R, Y,
2448C)$.
2449
2450However, $x$ is the owning party's long-term secret, and it seems
2451unreasonable to disclose this to an adversary who stops short of total
2452corruption.
2453
2454The public keys $X$ and $X'$ are part of the adversary's input anyway, so
2455revealing them doesn't help. Similarly, the set $C$ of valid challenges
2456could have been computed easily by the adversary, since a group element $R'
2457\in C$ if and only if the session $S$ responded to some message
2458$(\cookie{challenge}, R', c')$.
2459
2460The value $R = r P$ is easily computed given $r$, and besides is sent in the
2461clear in the session's first message. The expected response $Y = r X'$ is
2462also easily computed from $r$. The converse is also true, since $r$ can be
2463recovered from $R$ and $c$ in the session's challenge message and the value
2464$Y$. Besides, $r$ is necessary for computing $Z$ in response to incoming
2465challenges.
2466
2467We conclude that the only `interesting' session state is $r$.
2468
2469\subsubsection{Security}
2470Having formally presented the protocol, we can now state our main theorem
32609df3
MW
2471about its security. The proof is given in \ifshort the full version of the
2472paper\else appendix~\ref{sec:sk-proof}\fi.
a6e375a6
MW
2473\begin{theorem}[SK-security of $\Wkx$]
2474 \label{thm:sk}
2475 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2476 encryption scheme. Then
2477 \begin{spliteqn*}
2478 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) \le
dff0fad2
MW
2479 2 q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
2480 \InSec{mcdh}(G; t', q_K) +
2481 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) +
32609df3 2482 \frac{n (n - 1)}{q} +
dff0fad2 2483 \frac{2 q_M}{2^{\ell_I}}.
a6e375a6
MW
2484 \end{spliteqn*}
2485 where $t' = t + O(n) + O(q_S) + O(q_M q_I) + O(q_K)$.
2486\end{theorem}
2487
2488
32609df3 2489\ifshort\else
a6e375a6
MW
2490\subsection{Insecure protocol variants}
2491\label{sec:kx-insecure}
2492
2493It's important to feed the session-id and verifier's public key to the random
2494oracle $H_I$ when constructing the check-value~$c$. Without these, the
2495protocol is vulnerable to attack. In this section, we consider some variants
2496of our protocol which hash less information, and show attacks against them.
2497
2498To simplify the presentation, we shall consider Alice and Bob, and a third
2499character Carol. We shall be attacking a pair of matching sessions $A$
2500and~$B$, run by Alice and Bob respectively. Let Alice and Bob's private keys
2501be $x_A$ and~$x_B$, and let their public keys be $X_A = x_A P$ and $X_B = x_B
2502P$ respectively.
2503
2504\subsubsection{Protocol diagram notation}
2505In order to keep the presentation as clear as possible, we use a simple
2506diagrammatic notation for describing protocol runs. A line of the form
2507\protocolrun{\textit{action} \ar[r] & \PRsession{S} & \textit{result}}
2508states that the adversary performs the given \textit{action} on session~$S$,
2509with the stated \textit{result}. The \textit{action} may be
2510\begin{itemize}
2511\item \textsf{Create session $(P_i, P_j, s)$}: the session is created,
2512 running in party~$P_i$, with partner~$P_j$ and session-id~$s$.
2513\item \textsf{Receive $\mu$}: the session is activated with an incoming message~$\mu$.
2514\item \textsf{Session-state reveal}: The adversary requests the session's
2515 internal state.
2516\end{itemize}
2517The \textit{result} may be
2518\begin{itemize}
2519\item \textsf{Send $\mu'$}: the session requests the delivery of the
2520 message~$\mu'$.
2521\item \textsf{Complete: $K$}: the session completes, outputting the key~$K$.
2522\item \textsf{State: $\sigma$}: the session's state is revealed to
2523 be~$\sigma$.
2524\item \textsf{(Ignore)}: the result of the action is unimportant.
2525\end{itemize}
2526
2527\subsubsection{Omitting the session-id}
2528Consider a protocol variant where session $S$ sets $h_S = H_I(X_N, R_S,
2529Y_S)$, where $N$ is the session's partner. That is, we've omitted the
2530session-id from the hash. An adversary can cross over two sessions between
2531Alice and Bob. Here's how.
2532
2533The attack assumes that Alice and Bob set up two pairs of matching sessions
2534with each other, thus.
2535\protocolrun{
2536 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2537 \PRsend{challenge}{R_A, c_A} \\
2538 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2539 \PRsend{challenge}{R_B, c_B} \\
2540 \PRcreate{Alice}{Bob}{s'} & \PRsession{A'} &
2541 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2542 \PRcreate{Bob}{Alice}{s'} & \PRsession{B'} &
2543 \PRsend{challenge}{R_{B'}, c_{B'}}
2544}
2545Observe that the session pairs use distinct session-ids $s \ne s'$, so this
2546is allowed. Now the adversary crosses over the challenges, using the second
2547pair of sessions to provide responses to the challenges issued by the first
2548pair. By revealing the state in the second pair of sessions, the adversary
2549can work out the (probably different) session keys accepted by the first
2550pair.
2551\protocolrun{
2552 \PRreceive{challenge}{R_B, c_B} & \PRsession{A'} &
2553 \PRsend{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} \\
2554 \PRreceive{challenge}{R_A, c_A} & \PRsession{B'} &
2555 \PRsend{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} \\
2556 \PRreceive{challenge}{R_{A'}, c_{A'}} & \PRsession{A} & \PRignore \\
2557 \PRreceive{challenge}{R_{B'}, c_{B'}} & \PRsession{B} & \PRignore \\
2558 \PRreveal & \PRsession{A'} & r_{A'} \\
2559 \PRreveal & \PRsession{B'} & r_{B'} \\
2560 \PRreceive{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} & \PRsession{A} &
2561 \PRcomplete{K_{AB',1}} \\
2562 \PRreceive{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} & \PRsession{B} &
2563 \PRcomplete{K_{BA',1}} \\
2564}
2565The adversary can now compute $K_{AB'} = H_K(r_{B'} R_A)$ and $K_{B'A} =
2566H_K(r_{A'} R_B)$. Safely in possession of both keys, the adversary can now
2567read and impersonate freely.
2568
2569\subsubsection{Omitting the partner's public key}
2570Now consider a protocol variant where session $S$ sets $h_S = H_I(s, R_S,
2571Y_S)$, where $s$ is the session-id.  An adversary can use sessions with
2572Carol to attack a session between Alice and Bob. Here's how the sessions are
2573set up.
2574\protocolrun{
2575 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2576 \PRsend{challenge}{R_A, c_A} \\
2577 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2578 \PRsend{challenge}{R_B, c_B} \\
2579 \PRcreate{Alice}{Carol}{s} & \PRsession{A'} &
2580 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2581 \PRcreate{Bob}{Carol}{s} & \PRsession{B'} &
2582 \PRsend{challenge}{R_{B'}, c_{B'}}
2583}
2584Although each of Alice and Bob have two sessions with session-id~$s$, this is
2585allowed, since they are with different partners. The rest of the attack in
2586fact proceeds identically to the previous case.
32609df3 2587\fi
a6e375a6
MW
2588
2589\subsection{Deniability}
2590\label{sec:denial}
2591
2592We have claimed that the Wrestlers key-exchange protocol is \emph{deniable}.
2593In this section, we define what we mean, explain the limits of the
2594deniability of the protocol as currently presented, fix the protocol with an
2595additional pass (which can be collapsed to a single message flow by breaking
2596the protocol's symmetry), and prove the deniability of the resulting
2597protocol.
2598
2599Our notion of deniability is taken from Di~Raimondo, Gennaro and Krawczyk
081da1b1
MW
2600\cite{raimondo-2006:deniable-authn-key-exchange}, except that, as usual, we
2601opt for a concrete security approach.
a6e375a6
MW
2602
2603\subsubsection{Discussion}
2604Our definition for deniability is that, for any adversary interacting with
2605participants in the protocol, a simulator exists which can compute the same
2606things as the adversary. In particular, since an adversary could output a
2607transcript of the interactions between itself and the parties, it would
2608follow that a simulator could do this too. If the simulator is effective,
2609its output is indistinguishable from that of the real adversary, and hence no
2610`judge' (distinguisher) should be persuaded by evidence presented by someone
2611who claims to have witnessed or participated in an interaction.
2612
2613We work again in the model described in~\ref{sec:um}. That is, our adversary
2614has complete control over the ordering and delivery of all messages. The
2615adversary is also able, at any time, to reveal the state of any session.
2616However, deniability is obviously impossible against an adversary who can
2617\emph{corrupt} other parties, since simulating such an adversary's actions
2618would necessarily require the simulator to compute private keys corresponding
2619to the known public keys, and this is (we believe) difficult, because an
2620efficient algorithm for doing this could easily attack our protocol, which we
2621already proved secure. Therefore, we forbid the adversary from corrupting
2622parties.
2623
2624In order to allow the adversary to participate in the protocol, rather than
2625merely observing it, we need to give it one or more private keys. We could
2626modify the initialization function \id{init} from figure~\ref{fig:wkx-formal}
2627to give the adversary a private key, but there's an easier way: we can just
2628give the adversary a number of private keys in its auxiliary input.
2629
2630\subsubsection{Definitions}
2631Let $\Pi$ be a key-exchange protocol, in the model described in
2632section~\ref{sec:um}. We use the simulation framework of
2633section~\ref{sec:sim}. We define the initialization function $I_\Pi$ to be
2634the initialization function of $\Pi$, as before, and the corresponding
2635world~$W_\Pi(\iota, \sigma, \tau, \mu)$ is a fairly straightforward mapping
2636of the adversary's possible actions to the simulation model:
2637\begin{itemize}
2638\item The invocation $\cookie{new-session}$ with $\mu = (i, j, s)$ creates a
2639 new session on party~$P_i$, with partner~$P_j$ and session-id~$s$. The
2640 reply $\rho = (\delta, m)$ is a \emph{decision} $\delta \in
2641 \{\cookie{continue}, \cookie{abort}, \cookie{complete}\}$ and an output
2642 message $m \in \Bin^* \cup \{\bot\}$. If $m \ne \bot$ then $m$ is a
2643 message to be sent to the matching session (if any).
2644\item The invocation $\cookie{deliver}$ with $\mu = (i, j, s, m)$ delivers
2645 message $m$ to the session $S = (P_i, P_j, s)$. The response $\rho$ is as
2646 for $\cookie{new-session}$ invocations.
2647\item The invocation $\cookie{reveal-session-state}$ with $\mu = (i, j, s)$
2648 reveals to the adversary the state of the running session $S = (P_i, P_j,
2649 s)$. The response $\rho$ is the session's state if $S$ is indeed a running
2650 session, or $\bot$ otherwise.
2651\item The invocation $\cookie{reveal-session-key}$ with $\mu = (i, j, s)$
2652 reveals to the adversary the session-key of the completed session~$S =
2653 (P_i, P_j, s)$. The response $\rho$ is the session key~$K$ if the session
2654 is indeed complete, or $\bot$ otherwise.
2655\end{itemize}
2656There are no invocations corresponding to the adversary actions of corrupting
2657parties (since deniability against a corrupting adversary is impossible, as
2658discussed earlier), or session expiry or challenging (since they are useless
2659in this context).
2660
2661We measure the deniability of a protocol~$\Pi$, using a given simulator~$S$,
2662by the insecurity function $\InSec{sim}(W_\Pi, I_\Pi, S; t_D, t_A,
2663\mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U})$ of
2664definition~\ref{def:sim}. The interaction bounds $\mathcal{R} = (q_S, q_M)$
2665we place on the adversary are on the number ($q_S$) of \cookie{new-session}
2666and ($q_M$) \cookie{deliver} invocations it makes.
2667
2668We shall (informally) say that a protocol~$\Pi$ is deniable if there is a
2669simulator~$S_\Pi$ for which the insecurity function is small for appropriate
2670resource bounds on the adversary and distinguisher.
2671
2672\subsubsection{The current protocol}
2673As it stands, $\Wkx$ isn't deniable, according to our definition, for
2674arbitrary auxiliary inputs. Let's see why.
2675
2676Suppose that Bob is an informant for the secret police, and wants to convince
2677a judge that Alice is involved in subversive activities in cyberspace.
2678Unfortunately, Bob's job is difficult, because of the strong zero-knowledge
2679nature of the Wrestlers identification protocol. However, Bob can work with
2680the judge to bring him the evidence necessary to convict Alice. Here's how.
2681
2682Alice's public key is $A$, and Bob's public key is $B$. The judge chooses
32609df3 2683some session-id $s$, and $r \inr \gf{q}$. He computes $R = r P$ and $c =
a6e375a6
MW
2684r \xor H_I(B, s, R, r A)$, and gives Bob the triple $(s, R, c)$, keeping $r$
2685secret. Bob can now persuade Alice to enter into a key-exchange with him,
2686with session-id $s$. He uses $(R, c)$ as his challenge message. When Alice
2687sends back her response $(R', \chi)$ (because Bob's challenge is correctly
2688formed), Bob aborts and shows $(R', \chi)$ to the judge. The judge knows $r$
2689and can therefore decrypt $\chi$ and check the response. If it's wrong, then
2690Bob was cheating and gets demoted -- he can't get the right answer by himself
2691because that would require him to impersonate Alice. If it's right, Alice is
2692really a subversive element and `disappears' in the night.
2693
2694We shall show in theorem~\ref{thm:denial} below that this is basically the
2695only attack against the deniability of the protocol. However, we can do
2696better.
2697
2698\subsubsection{Fixing deniability}
2699We can fix the protocol to remove even the attack discussed above. The
2700change is simple: we feed \emph{both} parties' challenges to the hash
2701function~$H_I$ rather than just the sender's. We use a five-argument hash
2702function (random oracle) $H_I\colon G^2 \times \Bin^{\ell_S} \times G^2 \to
2703\Bin^{\ell_I}$. We introduce a new message pass at the beginning of the
2704protocol: each session simply sends its challenge point $R = r P$ in the
2705clear as a `pre-challenge'. The actual challenge is $R$ and $c = r \xor
2706H_I(R', X, s, R, r X')$, where $R'$ is the challenge of the matching session.
2707
2708By breaking symmetry, we can reduce the communication complexity of this
2709variant to four messages. As before, we analyse the symmetrical version.
2710The extra flow might seem a high price to pay, but we shall see that it has
2711additional benefits beyond deniability.
2712
2713A summary of the new protocol is shown in figure~\ref{fig:wdkx}, and the
2714formal description is shown in figure~\ref{fig:wdkx-formal}.
2715
2716\begin{figure}
2717 \begin{description}
32609df3 2718 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2719 $H_I(\cdot, \cdot, \cdot, \cdot, \cdot)$ and $H_K(\cdot)$ are secure
2720 hashes. $\E = (\kappa, E, D)$ is an IND-CCA2 symmetric encryption
2721 scheme.
2722 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2723 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2724 \item[Public keys] $X_i = x_i P$.
2725 \end{description}
32609df3
MW
2726
2727 \begin{protocol}
2728 $r_i \getsr I$; $R_i \gets r_i P$; &
2729 $r_j \getsr I$; $R_j \gets r_j P$; \\
2730 \send{->}{R_i}
2731 \send{<-}{R_j}
2732 $c_i \gets r_i \xor H_I(R_j, X_i, s, R_i, r_i X_j)$; &
2733 $c_j \gets r_j \xor H_I(R_i, X_j, s, R_j, r_j X_i)$; \\
2734 \send{->}{(R_i, c_i)}
2735 \send{<-}{(R_j, c_j)}
2736 Check $R_j = \bigl(c_j \xor H_I(R_i, X_j, s, R_j, x_i R_j)\bigr) P$; &
2737 Check $R_i = \bigl(c_i \xor H_I(R_j, X_i, s, R_i, x_j R_i)\bigr) P$; \\
2738 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2739 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2740 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2741 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2742 \send{->}{(R_i, \chi_i)}
2743 \send{<-}{(R_j, \chi_j)}
2744 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2745 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2746 Shared key is $K_1$. & Shared key is $K_1$.
2747 \end{protocol}
a6e375a6
MW
2748
2749 \caption{Summary of the Deniable Wrestlers Key Exchange protocol, $\Wdkx$}
2750 \label{fig:wdkx}
2751\end{figure}
2752
2753\begin{figure}
2754 \begin{program}
2755 Function $\id{init}(n)$: \+ \\
2756 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2757 $x \getsr \gf{q}$; \\
a6e375a6
MW
2758 $\mathbf{i}[i] \gets x$; \\
2759 $\mathbf{p}[i] \gets x P$; \- \\
2760 \RETURN $(\mathbf{p}, \mathbf{i})$;
2761 \- \\[\medskipamount]
2762 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2763 (\mathbf{p}, x, i, j, s)$: \+ \\
2764 $X \gets \mathbf{p}[i]$;
2765 $X' \gets \mathbf{p}[j]$;
2766 $C \gets \emptyset$; \\
32609df3 2767 $r \getsr \gf{q}$;
a6e375a6
MW
2768 $R \gets r P$;
2769 $Y \gets r X'$; \\
2770 \SEND $(\cookie{pre-challenge}, R)$;
2771 \- \\[\medskipamount]
2772 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2773 (\tau, \mu)$: \+ \\
2774 \IF $\tau = \cookie{pre-challenge}$ \THEN
2775 $\id{msg-pre-challenge}(\mu)$; \\
2776 \ELSE \IF $\tau = \cookie{challenge}$ \THEN
2777 $\id{msg-challenge}(\mu)$; \\
2778 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2779 \next
2780 Function $\id{msg-pre-challenge}(\mu)$: \+ \\
2781 $R' \gets \mu$; \\
2782 $h \gets H_I(R', X, s, R, Y)$; \\
2783 $c \gets r \xor h$; \\
2784 \SEND $(\cookie{challenge}, R, c)$;
2785 \- \\[\medskipamount]
2786 Function $\id{msg-challenge}(\mu)$: \+ \\
2787 $(R', c') \gets \mu$; \\
2788 $Y' \gets x R'$; \\
2789 $h' \gets H_I(R, X', s, R', Y')$; \\
2790 $r' \gets c' \xor h'$; \\
2791 \IF $R' \ne r' P$ \THEN \RETURN; \\
2792 $C \gets C \cup \{R\}$; \\
2793 $Z \gets r R'$; \\
2794 $(K_0, K_1) \gets H_K(Z)$; \\
2795 $\chi \gets E_{K_0}(Y')$; \\
2796 \SEND $(\cookie{response}, R, \chi)$;
2797 \- \\[\medskipamount]
2798 Function $\id{msg-response}(\mu)$: \+ \\
2799 $(R', \chi') \gets \mu$; \\
2800 \IF $R' \notin C$ \THEN \RETURN; \\
2801 $Z \gets r R'$; \\
2802 $(K_0, K_1) \gets H_K(Z)$; \\
2803 $Y' \gets D_{K_0}(\chi')$; \\
2804 \IF $Y' \ne Y$ \THEN \RETURN; \\
2805 \OUTPUT $K_1$;
2806 \STOP;
2807 \end{program}
32609df3 2808
a6e375a6
MW
2809 \caption{Deniable key-exchange: formalization of $\Wdkx$}
2810 \label{fig:wdkx-formal}
2811\end{figure}
2812
2813The security of this variant is given by the following theorem, whose proof
32609df3
MW
2814is \ifshort given in the full version of this paper\else in
2815appendix~\ref{sec:sk2-proof}\fi.
a6e375a6
MW
2816\begin{theorem}[SK-security of $\Wdkx$]
2817 \label{thm:sk2}
2818 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2819 encryption scheme. Then
2820 \[ \InSec{sk}(\Wdkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) =
2821 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K)
2822 \]
2823\end{theorem}
2824
2825\subsubsection{Deniability of the Wrestlers protocols}
2826In order to quantify the level of deniability our protocols provide, we shall
2827impose a limit on the auxiliary input to the adversary. In particular, we
2828shall use $\mathcal{U}$ of definition~\ref{def:sim} to count the number of
2829\emph{challenges} in the auxiliary input. That is, $\mathcal{U} = n_C$ is
2830the number of tuples $(i, j, s, R', R, c)$ for which there is an $r$ such
2831that $R = r P$ and $c = r \xor H_I(R', X_j, s, R, r X_i)$ (or without the
2832$R'$ for $\Wkx$).
2833
2834With this restriction in place, we can state the following theorem about the
2835deniability of our protocols.
2836\begin{theorem}[Deniability of $\Wkx$ and $\Wdkx$]
2837 \label{thm:denial}
2838 There exist simulators $S_\Wkx$ and $S_\Wdkx$ such that
2839 \[ \InSec{sim}(W_{\Wkx^{G, \E}}, I_{\Wkx^{G, \E}}, S_{\Wkx^{G, \E}};
2840 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), 0) \le
2841 \frac{q_M}{2^{\ell_I}}
2842 \]
2843 and
32609df3
MW
2844 \iffancystyle\[\else\begin{spliteqn*}\fi
2845 \InSec{sim}(W_{\Wdkx^{G, \E}}, I_{\Wdkx^{G, \E}}, S_{\Wdkx^{G, \E}};
2846 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), n_C) \le
2847 \iffancystyle\else\\\fi
2848 \frac{n_C q_S}{\#G} +
a6e375a6 2849 \frac{q_M}{2^{\ell_I}}.
32609df3 2850 \iffancystyle\]\else\end{spliteqn*}\fi
a6e375a6
MW
2851 The running time of the simulators is $O(t_A) + O(\mathcal{Q}_A q_M)$.
2852\end{theorem}
32609df3
MW
2853\begin{longproof}{The proof of this theorem can be found in the full version
2854 of this paper.}
a6e375a6
MW
2855 The simulators $S_\Wkx$ and $S_\Wdkx$ are very similar. We describe both
2856 here. Both are fake-world simulators, working as follows.
2857 \begin{enumerate}
2858 \item Initially, it constructs simulated parties $P_i$, for $0 \le i < n$,
2859 giving each the public key $X_i$ from the common input.
2860 \item Suppose the adversary requests creation of a new session $S = (P_i,
2861 P_j, s)$. Then the simulator creates a new session, including a random
32609df3 2862 value $r_S \inr \gf{q}$, and computes $R_S = r_S P$, and $Y_S = r_S
a6e375a6
MW
2863 X_j$. For $\Wdkx$, it sends the message $(\cookie{pre-challenge}, R_S)$;
2864 for $\Wkx$, it additionally computes $h = H_I(X_i, s, R_S, Y_S)$ and
2865 sends $(\cookie{challenge}, R_S, r_S \xor h)$.
2866 \item Suppose, for $\Wdkx$, the adversary sends a message
2867 $(\cookie{pre-challenge}, R')$ to a session~$S = (P_i, P_j, s)$. The
2868 simulator computes $h = H_I(R', X_i, s, R_S, Y_S)$, and sends
2869 $(\cookie{challenge}, R_S, r_S \xor h)$.
2870 \item Suppose the adversary sends a message $(\cookie{challenge}, R', c')$
2871 to session $S = (P_i, P_j, s)$. The simulator doesn't know $x_i$.
2872 \begin{enumerate}
2873 \item If $R' = R_T$ for some other simulated session $T$, then the
2874 simulator knows $r_T$ such that $R_T = r_T P$. Let $Y' = r_T X_i$.
2875 The simulator computes $h = H_I(X_j, s, R', Y')$ (resp.\ $h = H_I(R_S,
2876 X_j, s, R', Y')$) for $\Wkx$ (resp.\ $\Wdkx$) and checks that $r_T = c'
2877 \xor h$. If not, the simulator discards the message. Otherwise, it
2878 computes $(K_0, K_1) = H_K(r_S R')$, and sends the message
2879 $(\cookie{response}, R, E_{K_0}(Y'))$.
2880 \item \label{en:simextract} Otherwise the simulator runs the extractor
2881 $T_\Wident$ on the adversary's history of queries $H_I(X_j, s, R',
2882 \cdot)$ (resp.\ $H_I(R_S, X_j, s, R', \cdot)$) for $\Wkx$ (resp.\
2883 $\Wdkx$). The extractor returns $(r', Y')$. If $Y' = \bot$ then the
2884 simulator ignores the message. Otherwise, the simulator computes
2885 $(K_0, K_1) = H_K(r_S R')$ and sends back $(\cookie{response}, R,
2886 E_{K_0}(Y'))$.
2887 \end{enumerate}
2888 \item Suppose the adversary sends a message $(\cookie{response}, R', \chi)$
2889 to session $S = (P_i, P_j, s)$. The simulator computes $(K_0, K_1) =
2890 H_K(r_S R')$, and decrypts $Y' = D_{K_0}(\chi)$. If $Y' \ne Y_S$ then
2891 the simulator discards the message. Otherwise, it makes the simulated
2892 session complete, and outputs key $K_1$.
2893 \item Finally, if the adversary reveals a session's state, the simulator
2894 reveals $r_S$ as required; if the adversary reveals a session-key, the
2895 simulator reveals the $K_1$ it output.
2896 \end{enumerate}
2897 The only point where the simulation fails to be perfect is in
2898 \ref{en:simextract}. Let $R'$ and $c'$ be the values from an incoming
2899 challenge message to session $S = (P_i, P_j, s)$. Let $r'$ be such that
2900 $R' = r' P$ and let $Y' = r' X_i$. If a random-oracle query $H_I(X_j, s,
2901 R', Y')$ (or $H_I(R_S, X_j, s, R', Y')$ for $\Wdkx$) has been issued, then
2902 there are a number of possibilities. Let $h'$ be the result of this query.
2903 \begin{itemize}
2904 \item The adversary made this query. Then the extractor will find it and
2905 return $Y'$ if $c' = h' \xor r'$, or $\bot$ otherwise.
2906 \item Some simulated session $U = (P_{i'}, P_{j'}, s')$ made this query.
2907 But simulated sessions only make $H_I$-queries when constructing
2908 challenges, so $R' = R_U$ for some session~$U$. But the simulator does
2909 something different in that case.
2910 \item In $\Wdkx$, the quadruple $(s, R_S, R', c')$ came from the
2911 adversary's auxiliary input. In this case the simulator must fail. But
2912 $R_S = r_S P$, and $r_S$ was chosen uniformly at random. If there are at
2913 most $n_C$ challenge sets in the auxiliary input then this happens with
32609df3 2914 probability at most $n_C/\#G$ for any given session.
a6e375a6
MW
2915 \end{itemize}
2916 We conclude that the simulator fails with probability
32609df3 2917 \[ \frac{q_M}{2^{\ell_I}} + \frac{q_S n_C}{\#G}. \]
a6e375a6
MW
2918 (Note that we only consider $n_C = 0$ for $\Wkx$.) No adversary can
2919 distinguish the simulator from a real interaction unless the simulator
2920 fails, and the simulator is a fake-world simulator. We therefore apply
2921 proposition~\ref{prop:fakesim}; the theorem follows.
32609df3 2922\end{longproof}
a6e375a6 2923
32609df3 2924\ifshort\else
a6e375a6
MW
2925\subsection{Practical issues}
2926\label{sec:practice}
2927
2928\subsubsection{Denial of service from spoofers}
2929The adversary we considered in~\ref{sec:um} is very powerful. Proving
2930security against such a powerful adversary is good and useful. However,
2931there are other useful security properties we might like against weaker
2932adversaries.
2933
2934Eavesdropping on the Internet is actually nontrivial. One needs control of
2935one of the intermediate routers between two communicating parties. (There
2936are tricks one can play involving subversion of DNS servers, but this is also
2937nontrivial.) However, injecting packets with bogus source addresses is very
2938easy.
2939
4331bf84 2940Layering the protocol over TCP \cite{rfc793} ameliorates this problem because
a6e375a6
MW
2941an adversary needs to guess or eavesdrop in order to obtain the correct
2942sequence numbers for a spoofed packet; but the Wrestlers protocol is
2943sufficiently simple that we'd like to be able to run it over UDP
4331bf84 2944\cite{rfc768}, for which spoofing is trivial.
a6e375a6
MW
2945
2946Therefore, it's possible for anyone on the 'net to send Alice a spurious
2947challenge message $(R, c)$. She will then compute $Y = a R$, recover $r' = c
2948\xor H_I(\ldots, R, Y)$, check that $R = r' P$, and so on. That's at least two
2949scalar multiplications to respond to a spoofed packet, and even with very
2950efficient group operations, coping with this kind of simple denial-of-service
2951attack might be difficult.
2952
2953A straightforward solution is to use the Deniable variant of the protocol,
2954and require a challenge to quote its matching session's challenge $R'$ in its
2955challenge. That is, upon receiving a $(\cookie{pre-challenge}, R')$, the
2956session sends $(\cookie{challenge}, R', R, c)$. Alice then rejects any
2957\emph{incoming} challenge message which doesn't quote her current challenge
2958value. Now only eavesdroppers can force her to perform expensive
2959computations.
2960
2961Indeed, one need not quote the entire challenge $R'$: it suffices to send
2962some short hard-to-guess hash of it, maybe just the bottom 128 bits or so.
2963
2964This can't reduce security. Consider any adversary attacking this protocol
2965variant. We can construct an adversary which attacks the original protocol
2966just as efficiently. The new adversary attaches fake $R'$ values to
2967challenges output by other parties, and strips them off on delivery,
2968discarding messages with incorrect $R'$ values.
2969
a6e375a6
MW
2970\subsubsection{Key confirmation}
2971Consider an application which uses the Wrestlers protocol to re-exchange keys
2972periodically. The application can be willing to \emph{receive} incoming
2973messages using the new key as soon as the key exchange completes
2974successfully; however, it should refrain from \emph{sending} messages under
2975the new key until it knows that its partner has also completed. The partner
2976may not have received the final response message, and therefore be unwilling
2977to accept a new key; it will therefore (presumably) reject incoming messages
2978under this new key.
2979
2980While key confirmation is unnecessary for \emph{security}, it has
2981\emph{practical} value, since it solves the above problem. If the
2982application sends a \cookie{switch} message when it `completes', it can
2983signal its partner that it is indeed ready to accept messages under the new
dff0fad2
MW
2984key. Our implementation sends $(\cookie{switch-rq}, E_{K_0}(H_S(0, R, R')))$
2985as its switch message; the exact contents aren't important. Our
2986retransmission policy (below) makes use of an additional message
2987\cookie{switch-ok}, which can be defined similarly.
2988
2989It's not hard to show that this doesn't adversely affect the security of the
2990protocol, since the encrypted message is computed only from public values.
2991In the security proof, we modify the generation of \cookie{response}
2992messages, so that the plaintexts are a constant string rather than the true
2993responses, guaranteeing that the messages give no information about the
2994actual response. To show this is unlikely to matter, we present an adversary
2995attacking the encryption scheme by encrypting either genuine responses or
2996fake constant strings. Since the adversary can't distinguish which is being
2997encrypted (by the definition of IND-CCA security,
2998definition~\ref{def:ind-cca}), the change goes unnoticed. In order to
2999incorporate our switch messages, we need only modify this adversary to
3000implement the modified protocol. This is certainly possible, since the
3001messages contain (hashes of) public values. We omit the details.
3002
d8139f2c
MW
3003However, while the extra message doesn't affect the security of our protocol,
3004it would be annoying if an adversary could forge the switch request message,
3005since this would be a denial of service. In the strong adversarial model,
3006this doesn't matter, since the adversary can deny service anyway, but it's a
3007concern against less powerful adversaries. Most IND-CCA symmetric encryption
081da1b1
MW
3008schemes also provide integrity of plaintexts
3009\cite{bellare-namprempre-2000:authn-enc-notions} (e.g., the
3010encrypt-then-MAC generic composition approach
3011\cite{bellare-namprempre-2000:authn-enc-notions,%
3012 krawczyk-2001:order-enc-authn},
3013and the authenticated-encryption modes of
3014\cite{rogaway-2001:ocb,bellare-2004:eax,%
3015 mcgrew-viega-2004:gcm-security-performance}),
3016so this isn't a great imposition.
d8139f2c 3017
dff0fad2
MW
3018\subsubsection{Optimization and piggybacking}
3019We can optimize the number of messages sent by combining them. Here's one
3020possible collection of combined messages:
3021\begin{description}
3022\item [\cookie{pre-challenge}] $R$
3023\item [\cookie{challenge}] $R'$, $R$, $c = H_I(R', X, s, R, r X') \xor r$
3024\item [\cookie{response}] $R'$, $R$, $c$, $E_{K_0}(x R')$
3025\item [\cookie{switch}] $R'$, $E_{K_0}(x R', H_S(0, R, R'))$
3026\item [\cookie{switch-ok}] $R'$, $E_{K_0}(H_S(1, R, R'))$
3027\end{description}
3028The combination is safe:
3029\begin{itemize}
3030\item the \cookie{switch} and \cookie{switch-ok} messages are safe by the
3031 argument above; and
3032\item the other recombinations can be performed and undone in a `black box'
3033 way, by an appropriately defined SK-security adversary.
3034\end{itemize}
a6e375a6
MW
3035
3036\subsubsection{Unreliable transports}
4331bf84 3037The Internet UDP \cite{rfc768} is a simple, unreliable protocol for
a6e375a6
MW
3038transmitting datagrams. However, it's very efficient, and rather attractive
3039as a transport for datagram-based applications such as virtual private
dff0fad2
MW
3040networks (VPNs). Since UDP is a best-effort rather than a reliable
3041transport, it can occasionally drop packets. Therefore it is necessary for a
3042UDP application to be able to retransmit messages which appear to have been
3043lost.
a6e375a6 3044
dff0fad2
MW
3045We recommend the following simple retransmission policy for running the
3046Wrestlers protocol over UDP.
3047\begin{itemize}
3048\item Initially, send out the \cookie{pre-challenge} message every minute.
3049\item On receipt of a \cookie{pre-challenge} message, send the corresponding
3050 full \cookie{challenge}, but don't retain any state.
3051\item On receipt of a (valid) \cookie{challenge}, record the challenge value
3052 $R'$ in a table, together with $K = (K_0, K_1)$ and the response $Y' = x
3053 R'$. If the table is full, overwrite an existing entry at random. Send
3054 the corresponding \cookie{response} message, and retransmit it every ten
3055 seconds or so.
3056\item On receipt of a (valid) \cookie{response}, discard any other
3057 challenges, and stop sending \cookie{pre-challenge} and \cookie{response}
3058 retransmits. At this point, the basic protocol described above would
3059 \emph{accept}, so the key $K_1$ is known to be good. Send the
3060 \cookie{switch} message, including its response to the (now known-good)
3061 sender's challenge.
3062\item On receipt of a (valid) \cookie{switch}, send back a \cookie{switch-ok}
3063 message and stop retransmissions. It is now safe to start sending messages
3064 under $K_1$.
3065\item On receipt of a (valid) \cookie{switch-ok}, stop retransmissions. It
3066 is now safe to start sending messages under $K_1$.
3067\end{itemize}
a6e375a6
MW
3068
3069\subsubsection{Key reuse}
dff0fad2
MW
3070Suppose our symmetric encryption scheme $\E$ is not only IND-CCA secure
3071(definition~\ref{def:ind-cca}) but also provides integrity of plaintexts
081da1b1
MW
3072\cite{bellare-namprempre-2000:authn-enc-notions} (or, alternatively,
3073is an AEAD scheme \cite{rogaway-2002:aead}). Then we can use it to construct
3074a secure channel, by including message type and sequence number fields in the
3075plaintexts, along with the message body. If we do this, we can actually get
3076away with just the one key $K = H_K(Z)$ rather than both $K_0$ and $K_1$.
dff0fad2
MW
3077
3078To do this, it is essential that the plaintext messages (or additional data)
3079clearly distinguish between the messages sent as part of the key-exchange
3080protocol and messages sent over the `secure channel'. Otherwise, there is a
d8139f2c
MW
3081protocol-interference attack: an adversary can replay key-exchange
3082ciphertexts to insert the corresponding plaintexts into the channel.
dff0fad2
MW
3083
3084We offer a sketch proof of this claim in appendix~\ref{sec:sc-proof}.
32609df3 3085\fi
dff0fad2
MW
3086
3087%%%--------------------------------------------------------------------------
a6e375a6 3088
dff0fad2
MW
3089\section{Conclusions}
3090\label{sec:conc}
3091
3092We have presented new protocols for identification and authenticated
3093key-exchange, and proven their security. We have shown them to be efficient
3094and simple. We have also shown that our key-exchange protocol is deniable.
3095Finally, we have shown how to modify the key-exchange protocol for practical
3096use, and proven that this modification is still secure.
a6e375a6
MW
3097
3098%%%--------------------------------------------------------------------------
3099
3100\section{Acknowledgements}
3101
3102The Wrestlers Protocol is named after the Wrestlers pub in Cambridge where
3103Clive Jones and I worked out the initial design.
3104
3105%%%--------------------------------------------------------------------------
3106
081da1b1 3107\bibliography{mdw-crypto}
a6e375a6
MW
3108
3109%%%--------------------------------------------------------------------------
3110
32609df3 3111\ifshort\def\next{\end{document}}\expandafter\next\fi
a6e375a6
MW
3112\appendix
3113\section{Proofs}
3114
3115\subsection{Proof of theorem~\ref{thm:sk}}
3116\label{sec:sk-proof}
3117
3118Before we embark on the proof proper, let us settle on some notation. Let
3119$P_i$ be a party. Then we write $x_i$ for $P_i$'s private key and $X_i = x_i
3120P$ is $P_i$'s public key. Let $S = (P_i, P_j, s)$ be a session. We write
3121$r_S$ for the random value chosen at the start of the session, and $R_S$,
3122$c_S$ etc.\ are the corresponding derived values in that session.
3123
a6e375a6
MW
3124The proof uses a sequence of games. For each game~$\G{i}$, let $V_i$ be the
3125event that some pair of unexposed, matching sessions both complete but output
3126different keys, and let $W_i$ be the event that the adversary's final output
3127equals the game's hidden bit~$b^*$. To save on repetition, let us write
3128\[ \diff{i}{j} = \max(|\Pr[V_i] - \Pr[V_j]|, |\Pr[W_i] - \Pr[W_j]|). \]
3129Obviously,
3130\[ \diff{i}{j} \le \sum_{i\le k<j} \diff{k}{k + 1}. \]
3131
3132Here's a quick overview of the games we use.
3133\begin{itemize}
3134\item $\G0$ is the original SK-security game.
3135\item In $\G1$, we abort the game unless all parties' public keys are
3136 distinct. Since keys are generated at random, parties are unlikely to be
3137 given the same key by accident.
3138\item In $\G2$, we change the way sessions respond to challenge messages, by
3139 using the extractor to fake up answers to most challenges. Since the
3140 extractor is good, the adversary is unlikely to notice.
3141\item In $\G3$, we abort the game if the adversary ever queries $H_K(\cdot)$
3142 on the Diffie-Hellman secret $r_S r_T P$ shared between two unexposed
3143 matching sessions. We show that this is unlikely to happen if the
3144 Diffie-Hellman problem is hard.
3145\item In $\G4$, we abort the game if any unexposed session \emph{accepts} a
3146 response message which wasn't sent by a matching session.
3147\end{itemize}
3148Finally, we show that the adversary has no advantage in $\G4$. The theorem
3149follows.
3150
3151For ease of comprehension, we postpone the detailed proofs of some of the
3152steps until after we conclude the main proof.
3153
3154Let $A$ be a given adversary which runs in time~$t$, creates at most~$q_S$
3155sessions, delivers at most~$q_M$ messages, and makes at most~$q_I$ queries to
3156its $H_I(\cdot, \cdot, \cdot, \cdot)$ oracle and at most~$q_K$ queries to its
3157$H_K(\cdot)$ oracle. Let $\G0$ be the original SK-security game of
3158definition~\ref{def:sk}, played with adversary~$A$.
3159
3160Game~$\G1$ is the same as game~$\G0$ except, if the initialization function
3161reports two parties as having the same public key (i.e., we have $X_i =
3162X_j$ where $0 \le i < j < n$), we stop the game immediately and without
3163crediting the adversary with a win. This only happens when the corresponding
3164private keys are equal, i.e., $x_i = x_j$, and since the initialization
3165function chooses private keys uniformly at random, this happens with
32609df3 3166probability at most $\binom{n}{2}/\#G$. Since if this doesn't happen, the
a6e375a6
MW
3167game is identical to $\G0$, we can apply lemma~\ref{lem:shoup}, and see that
3168\begin{equation}
3169 \label{eq:sk-g0-g1}
32609df3 3170 \diff{0}{1} \le \frac{1}{\#G} \binom{n}{2} = \frac{n (n - 1)}{2 \#G}.
a6e375a6
MW
3171\end{equation}
3172In game~$\G1$ and onwards, we can assume that public keys for distinct
dff0fad2
MW
3173parties are themselves distinct. Note that the game now takes at most
3174$O(q_I)$ times longer to process each message delivered by the adversary.
3175This is where the $O(q_I q_M)$ term comes from in the theorem statement.
a6e375a6
MW
3176
3177Game~$\G2$ is the same as game~$\G1$, except that we change the way that we
3178make parties respond to \cookie{challenge} messages $(\cookie{challenge}, R,
3179c)$. Specifically, suppose that $S = (P_i, P_j, s)$ is a session.
3180\begin{itemize}
3181\item Suppose $T = (P_j, P_i, s)$ is the matching session of $S$. The game
3182 proceeds as before if $(R, c) = (R_T, c_T)$ is the challenge issued by $T$.
3183\item Otherwise, we run the extractor $T_\Wident$ on the adversary's history
3184 so far of oracle queries $H_I(X_i, s, R, \cdot)$ to determine a pair $(r,
3185 Y)$. If $r = \bot$ then we discard the message. Otherwise, we add $R$ to
3186 the list~$C$, and return a fake response to the adversary by computing $K =
3187 H_K(r R_S)$ and handing the adversary $(\cookie{response}, R_S, E_K(Y))$.
3188\end{itemize}
3189The following lemma shows how this affects the adversary's probabilities of
3190winning.
3191\begin{lemma}
3192 \label{lem:sk-g1-g2}
3193 \begin{equation}
3194 \label{eq:sk-g1-g2}
3195 \diff{1}{2} \le \frac{q_M}{2^{\ell_I}}.
3196 \end{equation}
3197\end{lemma}
3198
3199Let us say that a session $S = (P_i, P_j, s)$ is \emph{ripe} if
3200\begin{itemize}
3201\item there is a matching session $T = (P_j, P_i, s)$, and
3202\item $S$ is unexposed.
3203\end{itemize}
3204Suppose that $S$ is a ripe session, and that it has a matching session~$T$:
3205let $Z_S = Z_T = r_S r_T P$.
3206
3207Game~$\G3$ is the same as $\G2$, except that the game is immediately aborted
3208if ever the adversary queries its random oracle $H_K(\cdot)$ at a value $Z_S$
3209for any ripe session~$S$. The following lemma shows how this affects the
3210adversary's probabilities of winning.
3211\begin{lemma}
3212 \label{lem:sk-g2-g3}
dff0fad2 3213 For some $t'$ within the bounds given in the theorem statement we have
a6e375a6
MW
3214 \begin{equation}
3215 \label{eq:sk-g2-g3}
3216 \diff{2}{3} \le q_S \InSec{mcdh}(G; t', q_K).
3217 \end{equation}
3218\end{lemma}
3219
3220Game~$\G4$ is the same as $\G3$ except that the game is immediately aborted
3221if ever the adversary sends a response message to a ripe session~$S$ which
3222wasn't output by its matching session as a response to $S$'s challenge, with
3223the result that $S$ completes.
3224
3225Let's make this more precise.  Let $U$ and $T$ be a pair of matching
3226sessions.  Let $C_U = (\cookie{challenge}, R_U, c_U)$ be the challenge message
3227sent by $U$.  Let $M_T$ be the set of messages which $T$ has sent upon
3228delivery of $C_U$. Then, in $\G4$, we abort the game if, for any pair $S$
3229and~$T$ of matching, unexposed sessions, $S$ has completed as a result of
3230being sent a message $\mu \notin M_T$. We have the following lemma.
3231\begin{lemma}
3232 \label{lem:sk-g3-g4}
dff0fad2 3233 For a $t'$ within the stated bounds, we have
a6e375a6
MW
3234 \begin{equation}
3235 \label{eq:sk-g3-g4}
3236 \diff{3}{4} \le q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) +
3237 n \cdot \InSec{mcdh}(G; t', q_M + q_I) \bigr)
3238 \end{equation}
3239\end{lemma}
3240
3241Finally, let us consider the state we're in with $\G4$.
3242\begin{itemize}
3243\item No ripe session completes except as a result of the adversary
3244  faithfully delivering messages between it and its matching session.
3245\item The adversary never queries $Z_S$ for any ripe session~$S$. If we set
3246 $K_S = (K_{S, 0}, K_{S, 1}) = H_K(Z_S)$, then $K_{S, 1}$ is the key output
3247 by $S$ when it completes.
3248\item If $S$ and $T$ are matching ripe sessions, then $K_S = K_T$, since $Z_S
3249 = r_S R_T = r_T R_S = Z_T$.
3250\item For any ripe session~$S$, $K_{S, 1}$ is uniformly distributed in
3251 $\Bin^\kappa$ and independent of the adversary's view.
3252\item If $S = (P_i, P_j, s)$ and $T = (P_j, P_i, s)$ are matching ripe
3253  sessions, then $Z_S$ depends only on $r_S$ and $r_T$.  Hence, once $S$ and~$T$
3254 complete, and erase their states, $Z_S$ is independent of everything except
3255 the messages sent between the two sessions. In particular, $Z_S$ is
3256 independent of the long-term secrets $x_i$ and~$x_j$, so if either player
3257 is later corrupted, the key $K_{S, 1}$ remains independent of the
3258 adversary's view.
3259\item Hence, the keys output by unexposed sessions are indistinguishable from
3260 freshly-generated random strings, and remain so indefinitely.
3261\end{itemize}
3262We conclude that, for any adversary $A$,
3263\begin{equation}
3264 \label{eq:sk-g4}
3265 \Pr[V_4] = 0 \qquad \text{and} \qquad \Pr[W_4] = \frac{1}{2}.
3266\end{equation}
3267Putting equations~\ref{eq:sk-g0-g1}--\ref{eq:sk-g4} together, we find
32609df3 3268\begingroup \splitright=4em minus 4em
a6e375a6 3269\begin{spliteqn}
dff0fad2
MW
3270 \Adv{sk}{\Wident^{G, \E}}(A) \le
3271 2 q_S \bigl(\InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
3272 \InSec{mcdh}(G; t', q_K) +
3273 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) + {}
32609df3 3274 \frac{n (n - 1)}{\#G} +
a6e375a6 3275 \frac{2 q_M}{2^{\ell_I}}.
32609df3 3276\end{spliteqn} \endgroup
a6e375a6
MW
3277The theorem follows, since $A$ was chosen arbitrarily.
3278
3279
3280\begin{proof}[Proof of lemma~\ref{lem:sk-g1-g2}]
3281 The two games $\G1$ and~$\G2$ differ only in whether they accept or reject
3282 particular challenge messages $(\cookie{challenge}, R, c)$.
3283
3284 We claim firstly that no message is \emph{accepted} by $\G2$ which would
3285 have been rejected by $\G1$. To prove the claim, it is sufficient to note
3286 that the extractor's output, if not $\bot$, is always correct, and hence if
3287 $\G2$ accepts a message then $\G1$ would have done so too.
3288
3289 Since $\G2$ also behaves identically when the adversary submits to~$S$ the
3290 challenge from the matching session~$T$, we have nothing to prove in this
3291 case. Let $F$ be the event that the adversary submits a message
3292 $(\cookie{challenge}, R, c)$ to a session~$S$ which $S$ would have accepted
3293 in $\G1$ but would be rejected by the new rule in~$\G2$. By
3294 lemma~\ref{lem:shoup} we have $\diff{1}{2} \le \Pr[F]$. To prove the
3295 current lemma, therefore, we must show that $\Pr[F] \le q_M/2^{\ell_I}$.
3296
3297 Rather than consider individual challenge messages, we consider
3298 \emph{classes} of messages. We shall refer to a quadruple~$\Cid = (i, j,
3299 s, R)$ as a \emph{class-id}, and define some useful functions:
3300 \begin{itemize}
3301 \item the class's \emph{session} $\Csession(\Cid) = (P_i, P_j, s)$;
3302 \item the class's \emph{index} $\Cindex(\Cid)$ is $r \in I$ where $R = r
3303 P$, which is well-defined by lemma~\ref{lem:unique-dl};
3304 \item the class's \emph{query} $\Cquery(\Cid) = (X_j, s, R, x_i R)$;
3305 \item the class's \emph{hash} $\Chash(\Cid) = H_I(\Cquery(\Cid)) = H_I(X_j,
3306 s, R, x_i R)$;
3307 \item the class's \emph{check-value} $\Ccheck(\Cid) = \Chash(\Cid) \xor
3308 \Cindex(\Cid)$;
3309 \item the class's \emph{check-set} $\Ccheckset(\Cid)$ is the set of
3310 check-values $c$ such that a message $(\cookie{challenge}, R, c)$ was
3311 sent to session $S = (P_i, P_j, s)$; and
3312 \item the class's \emph{count} $\Ccount(\Cid) = |\Ccheckset(\Cid)|$.
3313 \end{itemize}
3314
3315 Consider any class-id~$\Cid = (i, j, s, R)$. A natural question which
3316 arises is: which participants have issued $\Cid$'s query, i.e., queried
3317 $H_I$ at $\Cquery(\Cid)$?
3318
3319 We can characterise the $H_I(\cdot, \cdot, \cdot, \cdot)$ queries of a
3320 session $U = (P_{i'}, P_{j'}, s')$ as follows:
3321 \begin{itemize}
3322 \item computing the check-value for the challenge $R_U$ by querying
3323 $H_I(X_{i'}, s', R_U, r_U X_{j'})$, and
3324 \item checking an incoming challenge~$R'$ by querying $H_I(X_{j'}, s', R',
3325 x_{i'} R')$.
3326 \end{itemize}
3327 The class~$\Cid$'s query $\Cquery(\Cid)$ is $U$'s check-value query if
3328 \[ (j, i, s, R) = (i', j', s', R_U) \]
3329 i.e., $U$ is the matching session of $\Csession(\Cid)$, and moreover $R =
3330 R_U$ is the challenge value issued by $U$. For any $c \in
3331 \Ccheckset(\Cid)$, if $c = \Ccheck(\Cid)$ then $(\cookie{challenge}, R, c)$
3332 is precisely the challenge message issued by~$U$ to $\Csession(\Cid)$; the
3333 rules for handling this message didn't change. However, if $c \ne
3334 \Ccheck(\Cid)$ then the message would have been rejected in $\G1$, and we
3335 have already shown that $\G2$ continues to reject all messages rejected by
3336 $\G1$.
3337
3338 Let us say that a class-id~$\Cid = (i, j, s, R)$ is \emph{bad} if
3339 \begin{enumerate}
3340 \item the value $R$ is not the challenge issued by $\Csession(\Cid)$'s
3341 matching session, and
3342 \item the adversary has not issued $\Cid$'s query $\Cquery(\Cid)$,
3343 \emph{but}
3344 \item $\Ccheck(\Cid) \in \Ccheckset(\Cid)$, so one of the check-values
3345 submitted to $S$ was actually correct.
3346 \end{enumerate}
3347 We claim that our extractor will work perfectly unless some class-id is
3348 bad. Certainly, if $R$ was issued by the matching session, there is
3349 nothing to prove; if the adversary has issued the relevant query then the
3350 extractor will recover $\Cindex(\Cid)$ just fine; and if $\Ccheck(\Cid)
3351 \notin \Ccheckset(\Cid)$ then all messages in the class would have been
3352 rejected by $\G1$ anyway.
3353
3354 Let $B(\Cid)$ be the event that the class~$\Cid$ is bad. We claim that
3355 \[ \Pr[B(\Cid)] \le \frac{\Ccount(\Cid)}{2^{\ell_I}}. \]
3356 The present lemma follows, since
3357 \[ \diff{1}{2}
3358 \le \Pr[F]
3359 \le \sum_\Cid \Pr[B(\Cid)]
3360 \le \sum_\Cid \frac{\Ccount(\Cid)}{2^{\ell_I}}
3361 = \frac{1}{2^{\ell_I}} \sum_\Cid \Ccount(\Cid)
3362 \le \frac{q_M}{2^{\ell_I}}
3363 \]
32609df3 3364 as required.
a6e375a6
MW
3365
3366 Now observe that, in $\G2$, sessions don't actually check incoming
3367 challenges in this way any more -- instead we run the extractor. So, to
3368 prove the claim, we consider a class~$\Cid$ where properties~1 and~2 above
3369 hold. The correct hash $\Chash(\Cid)$ is then independent of the rest of
3370 the game, so the probability that $\Ccheck(\Cid) \in \Ccheckset(\Cid)$ is
3371 precisely $\Ccount(\Cid)/2^{\ell_I}$ as required.
3372
3373  This completes the proof of the lemma.
3374\end{proof}
3375
3376\begin{proof}[Proof of lemma~\ref{lem:sk-g2-g3}]
3377 Let $F$ be the event that the adversary makes a query $H_K(Z_S)$ for some
3378  ripe session~$S$.  Since $\G3$ proceeds exactly as $\G2$ did unless $F$
3379  occurs, we apply lemma~\ref{lem:shoup}, which tells us that $\diff{2}{3}
3380  \le \Pr[F]$.  We must therefore bound this probability.
3381
3382 To do this, we consider a new game~$\G3'$, which is the same as $\G3$,
3383 except that, at the start of the game, we choose a random number $k \inr
3384 \Nupto{q_S}$. For $0 \le i < q_S$, let $S_i$ be the $i$th session created
3385 by the adversary. We define $F'$ to be the event that the adversary
3386 queries $H_K(Z_{S_k})$ when $S_k$ is ripe.
3387
3388 The lemma now follows from these two claims.
3389
3390 \begin{claim}
3391 $\Pr[F] \le q_S \Pr[F']$.
3392 \end{claim}
3393 To see this, for any session~$S$, let $F_S$ be the event that the adversary
3394 queries~$H_K(Z_S)$ when $S$ is ripe. Then
3395 \[ \Pr[F] \le \sum_{0\le i<q_S} \Pr[F_{S_i}]. \]
3396 Hence,
3397 \[ \Pr[F'] = \Pr[F_{S_k}] = \sum_{0\le i<q_S} \Pr[F_{S_i}] \Pr[k = i]
3398 = \frac{1}{q_S} \sum_{0\le i<q_S} \Pr[F_{S_i}]
3399 \ge \frac{\Pr[F]}{q_S}
3400 \]
3401 proving the claim.
3402
3403 \begin{claim}
3404 For some $t' = t + O(n) + O(q_S q_M) + O(q_I) + O(q_K)$, we have
3405 $\Pr[F'] \le \InSec{mcdh}(G; t', q_K).$
3406 \end{claim}
3407 To prove this claim, we construct an adversary~$B$ which solves the MCDH
3408 problem in~$G$. The adversary works as follows.
3409 \begin{enumerate}
3410 \item It is given a pair $(R^*, S^*) = (r^* P, s^* P)$ of group elements;
3411 its objective is to make a verification-oracle query $V(Z^*)$ where $Z^*
3412 = r^* s^* P$.
3413 \item It sets up a simulation of the game~$\G3'$, by running the
3414 $\id{init}$ function, and simulating all of the parties. In particular,
3415 it chooses a random~$k \in \Nupto{q_S}$.
3416 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3417 and $H_I(\cdot, \cdot, \cdot, \cdot)$, which choose random outputs for
3418 new, fresh inputs. However, whenever $A$ queries $H_K(\cdot)$ on a group
3419 element $Z$, $B$ also queries $V(Z)$.
3420 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3421 instructions faithfully, until the $k$th session-creation.
3422 \item When creating the $k$th session $S = S_k = (P_i, P_j, s)$, $B$ has
3423 party~$P_i$ choose $R^*$ as its challenge, rather than choosing $r_S$ and
3424 setting $R_S = r_S P$. Because it simulates all the parties, $B$ can
3425  compute $Y_S = x_j R^*$, which is still correct.
3426 \item If $A$ requests the creation of a matching session $T = (P_j, P_i,
3427 s)$ then $B$ has party~$P_j$ choose $S^*$ as its challenge. Again, $B$
3428 computes $Y_T = x_i S^*$.
3429 \item If $A$ ever corrupts the parties $P_i$ or $P_j$, or reveals the
3430 session state of $S$ or $T$ then $B$ stops the simulation abruptly and
3431 halts.
3432 \end{enumerate}
dff0fad2
MW
3433 Adversary $B$'s running time is within the bounds required of $t'$, and $B$
3434 makes at most $q_K$ queries to $V(\cdot)$; we therefore have
a6e375a6
MW
3435 \[ \Pr[F'] \le \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_K) \]
3436 as required.
3437\end{proof}
3438
3439\begin{proof}[Proof of lemma~\ref{lem:sk-g3-g4}]
3440 Let $F_4$ be the event under which we abort the game~$\G4$. Clearly, if
3441  $F_4$ doesn't occur, games $\G3$ and $\G4$ proceed identically, so we can
3442 apply lemma~\ref{lem:shoup} to see that $\diff{3}{4} \le \Pr[F_4]$.
3443 Bounding $\Pr[F_4]$, however, is somewhat complicated. We use a further
3444 sequence of games.
3445
3446 Firstly, let $\G5$ be like $\G4$ with the exception that we choose, at
3447 random, an integer~$k \inr \Nupto{q_S}$. As we did in the proof for
3448  lemma~\ref{lem:sk-g2-g3}, let $S_i$ be the $i$th session created by the
3449 adversary. For each session~$S_i$, let $T_i$ be its matching session, if
3450 there is one. We define $F_5$ to be the event that
3451 \begin{itemize}
3452 \item $S_k$ completes immediately following delivery of a message $\mu
3453 \notin M_{T_k}$, and
3454 \item $S_k$ was ripe at this point.
3455 \end{itemize}
3456 For games~$\G{i}$, for $i > 5$, we define the event $F_i$ to be the event
3457 corresponding to $F_5$ in $\G{i}$. Note that if $S_k$ \emph{is} sent a
3458 message in $M_{T_k}$ then $S_k$ immediately completes.
3459
3460 \begin{claim}
3461 $\Pr[F_4] \le \Pr[F_5]/q_S$.
3462 \end{claim}
3463 This claim is proven exactly as we did for claim~1 of
3464 lemma~\ref{lem:sk-g2-g3}.
3465
3466 Let~$\G6$ be the same as $\G5$ except that we change the encrypted
3467 responses of session~$S_k$ and its matching session~$T_k$. Let $K^* =
3468 (K_0^*, K_1^*) = H_K(Z_S)$. Then, rather than sending $(\cookie{response},
3469 R_S, E_{K_0^*}(Y_T))$, session~$S$ sends $(\cookie{response}, R_S,
3470 E_{K_0^*}(1^{\ell_G}))$.
3471 \begin{claim}
3472 $|\Pr[F_6] - \Pr[F_5]| \le \InSec{ind-cca}(\E; t', q_M, q_M).$
3473 \end{claim}
3474 To prove this claim, we construct an adversary~$B$ which attacks the
3475 IND-CCA security of our encryption scheme $\E$. The adversary~$B$ works as
3476 follows.
3477 \begin{enumerate}
3478 \item It is given no input, but a pair of oracles $E(\cdot, \cdot)$ and
3479 $D(\cdot)$; the former encrypts either the left or right input, according
3480 to a hidden bit, and the latter decrypts ciphertexts other than those
3481 returned by $E(\cdot, \cdot)$. Its goal is to guess the hidden bit.
3482 \item It sets up a simulation of the game~$\G5$, by running the $\id{init}$
3483 function, and simulating all of the parties. In particular, it chooses a
3484 random $k \in \Nupto{q_S}$.
3485 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3486 and $H_I(\cdot, \cdot, \cdot, \cdot)$.
3487 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3488 instructions faithfully, except for the matching sessions $S_k$ and
3489 $T_k$. Let $S = S_k = (P_i, P_j, s)$, and $T = T_k = (P_j, P_i, s)$.
3490 \item Suppose $T$ is sent the message $C_S = (\cookie{challenge}, R_S,
3491 c_S)$. Rather than computing $K^* = H_K(r_T R_S)$ and performing the
3492 encryption properly, $B$ queries its left-or-right encryption oracle
3493  $E(\cdot, \cdot)$ on the pair $(1^{\ell_G}, x_j R_S)$, and sends the resulting
3494 ciphertext~$\chi$ back to~$S$ as part of a message $(\cookie{response},
3495 R_T, \chi)$. The set $M_T$ is precisely the set of messages constructed
3496 in this fashion. (Recall that challenge messages other than $C_S$ aren't
3497 actually delivered to $T$, since we simulate the responses using the
3498 extractor, as of $\G2$.)
3499 \item Suppose $S$ is sent a message $M = (\cookie{response}, R_T, \chi) \in
3500 M_T$. We immediately stop the simulation, and $B$ returns $0$.
3501 \item Suppose, instead, that $S$ is sent some message $M' =
3502 (\cookie{response}, R, \chi) \notin M_T$. There are two cases to
3503 consider. If $R = R_T$ then we must have $\chi$ distinct from the
3504 ciphertexts returned by the $E(\cdot, \cdot)$ oracle, so we can invoke
3505 the decryption oracle $D(\cdot)$ on $\chi$ to obtain a response $Y$.
3506 Alternatively, if $R \ne R_T$, we can compute the key $K = (K_0, K_1) =
3507 H_K(r_S R)$, and recover $Y = D_{K_0}(\chi)$. In either case, if $Y =
3508  r_S X_j$ then $S$ would complete at this point: $B$ stops the simulation
3509 and returns $1$.
3510 \item If $A$ exposes $S$ (by corrupting $P_i$ or~$P_j$, or revealing $S$ or
3511 $T$) then we stop the simulation and $B$ returns $0$.
3512 \item Finally, if the game stops, either because $A$ halts, or because of
3513 one of the special rules introduced in earlier games, $B$ returns $0$.
3514 \end{enumerate}
3515 It is clear that $B$'s oracle queries are acceptable, since $|x_j R_S| =
3516 \ell_G$ by definition, and $B$ never queries $D(\cdot)$ on a ciphertext
3517 returned by its encryption oracle. By the rules of~$\G3$, we know that the
3518 game stops immediately if $A$ ever queries $Z_S$, so the key $K^*$ is
3519 independent of everything in $A$'s view except the ciphertexts $\chi$
3520 output by $S$ and $T$. Therefore, if the hidden bit of the IND-CCA game is
3521 $1$, $B$ accurately simulates $\G5$, whereas if the bit is $0$ then $B$
dff0fad2
MW
3522  accurately simulates $\G6$.  We issue no more than $q_M$ encryption or
3523 decryption queries. Finally, $B$'s running time is within the bounds
3524 allowed for $t'$. Therefore,
a6e375a6
MW
3525 \[ \Adv{ind-cca}{\E}(B) = \Pr[F_5] - \Pr[F_6]
3526 \le \InSec{ind-cca}(\E; t', q_M, q_M). \]
3527 We construct the adversary~$\bar{B}$ which is the same as $B$ above, except
3528 that $\bar{B}$ returns $0$ whenever $B$ returns~$1$, and \emph{vice versa}.
3529 Clearly
3530 \[ \Adv{ind-cca}{\E}(\bar{B})
3531 = (1 - \Pr[F_5]) - (1 - \Pr[F_6])
3532 = \Pr[F_6] - \Pr[F_5]
3533 \le \InSec{ind-cca}(\E; t', q_M, q_M).
3534 \]
3535 This proves the claim.
3536
3537 Let $\G7$ be the same as $\G6$, except that at the start of the game we
3538 choose a random $m \in \Nupto{n}$, and when the adversary creates the
3539 session $S = S_k = (P_i, P_j, s)$, we abort the game unless $j = m$.
3540 Clearly we have $\Pr[F_6] = n \Pr[F_7]$.
32609df3 3541
a6e375a6
MW
3542  Finally, we can explicitly bound $F_7$.  In $\G6$, the adversary's view is
3543  independent of the correct response $Y_S = r_S X_j = x_j R_S$ to $S$'s
3544 challenge. Therefore, if $A$ manages to send any message $\mu \notin M_T$
3545 which causes $S$ to complete, then it has impersonated~$P_j$.
3546 \begin{claim}
3547 $\Pr[F_7] \le \InSec{mcdh}(G; t', q_M + q_I)$.
3548 \end{claim}
3549 The present lemma follows from this and the previous claims.
3550
3551 To prove the claim formally, we construct an adversary~$B'$, which behaves
3552 as follows.
3553 \begin{enumerate}
3554 \item It is given as input a public key $X^*$ and a single challenge $(R^*,
3555 c^*)$, a random oracle $H^*_I(\cdot, \cdot)$, and an oracle $V(\cdot,
3556 \cdot)$, which verifies responses $(R, Y)$. Its goal is to invoke
3557 $V(\cdot, \cdot)$ with a correct response to the challenge.
3558 \item It chooses a random $k \in \Nupto{q_S}$ and $m \in \Nupto{n}$. It
3559 sets up a simulation of the game~$\G7$, by running the $\id{init}$
3560 function, and simulating all of the parties, except that it gives party
3561 $P_m$ the public key $X^*$. This makes no difference, since $P_m$
3562 doesn't actually need to give any `honest' responses because of the
3563 change we made in $\G6$.
3564 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3565 and $H_I(\cdot, \cdot, \cdot, \cdot)$, with one exception -- see below.
3566 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3567 instructions faithfully, except for the session $S_k$. Let $S = S_k =
3568 (P_i, P_j, s)$, and let $T = T_k = (P_j, P_i, s)$ be its matching
3569 session.
3570 \item When session~$S$ is created, $B'$ checks that $j = m$, and if not
3571  stops the simulation and halts.  Otherwise, $B'$ uses its given
3572  challenge pair $(R, c) = (R^*, c^*)$.  Session~$S$ sends $C_S = (\cookie{challenge},
3573  R, c)$ as its challenge to~$T$.
3574  \item When $A$ makes a query $H_I(X^*, s, R, Y)$, $B'$ answers it by
3575 querying its \emph{own} random oracle $H^*_I(R, Y)$.
3576 \item When $S$ receives a message $(\cookie{response}, R, \chi)$, we
3577  compute $(K_0, K_1) = H_K(r_S R)$, and $Y = D_{K_0}(\chi)$.  If $Y \ne \bot$
3578 then $B'$ calls $V(R, Y)$.
3579 \item If $A$ reveals $S$ or corrupts $P_i$ or $P_j$ then $B'$ stops the
3580 simulation immediately and halts.
3581 \end{enumerate}
dff0fad2
MW
3582 The running time of $B'$ is within the bounds required of $t'$; it makes at
3583 most $q_I$ random-oracle and at most $q_M$ verification queries. Clearly
3584 $B'$ succeeds whenever $F_7$ occurs. The claim follows from
3585 theorem~\ref{thm:wident-sound}.
a6e375a6
MW
3586\end{proof}
3587
3588
3589\subsection{Proof of theorem~\ref{thm:sk2}}
3590\label{sec:sk2-proof}
3591
3592The proof is almost identical to the proof of theorem~\ref{thm:sk}, in
3593appendix~\ref{sec:sk-proof}. Unfortunately a black-box reduction doesn't
32609df3 3594seem possible.
a6e375a6
MW
3595
3596We use the games and notation of section~\ref{sec:sk-proof}.
3597
3598The change to the check-value calculation doesn't affect key-generation at
3599all, so the transition to $\G1$ goes through as before.
3600
3601The transition from $\G1$ to $\G2$ -- answering challenges using the
3602extractor -- needs a little care. Let $S = (P_i, P_j, s)$ be a session, and
3603consider an incoming message $(\cookie{challenge}, R, c)$.
3604\begin{itemize}
3605\item If $T = (P_j, P_i, s)$ is the matching session to $S$, and $R = R_T$ is
3606 the public challenge value of $T$, and $c = r_T \xor H_I(R_S, X_j, s, R_T,
3607 r_T X_i)$ is the check-value output by $T$ when it received
3608 $(\cookie{pre-challenge}, R_S)$ as input, then $S$ replies as it did in
3609 $\G1$.
3610\item If the challenge message is any other message, then we use the
3611 extractor.
3612\end{itemize}
3613As in lemma~\ref{lem:sk-g1-g2}, we examine which sessions could have queried
3614$H_I(R_S, X_j, s, R, x_i R)$, and for the same reasons conclude that only the
3615matching session would have done this, and only in response to the
3616pre-challenge $R_S$. It follows that $\diff{1}{2} \le q_M/2^{\ell_I}$ as
3617before.
3618
3619The remaining game steps go through unchanged. In particular, we conclude
3620that a ripe session will only complete if the adversary has transmitted
3621messages from its matching session correctly, and the session key is
3622independent of the adversary's view. The theorem follows.
3623
dff0fad2
MW
3624
3625\subsection{Sketch proof of single-key protocol for secure channels}
3626\label{sec:sc-proof}
3627
3628We want to show that the Wrestlers Key-Exchange protocol, followed by use of
3629the encryption scheme $\E$, with the \emph{same} key $K = K_0$, still
3630provides secure channels.
3631
3632\subsubsection{Secure channels definition}
081da1b1
MW
3633We (very briefly!) recall the \cite{canetti-krawczyk-2001:secure-channels}
3634definition of a secure channels protocol. We again play a game with the
3635adversary. At the beginning, we choose a bit $b^* \inr \{0, 1\}$ at random.
3636We allow the adversary the ability to establish \emph{secure channels}
3637sessions within the parties. Furthermore, for any running session $S = (P_i,
3638P_j, s)$, we allow the adversary to request $S$ to send a message~$\mu$
3639through its secure channel. Finally, the adversary is allowed to choose one
3640ripe \emph{challenge} session, and ask for it to send one of a \emph{pair}
3641of messages $(\mu_0, \mu_1)$, subject to the restriction that $|\mu_0| =
dff0fad2
MW
3642|\mu_1|$; the session sends message $\mu_{b^*}$. The adversary may not
3643expose the challenge session.
3644
3645The adversary wins if (a)~it can guess the bit $b^*$, or (b)~it can cause a
3646ripe session $S$ (i.e., an unexposed, running session), with a matching
3647session~$T$ to output a message other than one that it requested that $T$
3648send.
3649
3650\subsubsection{Protocol definition}
3651The protocol begins with Wrestlers key-exchange. The encryption in the
3652key-exchange protocol is performed as $E_K(\cookie{kx}, \cdot)$; encryption
3653for secure channels is performed as $E_K(\cookie{sc}, i, o, \cdot)$, where
3654$i$ is a sequence number to prevent replays and $o \in \{S, T\}$ identifies
3655the sender.
3656
3657\subsubsection{Proof sketch}
3658We use the games and notation of appendix~\ref{sec:sk-proof}.
3659
3660The proof goes through as far as the step between $\G5$ and $\G6$ in the
3661proof of lemma~\ref{lem:sk-g3-g4}. Here we make the obvious adjustments to
3662our adversary against the IND-CCA security of $\E$. (Our adversary will need
3663to use the left-or-right oracle for messages sent using the secure channel
3664built on $K^*$. That's OK.)
3665
3666In $\G4$, we know that ripe sessions agree the correct key, and the adversary
3667never queries the random oracle, so the key is independent of the adversary's
3668view.
3669
3670We define a new game $\G8$, which is like $\G4$, except that we stop the game
3671if the adversary ever forges a message sent over the secure channel. That
3672is, if a ripe session $S$ ever announces receipt of a message not sent (at
3673the adversary's request) by its matching session $T$. Let $F_8$ be the event
3674that a forgery occurs. We apply lemma~\ref{lem:shoup}, which tells us that
3675$\diff{4}{8} \le \Pr[F_8]$. To bound $F_8$, we isolate a session at random
3676(as in lemmata \ref{lem:sk-g2-g3} and~\ref{lem:sk-g3-g4}), which tells us
3677that
3678\begin{equation}
3679 \label{eq:sc-g4-g8}
3680 \diff{4}{8} \le q_S \cdot \InSec{int-ptxt}(\E; t', q_M, q_M)
3681\end{equation}
3682Finally, we can bound the adversary's advantage at guessing the hidden bit
3683$b^*$. We isolate (we hope) the challenge session $S$ by choosing a target
32609df3 3684session at random, as before. Let $K^* = H_K(Z_S)$ be the key agreed by the
dff0fad2
MW
3685session (if it becomes ripe). We define an adversary $B$ against the IND-CCA
3686security of $\E$. The adversary $B$ simulates the game. If the adversary
3687exposes the target session, or doesn't choose it as the challenge session,
3688$B$ fails (and exits 0); otherwise it uses the left-or-right encryption
3689oracle to encrypt both of the adversary's message choices, and outputs the
3690adversary's choice. Let $b$ be the adversary's output, and let $\epsilon$ be
3691the advantage of our IND-CCA distinguisher. Then
3692\begin{eqnarray*}[rl]
3693 \Pr[b = b^*]
3694 & = \Pr[b = b^* \wedge b^* = 1] + \Pr[b = b^* \wedge b^* = 0] \\
3695 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3696 \Pr[b = b^* \mid b^* = 0] \bigr) \\
3697 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3698 (1 - \Pr[b \ne b^* \mid b^* = 0]) \bigr) \\
3699 & = \frac{1}{2}\bigl( \Pr[b = 1 \mid b^* = 1] -
3700 \Pr[b = 1 \mid b^* = 0] + 1 \bigr) \\
3701 & = \frac{1}{2}\bigl(1 + q_S\,\Adv{ind-cca}{\E}(B) \bigr) \\
3702 & \le \frac{1}{2} \bigl( 1 + q_S\,\InSec{ind-cca}(\E; t', q_M, q_M) \bigr).
3703 \eqnumber
3704\end{eqnarray*}
3705
a6e375a6
MW
3706\end{document}
3707
32609df3 3708%%% Local Variables:
a6e375a6
MW
3709%%% mode: latex
3710%%% TeX-master: t
32609df3 3711%%% End: