%%% -*-latex-*-
%%%
%%% The Wrestlers Protocol: secure, deniable key-exchange
%%%
%%% (c) 2006 Mark Wooding
%%%

\newif\iffancystyle\fancystylefalse
\newif\ifshort\shortfalse
\fancystyletrue
\InputIfFileExists{wr.cfg}\relax\relax
\errorcontextlines=\maxdimen
\showboxdepth=\maxdimen
\showboxbreadth=\maxdimen

\iffancystyle
  \documentclass
    [a4paper, article, 10pt, numbering, noherefloats, notitlepage,
     runinsubsubsec]
    {strayman}
  \usepackage[T1]{fontenc}
  \usepackage[palatino, helvetica, courier, maths=cmr]{mdwfonts}
  \usepackage[within = subsection, mdwmargin]{mdwthm}
  \usepackage{mdwlist}
  \usepackage{sverb}
  \if0\ifx\pdfoutput\xxundefined0\else\the\pdfoutput\fi
    \PassOptionsToPackage{dvips}{xy}
  \fi
\else
  \documentclass[a4paper]{llncs}
  \usepackage{a4wide}
\fi

\PassOptionsToPackage{show}{slowbox}
%\PassOptionsToPackage{hide}{slowbox}
\usepackage{mdwtab, mathenv, mdwmath, crypto}
\usepackage{slowbox}
\usepackage{amssymb, amstext}
\usepackage{url, multicol}
\usepackage{tabularx}
\DeclareUrlCommand\email{\urlstyle{tt}}
\ifslowboxshow
  \usepackage[all]{xy}
  \turnradius{4pt}
\fi

\iffancystyle
  \def\next{\title[The Wrestlers Protocol]}
\else
  \def\next{\title}
\fi
\next
  {The Wrestlers Protocol \\
   A simple, practical, secure, deniable protocol for key-exchange}
\iffancystyle
  \author{Mark Wooding \\ \email{mdw@distorted.org.uk}}
\else
  \author{Mark Wooding}
  \institute{\email{mdw@distorted.org.uk}}
\fi

\iffancystyle
  \bibliographystyle{mdwalpha}
  \let\epsilon\varepsilon
  \let\emptyset\varnothing
  \let\le\leqslant\let\leq\le
  \let\ge\geqslant\let\geq\ge
  \numberwithin{table}{section}
  \numberwithin{figure}{section}
  \numberwithin{equation}{subsection}
  \let\random\$
\else
  \bibliographystyle{plain}
  \expandafter\let\csname claim*\endcsname\claim
  \expandafter\let\csname endclaim*\endcsname\endclaim
\fi

\iffancystyle
  \newcommand{\Nupto}[1]{\N_{<{#1}}}
\else
  \newcommand{\Nupto}[1]{\{0, 1, \ldots, #1 - 1\}}
\fi
\let\Bin\Sigma
\let\emptystring\lambda
\edef\Pr{\expandafter\noexpand\Pr\nolimits}
\newcommand{\bitsto}{\mathbin{..}}
\newcommand{\E}{{\mathcal{E}}}
\newcommand{\M}{{\mathcal{M}}}
\iffancystyle
  \def\description{%
    \basedescript{%
      \let\makelabel\textit%
      \desclabelstyle\multilinelabel%
      \desclabelwidth{1in}%
    }%
  }
\fi
\def\fixme#1{\marginpar{FIXME}[FIXME: #1]}
\def\hex#1{\texttt{#1}_{x}}

\def\dbox#1{%
  \vtop{%
    \def\\{\unskip\egroup\hbox\bgroup\strut\ignorespaces}%
    \hbox{\strut#1}%
  }%
}

\def\Wident{\Xid{W}{ident}}
\def\Wkx{\Xid{W}{kx}}
\def\Wdkx{\Xid{W}{dkx}}
\def\Func#1#2{\mathcal{F}[#1\to#2]}
\def\diff#1#2{\Delta_{#1, #2}}
\def\Hid{H_{\textit{ID}}}

%% protocol run diagrams
\def\PRaction#1{#1\ar[r]}
\def\PRcreatex#1{\PRaction{\textsf{Create session\space}#1}}
\def\PRcreate#1#2#3{\PRcreatex{(\text{#1},\text{#2},#3)}}
\def\PRreceivex#1{\PRaction{\textsf{Receive\space}#1}}
\def\PRreceive#1#2{\PRreceivex{\msg{#1}{#2}}}
\def\PRsession#1{\relax\mathstrut#1\ar[r]}
\def\msg#1#2{(\cookie{#1},#2)}
\def\PRresult#1{#1}
\def\PRsendx#1{\PRresult{\textsf{Send\space}#1}}
\def\PRsend#1#2{\PRsendx{\msg{#1}{#2}}}
\def\PRcomplete#1{\textsf{Complete:\space}#1}
\def\PRstate#1{\textsf{State:\space}#1}
\def\PRignore{\textsf{(ignored)}}
\def\PRreveal{\textsf{Session-state reveal}\ar[r]}
\def\protocolrun#1{\[\xymatrix @R=0pt @C=2em {#1}\]}

%% class-ids for proof of extractor lemma
\let\Cid=\Lambda
\let\Csession=S
\let\Cindex=r
\let\Cquery=Q
\let\Chash=H
\let\Ccheck=c
\let\Ccheckset=V
\let\Ccount=\nu

\def\HG#1{\mathbf{H}_{#1}}

\begin{document}

%%%--------------------------------------------------------------------------

\maketitle
\iffancystyle \thispagestyle{empty} \fi

\begin{abstract}
  We describe and prove (in the random-oracle model) the security of a simple
  but efficient zero-knowledge identification scheme, whose security is based
  on the computational Diffie-Hellman problem.  Unlike other recent proposals
  for efficient identification protocols, we don't need any additional
  assumptions, such as the Knowledge of Exponent assumption.

  From this beginning, we build a simple key-exchange protocol, and prove
  that it achieves `SK-security' -- and hence security in Canetti's Universal
  Composability framework.

  Finally, we show how to turn the simple key-exchange protocol into a
  slightly more complex one which provides a number of valuable `real-life'
  properties, without damaging its security.
\end{abstract}

\iffancystyle
  \newpage
  \thispagestyle{empty}
  \columnsep=2em \columnseprule=0pt
  \tableofcontents[\begin{multicols}{2}\raggedright][\end{multicols}]
  %%\listoffigures[\begin{multicols}{2}\raggedright][\end{multicols}]
  %% \listoftables[\begin{multicols}{2}\raggedright][\end{multicols}]
  \newpage
\fi

%%%--------------------------------------------------------------------------

\section{Introduction}


This paper proposes protocols for \emph{identification} and
\emph{authenticated key-exchange}.

An identification protocol allows one party, say Bob, to be sure that he's
really talking to another party, say Alice.  It assumes that Bob has some way
of recognising Alice; for instance, he might know her public key.  Our
protocol requires only two messages -- a challenge and a response -- and has
a number of useful properties.  It is very similar to, though designed
independently of, a recent protocol by Stinson and Wu; we discuss their
protocol in section~\ref{sec:stinson-ident}.

Identification protocols are typically less useful than they sound.  As Shoup
\cite{Shoup:1999:OFM} points out, such a protocol provides only a `secure
ping', by which Bob can know that Alice is `there', but gives no guarantee
that any other communication was sent to or reached her.  However, there are
situations where an authentic channel between two entities -- e.g., a device
and a smartcard -- is all that is required, and there a simple
identification protocol can still be useful.

An authenticated key-exchange protocol lets Alice and Bob agree on a shared
secret, known to them alone, even if there is an enemy who can read and
intercept all of their messages, and substitute messages of her own.  Once
they have agreed on their shared secret, of course, they can use standard
symmetric cryptography techniques to ensure the privacy and authenticity of
their messages.


\subsection{Desirable properties of our protocols}

Our identification protocol has a number of desirable properties.
\begin{itemize}
\item It is \emph{simple} to understand and implement.  In particular, it
  requires only two messages.
\item It is fairly \emph{efficient}, requiring two scalar multiplications by
  each of the prover and verifier.
\item It is provably \emph{secure} (in the random oracle model), assuming the
  intractability of the computational Diffie-Hellman problem.
\end{itemize}

Our key-exchange protocol also has a number of desirable
properties.
\begin{itemize}
\item It is fairly \emph{simple} to understand and implement, though there
  are a few subtleties.  In particular, it is \emph{symmetrical}.  We have
  implemented a virtual private network system based on this protocol.
\item It is \emph{efficient}, requiring four scalar multiplications by each
  participant.  The communication can be reduced to three messages by
  breaking the protocol's symmetry.
\item It is provably \emph{secure} (again, in the random oracle model),
  assuming the intractability of the computational Diffie-Hellman problem,
  and the security of a symmetric encryption scheme.
\item It provides \emph{perfect forward secrecy}.  That is, even if a user's
  long-term secrets are compromised, past sessions remain secure.
\item It is \emph{deniable}.  It is possible to construct simulated
  transcripts of protocol executions between any number of parties without
  knowing any of their private keys.  The simulated transcripts are (almost)
  indistinguishable from real protocol transcripts.  Hence, a transcript
  does not provide useful evidence that a given party was really involved in
  a given protocol execution.
\end{itemize}

\subsection{Asymptotic and concrete security results}

Most security definitions for identification (particularly zero-knowledge)
and key-exchange in the literature are \emph{asymptotic}.  That is, they
consider a family of related protocols, indexed by a \emph{security
parameter}~$k$; they demand that any \emph{polynomially-bounded} adversary
has only \emph{negligible} advantage.  A polynomially-bounded adversary is
one whose running time is bounded by some polynomial~$t(k)$.  The security
definition requires that, for any such polynomially-bounded adversary, and
any polynomial $p(k)$, the adversary's advantage is less than $1/p(k)$ for
all sufficiently large values of $k$.

Such asymptotic notions are theoretically interesting, and have obvious
connections to complexity theory.  Unfortunately, such an asymptotic result
tells us nothing at all about the security of a particular instance of a
protocol, or what parameter sizes one needs to choose for a given level of
security against a particular kind of adversary.  Koblitz and Menezes
\cite{Koblitz:2006:ALP} (among other useful observations) give examples of
protocols, proven to meet asymptotic notions of security, whose security
proofs guarantee nothing at all for the kinds of parameters typically used in
practice.

Since, above all, we're interested in analysing a practical and implemented
protocol, we follow here the `practice-oriented provable security' approach
largely inspired by Bellare and Rogaway, and exemplified by
\cite{Bellare:1994:SCB,Bellare:1995:XMN,Bellare:1995:OAE,Bellare:1996:ESD,%
Bellare:1996:KHF,Bellare:2000:CST}; see also \cite{Bellare:1999:POP}.
Rather than attempting to say, formally, whether or not a protocol is
`secure', we associate with each protocol an `insecurity function' which
gives an upper bound on the advantage of any adversary attacking the protocol
within given resource bounds.


\subsection{Formal models for key-exchange}

Many proposed key-exchange protocols have turned out to have subtle security
flaws.  The idea of using formal methods to analyse key-exchange protocols
begins with the logic of Burrows, Abadi and Needham \cite{Burrows:1989:LA}.
Their approach requires a `formalising' step, in which one expresses in the
logic the contents of the message flows, and the \emph{beliefs} of the
participants.

Bellare and Rogaway \cite{Bellare:1994:EAK} describe a model for studying the
computational security of authentication and key-exchange protocols in a
concurrent setting, i.e., where multiple parties are running several
instances of a protocol simultaneously.  They define a notion of security in
this setting, and show that several simple protocols achieve this notion.
Their original paper dealt with pairs of parties using symmetric
cryptography; they extended their definitions in \cite{Bellare:1995:PSS} to
study three-party protocols involving a trusted key-distribution centre.

Blake-Wilson, Johnson and Menezes \cite{Blake-Wilson:1997:KAP} applied the
model of \cite{Bellare:1994:EAK} to key-exchange protocols using asymmetric
cryptography, and Blake-Wilson and Menezes \cite{Blake-Wilson:1998:EAA}
applied it to protocols based on the Diffie-Hellman protocol.

The security notion of \cite{Bellare:1994:EAK} is based on a \emph{game}, in
which an adversary nominates a \emph{challenge session}, and is given either
the key agreed by the participants of the challenge session, or a random
value independently sampled from an appropriate distribution.  The
adversary's advantage -- and hence the insecurity of the protocol -- is
measured by its success probability in guessing whether the value it was
given is really the challenge key.  This challenge-session notion was also
used by the subsequent papers described above.

Bellare, Canetti and Krawczyk \cite{Bellare:1998:MAD} described a pair of
models which they called the \textsc{am} (for `authenticated links model')
and \textsc{um} (`unauthenticated links model').  They propose a modular
approach to the design of key-exchange protocols, whereby one first designs a
protocol and proves its security in the \textsc{am}, and then applies an
authenticating `compiler' to the protocol which they prove yields a protocol
secure in the realistic \textsc{um}.  Their security notion is new.  They
define an `ideal model', in which an adversary is limited to assigning
sessions fresh, random and unknown keys, or matching up one session with
another, so that both have the same key.  They define a protocol to be secure
if, for any adversary~$A$ in the \textsc{am} or \textsc{um}, there is an
ideal adversary~$I$, such that the outputs of $A$ and $I$ are computationally
indistinguishable.

In \cite{Shoup:1999:OFM}, Shoup presents a new model for key-exchange, also
based on the idea of simulation.  He analyses the previous models,
particularly \cite{Bellare:1994:EAK} and \cite{Bellare:1998:MAD}, and
highlights some of their inadequacies.

Canetti and Krawczyk \cite{Canetti:2001:AKE} describe a new notion of
security in the basic model of \cite{Bellare:1998:MAD}, based on the
challenge-session notion of \cite{Bellare:1994:EAK}.  The security notion,
called `SK-security', seems weaker in various ways than those of earlier
works such as \cite{Bellare:1994:EAK} or \cite{Shoup:1999:OFM}.  However, the
authors show that their notion suffices for constructing `secure channel'
protocols, which they also define.

In \cite{Canetti:2001:UCS}, Canetti describes the `universal composition'
framework.  Here, security notions are simulation-based: one defines security
notions by presenting an `ideal functionality'.  A protocol securely
implements a particular functionality if, for any adversary interacting with
parties who use the protocol, there is an adversary which interacts with
parties using the ideal functionality such that no `environment' can
distinguish the two.  The environment is allowed to interact freely with the
adversary throughout, differentiating this approach from that of
\cite{Bellare:1998:MAD} and \cite{Shoup:1999:OFM}, where the distinguisher
was given only transcripts of the adversary's interaction with the parties.
With security defined in this way, it's possible to prove a `universal
composition theorem': one can construct a protocol, based upon various ideal
functionalities, and then `plug in' secure implementations of the ideal
functionalities and appeal to the theorem to prove the security of the entire
protocol.  The UC framework gives rise to very strong notions of security,
due to the interactive nature of the `environment' distinguisher.

Canetti and Krawczyk \cite{Canetti:2002:UCN} show that the SK-security notion
of \cite{Canetti:2001:AKE} is \emph{equivalent} to a `relaxed' notion of
key-exchange security in the UC framework, and suffices for the construction
of UC secure channels.

The result of \cite{Canetti:2002:UCN} gives us confidence that SK-security is
the `right' notion of security for key-exchange protocols.  Accordingly,
SK-security is the standard against which we analyse our key-exchange
protocol.


\subsection{Outline of the paper}

The remaining sections of this paper are as follows.
\begin{itemize}
\item Section \ref{sec:prelim} provides the essential groundwork for the rest
  of the paper.  It introduces important notation, and describes security
  notions and intractability assumptions.
\item Section \ref{sec:zk-ident} describes our zero-knowledge identification
  protocol and proves its security.
\item Section \ref{sec:kx} describes the simple version of our key-exchange
  protocol, and proves its security and deniability.  It also describes some
  minor modifications which bring practical benefits without damaging
  security.
\item Finally, section \ref{sec:conc} presents our conclusions.
\end{itemize}

%%%--------------------------------------------------------------------------

\section{Preliminaries}
\label{sec:prelim}

\subsection{Miscellaneous notation}

We write $\Func{D}{R}$ for the set of all functions with domain $D$ and range
$R$.
\iffancystyle
We write $\Nupto{n} = \{\, i \in \Z \mid 0 \le i < n \,\} = \{0, 1, \ldots, n
- 1\}$ for the set of nonnegative integers less than $n$.
\fi


\subsection{Groups}

Let $(G, +)$ be a cyclic group\footnote{
  We find that additive group notation is easier to read.  In particular, in
  multiplicative groups, one ends up with many interesting things tucked away
  in little superscripts.}%
of prime order $q$, and generated by an element $P$.  We shall write the
identity of $G$ as $0_G$, or simply as $0$ when no ambiguity is likely to
arise.  Thus, we have $\langle P \rangle = G$ and $q P = 0$.  Any $X \in G$
can be written as $X = x P$ for some $x \in \Nupto{q}$.


\subsection{Bit strings and encodings}
\label{sec:bitenc}

Let $\Bin = \{0, 1\}$ be the set of binary digits.  Then $\Bin^n$ is the set
of $n$-bit strings, and $\Bin^*$ the set of all (finite) bit strings.  If $x
\in \Bin^n$ is a bit string, we write its length as $|x| = n$.  For a bit
string $x \in \Bin^n$, and for $0 \le i < n$, we write $x[i]$ for the $i$th
bit of $x$.  The empty string is denoted $\emptystring$.

Let $x$ and $y$ be two bit strings.  If $|x| = |y| = n$, we write $x \xor y$
to mean the bitwise exclusive-or of $x$ and $y$: if $z = x \xor y$ then $|z|
= n$, and $z[i] = (x[i] + y[i]) \bmod 2$ for $0 \le i < n$.  We write $x \cat
y$ to mean the concatenation of $x$ and $y$: if $z = x \cat y$ then $|z| =
|x| + |y|$ and $z[i] = x[i]$ if $0 \le i < |x|$ and $z[i] = y[i - |x|]$ if
$|x| \le i < |x| + |y|$.

Finally, we let $\bot$ be a value distinct from any bit string.

We shall want to encode group elements $X \in G$ and indices $x \in I =
\Nupto{|G|}$ as bit strings.  To this end, we shall assume the existence of
integers $\ell_G, \ell_I > 0$ and functions
\[
  e_S\colon S \to \Bin^{\ell_S}
  \quad \textrm{and} \quad
  d_S\colon \Bin^{\ell_S} \to S \cup \{ \bot \}
  \qquad
  \textrm{for } S \in \{ G, I \}
\]
with the following properties.
\begin{itemize}
\item The functions are \emph{unique} and \emph{unambiguous}, i.e., for any
  $t \in \Bin^{\ell_S}$, we have
  \[ d_S(t) = \begin{cases}
       s & if there is some $s \in S$ such that $t = e_S(s)$, or \\
       \bot & if no such $s$ exists.
     \end{cases}
  \]
\item The functions should be \emph{efficient} to compute.  Indeed, we shall
  be assuming that the time taken for encoding and decoding is essentially
  trivial.
\end{itemize}
Note that, as we have defined them, all encodings of group elements are the
same length, and similarly for encodings of indices.  This is necessary for
the security of our protocols.

We shall frequently abuse notation by omitting the encoding and decoding
functions where it is obvious that they are required.


\subsection{Games, adversaries, and oracles}
\label{sec:games}

Many of the security definitions and results given here make use of
\emph{games}, played with an \emph{adversary}.  An adversary is a
probabilistic algorithm.  In some games, the adversary is additionally
equipped with \emph{oracles}, which perform computations with values chosen
by the adversary and secrets chosen by the game but not revealed to the
adversary.  We impose limits on the adversary's resource usage: in
particular, the total time it takes, and the number of queries it makes to
its various oracles.  Throughout, we include the size of the adversary's
program as part of its `time', in order to model adversaries which contain
large precomputed tables.

The games provide models of someone trying to attack a construction or
protocol.  For security, we will either define a notion of `winning' the
game, and require that all adversaries have only a very small probability of
winning, or we consider two different games and require that no adversary can
distinguish between the two except with very small probability.

Our proofs make frequent use of sequences of games; see
\cite{Shoup:2004:SGT,Bellare:2004:CBG}.  The presentation owes much to Shoup
\cite{Shoup:2004:SGT}.  We begin with a game $\G0$ based directly on a
relevant security definition, and construct a sequence of games $\G1$, $\G2$,
\dots, each slightly different from the last.  We define all of the games in
a sequence over the same underlying probability space -- the random coins
tossed by the algorithms involved -- though different games may have slightly
differently-defined events and random variables.  Our goal in doing this is
to bound the probability of the adversary winning the initial game $\G0$ by
simultaneously (a) relating the probability of this event to that of
corresponding events in subsequent games, and (b) simplifying the game until
the probability of the corresponding event can be computed directly.

The following simple lemma from \cite{Shoup:2001:OR} will be frequently
useful.
\begin{lemma}[Difference Lemma]
  \label{lem:shoup}
  Let $S$, $T$, $F$ be events.  Suppose $\Pr[S|\bar F] = \Pr[T|\bar F]$.
  Then $|\Pr[S] - \Pr[T]| \le \Pr[F]$.
\end{lemma}
\begin{proof}
  A simple calculation:
  \begin{eqnarray*}[rl]
    |\Pr[S] - \Pr[T]|
    & = |(\Pr[S|F]\Pr[F] + \Pr[S|\bar F]\Pr[\bar F]) -
         (\Pr[T|F]\Pr[F] + \Pr[T|\bar F]\Pr[\bar F])| \\
    & = \Pr[F] \cdot |\Pr[S|F] - \Pr[T|F]| \\
    & \le \Pr[F]
  \end{eqnarray*}
  and we're done!
\end{proof}


\subsection{The random oracle model}
\label{sec:ro}

In particular, most of our results will make use of the \emph{random oracle}
model \cite{Bellare:1993:ROP}, in which all the participants, including the
adversary, have access to a number of `random oracles'.  A random oracle with
domain $D$ and range $R$ is an oracle which computes a function chosen
uniformly at random from the set of all such functions.  (In the original
paper \cite{Bellare:1993:ROP}, random oracles are considered as having domain
$\Bin^*$ and range $\Bin^\omega$; we use finite random oracles here, because
they're easier to work with.)

Given a protocol proven secure in the random oracle model, we can instantiate
each random oracle by a supposedly-secure hash function and be fairly
confident that either our protocol will be similarly secure, or one of the
hash functions we chose has some unfortunate property.

Proofs in the random oracle model must be interpreted carefully.  For
example, Canetti, Goldreich and Halevi \cite{Canetti:2004:ROM} show that
there are schemes which can be proven secure in the random oracle model but
provably have no secure instantiation in the standard model.

The random oracle model is useful for constructing reductions and simulators
for two main reasons.
\begin{enumerate}
\item One can use the transcript of an adversary's queries to random oracles
  in order to extract knowledge from it.
\item One can `program' a random oracle so as to avoid being bound by prior
  `commitments', or to guide an adversary towards solving a selected instance
  of some problem.
\end{enumerate}
Our proofs only make use of the first feature.  This becomes particularly
important when we consider issues of zero-knowledge and deniability in a
concurrent setting, because we want to be able to claim that we retain these
features when the random oracle is instantiated using a cryptographic hash
function, and hash functions definitely aren't `programmable' in this way!
The former property seems rather more defensible -- one would indeed hope
that the only sensible way of working out (anything about) the hash of a
particular string is to actually compute the hash function, and the random
oracle model is, we hope, just giving us a `hook' into this process.

(Our protocols can be modified to make use of bilinear pairings so as to
provide identity-based identification and key-exchange, using the techniques
of \cite{Boneh:2003:IBE}.  Proving the security of the modifications we
discuss would involve `programming' random oracles, but this doesn't affect
the zero-knowledge or deniability of the resulting protocols.)


\subsection{Notation for algorithms}

We shall have occasion to describe algorithms by means of pseudocode.  Our
choice of pseudocode is unlikely to be particularly controversial.  We let $x
\gets y$ denote the action of setting $x$ to the value $y$; similarly, $x
\getsr Y$ denotes the action of sampling $x$ from the set $Y$ uniformly at
random.

The expression $a \gets A^{O(\cdot, x)}(y)$ means `assign to $a$ the value
output by algorithm $A$ on input $y$, and with oracle access to the algorithm
which, given input $z$, computes $O(z, x)$'.

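As an illustration, the following Python sketch (ours, purely for
exposition, with a toy choice of $O$) shows one way of reading this
notation: the oracle is a callable which closes over the game's secret $x$,
so the adversary can evaluate $O(\cdot, x)$ without ever seeing $x$ itself.
\begin{verbatim}
# Toy model of the notation a <- A^{O(., x)}(y).  All names and the
# choice of O are illustrative only.
def example_game(x, y):
    def oracle(z):                   # computes O(z, x); x stays hidden
        return (z + x) % 7           # a toy O
    return adversary(oracle, y)      # a <- A^{O(., x)}(y)

def adversary(oracle, y):            # a trivial adversary: one query
    return oracle(y)
\end{verbatim}
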
We make use of conditional (\IF-\ELSE) and looping (\FOR-\DO and \WHILE-\DO)
constructions; in order to reduce the amount of space taken up, the bodies of
such constructions are shown by indentation only.

We don't declare the types of our variables explicitly, assuming that these
will be obvious by inspection; also, we don't describe our variables' scopes
explicitly, leaving the reader to determine these from context.

Finally, the notation $\Pr[\textit{algorithm} : \textit{condition}]$ denotes
the probability that \textit{condition} is true after running the given
\textit{algorithm}.


\subsection{Diffie-Hellman problems}
\label{sec:dhp}

The security of our protocols is related to the hardness of the
computational, decisional, and gap Diffie-Hellman problems in the group $G$.
We define these problems and what it means for them to be `hard' here.

The \emph{computational} Diffie-Hellman problem (CDH) is as follows: given
two group elements $X = x P$ and $Y = y P$, find $Z = x y P$.
\begin{definition}[The computational Diffie-Hellman problem]
  Let $(G, +)$ be a cyclic group generated by $P$.  For any adversary $A$, we
  say that $A$'s \emph{success probability} at solving the computational
  Diffie-Hellman problem in $G$ is
  \[ \Succ{cdh}{G}(A) =
     \Pr[ x \getsr I; y \getsr \Nupto{|G|} : A(x P, y P) = x y P ]
  \]
  where the probability is taken over the random choices of $x$ and $y$ and
  any random decisions made by $A$.  We say that the \emph{CDH insecurity
  function} of $G$ is
  \[ \InSec{cdh}(G; t) = \max_A \Succ{cdh}{G}(A) \]
  where the maximum is taken over adversaries which complete in time $t$.
\end{definition}
Certainly, if one can compute discrete logarithms in the group $G$ (i.e.,
given $x P$, find $x$), then one can solve the computational Diffie-Hellman
problem.  The converse is not clear, though.  Shoup \cite{Shoup:1997:LBD}
gives us some confidence in the difficulty of the problem by showing that a
\emph{generic} adversary -- i.e., one which makes no use of the specific
structure of a group -- performing $q$ group operations has success
probability no greater than $q^2/|G|$.

This isn't quite sufficient for our purposes.  Our proofs will be able to
come up with (possibly) a large number of guesses for the correct answer, and
at most one of them will be correct.  Unfortunately, working out which one is
right seems, in general, to be difficult.  This is the \emph{decision}
Diffie-Hellman problem (DDH), which \cite{Shoup:1997:LBD} shows, in the
generic group model, is about as hard as CDH.  (See \cite{Boneh:1998:DDP} for
a survey of the decision Diffie-Hellman problem.)

Our reference problem will be a `multiple-guess computational Diffie-Hellman
problem' (MCDH), which is captured by a game as follows.  An adversary is
given a pair of group elements $(x P, y P)$, and an oracle $V(\cdot)$ which
accepts group elements as input.  The adversary wins the game if it queries
$V(x y P)$.
\begin{definition}[The multiple-guess computational Diffie-Hellman problem]
  \label{def:mcdh}
  Let $(G, +)$ be a cyclic group generated by $P$.  For some adversary $A$,
  we say that $A$'s \emph{success probability} at solving the multiple-guess
  computational Diffie-Hellman problem in $G$ is
  \[ \Succ{mcdh}{G}(A) = \Pr[\Game{mcdh}{G}(A) = 1] \]
  where $\Game{mcdh}{G}(A)$ is shown in figure~\ref{fig:mcdh}.  We say that
  the \emph{MCDH insecurity function of $G$} is
  \[ \InSec{mcdh}(G; t, q_V) = \max_A \Succ{mcdh}{G}(A) \]
  where the maximum is taken over adversaries which complete in time $t$ and
  make at most $q_V$ oracle queries.
\end{definition}

\begin{figure}
  \begin{program}
    $\Game{mcdh}{G}(A)$: \+ \\
      $w \gets 0$; \\
      $x \getsr \Nupto{|G|}$; $y \getsr \Nupto{|G|}$; \\
      $A^{V(\cdot)}(x P, y P)$; \\
      \RETURN $w$;
    \next
    Function $V(Z)$: \+ \\
      \IF $Z = x y P$ \THEN \\ \ind
        $w \gets 1$; \\
        \RETURN $1$; \- \\
      \RETURN $0$;
  \end{program}

  \caption{The multiple-guess computational Diffie-Hellman problem:
    $\Game{mcdh}{G}(A)$}
  \label{fig:mcdh}
\end{figure}

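To make the game concrete, here is a toy Python model of
$\Game{mcdh}{G}(A)$, using the order-$11$ subgroup of $(\Z/23\Z)^*$ written
multiplicatively, so that the scalar multiplication $x P$ becomes a modular
exponentiation; the parameters are illustrative only, and far too small to
be secure.
\begin{verbatim}
# Toy model of Game^{mcdh}_G(A); see figure `fig:mcdh'.
import random

p, q, P = 23, 11, 2                  # 2 has order 11 modulo 23

def game_mcdh(adversary):
    state = {'w': 0}
    x, y = random.randrange(q), random.randrange(q)
    target = pow(P, x * y % q, p)    # the group element x y P
    def V(Z):                        # the verification oracle
        if Z == target:
            state['w'] = 1
            return 1
        return 0
    adversary(V, pow(P, x, p), pow(P, y, p))
    return state['w']

# An adversary which queries every element of G wins trivially here:
print(game_mcdh(lambda V, X, Y: [V(pow(P, i, p)) for i in range(q)]))
\end{verbatim}
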
Note that our MCDH problem is not quite the `gap Diffie-Hellman problem'
(GDH).  The gap problem measures the intractability of solving CDH even with
the assistance of an oracle for solving (restricted) decision Diffie-Hellman
problems in the group.  Specifically, the adversary is given $(X, Y) = (x P,
y P)$ and tries to find $Z = x y P$, as for CDH, but now it has access to an
oracle $D(R, S)$ which answers $1$ if $S = x R$ and $0$ otherwise.

Clearly MCDH is at least as hard as GDH, since our simple verification oracle
$V(Z)$ can be simulated with the gap problem's DDH oracle, as $D(Y, Z)$.
However, we can (loosely) relate the difficulty of MCDH to the difficulty of
CDH.
\begin{proposition}[Comparison of MCDH and CDH security]
  For any cyclic group $(G, +)$,
  \[ \InSec{mcdh}(G; t, q_V) \le q_V\,\InSec{cdh}(G; t + O(q_V)). \]
\end{proposition}
\begin{proof}
  Let $A$ be an adversary attacking the multiple-guess computational
  Diffie-Hellman problem in $G$, and suppose that it runs in time $t$ and
  issues $q_V$ queries to its verification oracle.

  We use a sequence of games.  Game $\G0$ is the original MCDH attack game.
  In each game $\G{i}$, we let $S_i$ be the event that the adversary wins
  the game.

  Game $\G1$ is the same as $\G0$, except that we change the behaviour of the
  verification oracle.  Specifically, we make the oracle always return $0$.
  We claim that this doesn't affect the adversary's probability of winning,
  i.e., $\Pr[S_1] = \Pr[S_0]$.  To see this, note that if none of the
  adversary's $V(\cdot)$ queries was correct, then there is no change in the
  game; conversely, if any query was correct, then the adversary will have
  won regardless of its subsequent behaviour (which may differ arbitrarily
  between the two games).

  We are now ready to construct from $A$ an adversary $B$ attacking the
  standard computational Diffie-Hellman problem.
  \begin{program}
    Adversary $B(X, Y)$: \+ \\
      $n \gets 0$; \\
      \FOR $i \in \Nupto{q_V}$ \DO $Q_i \gets 0$; \\
      $A^{D(\cdot)}(X, Y)$; \\
      $r \getsr \Nupto{n}$; \\
      \RETURN $Q_r$;
    \next
    Function $D(Z')$: \+ \\
      $Q_n \gets Z'$; \\
      $n \gets n + 1$; \\
      \RETURN $0$;
  \end{program}
  Observe that $B$ provides $A$ with an accurate simulation of game $\G1$.
  Moreover, at the end of the algorithm, we have $n \le q_V$, and the values
  $Q_0$, $Q_1$, \dots, $Q_{n-1}$ are the values of $A$'s oracle queries.
  Hence, with probability $\Pr[S_1]$, at least one of the $Q_i$ is the
  correct answer to the CDH problem.  Let $\epsilon = \Pr[S_1] = \Pr[S_0]$;
  we claim that $B$'s probability of success is at least $\epsilon/q_V$.
  Since $A$ was chosen arbitrarily, the proposition follows directly from
  this claim, by maximizing over adversaries and counting $B$'s resource
  usage.

  We now prove the above claim.  For $0 \le i < q_V$, let $W_i$ be the
  event that $Q_i = x y P$, i.e., that $Q_i$ is the correct response.  If
  the adversary wins game $\G1$ then one of its oracle queries was correct,
  so a simple union bound shows that, whenever $\Pr[n = j] > 0$,
  \[ \sum_{0\le i<j} \Pr[W_i \mid n = j] \ge \Pr[S_1 \mid n = j]. \]
  We now perform a calculation:
  \begin{eqnarray*}[rl]
    \Succ{cdh}{G}(B)
    & = \sum_{0\le i<q_V} \Pr[W_i \land r = i] \\
    & = \sum_{0<j\le q_V} \Pr[n = j]
      \biggl( \sum_{0\le i<j} \Pr[W_i \land r = i \mid n = j] \biggr) \\
    & = \sum_{0<j\le q_V} \Pr[n = j]
      \biggl( \frac{1}{j} \sum_{0\le i<j} \Pr[W_i \mid n = j] \biggr) \\
    &\ge \sum_{0<j\le q_V} \Pr[n = j] \frac{\Pr[S_1 \mid n = j]}{j} \\
    &\ge \frac{1}{q_V} \sum_{0<j\le q_V} \Pr[n = j] \Pr[S_1 \mid n = j] \\
    & = \frac{\epsilon}{q_V},
  \end{eqnarray*}
  where the last equality holds because winning $\G1$ entails making at
  least one oracle query, so that $\Pr[S_1 \land n = 0] = 0$.  This
  completes the proof.
\end{proof}


\subsection{Example groups and encodings}

For nonnegative integers $0 \le n < 2^\ell$, there is a natural binary
encoding $N_\ell\colon \Nupto{2^\ell} \to \Bin^\ell$ which we can define
recursively as follows.
\[ N_0(0) = \emptystring \qquad
   N_\ell(n) = \begin{cases}
     N_{\ell-1}(n) \cat 0 & if $0 \le n < 2^{\ell-1}$ \\
     N_{\ell-1}(n - 2^{\ell-1}) \cat 1 & if $2^{\ell-1} \le n < 2^\ell$.
   \end{cases}
\]
Given an encoding $a = N_\ell(n)$ we can recover $n$ as
\[ n = N_\ell^{-1}(a) = \sum_{0\le i<\ell} a[i] 2^i. \]
Hence, given some limit $L \le 2^\ell$, we can encode elements of $\Nupto{L}$
using the functions $(e, d)$:
\[ e(L, \ell, n) = N_\ell(n) \qquad
   d(L, \ell, a) = \begin{cases}
     N_\ell^{-1}(a) & if $N_\ell^{-1}(a) < L$ \\
     \bot & otherwise
   \end{cases}
\]
The reader can verify that the functions $e(L, \ell, \cdot)$ and $d(L, \ell,
\cdot)$ satisfy the requirements of section~\ref{sec:bitenc}.

Given some $q < 2^{\ell_I}$ and $I = \Nupto{q}$, then, we can define an
encoding $(e_I, d_I)$ by $e_I(n) = e(q, \ell_I, n)$ and $d_I(a) = d(q,
\ell_I, a)$.

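The encodings just described translate directly into code.  The following
Python sketch (ours, with little-endian lists of bits standing in for
elements of $\Bin^\ell$) implements $N_\ell$, $e$ and $d$.
\begin{verbatim}
# Sketch of N_ell, e and d; bit strings are lists of 0/1, with a[i]
# being the ith bit.
def N(ell, n):                        # N_ell(n): ell-bit encoding of n
    return [(n >> i) & 1 for i in range(ell)]

def N_inv(a):                         # recover n = sum of a[i] 2^i
    return sum(bit << i for i, bit in enumerate(a))

def e(L, ell, n):                     # e(L, ell, n) = N_ell(n)
    return N(ell, n)

def d(L, ell, a):                     # decode; bot rendered as None
    n = N_inv(a)
    return n if n < L else None

assert d(100, 7, e(100, 7, 42)) == 42
\end{verbatim}
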
Let $p$ and $q$ be primes, with $q|(p - 1)$.  Then there is an order-$q$
subgroup of $(\Z/p\Z)^*$.  In practice, an order-$q$ element can be found
easily by taking elements $h \in (\Z/p\Z)^*$ at random and computing $g =
h^{(p-1)/q}$ until $g \ne 1$; then $G = \langle g \rangle$ is a group of $q$
elements.  Assuming that $p$ and $q$ are sufficiently large, the
Diffie-Hellman problems seem to be difficult in $G$.  Some texts recommend
additional restrictions on $p$, in particular that $(p - 1)/2q$ be either
prime or the product of large primes.  Primes of this form protect against
small-subgroup attacks; but our protocols are naturally immune to these
attacks, so such precautions are unnecessary here.  Elements of $G$ can be
encoded readily, since each element $n + p\Z$ of $\Z/p\Z$ has an obvious
`representative' integer $n$ such that $0 \le n < p$, and given $2^{\ell_G} >
p$, we can encode $n$ as $e(p, \ell_G, n)$, as above.

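In code, this sampling procedure is a short loop; the following Python
sketch uses the toy primes $p = 23$ and $q = 11$ purely for illustration.
\begin{verbatim}
# Find an element of order q in (Z/pZ)^* by powering random elements
# to (p - 1)/q; toy parameters, not secure.
import random

p, q = 23, 11                        # primes with q | (p - 1)
g = 1
while g == 1:
    h = random.randrange(2, p)       # random h in (Z/pZ)^*
    g = pow(h, (p - 1) // q, p)      # h^((p-1)/q) has order 1 or q
assert g != 1 and pow(g, q, p) == 1  # so G = <g> has q elements
\end{verbatim}
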
Alternatively, let $\F = \gf{p^f}$ be a finite field, and $E$ be an elliptic
curve defined over $\F$ such that the group $E(\F)$ of $\F$-rational points
of $E$ has a prime-order cyclic subgroup $G$.  Elements of $G$ can be
represented as pairs of elements of $\F$.  If $f = 1$, i.e., $\F = \Z/p\Z$,
then field elements can be encoded as above.  If $p = 2$, we can represent
the field as $\F_2[x]/(p(x))$ for some irreducible polynomial $p(x) \in
\F_2[x]$ of degree $f$.  An element $r \in \F$ can then be represented by a
polynomial $r(x)$ with degree less than $f$, and coefficients $c_i \in \{0,
1\}$, i.e.,
\[ r(x) = \sum_{0\le i<f} c_i x^i \]
and hence we can uniquely encode $r$ as an $f$-bit string $a$ such that $a[i]
= c_i$.


\subsection{Symmetric encryption}
\label{sec:sym-enc}

Our key-exchange protocol requires a symmetric encryption scheme.  Our
definition is fairly standard, except that, rather than specifying a
key-generation algorithm, we assume that key generation simply involves
selecting a string of a given length uniformly at random.
\begin{definition}[Symmetric encryption schemes]
  A \emph{symmetric encryption scheme} $\E = (\kappa, E, D)$ consists of:
  \begin{itemize}
  \item an integer $\kappa \ge 0$;
  \item a randomized \emph{encryption algorithm} $E$ which, on input $K \in
    \Bin^\kappa$ and $p \in \Bin^*$, outputs some $c \in \Bin^*$, written $c
    \gets E_K(p)$; and
  \item a \emph{decryption algorithm} $D$ which, on input $K \in \Bin^\kappa$
    and $c \in \Bin^*$, outputs some $p' \in \Bin^* \cup \{\bot\}$, written
    $p' \gets D_K(c)$.
  \end{itemize}
  Furthermore, a symmetric encryption scheme must be \emph{sound}: that is,
  if $c \gets E_K(p)$ for some $K \in \Bin^\kappa$ and $p \in \Bin^*$, and
  $p' \gets D_K(c)$, then $p = p'$.
\end{definition}
Our security notion for symmetric encryption is the standard notion of
left-or-right indistinguishability of ciphertexts under chosen-ciphertext
attack.
\begin{definition}
  [Indistinguishability under chosen-ciphertext attack (IND-CCA)]
  \label{def:ind-cca}
  Let $\E = (\kappa, E, D)$ be a symmetric encryption scheme, and $A$ be an
  adversary.  Let $\id{lr}_b(x_0, x_1) = x_b$ for $b \in \{0, 1\}$.  Let
  \[ P_b =
     \Pr[K \getsr \Bin^\kappa;
         b \gets A^{E_K(\id{lr}_b(\cdot, \cdot)), D_K(\cdot)}() :
         b = 1].
  \]
  An adversary is \emph{valid} if
  \begin{itemize}
  \item for any query to its encryption oracle $E_K(\id{lr}_b(x_0, x_1))$ we
    have $|x_0| = |x_1|$, and
  \item no query to the decryption oracle $D_K(\cdot)$ is equal to any reply
    from an encryption query.
  \end{itemize}
  If $A$ is valid, then we define its \emph{advantage} in attacking the
  security of $\E$ as follows
  \[ \Adv{ind-cca}{\E}(A) = P_1 - P_0. \]
  Further, we define the \emph{IND-CCA insecurity function of $\E$} to be
  \[ \InSec{ind-cca}(\E; t, q_E, q_D) = \max_A \Adv{ind-cca}{\E}(A) \]
  where the maximum is taken over all valid adversaries $A$ which run in time
  $t$, and issue at most $q_E$ encryption and $q_D$ decryption queries.
\end{definition}


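The game above is easily rendered as code.  Here is a hypothetical Python
harness for it: the scheme object, with attributes \texttt{kappa} (in
bits), \texttt{E} and \texttt{D}, is an assumed interface standing in for
$\E = (\kappa, E, D)$, and enforcement of the adversary's second validity
condition is omitted for brevity.
\begin{verbatim}
# Hypothetical harness for the IND-CCA game of definition `def:ind-cca'.
import secrets

def ind_cca_game(scheme, adversary, b):
    K = secrets.token_bytes(scheme.kappa // 8)  # K <-R {0,1}^kappa
    def enc_lr(x0, x1):                         # E_K(lr_b(., .))
        assert len(x0) == len(x1)               # validity: |x0| = |x1|
        return scheme.E(K, (x0, x1)[b])
    def dec(c):                                 # D_K(.)
        return scheme.D(K, c)
    return adversary(enc_lr, dec)               # the adversary's guess

# P_b = Pr[ind_cca_game(scheme, adversary, b) = 1]; the advantage
# P_1 - P_0 can be estimated over many runs with fresh keys.
\end{verbatim}
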
\subsection{Simulations}
\label{sec:sim}

In section~\ref{sec:zk-ident}, we shall prove that our identification
protocol is zero-knowledge; in section~\ref{sec:denial}, we show that our
key-exchange protocol is deniable.  In both of these proofs, we shall need to
demonstrate \emph{simulatability}.

\subsubsection{General framework}
Consider a game in which an adversary~$A$ interacts with some `world'~$W$,
which we shall represent as a probabilistic algorithm.  The world may in fact
represent a number of honest parties communicating in a concurrent fashion,
but we can consider them as a single algorithm for our present purposes.

Initially the world and the adversary are both given the same \emph{common
input}~$c$; in addition, the world is given a \emph{private input}~$w$.
Both $c$ and~$w$ are computed by an \emph{initialization function}~$I$, which
is considered to be part of the definition of the game.  Finally, the
adversary decides somehow that it has finished interacting, and outputs a
value~$a$.  All this we notate as
\[ (w, c) \gets I(); a \gets A^{W(w, c)}(c). \]
This game is \emph{simulatable} if there is an algorithm~$S$ -- the
\emph{simulator} -- which can compute the same things as~$A$, but all by
itself without interacting with the world.  That is, we run the simulator on
the common input~$c$, allowing it to interact in some way with the
adversary~$A$, and finally giving us an output~$s$.
\[ (w, c) \gets I(); s \gets S^A(c). \]
We shall say that the simulator is \emph{effective} if it's difficult to tell
whether a given string was output by the adversary after interacting with the
world, or by the simulator running by itself.  That is, for any algorithm~$D$
-- a \emph{distinguisher} -- running in some bounded amount of time, its
advantage
\begin{spliteqn*}
  \Pr[(w, c) \gets I(); a \gets A^{W(w, c)}(c);
      b \gets D(c, a) : b = 1] - {} \\
  \Pr[(w, c) \gets I(); s \gets S^A(c); b \gets D(c, s) : b = 1]
\end{spliteqn*}
is small.  (Note that we gave the distinguisher the common input as well as
the output of the adversary or the simulator.)

It's usual to study \emph{transcripts} of interactions in these kinds of
settings.  We are considering arbitrary adversarial outputs here, so this
certainly includes adversaries which output a transcript of their
interactions.  Indeed, for any adversary~$A$, we could construct an
adversary~$A_T$ which performs the same computation, and outputs the same
result, but also includes a complete transcript of $A$'s interaction with the
world.  Therefore we're just providing additional generality.

\subsubsection{Random oracles}
We shall be considering interactions in which all the parties have access to
several random oracles.  We could simply say that the random oracles are part
of the world~$W$.  In the setting described above, only the adversary
actually interacts with the world (and therefore would be able to query
random oracles).  The simulator would be forced to `make up' its own random
oracle, and the distinguisher would have to study the distributions of the
random-oracle queries and their responses to make up its mind about which it
was given.

However, this would be a poor model for the real world, since once we
instantiate the random oracle with a hash function, we know that everyone
would actually be able to compute the hash function for themselves.  Thus a
distinguisher in the real world would be able to tell the difference
immediately between a real interaction and the simulated transcript, since
the `random oracle' queries recorded in the latter would be wrong!

Therefore we decide not to include the random oracles as part of the world,
but instead allow all the participants -- adversary, simulator and
distinguisher -- access to them.  If we denote by~$\mathcal{H} = (H_0, H_1,
\ldots, H_{n-1})$ the collection of random oracles under consideration, the
expression for the distinguisher's advantage becomes
\begin{spliteqn*}
  \Pr[(w, c) \gets I(); a \gets A^{W(w, c), \mathcal{H}}(c);
      b \gets D^{\mathcal{H}}(c, a) : b = 1] - {} \\
  \Pr[(w, c) \gets I(); s \gets S^{A, \mathcal{H}}(c);
      b \gets D^{\mathcal{H}}(c, s) : b = 1].
\end{spliteqn*}

\subsubsection{Auxiliary inputs}
If an adversary's output can be effectively simulated, then we can
confidently state that the adversary `learnt' very little of substance from
its interaction, and certainly very little it can \emph{prove} to anyone
else.  However, as we have described the setting so far, we fix an adversary
before we choose inputs to the world, so our model says little about what an
adversary which has already acquired some knowledge might learn beyond that.
For example, an adversary might overhear some other conversation between
honest parties and be able to use this to its advantage.

To this end, we give the adversary an \emph{auxiliary input}~$u$, computed by
an algorithm~$U$.  We give $U$ both $c$ and $w$, in order to allow the
adversary to gain some (possibly partial) knowledge of the secrets of the
other parties.  We also allow $U$ access to the random oracles~$\mathcal{H}$,
because clearly in the `real world' it would be ridiculous to forbid such an
algorithm from computing a publicly-known hash function.

The simulator and distinguisher are also given the auxiliary input.  The
simulator is meant to represent the adversary's ability to compute things on
its own, without interacting with the world, and since the adversary is given
the auxiliary input, the simulator must be too.  The distinguisher must be
given the auxiliary input because otherwise the simulator could just `make
up' plausible-looking inputs.

\subsubsection{Resource limits}
We shall not allow our algorithms to perform completely arbitrary
computations and interactions.  Instead, we impose limits on the amount of
time they are allowed to take, the number of random-oracle queries they make,
and so on.  Specifically, we are interested in
\begin{itemize}
\item the time $t_A$ taken by the adversary and $t_D$ taken by the
  distinguisher,
\item the number of oracle queries $\mathcal{Q}_A = (q_{A,0}, q_{A,1},
  \ldots, q_{A,n-1})$ made by the adversary, and $\mathcal{Q}_D$ made by the
  distinguisher,
\item a number of resource bounds $\mathcal{R}$ on the adversary's
  interaction with the world (e.g., number of messages of various kinds sent
  and received), and
\item a number of bounds $\mathcal{U}$ on the contents of the adversary's
  auxiliary input~$u$.
\end{itemize}
Sometimes we shall not be interested in proving simulatability of adversaries
with auxiliary inputs.  We write $\mathcal{U} = 0$ to indicate that no
auxiliary input is allowed.

\subsubsection{World syntax}
It will be worth our while being more precise about what a `world' actually
is, syntactically.  We define a world to be a single, randomized algorithm
taking inputs $(\iota, \sigma, \tau, \mu) \in (\Bin^*)^4$; the algorithm's
output is a pair $(\sigma', \rho) \in (\Bin^*)^2$.  We show how the
adversary's interaction is mapped on to this world algorithm in
figure~\ref{fig:sim-world}.
\begin{itemize}
\item The `input' $\iota$ is the result of the initialization function~$I$.
  That is, it is the pair $(w, c)$ of the world's private input and the
  common input.
\item The `state' $\sigma$ is empty on the world's first invocation; on each
  subsequent call, the value of the world's output $\sigma'$ is passed back.
  In this way, the world can maintain state.
\item The `type' $\tau$ is a token giving the type of invocation this is.
\item The `message' $\mu$ is any other information passed in; its form will
  typically depend on the type~$\tau$ of the invocation.
\item The `new state' $\sigma'$ is the value of $\sigma$ to pass to the next
  invocation of the world.
\item The `reply' $\rho$ is the actual output of the invocation.
\end{itemize}
There are two special invocation types.  The adversary is \emph{forbidden}
from making special invocations.
\begin{itemize}
\item The special invocation type $\cookie{init}$ is used to allow the world to
  prepare an initial state.  The world is invoked as
  \[ W^{\mathcal{H}}(\iota, \emptystring, \cookie{init}, \emptystring) \]
  and should output an initial state $\sigma'$.  The world's reply $\rho$ is
  ignored.  (Recall that $\emptystring$ represents the empty string.)
\item The special invocation type $\cookie{random}$ is used to inform the
  world that the adversary has issued a random oracle query.  The world is
  invoked as
  \[ W^{\mathcal{H}}(\iota, \sigma, \cookie{random}, (i, x, h)) \]
  to indicate that the adversary has queried its random oracle $H_i(\cdot)$
  on the input $x$, giving output~$h$.  The world may output an updated state
  $\sigma'$; its reply $\rho$ is ignored.
\end{itemize}
The latter special query is a technical device used to allow the `fake-world'
simulators we define below to be aware of the adversary's random oracle
queries without being able to `program' the random oracle.  Including it here
does little harm, and simplifies the overall exposition.

\begin{figure}
  \begin{program}
    Interaction $A^{W(w, c), \mathcal{H}}(c, u)$: \+ \\
      $(\sigma, \rho) \gets
        W((w, c), \emptystring, \cookie{init}, \emptystring)$; \\
      $a \gets A^{\id{world}(\cdot, \cdot),
                  \id{random}(\cdot, \cdot)}(c, u)$; \\
      \RETURN $a$;
    \newline
    Function $\id{world}(\tau, \mu)$: \+ \\
      \IF $\tau \in \{\cookie{init}, \cookie{random}\}$ \THEN
        \RETURN $\bot$; \\
      $(\sigma, \rho) \gets W((w, c), \sigma, \tau, \mu)$; \\
      \RETURN $\rho$; \-
    \next
    Function $\id{random}(i, x)$: \+ \\
      $h \gets H_i(x)$; \\
      $(\sigma, \rho) \gets
        W((w, c), \sigma, \cookie{random}, (i, x, h))$; \\
      \RETURN $h$;
  \end{program}

  \caption{Interacting with a world: Interaction $A^{W, \mathcal{H}}$}
  \label{fig:sim-world}
\end{figure}

\subsubsection{Definitions}
We are now ready to begin making definitions.
\begin{definition}[Simulation security]
  \label{def:sim}
  Consider the game described above, with the initialization function~$I$,
  and the world~$W$: let $A$ be an adversary, and let~$U$ be an
  auxiliary-input function; let $S$ be a simulator, and let $D$ be a
  distinguisher.  We define $D$'s \emph{advantage against $S$'s simulation of
  $A$'s interaction with~$W$ with auxiliary inputs provided by~$U$} to be
  \[ \Adv{sim}{W, I, S}(A, U, D) =
     \Pr[\Game{real}{W, I, S}(A, U, D) = 1] -
     \Pr[\Game{sim}{W, I, S}(A, U, D) = 1]
  \]
  where the games are as shown in figure~\ref{fig:sim}.
  Furthermore, we define the \emph{simulator's insecurity function} to be
  \[ \InSec{sim}(W, I, S;
       t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U}) =
     \max_{D, A, U} \Adv{sim}{W, I, S}(A, U, D)
  \]
  where the maximum is taken over all distinguishers~$D$ running in
  time~$t_D$ and making at most $\mathcal{Q}_D$ random-oracle queries, and
  all adversaries~$A$ running in time~$t_A$, making at most $\mathcal{Q}_A$
  random-oracle queries, not exceeding the other stated resource
  bounds~$\mathcal{R}$ on their interaction with~$W$, and auxiliary-input
  functions producing output not exceeding the stated bounds~$\mathcal{U}$.
\end{definition}
\begin{remark}
  The usual definitions of zero-knowledge, for example, require the simulator
  to work for all choices of inputs (common, private and auxiliary), rather
  than for random choices.  Our definition therefore looks weaker.  Our proof
  of zero-knowledge actually carries through to the traditional
  stronger-looking definition.  Critically, however, the standard
  universal quantification over inputs fails to capture deniability in the
  random oracle model, since the inputs then can't depend on the random
  oracle.  Our formulation therefore actually gives \emph{stronger}
  deniability than the usual one.
\end{remark}

\begin{figure}
  \begin{program}
    $\Game{real}{W, I, S}(A, U, D)$: \+ \\
      $(w, c) \gets I()$; \\
      $u \gets U^{\mathcal{H}}(w, c)$; \\
      $a \gets A^{W(w, c), \mathcal{H}}(c, u)$; \\
      $b \gets D^{\mathcal{H}}(c, u, a)$; \\
      \RETURN $b$;
    \next
    $\Game{sim}{W, I, S}(A, U, D)$: \+ \\
      $(w, c) \gets I()$; \\
      $u \gets U^{\mathcal{H}}(w, c)$; \\
      $s \gets S^{A, \mathcal{H}}(c, u)$; \\
      $b \gets D^{\mathcal{H}}(c, u, s)$; \\
      \RETURN $b$;
  \end{program}

  \caption{Games for simulation: $\Game{real}{W, I, S}$ and
    $\Game{sim}{W, I, S}$}
  \label{fig:sim}
\end{figure}

\subsubsection{Fake-world simulators}
The simulators we shall be considering in the present paper are of a specific
type which we call `fake-world simulators'.  They work by running the
adversary in a fake `cardboard cut-out' world, and attempting to extract
enough information from the adversary's previous interactions and random
oracle queries to maintain a convincing illusion.

That is, the behaviour of a fake-world simulator~$S$ is simply to allow the
adversary to interact with a `fake world'~$W'$, which was not given the
world's private input.  Formally, there is some world $W'$ such that
\[ S^{A, \mathcal{H}}(c, u) \equiv A^{W'(u, c), \mathcal{H}}(c, u) \]
Fake-world simulators are convenient because they allow us to remove the
distinguisher~$D$ from consideration, as the following definition shows.
\begin{definition}[Fake-world simulation security]
  \label{def:fakesim}
  Let $I$, $W$ and $U$ be as in definition~\ref{def:sim}.  Let $A$ be an
  adversary which outputs a single bit.  Let $S$ be a fake-world simulator.
  We define $A$'s \emph{advantage against $S$'s fake-world simulation of $W$
  with auxiliary inputs provided by~$U$} to be
  \begin{spliteqn*}
    \Adv{fw}{W, I, S}(A, U) =
      \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
          b \gets A^{W(w, c), \mathcal{H}}(c, u) : b = 1] - {} \\
      \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
          b \gets S^{A, \mathcal{H}}(c, u) : b = 1].
  \end{spliteqn*}
  Furthermore, we define the \emph{simulator's insecurity function} to be
  \[ \InSec{fw}(W, I, S;
       t, \mathcal{Q}, \mathcal{R}, \mathcal{U}) =
     \max_{A, U} \Adv{fw}{W, I, S}(A, U)
  \]
  where the maximum is taken over all adversaries~$A$ running in time~$t$,
  making at most $\mathcal{Q}$ random-oracle queries, not exceeding the other
  stated resource bounds~$\mathcal{R}$ on their interaction with~$W$, and
  auxiliary-input functions producing output not exceeding the stated
  bounds~$\mathcal{U}$.
\end{definition}
It remains for us to demonstrate that this is a valid way of analysing
simulators; the following simple proposition shows that this is indeed the
case.
\begin{proposition}[Fake-world simulation]
  \label{prop:fakesim}
  Let $I$ be an initialization function and let $W$ be a world.  Then, for
  any fake-world simulator~$S$,
  \[ \InSec{sim}(W, I, S; t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A,
       \mathcal{R}, \mathcal{U}) \le
     \InSec{fw}(W, I, S; t_A + t_D, \mathcal{Q}_D + \mathcal{Q}_A,
       \mathcal{R}, \mathcal{U})
  \]
  (where addition of query bounds $\mathcal{Q}$ is done elementwise).
\end{proposition}
\begin{proof}
  Let $W$ and $I$ be as in the proposition statement; also let a
  distinguisher~$D$ running in time~$t_D$ and making $\mathcal{Q}_D$
  random-oracle queries, an adversary~$A$ running in time~$t_A$ and making
  $\mathcal{Q}_A$ random-oracle queries and interacting with its world within
  the stated bounds~$\mathcal{R}$, an auxiliary-input function~$U$ satisfying
  the constraints~$\mathcal{U}$ on its output, and a fake-world simulator~$S$
  all be given.

  We construct an adversary~$B$ outputting a single bit as follows.
  \begin{program}
    Adversary $B^{W, \mathcal{H}}(c, u)$: \+ \\
      $a \gets A^{W, \mathcal{H}}(c, u)$; \\
      $b \gets D^{\mathcal{H}}(c, u, a)$; \\
      \RETURN $b$;
  \end{program}
  A glance at definitions \ref{def:sim} and~\ref{def:fakesim} and the
  resources used by $B$ shows that
  \[ \Adv{sim}{W, I, S}(A, U, D) = \Adv{fw}{W, I, S}(B, U)
     \le \InSec{fw}(W, I, S; t_D + t_A, \mathcal{Q}_D + \mathcal{Q}_A,
       \mathcal{R}, \mathcal{U})
  \]
  as required.
\end{proof}

1165 %%%--------------------------------------------------------------------------
1166
1167 \section{A zero-knowledge identification scheme}
1168 \label{sec:zk-ident}
1169
1170
1171 \subsection{Description}
1172
1173 Here we present a simple zero-knowledge identification scheme. Fix some
1174 group $G$. Suppose Alice chooses a private key $x \inr \Nupto{|G|}$, and
1175 publishes the corresponding public key $X = x P$. Let $H_I\colon G^2 \to
1176 \Bin^{\ell_I}$ be a secure hash function. Here's a simple protocol which
1177 lets her prove her identity to Bob.
1178 \begin{enumerate}
1179 \item Bob selects a random $r \inr \Nupto{|G|}$, and computes $R = r P$, $Y =
1180 r X$, and $c = r \xor H_I(R, Y)$. He sends the pair $(R, c)$ to Alice as
1181 his \emph{challenge}.
\item Alice receives $(R, c)$. She computes $Y' = x R$ and $r' = c \xor
H_I(R, Y')$, and checks that $R = r' P$. If so, she sends $Y'$ as her
1184 \emph{response}; otherwise she sends $\bot$.
1185 \item Bob receives $Y'$ from Alice. He checks that $Y' = Y$. If so, he
1186 accepts that he's talking to Alice; otherwise he becomes suspicious.
1187 \end{enumerate}
1188 We name this the Wrestlers Identification Protocol in~$G$, $\Wident^G$ (we
1189 drop the superscript to refer to the protocol in general, or when no
1190 ambiguity is likely to result). A summary is shown in
1191 figure~\ref{fig:wident}.
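
To make the construction concrete, here is a minimal Python sketch of a
single run of $\Wident$. We model $G$ as $\Nupto{q}$ under addition with
$P = 1$, so that `$x P$' is just $x \bmod q$, and we build $H_I$ from
SHA-256. This toy group makes discrete logs trivial, so the sketch
illustrates only the protocol's algebra, not its security; all parameter
choices here are ours rather than part of the protocol.
\begin{verbatim}
# Toy sketch of one W_ident run.  Group: Z/qZ under addition, P = 1,
# so "x P" is x mod q.  Insecure; for illustrating the algebra only.
import hashlib, secrets

q = 2**127 - 1                      # toy prime group order
ELL_I = 128                         # ell_I, output length of H_I

def H_I(R, Y):                      # H_I: G^2 -> {0,1}^ell_I
    h = hashlib.sha256(R.to_bytes(16, 'big') +
                       Y.to_bytes(16, 'big')).digest()
    return int.from_bytes(h[:ELL_I // 8], 'big')

def setup():                        # private key x, public key X = x P
    x = secrets.randbelow(q)
    return x, x % q

def challenge(X):                   # Bob: R = r P, Y = r X, c = r ^ h
    r = secrets.randbelow(q)
    R, Y = r % q, (r * X) % q
    return Y, (R, r ^ H_I(R, Y))

def response(R, c, x):              # Alice: Y' = x R; check R = r' P
    Y1 = (x * R) % q
    r1 = c ^ H_I(R, Y1)
    return Y1 if r1 % q == R else None

x, X = setup()
Y, (R, c) = challenge(X)
assert response(R, c, x) == Y       # completeness: Bob would accept
\end{verbatim}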
1192
1193 \begin{figure}
1194 \begin{description}
1195 \item[Setup] Group $G = \langle P \rangle$; $|G| = q$ is prime.
1196 $H_I(\cdot, \cdot)$ is a secure hash.
1197 \item[Private key] $x \inr \Nupto{q}$.
1198 \item[Public key] $X = x P$.
1199 \item[Challenge] $(R, c)$ where $r \inr \Nupto{q}$, $R = r P$, $c = r \xor
1200 H_I(R, r X)$.
1201 \item[Response] $x R = r X$ if $R = (c \xor H_I(R, x R)) P$; otherwise
1202 $\bot$.
1203 \end{description}
1204
1205 \caption{Summary of the Wrestlers Identification Protocol, $\Wident$}
1206 \label{fig:wident}
1207 \end{figure}
1208
1209
1210 \subsection{Security}
1211
In order to evaluate the security of our protocol, we give in
figure~\ref{fig:wident-ro} a formal description of the algorithms involved
in figure~\ref{fig:wident}. Here, the hash function $H_I(\cdot, \cdot)$ is
modelled as a random oracle.
1215
1216 \begin{figure}
1217 \begin{program}
1218 Function $\id{setup}()$: \+ \\
1219 $x \getsr \Nupto{q}$; \\
1220 $X \gets x P$; \\
1221 \RETURN $(x, X)$;
1222 \next
Function $\id{challenge}^{H_I(\cdot, \cdot)}(X)$: \+ \\
1224 $r \getsr \Nupto{q}$; \\
1225 $R \gets r P$; $Y \gets r X$; \\
1226 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1227 \RETURN $(Y, R, c)$; \- \\[\medskipamount]
1228 Function $\id{verify}(Y, Y')$: \+ \\
1229 \IF $Y' = Y$ \THEN \RETURN $1$; \\
1230 \RETURN $0$;
1231 \next
1232 Function $\id{response}^{H_I(\cdot, \cdot)}(R, c, x)$: \+ \\
1233 $Y' \gets x R$; \\
$h \gets H_I(R, Y')$; $r' \gets c \xor h$; \\
1235 \IF $R \ne r' P$ \THEN \RETURN $\bot$; \\
1236 \RETURN $Y'$;
1237 \end{program}
1238
1239 \caption{Functions implementing $\Wident$ in the random oracle model}
1240 \label{fig:wident-ro}
1241 \end{figure}
1242
1243 \subsubsection{Completeness}
1244 Suppose that Bob really is talking to Alice. Note that $Y' = x R = x (r P) =
r (x P) = r X = Y$. Hence $r' = c \xor H_I(R, Y') = c \xor H_I(R, Y) = r$,
1246 so $r' P = r P = R$, so Alice returns $Y' = Y$ to Bob. Therefore $\Wident$
1247 is \emph{complete}: if Bob really is communicating with Alice then he
1248 accepts.
1249
1250 \subsubsection{Soundness}
1251 We next show that impersonating Alice is difficult. The natural way to prove
1252 this would be to give an adversary a challenge and prove that its probability
1253 of giving a correct response is very small. However, we prove a stronger
1254 result: we show that if the adversary can respond correctly to any of a large
1255 collection of challenges then it can solve the MCDH problem.
1256
Consider the game $\Game{imp-$n$}{\Wident}$ shown in
1258 figure~\ref{fig:wident-sound}. An adversary's probability of successfully
1259 impersonating Alice in our protocol, by correctly responding to any one of
1260 $n$ challenges, is exactly its probability of winning the game (i.e., causing
1261 it to return $1$).
1262
1263 \begin{figure}
1264 \begin{program}
1265 $\Game{imp-$n$}{\Wident}(A)$: \+ \\
1266 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$; \\
1267 $(x, X) \gets \id{setup}()$; \\
1268 $\id{win} \gets 0$; \\
1269 $\Xid{R}{map} \gets \emptyset$; \\
1270 $\mathbf{c} \gets \id{challenges}(n)$; \\
1271 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1272 (X, \mathbf{c})$; \\
1273 \RETURN $\id{win}$;
1274 \newline
1275 Function $\id{challenges}(n)$: \+ \\
1276 \FOR $i \in \Nupto{n}$ \DO \\ \ind
$(Y, R, c) \gets \id{challenge}^{H_I(\cdot, \cdot)}(X)$; \\
1278 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto Y \}$; \\
1279 $\mathbf{c}[i] \gets (R, c)$; \- \\
1280 \RETURN $\mathbf{c}$; \next
Function $\id{check}(R', Y')$: \+ \\
1282 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1283 $Y \gets \Xid{R}{map}(R')$; \\
1284 \IF $\id{verify}(Y, Y')$ \THEN \\ \ind
1285 $\id{win} \gets 1$; \\
1286 \RETURN $1$; \- \\
1287 \RETURN $0$;
1288 \end{program}
1289
1290 \caption{Soundness of $\Wident$: $\Game{imp-$n$}{\Wident}(A)$}
1291 \label{fig:wident-sound}
1292 \end{figure}
1293
1294 \begin{theorem}[Soundness of $\Wident$]
1295 \label{thm:wident-sound}
1296 Let $A$ be any adversary running in time $t$ and making $q_I$ queries to
1297 its random oracle, and $q_V$ queries to its verification oracle. Let $G$
1298 be a cyclic group. Then
1299 \[ \Pr[\Game{imp-$n$}{\Wident^G}(A) = 1] \le
1300 \InSec{mcdh}(G; t', q_I + q_V)
1301 \]
1302 where $t' = t + O(q_I) + O(q_V)$.
1303 \end{theorem}
1304 \begin{remark}
1305 Note that the security bound here is \emph{independent} of the value of
1306 $n$.
1307 \end{remark}
1308 \begin{proof}
1309 We prove this by defining a sequence of games $\G{i}$. The first will be
1310 the same as the attack game $\Game{imp-$n$}{\Wident}(A)$ and the others
1311 will differ from it in minor ways. In each game $\G{i}$, let $S_i$ be the
1312 event that $A$ wins the game -- i.e., that it successfully impersonates the
1313 holder of the private key~$x$.
1314
Let game $\G0$ be the attack game $\Game{imp-$n$}{\Wident}(A)$, and let $(R',
1316 Y')$ be the output of $A$ in the game.
1317
We define a new game $\G1$ which is the same as $\G0$, except that we query
the random oracle $H_I$ at $(R', Y')$ whenever the adversary queries
$\id{check}(R', Y')$, and also at the adversary's output $(R', Y')$ at the
end of the game, provided $Y' \ne \bot$. (We don't charge the adversary for
these queries.) This obviously doesn't affect the adversary's probability
of winning, so $\Pr[S_1] = \Pr[S_0]$.
1323
1324 Game $\G2$ is like $\G1$, except that we change the way we generate
challenges and check their responses. Specifically, we define new functions
1326 $\id{challenges}_2$ and $\id{check}_2$, as shown in
1327 figure~\ref{fig:wident-sound-2}.
1328
1329 \begin{figure}
1330 \begin{program}
1331 Function $\id{challenges}_2(n)$: \+ \\
1332 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1333 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1334 $r \getsr I$; $R \gets r R^*$; $Y \gets r Y^*$; \\
1335 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1336 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1337 $\mathbf{c}[i] \gets (R, c)$; \- \\
1338 \RETURN $\mathbf{c}$;
1339 \next
1340 Function $\id{check}_2(R', Y')$: \+ \\
1341 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1342 $r \gets \Xid{R}{map}(R')$; \\
1343 \IF $\id{verify}(Y^*, Y'/r)$ \THEN \\ \ind
1344 $\id{win} \gets 1$; \\
1345 \RETURN $1$; \- \\
1346 \RETURN $0$;
1347 \end{program}
1348
1349 \caption{Soundness of $\Wident$: $\id{challenges}_2$ and $\id{check}_2$}
1350 \label{fig:wident-sound-2}
1351 \end{figure}
1352
1353 While we're generating and checking challenges in a more complicated way
1354 here, we're not actually changing the distribution of the challenges, or
1355 changing the winning condition. Hence $\Pr[S_2] = \Pr[S_1]$.
1356
1357 Now we change the rules again. Let $\G3$ be the same as $\G2$ except that
1358 we change the winning condition. Instead, we say that the adversary wins
1359 if any of the queries to its random oracle $H_I(R', Y')$ would be a correct
1360 response -- i.e., $\id{check}_2(R', Y')$ would return $1$. Since we query
1361 the oracle on $(R', Y')$ on its behalf at the end of the game, no adversary
1362 can do worse in this game than it does in $\G2$, so we have $\Pr[S_3] \ge
1363 \Pr[S_2]$. (It's not hard to see that this only helps quite stupid
1364 adversaries. We can transform any adversary into one for which equality
1365 holds here.)
1366
1367 Finally, let $\G4$ be the same as $\G3$ except that we change the way we
1368 generate challenges again: rather than computing $h$ and setting $c \gets h
1369 \xor r$, we just choose $c$ at random. Specifically, we use the new
1370 function, $\id{challenges}_4$, shown in figure~\ref{fig:wident-sound-4}.
1371
1372 \begin{figure}
1373 \begin{program}
1374 Function $\id{challenges}_4(n)$: \+ \\
1375 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1376 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1377 $r \getsr I$; $R \gets r R^*$; \\
1378 $c \getsr \Bin^{\ell_I}$; \\
1379 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1380 $\mathbf{c}[i] \gets (R, c)$; \- \\
1381 \RETURN $\mathbf{c}$;
1382 \end{program}
1383
1384 \caption{Soundness of $\Wident$: $\id{challenges}_4$}
1385 \label{fig:wident-sound-4}
1386 \end{figure}
1387
1388 Since $H_I(\cdot, \cdot)$ is a random function, the adversary can only
1389 distinguish $\G4$ from $\G3$ if it queries its random oracle at some $(R, r
1390 Y^*)$. But if it does this, then by the rule introduced in $\G3$ it has
1391 already won. Therefore we must have $\Pr[S_4] = \Pr[S_3]$.
1392
1393 Our $\id{challenges}_4$ function is interesting, since it doesn't actually
1394 make use of $r^*$ or $Y^*$ when generating its challenges. This gives us
1395 the clue we need to bound $\Pr[S_4]$: we can use adversary $A$ to solve the
1396 multiple-guess Diffie-Hellman problem in $G$ by simulating the game $\G4$.
1397 Specifically, we define the adversary $B$ as shown in
1398 figure~\ref{fig:wident-sound-cdh}. That is, for each query $A$ makes to
1399 its random oracle at a new pair $(R', Y')$, we see whether this gives us
the answer we're looking for. We have $\Pr[S_0] \le \Pr[S_4] =
\Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_I + q_V)$ as required.
1402
1403 \begin{figure}
1404 \begin{program}
1405 Adversary $B^{V(\cdot)}(X, R^*)$: \+ \\
1406 $F \gets \emptyset$; $\Xid{R}{map} \gets \emptyset$; \\
1407 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1408 $r \getsr I$; $R \gets r R^*$; $c \getsr \Bin^{\ell_I}$; \\
1409 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1410 $\mathbf{c}[i] \gets (R, c)$; \- \\
1411 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1412 (X, \mathbf{c})$; \\
1413 \IF $Y' \neq \bot$ \THEN $H_I(R', Y')$;
1414 \next
1415 Oracle $H_I(R', Y')$: \+ \\
1416 \IF $(R', Y') \in \dom F$ \THEN \\ \quad
$h \gets F(R', Y')$; \\
1418 \ELSE \\ \ind
1419 $\id{check}(R', Y')$; \\
1420 $h \getsr \Bin^{\ell_I}$;
1421 $F \gets F \cup \{ (R', Y') \mapsto h \}$; \- \\
1422 \RETURN $h$;
1423 \- \\[\medskipamount]
1424 Oracle $\id{check}(R', Y')$: \+ \\
1425 \IF $R' \in \dom \Xid{R}{map}$ \THEN
1426 $V(Y'/\Xid{R}{map}(R'))$;
1427 \end{program}
1428
1429 \caption{Soundness of $\Wident$: reduction from MCDH}
1430 \label{fig:wident-sound-cdh}
1431 \end{figure}
1432
1433 \end{proof}
1434
1435 \subsubsection{Zero-knowledge}
1436 Finally we must prove that $\Wident$ is (statistical) zero-knowledge -- i.e.,
1437 that, except with very small probability, Bob learns nothing of use to him
1438 except that he's interacting with Alice. To do this, we show that, for any
1439 algorithm $B$ which Bob might use to produce his challenge to the real Alice,
1440 there exists a simulator $S$ which produces transcripts distributed very
1441 similarly to transcripts of real conversations between $B$ and Alice, the
1442 difference being that $S$ doesn't know Alice's key. We shall show that the
statistical difference between the two distributions is at most
$2^{-\ell_I}$.
1444
1445 The intuition here is that Bob ought to know what answer Alice is going to
1446 give him when he constructs his challenge. This is certainly true if he's
1447 honest: his challenge is $R = r P$ for some $r$ he knows, so he won't learn
1448 anything useful when Alice responds with $x R = r X$. However, if Bob sends
1449 a challenge $R$ when he doesn't know the corresponding $r$, he learns
1450 something potentially useful. The accompanying check value $c = r \xor
1451 H_I(R, r X)$ keeps him honest.
1452
1453 To show this, we present an \emph{extractor} which, given any challenge $(R,
1454 c)$ Bob can construct, and his history of random-oracle queries, either
1455 returns a pair $(r, Y)$ such that $R = r P$ and $Y = r X$, or $\bot$;
moreover, if it returns $\bot$, the probability that Alice returns a
response $Y' \ne \bot$ given the challenge $(R, c)$ is at most
$2^{-\ell_I}$. We can, of course, readily convert
1458 this extractor into a simulator to prove the zero-knowledge property of our
1459 protocol.
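
Over the toy group of the earlier sketch, such an extractor takes only a few
lines of Python: it scans the history of random-oracle queries, recovers a
candidate $r$ from each, and checks it against the challenge. The list
format and names here are ours; this is an illustration, not a
specification.
\begin{verbatim}
# Sketch of the extractor.  Q_H is a list of recorded random-oracle
# queries (R', Y', h); X is the prover's public key; the group is
# the toy Z/qZ used earlier.
q = 2**127 - 1

def extract(R, c, Q_H, X):
    for R1, Y1, h in Q_H:
        r = h ^ c                   # candidate r from this query
        if R == R1 == r % q and Y1 == (r * X) % q:
            return r, Y1            # then R = r P and Y = r X
    return None, None               # fail: no consistent query
\end{verbatim}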
1460
1461 We shall actually consider a slightly more complex setting. We grant Bob
1462 access to an oracle which produces random, correctly-formed challenges. We
1463 require this to model the legitimate challenges of other parties when we
analyse the security of our key exchange protocol. The extractor is
permitted to fail when given a challenge which merely repeats one of this
oracle's outputs, as the following definition makes precise.
1466
1467 \begin{definition}[Discrete-log extractors]
1468 Let $T$, $B$ be randomized algorithms. Define the game
1469 $\Game{dl-ext}{G}(T, B)$ as shown in figure~\ref{fig:dlext}. The
1470 \emph{success probability of $T$ as a discrete-log extractor against $B$}
1471 is defined as
1472 \[ \Succ{dl-ext}{G}(T, B) = \Pr[\Game{dl-ext}{G}(T, B) = 1]. \]
1473 \end{definition}
1474 Let's unpack this definition slightly. We make the following demands of our
1475 extractor.
1476 \begin{itemize}
\item It is given a bare `transcript' of $B$'s execution. In particular, it
is given only $B$'s output and a list of $B$'s random-oracle queries, in no
particular order.
1480 \item While the extractor is not given the private key~$x$, the adversary~$B$
1481 is given the private key.
\item We require that, if the extractor produces values $r, Y \ne \bot$,
then $r$ and $Y$ are \emph{correct}; i.e., that $R = r P$ and $Y = x R$.
1484 \item The extractor is explicitly \emph{not} given the outputs of the
1485 challenge-generation oracle $C()$, nor of the random-oracle queries issued
1486 by $C()$. However, we allow the extractor to fail (i.e., return $\bot$) if
1487 $B$ simply parrots one of its $C$-outputs.
1488 \item The extractor is allowed -- indeed \emph{required} -- to fail if the
1489 challenge $(R, c)$ is \emph{invalid} (i.e., Alice would return $\bot$ given
1490 the challenge).
1491 \end{itemize}
1492 The resulting definition bears a striking similarity to the concept of
1493 \emph{plaintext awareness} in \cite{Bellare:1998:RAN}.
1494
1495 \begin{figure}
1496 \begin{program}
1497 $\Game{dl-ext}{G}(T, B):$ \+ \\
1498 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$;
1499 $Q_H \gets \emptyset$; $Q_C \gets \emptyset$; \\
1500 $(x, X) \gets \id{setup}()$; \\
1501 $(R, c) \gets B^{\Xid{H_I}{trap}(\cdot, \cdot), C()}(x, X)$; \\
1502 $(r, Y) \gets T(R, c, Q_H)$; \\
1503 $Y' \gets x R$; $h' \gets H_I(R, Y')$; $r' \gets c \xor h'$; \\
1504 \IF $r \ne \bot$ \THEN \\ \quad
1505 \IF $Y = \bot \lor R \ne r P \lor Y \ne Y'$ \THEN \RETURN $0$; \\
\IF $R = r' P$ \THEN $(r^*, Y^*) \gets (r', Y')$; \\
1507 \ELSE $(r^*, Y^*) \gets (\bot, \bot)$; \\
1508 \IF $(R, c) \in Q_C$ \THEN \RETURN $1$; \\
\IF $(r, Y) = (r^*, Y^*)$ \THEN \RETURN $1$; \\
1510 \RETURN $0$;
1511 \next
1512 Oracle $\Xid{H_I}{trap}(R', Y')$: \+ \\
1513 $h \gets H_I(R', Y')$; \\
1514 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1515 \RETURN $h$; \- \\[\medskipamount]
1516 Oracle $C()$: \+ \\
1517 $r \getsr \Nupto{q}$; \\
1518 $R \gets r P$; $c \gets r \xor H_I(R, r X)$; \\
1519 $Q_C \gets Q_C \cup \{(R, c)\}$; \\
\RETURN $(R, c)$;
1521 \end{program}
1522
1523 \caption{Discrete log extraction game: $\Game{dl-ext}{G}(T, B)$}
1524 \label{fig:dlext}
1525 \end{figure}
1526
1527 Such an extractor indeed exists, as the following lemma states.
1528 \begin{lemma}[Effectiveness of extractor $T_\Wident$]
1529 \label{lem:dlext}
There exists a \emph{universal discrete-log extractor} $T_\Wident$ such
that, for any algorithm $B$,
1532 \[ \Succ{dl-ext}{G}(T_\Wident, B) \ge 1 - \frac{1}{2^{\ell_I}}. \]
1533 Moreover, if $B$ issues at most $q_H$ random-oracle queries, then the
1534 running time of $T_\Wident$ is $O(q_H)$.
1535 \end{lemma}
1536 We prove this result at the end of the section. For now, let us see how to
1537 prove that $\Wident$ is zero-knowledge.
1538
1539 We use the set-up described in section~\ref{sec:sim}. Our initialization
1540 function~$I_\Wident$ just chooses a random $x \in \Nupto{q}$ as the world
1541 private input and sets $X = x P$ as the common input. In the `real world',
1542 the adversary is allowed to submit a (single) challenge to the prover; it is
1543 given the prover's response, and must then compute its output. This is shown
1544 on the left hand side of figure~\ref{fig:wident-sim}.
1545
1546 The zero-knowledge property of the scheme is described by the following
1547 theorem.
1548 \begin{theorem}[Statistical zero-knowledge of $\Wident$]
1549 \label{thm:wident-zk}
Let $I_\Wident$, $W_\Wident$ and $S_\Wident$ be the initialization
function, real-prover world and simulator shown in
figure~\ref{fig:wident-sim}. Then, for any~$t$,
1552 $q_I$ and $q_C$,
\[ \InSec{sim}(W_\Wident, I_\Wident, S_\Wident; t, q_I, q_C, 0) \le
\frac{q_C}{2^{\ell_I}}
\]
where $q_C$ is the maximum number of challenges made by the adversary.
1557 \end{theorem}
1558 \begin{proof}
1559 The simulator simply uses the extractor~$T_\Wident$ to extract the answer
1560 from the adversary's history of random oracle queries. Observe that
$S_\Wident$ is a fake-world simulator. By lemma~\ref{lem:dlext}, the
extractor fails with probability at most $2^{-\ell_I}$ on each challenge.
The theorem follows
1563 by a simple union bound and proposition~\ref{prop:fakesim}.
1564 \end{proof}
1565
1566 \begin{figure}
1567 \begin{program}
1568 Initialization function $I_\Wident()$: \+ \\
1569 $x \getsr \Nupto{|G|}$; \\
1570 $X \gets x P$; \\
1571 \RETURN $(x, X)$;
1572 \- \\[\medskipamount]
1573 Real-prover world $W_\Wident^{H_I(\cdot, \cdot)}
1574 ((x, X), \sigma, \tau, \mu)$: \+ \\
1575 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1576 $(R, c) \gets \mu$; \\
1577 $Y \gets \id{response}^{H_I(\cdot, \cdot)}(R, c, x)$; \\
1578 \RETURN $(1, Y)$; \- \\
1579 \ELSE \\ \ind
1580 \RETURN $(\sigma, \bot)$;
1581 \next
1582 Simulator $S_\Wident$'s fake world \\
1583 \hspace{1in} $W_{\text{sim}}^{H_I(\cdot, \cdot)}
1584 ((X, u), \sigma, \tau, \mu)$: \+ \\
1585 \IF $\tau = \cookie{init}$ \THEN \\ \ind
1586 \RETURN $(\emptyset, \emptystring)$; \- \\
1587 $Q_H \gets \sigma$; \\
1588 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1589 $(R, c) \gets \mu$; \\
1590 $(r, Y) \gets T_\Wident(R, c, Q_H)$; \\
1591 \RETURN $(Q_H, Y)$; \- \\
1592 \ELSE \IF $\tau = \cookie{random}$ \THEN \\ \ind
1593 $(i, (R', Y'), h) \gets \mu$; \\
1594 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1595 \RETURN $(Q_H, \emptystring)$; \- \\
1596 \ELSE \\ \ind
1597 \RETURN $(\sigma, \bot)$;
1598 \end{program}
1599
1600 \caption{Real-prover and simulator for zero-knowledge of $\Wident$}
1601 \label{fig:wident-sim}
1602 \end{figure}
1603
1604 We now return to proving that our extractor exists and functions as claimed.
1605 The following two trivial lemmata will be useful, both now and later.
1606 \begin{lemma}[Uniqueness of discrete-logs]
1607 \label{lem:unique-dl}
1608 Let $G = \langle P \rangle$ be a cyclic group. For any $X \in G$ there is
1609 a unique integer $x$ where $0 \le x < |G|$ and $X = x P$.
1610 \end{lemma}
1611 \begin{proof}
1612 Certainly such an $x$ exists, since $G$ is cyclic and finite. Suppose $X =
1613 x P = x' P$: then $0 = x P - x' P = (x - x') P$. Hence $(x - x')$ is a
multiple of $|G|$; but $0 \le x, x' < |G|$, so $x = x'$.
1615 \end{proof}
1616 \begin{lemma}[Uniqueness of check values]
1617 \label{lem:unique-c}
1618 Let $G = \langle P \rangle$ be a cyclic group; let $H_I$ be a function
1619 $H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$. Fix some $x \in
1620 \Nupto{|G|}$ and define the set
1621 \[ V_x = \bigl\{\, (R, c) \in G \times \Bin^{\ell_I} \bigm|
1622 R = \bigl( c \xor H_I(R, x R) \bigr) P \,\bigr\}.
1623 \]
1624 Then, for any $R$, $c$, $c'$, if $(R, c) \in V_x$ and $(R, c') \in V_x$
1625 then $c = c'$.
1626 \end{lemma}
1627 \begin{proof}
1628 From lemma~\ref{lem:unique-dl}, we see that there is a unique $r \in
1629 \Nupto{|G|}$ for which $R = r P$. Now, if $(R, c) \in V_x$, we must have
$r = c \xor H_I(R, x R)$. It follows that $c = r \xor H_I(R, x R)$ is
uniquely determined by $R$; hence $c = c'$.
1631 \end{proof}
1632
1633 \begin{proof}[Proof of lemma~\ref{lem:dlext}]
1634 Our extractor $T_\Wident$ is shown in figure~\ref{fig:twident}.
1635
1636 \begin{figure}
1637 \begin{program}
1638 Extractor $T_\Wident(R, c, Q_H)$: \+ \\
1639 \FOR $(R', Y', h)$ \IN $Q_H$ \DO \\ \ind
1640 $r \gets h \xor c$; \\
1641 \IF $R = R' = r P \land Y' = r X$ \THEN \RETURN $(r, Y')$; \- \\
1642 \RETURN $(\bot, \bot)$;
1643 \end{program}
1644
1645 \caption{The discrete-log extractor $T_\Wident$}
1646 \label{fig:twident}
1647 \end{figure}
1648
1649 Let $B$ be any randomized algorithm, and let $(R, c, Q_H)$ be as given to
1650 the extractor by $\Game{dl-ext}{G}(T_\Wident, B)$. Let the quantities
1651 $H_I$, $Q_C$, $r$, $r'$, $x$ and $X$ be as in that game.
1652
1653 Suppose that the extractor returns values $(r, Y) \ne (\bot, \bot)$. Let
1654 $h = r \xor c$; then there must be a query $(R, Y, h) \in Q_H$, and we have
1655 $R = r P$ and $Y = r X = r (x P) = x (r P) = x R = Y'$, so the extractor's
output is correct whenever it does not fail.
1657
1658 Furthermore, in the case where the extractor did not fail, we have $h =
1659 H_I(R, Y) = H_I(R, Y')$ and $c = r \xor h$, so the challenge was valid.
1660 Therefore, if the challenge was invalid, the extractor will fail.
1661
1662 We now deal with the challenge-generation oracle. Suppose that $(R, c')
1663 \in Q_C$ for some $c'$. Now, if $c = c'$ then $(R, c')$ is a repeat of
1664 some challenge from the challenge-generation oracle, and the extractor is
1665 permitted to fail. On the other hand, suppose $c \ne c'$; then, the
1666 challenge $(R, c)$ must be invalid by lemma~\ref{lem:unique-c}, so the
1667 extractor is required to fail, and we have established that indeed it will.
1668 From now on, suppose that $R$ is distinct from all the $R$-values returned
1669 by $C()$.
1670
1671 Let $Y = x R$. Suppose that $B$ queried its random oracle at $(R, Y)$.
Let $h = H_I(R, Y)$, so $r' = c \xor h$. If the challenge is valid then $R =
r' P$; therefore $Y = x R = x r' P = r' X$, so we have $(R, Y, h) \in Q_H$
with $R = r' P$ and $Y = r' X$. Hence the extractor returns $r = r'$ as
required.
1676
1677 It remains to deal with the case where there is no random-oracle query at
1678 $(R, Y)$. But then $h = H_I(R, Y)$ is uniformly distributed, and
1679 independent of the entire game up to this point. Let $r$ be the correct
1680 discrete log of $R$; by lemma~\ref{lem:unique-dl} there is only one
1681 possible value. The extractor always fails under these circumstances, but
1682 a correct responder would reply with probability
1683 \[ \Pr[h = c \xor r] = \frac{1}{2^{\ell_I}}. \]
1684 This concludes the proof.
1685 \end{proof}
1686 \begin{remark}
1687 Note that the fact that the algorithm~$B$ was given the private key is
1688 irrelevant to the above argument. However, we shall need this property
1689 when we come to prove deniability for the key-exchange protocol.
1690 \end{remark}
1691 \begin{remark}
1692 It's easy to see from the above proof that the extractor works flawlessly
1693 on the `honest verifier' algorithm $\id{challenge}$ shown in
1694 figure~\ref{fig:wident-ro}. This shows that $\Wident$ is perfect
1695 zero-knowledge against honest verifiers. We're much more interested in
1696 dishonest verifiers, though.
1697 \end{remark}
1698
1699
1700 \subsection{An identity-based identification scheme}
1701 \label{sec:wident-id}
1702
1703 Boneh and Franklin \cite{Boneh:2003:IBE} showed how to construct an
1704 identity-based encryption scheme using bilinear pairings. The resulting
1705 encryption scheme looks somewhat like a pairing-based version of ElGamal's
1706 encryption scheme \cite{ElGamal:1985:PKC}. We can easily apply their
1707 techniques to our identification protocol, and thereby obtain an
1708 identity-based identification scheme. Providing the necessary formalisms to
1709 prove theorems analogous to our theorems~\ref{thm:wident-sound}
1710 and~\ref{thm:wident-zk} would take us too far from our objectives; but given
1711 appropriate security notions, we can readily adapt our existing proofs to the
1712 new setting.
1713
1714 \subsubsection{Bilinear pairings}
1715 Before we describe the necessary modifications to the protocol, we first give
1716 a (very brief!) summary of cryptographic pairings. (The Boneh-Franklin paper
1717 \cite{Boneh:2003:IBE} gives more detail; also \cite{Menezes:2005:IPB}
1718 provides a useful introduction to the topic.)
1719
1720 Let $(G, +)$, $(G', +)$ and $(G_T, \times)$ be cyclic groups with prime order
1721 $q$; let $P \in G$ and $P' \in G'$ be elements of order $q$ in $G$ and $G'$
1722 respectively. We say that a mapping $\hat{e}\colon G \times G' \to G_T$ is a
1723 \emph{non-degenerate bilinear pairing} if it satisfies the following
1724 properties.
1725 \begin{description}
1726 \item[Bilinearity] For all $R \in G$ and $S', T' \in G'$, we have $\hat{e}(R,
1727 S' + T') = \hat{e}(R, S')\,\hat{e}(R, T')$; and for all $R, S \in G$ and $T'
1728 \in G'$ we have $\hat{e}(R + S, T') = \hat{e}(R, T')\,\hat{e}(S, T')$.
1729 \item[Non-degeneracy] $\hat{e}(P, P') \ne 1$.
1730 \end{description}
1731 For practical use, we also want $\hat{e}(\cdot, \cdot)$ to be efficient to
1732 compute. The reader can verify that $\hat{e}(a P, b P') = \hat{e}(P,
1733 P')^{ab}$. It is permitted for the two groups $G$ and $G'$ to be equal.
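To check the identity, write $a P$ as a sum of $a$ copies of $P$ and apply
bilinearity repeatedly in each argument:
\[ \hat{e}(a P, b P') = \hat{e}(P, b P')^a
     = \bigl( \hat{e}(P, P')^b \bigr)^a
     = \hat{e}(P, P')^{ab}. \]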
1734
1735 We require a different intractability assumption, specifically that the
1736 \emph{bilinear} Diffie-Hellman problem (BDH) -- given $(a P, b P, a P', c P')
1737 \in G^2 \times G'^2$, find $\hat{e}(P, P')^{abc} \in G_T$ -- is difficult.
1738 This implies the difficulty of the computational Diffie-Hellman problem in
1739 all three of $G$, $G'$ and~$G_T$.
1740
1741 \subsubsection{The identity-based scheme}
1742 We need a trusted authority; following \cite{Schneier:1996:ACP} we shall call
1743 him Trent. Trent's private key is $t \in \Nupto{q}$; his public key is $T =
1744 t P$.
1745
1746 Finally, we need cryptographic hash functions $H_I\colon G \times G_T \to
1747 \Bin^{\ell_I}$ and $\Hid\colon \Bin^* \to G'$; a formal security analysis
1748 would model these as random oracles.
1749
1750 Alice's public key is $A = \Hid(\texttt{Alice}) \in G'$. Her private key is
1751 $K_A = t A \in G'$ -- she needs Trent to give this to her. Bob can interact
1752 with Alice in order to verify her identity as follows.
1753 \begin{enumerate}
1754 \item Bob computes $\gamma_A = \hat{e}(T, A) \in G_T$. (He can do this once
1755 and store the result if he wants, but it's not that onerous to work it out
1756 each time.)
1757 \item Bob chooses $r \inr \Nupto{q}$, and sets $R = r P$. He also computes
1758 $\psi = \gamma_A^r$, $h = H_I(R, \psi)$ and $c = r \xor h$. He sends his
1759 challenge $(R, c)$ to Alice.
\item Alice receives $(R', c')$. She computes $\psi' = \hat{e}(R', K_A)$,
$h' = H_I(R', \psi')$, and $r' = c' \xor h'$. She checks that $R' = r' P$; if
1762 so, she sends $\psi'$ back to Bob; otherwise she refuses to talk to him.
1763 \item Bob receives $\psi'$. If $\psi = \psi'$ then he accepts that he's
1764 talking to Alice.
1765 \end{enumerate}
1766 This works because $\psi = \gamma_A^r = \hat{e}(T, A)^r = \hat{e}(t P, A)^r =
1767 \hat{e}(r P, A)^t = \hat{e}(R, t A) = \psi'$.
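
The following Python fragment checks this identity with a toy pairing: we
take $G = G' = \Nupto{q}$ under addition with $P = P' = 1$, let $G_T$ be the
order-$q$ subgroup of the integers modulo a prime $p$ with $q$ dividing
$p - 1$, and set $\hat{e}(a, b) = g^{ab} \bmod p$. This map is bilinear and
non-degenerate but cryptographically worthless; the tiny parameters are ours
and serve only to exercise the algebra.
\begin{verbatim}
# Toy pairing: G = G' = Z/qZ (additive, P = P' = 1); G_T = <g> in
# Z/pZ* with q | p - 1; e(a, b) = g^(a b) mod p.  Insecure toy.
import hashlib, secrets

q, p, g = 1019, 2039, 4             # p = 2q + 1; g = 2^2 has order q

def e(a, b):                        # e(x P, y P') = e(P, P')^(x y)
    return pow(g, (a * b) % q, p)

def H_id(name):                     # stand-in for \Hid: {0,1}* -> G'
    return int.from_bytes(hashlib.sha256(name).digest(), 'big') % q

t = secrets.randbelow(q); T = t % q   # Trent: T = t P
A = H_id(b'Alice')                    # Alice's public key
K_A = (t * A) % q                     # her private key t A, from Trent

r = secrets.randbelow(q); R = r % q   # Bob's challenge R = r P
psi = pow(e(T, A), r, p)              # Bob: psi = gamma_A^r
assert psi == e(R, K_A)               # Alice: psi' = e(R, K_A)
\end{verbatim}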
1768
1769 \subsubsection{Informal analysis}
1770 An analogue to lemma~\ref{lem:dlext} can be proven to show how to extract $r$
1771 from a verifier's random-oracle queries; statistical zero knowledge would
1772 then follow easily, as in theorem~\ref{thm:wident-zk}. Soundness is
1773 intuitively clear, since an adversary must compute $\psi = \hat{e}(P,
1774 P')^{art}$ given $A = a P'$, $R = r P$ and $T = t P$, which is an instance of
1775 the BDH problem. An analogue of theorem~\ref{thm:wident-sound} would have to
1776 prove this for an adversary capable of making identity requests as well as
1777 obtaining challenges. Finally, our key-exchange protocol can be constructed
1778 out of this identity-based identification scheme, yielding an identity-based
1779 authenticated key-exchange protocol. We leave it to the reader to work
1780 through the details.
1781
1782
1783 \subsection{Comparison with the protocol of Stinson and Wu}
1784 \label{sec:stinson-ident}
1785
1786 Our protocol is similar to a recent proposal by Stinson and Wu
1787 \cite{Stinson:2006:EST}. They restrict their attention to Schnorr groups $G
1788 \subset \F_p^*$. Let $\gamma$ be an element of order $q = |G|$. The
1789 prover's private key is $a \inr \Nupto{q}$ and her public key is $\alpha =
1790 \gamma^a$. In their protocol, the challenger chooses $r \inr \Nupto{q}$,
1791 computes $\rho = \gamma^r$ and $\psi = \alpha^r$, and sends a challenge
$(\rho, H(\psi))$. The prover checks that $\rho^q = 1$, computes $\psi =
1793 \rho^a$, checks the hash, and sends $\psi$ back by way of response. They
1794 prove their protocol's security in the random-oracle model.
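
For comparison, here is an equally minimal sketch of one honest Stinson-Wu
run in a tiny Schnorr group; the toy parameters are ours and are far too
small for real use.
\begin{verbatim}
# One honest Stinson-Wu run in a toy Schnorr group G < Z/pZ* of
# prime order q.  Illustration only.
import hashlib, secrets

p, q, gamma = 2039, 1019, 4         # q | p - 1; gamma has order q
H = lambda x: hashlib.sha256(x.to_bytes(2, 'big')).digest()

a = secrets.randbelow(q)            # prover's private key
alpha = pow(gamma, a, p)            # public key alpha = gamma^a

r = secrets.randbelow(q)            # challenger picks r
rho, psi = pow(gamma, r, p), pow(alpha, r, p)
h = H(psi)                          # challenge is (rho, h)

assert pow(rho, q, p) == 1          # prover: rho lies in G
psi1 = pow(rho, a, p)               # prover recomputes psi
assert H(psi1) == h                 # ... and checks the hash
assert psi1 == psi                  # challenger accepts the response
\end{verbatim}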
1795
1796 Both the Wrestlers protocol and Stinson-Wu require both prover and verifier
1797 to compute two exponentiations (or scalar multiplications) each. The
1798 sizes of the messages used by the two protocols are also identical.
1799
1800 (An earlier version of the Stinson-Wu protocol used a cofactor
exponentiation: if we set $f = (p - 1)/q$, then we use $\psi = \alpha^{rf} =
1802 \rho^{af} = \gamma^{afr}$. This is more efficient in typical elliptic curve
1803 subgroups, since the cofactor of such subgroups is usually small: indeed,
1804 \cite{SEC1} recommends rejecting groups with cofactor $f > 4$. However, in
1805 the Schnorr groups used by Stinson and Wu, the cofactor is much larger than
1806 $q$, and their new variant is more efficient.)
1807
1808 We note that the zero-knowledge property of the Stinson-Wu protocol requires
1809 the Diffie-Hellman knowledge of exponent assumption (KEA). Very briefly:
1810 suppose $A$ is a randomized algorithm which takes as input $X \in G$ and
outputs a pair $(r P, r X)$; intuitively, the KEA asserts that $A$ must have
done
1812 this by choosing $r$ somehow and then computing its output from it.
1813 Formally, it asserts the existence of an `extractor' algorithm which takes as
1814 input the element $X$ and the random coins used by $A$ and outputs $r$ with
1815 high probability. This is a very strong assumption, and one which is
1816 unnecessary for our protocol, since we can present an \emph{explicit}
1817 extractor.
1818
1819 The KEA assumption as stated in \cite{Stinson:2006:EST} allows the extractor
1820 to fail with some negligible probability, over and above the probability that
1821 a dishonest verifier managed to guess the correct $h = H(\psi)$ without
making this random-oracle query. Not only does our protocol achieve
zero-knowledge without the KEA, but our extractor is also, in this sense,
`perfect'.
1824
1825 Our protocol is just as strong as Stinson-Wu under attack from active
1826 intruders: see table~\ref{tab:wident-active} for a very brief sketch of the
1827 case-analysis which would be the basis of a proof of this.
1828
1829 \begin{table}
1830 \begin{tabular}[C]{|*{3}{c|}p{8cm}|}
1831 \hlx{hv[1]}
1832 \multicolumn{2}{|c|}{\textbf{Challenge}} &
1833 \textbf{Response} &
1834 \textbf{Security}
1835 \\ \hlx{v[1]hv}
1836 %% unpleasant hacking to make the R and c columns the same width :-(
1837 \settowidth{\dimen0}{\textbf{Challenge}}%
1838 \dimen0=.5\dimen0
1839 \advance\dimen0by-\tabcolsep
1840 \advance\dimen0by-.5\arrayrulewidth
1841 \hbox to\dimen0{\hfil$R$\hfil}
1842 & $c$ & $Y$ & Nothing to prove. \\ \hlx{v}
1843 $R$ & $c'$ & --- & Prover rejects by lemma~\ref{lem:unique-c};
1844 $Y'$ probably wrong by
1845 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
1846 $R$ & $c$ & $Y'$ & Response is incorrect. \\ \hlx{v}
1847 $R'$ & --- & $Y$ & Response is incorrect. \\ \hlx{v}
1848 $R'$ & $c$ & $Y'$ & Prover rejects with probability $1 - 2^{-\ell_I}$;
1849 $Y'$ probably wrong by
1850 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
1851 $R'$ & $c'$ & $Y'$ & Simulate prover using extractor
1852 (lemma~\ref{lem:dlext}); $Y'$ probably wrong by
1853 theorem~\ref{thm:wident-sound}. \\ \hlx{vh}
1854 \end{tabular}
1855
1856 \caption{Security of $\Wident$ against active intruders (summary)}
1857 \label{tab:wident-active}
1858 \end{table}
1859
1860 An identity-based analogue of Stinson-Wu can be defined using a bilinear
1861 pairing, just as we did in section~\ref{sec:wident-id}. However, to prove
1862 the zero-knowledge property, one needs to make a bilinear analogue of the
1863 knowledge of exponent assumption.
1864
1865 We suspect that a key-exchange protocol like ours can be constructed using
1866 Stinson-Wu rather than the Wrestlers identification scheme. We haven't,
1867 however, gone through all the details, since we believe our protocol is just
1868 as efficient and is based on much more conservative assumptions.
1869
1870 %%%--------------------------------------------------------------------------
1871
1872 \section{A simple key-exchange protocol}
1873 \label{sec:kx}
1874
1875 In this section, we describe a simple key-exchange protocol built out of the
1876 identification protocol shown previously.
1877
1878 The key-exchange protocol arises from the following observation. If Bob
sends a challenge, presumably to Alice, and gets a correct response, then
not only was he really talking to Alice, but he also knows that she received
his challenge correctly.
1882
1883 So, if Alice and Bob authenticate each other, by the end of it, they should
1884 each have chosen a random private value, sent the corresponding public value
1885 to the other, and been convinced that it arrived safely.
1886
1887 Unfortunately, life isn't quite this kind, and we have to do some more work
1888 to make this scheme secure.
1889
1890
1891 Our key exchange protocol essentially consists of two parallel instances of
1892 the identification protocol. If Alice receives a correct response to her
1893 challenge, she will know that Bob received her challenge correctly, and
1894 \emph{vice versa}. If we let Alice's challenge be $R_0 = r_0 P$ and Bob's
1895 challenge be $R_1 = r_1 P$ then each can compute a shared secret $Z = r_0 R_1
1896 = r_0 r_1 P = r_1 R_0$ unknown to an adversary. There are, unfortunately, a
1897 few subtleties involved in turning this intuition into a secure key-exchange
1898 protocol, which we now describe.
1899
1900
1901 \subsection{Overview}
1902 \label{sec:kx-overview}
1903
1904 We present a quick, informal description of our basic key-exchange protocol.
1905 In addition to our group $G$, we shall also need a secure symmetric
1906 encryption scheme $\E = (\kappa, E, D)$, and two secure hash functions
1907 $H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$ and $H_K\colon \Bin^{\ell_G+1}
1908 \to \Bin^\kappa$.
1909
1910 Suppose that Alice's and Bob's private keys are $a$ and $b$ respectively, and
1911 their public keys are $A = a P$ and $B = b P$.
1912 \begin{enumerate}
\item Alice chooses a random $r \inr \Nupto{|G|}$. She computes $R = r
1914 P$ and $c = r \xor H_I(R, r B)$. She sends the pair $(R, c)$ to Bob.
1915 \item Similarly, Bob chooses a random $s \inr \Nupto{|G|}$. He computes $S
1916 = s P$ and $d = s \xor H_I(S, s A)$. He sends $(S, d)$ to Alice.
1917 \item Alice receives $(S', d')$ from Bob. She computes $s' = d' \xor H_I(S',
1918 a S')$, and verifies that $S' = s' P$. If so, she computes $K_A = H_K(0
\cat r S')$, and sends $(R, E_{K_A}(a S'))$ to Bob.
1920 \item Similarly, Bob receives $(R', c')$ from Alice. He verifies that $R' =
1921 \bigl( c' \xor H_I(R', b R') \bigr) P$. If so, he computes $K_B = H_K(0
\cat s R')$ and sends $(S, E_{K_B}(b R'))$ to Alice.
\item Alice receives a pair $(S'', \chi_B)$ from Bob. She checks that
1924 $S'' = S'$, decrypts $\chi_B$, and checks that $D_{K_A}(\chi_B) = r B$. If
1925 so, she uses $H_K(1 \cat r S')$ as her shared secret.
1926 \item Similarly, Bob receives $(R'', \chi_A)$ from Alice, and checks $R'' =
1927 R'$ and $D_{K_B}(\chi_A) = s A$. If so, he uses $H_K(1 \cat s R')$ as his
1928 shared secret.
1929 \end{enumerate}
1930 This is the Wrestlers Key Exchange protocol, $\Wkx^{G, \E}$ (again, we omit
1931 the superscripts when referring to the general protocol, or when confusion is
1932 unlikely). A diagrammatic summary of the protocol is shown in
1933 figure~\ref{fig:wkx}.
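
The following sketch traces the values computed in one honest run, again
over a toy additive group modulo a prime $q$ with $P = 1$. Here $E$ is a
one-time XOR pad derived from the key, standing in for a real IND-CCA2
scheme, and the check values $c$ and $d$ are omitted; everything about the
sketch is illustrative rather than normative.
\begin{verbatim}
# One honest W_kx run over the toy group Z/qZ (P = 1).  E_K is a
# toy XOR pad; check values c, d omitted.  Illustration only.
import hashlib, secrets

q = 2**127 - 1

def H_K(tag, Z):                    # models H_K(tag || Z)
    return hashlib.sha256(bytes([tag]) +
                          Z.to_bytes(16, 'big')).digest()

def E(K, m):                        # toy cipher: XOR with keyed pad
    return m ^ (int.from_bytes(K, 'big') >> 128)
D = E                               # XOR is its own inverse

a, b = secrets.randbelow(q), secrets.randbelow(q)
A, B = a % q, b % q                 # long-term public keys
r, s = secrets.randbelow(q), secrets.randbelow(q)
R, S = r % q, s % q                 # exchanged challenge values

K_A = H_K(0, (r * S) % q)           # Alice: H_K(0 || r S')
K_B = H_K(0, (s * R) % q)           # Bob:   H_K(0 || s R')
chi_A = E(K_A, (a * S) % q)         # Alice sends (R, chi_A)
chi_B = E(K_B, (b * R) % q)         # Bob sends (S, chi_B)

assert D(K_A, chi_B) == (r * B) % q          # Alice's check
assert D(K_B, chi_A) == (s * A) % q          # Bob's check
assert H_K(1, (r * S) % q) == H_K(1, (s * R) % q)  # same secret
\end{verbatim}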
1934
1935 \begin{figure}
1936 \begin{description}
1937 \item[Setup] Group $G = \langle P \rangle$; $|G| = q$ is prime.
1938 $H_I(\cdot, \cdot)$ and $H_K(\cdot)$ are secure hashes. $\E = (\kappa,
1939 E, D)$ is an IND-CCA2 symmetric encryption scheme.
1940 \item[Parties] $U_i$ for $0 \le i < n$.
1941 \item[Private keys] $x_i \inr \Nupto{q}$.
1942 \item[Public keys] $X_i = x_i P$.
1943 \end{description}
1944 \[ \begin{graph}
1945 !{0; <8cm, 0cm>: <0cm, 3cm>::}
1946 *+[F]\dbox{$r_i \getsr I$; $R_i \gets r_i P$ \\
1947 $c_i \gets r_i \xor H_I(R_i, r_i X_j)$} ="i0"
1948 [d]
*+[F]\dbox{Check $R_j = \bigl(c_j \xor H_I(R_j, x_i R_j)\bigr) P$ \\
1950 $Z \gets r_i R_j$; $K \gets H_K(0, Z)$ \\
1951 $\chi_i \gets E_K(x_i R_j)$} ="i1"
1952 [d]
1953 *+[F]\dbox{Check $D_K(\chi_j) = r_i X_j$ \\
1954 Shared key is $H_K(1, Z)$} ="i2"
1955 "i0" [r]
1956 *+[F]\dbox{$r_j \getsr I$; $R_j \gets r_j P$ \\
1957 $c_j \gets r_j \xor H_I(R_j, r_j X_i)$} ="j0"
1958 [d]
*+[F]\dbox{Check $R_i = \bigl(c_i \xor H_I(R_i, x_j R_i)\bigr) P$ \\
1960 $Z \gets r_j R_i$; $K \gets H_K(0, Z)$ \\
1961 $\chi_j \gets E_K(x_j R_i)$} ="j1"
1962 [d]
1963 *+[F]\dbox{Check $D_K(\chi_i) = r_j X_i$ \\
1964 Shared key is $H_K(1, Z)$} ="j2"
1965 %
1966 "i0" : |(0)/3.25cm/*+{(R_i, c_i)} "j1"
1967 "i1" : |(0)/3.25cm/*+{(R_i, \chi_i)} "j2"
1968 "j0" : |(0)/3.25cm/*+{(R_j, c_j)} "i1"
1969 "j1" : |(0)/3.25cm/*+{(R_j, \chi_j)} "i2"
1970 \end{graph} \]
1971
1972 \caption{Summary of the Wrestlers Key Exchange protocol, $\Wkx$}
1973 \label{fig:wkx}
1974 \end{figure}
1975
1976 Assume, for the moment, that Alice and Bob's messages are relayed honestly.
1977 Then:
1978 \begin{itemize}
\item $a S' = a S = a (s P) = s (a P) = s A$, so $s' = d' \xor H_I(S', a S') =
1980 d \xor H_I(S, s A) = s$, and $S' = S = s P = s' P$, and therefore Alice
1981 responds to Bob's message;
1982 \item similarly $b R' = r B$, so $r' = r$ and $R' = r' P$, and therefore Bob
1983 responds to Alice's message;
1984 \item $b R' = b R = b (r P) = r (b P) = r B$, and $a S' = a S = a (s P) = s
1985 (a P) = s A$, and therefore both parties compute their responses correctly;
1986 and
1987 \item $r S' = r S = r (s P) = s (r P) = s R = s R'$, so $K_A = K_B$, and
1988 therefore they can decrypt each others' responses, and agree the same
1989 shared secret.
1990 \end{itemize}
1991 This shows that the protocol is basically valid, but says little about its
1992 security. The remainder of this section will describe our protocol in more
1993 formal detail, and prove its security in a model with multiple parties and an
1994 adversary who controls the network.
1995
Observe that the protocol as we've presented it here is \emph{symmetrical}.
1997 There's no notion of `initiator' or `responder'. There are a total of four
1998 messages which must be sent before both parties accept. However, this can be
1999 reduced to three by breaking the symmetry of the protocol and combining one
2000 or other party's challenge and response messages. We choose to analyse the
symmetrical version, since then it suffices to consider only the two
2002 different kinds of messages. Since our security model allows the messages to
2003 be adversarially delayed and reordered, it is easy to show that the security
2004 of an optimized, asymmetrical protocol is no worse than the symmetrical
2005 version we present here.
2006
2007
2008 \subsection{Security model and security definition}
2009 \label{sec:um}
2010
2011 Our model is very similar to that of Canetti and Krawczyk
2012 \cite{Canetti:2001:AKE}, though we have modified it in two ways.
2013 \begin{enumerate}
2014 \item We allow all the participants (including the adversary) in the protocol
2015 access to the various random oracles required to implement it.
2016 \item Since we want to analyse a specific, practical scheme, asymptotic
2017 results are useless. We measure the adversary's resource usage carefully,
2018 and produce a quantitative bound on the adversary's advantage in the
2019 SK-security game.
2020 \end{enumerate}
2021
2022 \subsubsection{Overview}
2023 We briefly describe our modified model, pointing out the changes we have
2024 made, and how they apply to our protocol. Much of Canetti and Krawczyk's
2025 model (for example, the local and global outputs) is useful for proving more
2026 general security properties such as demonstrating that SK-security suffices
2027 for constructing secure channels, and we shall not concern ourselves with
2028 such details. Other parts deal with issues such as security parameters and
2029 ensuring that all the computation is polynomially bounded, which are
2030 irrelevant since we are dealing with a single concrete protocol rather than a
2031 family of them.
2032
2033 The entities in the model are the \emph{adversary}~$A$, and a (fixed) number
2034 of \emph{parties}~$P_i$, for $0 \le i < n$. If the protocol under
2035 consideration makes use of random oracles, then all the participants -- the
adversary and the parties -- are allowed access to the random oracles.
2037
2038 The parties and the adversary play a `game'. At the beginning of the game,
2039 the participants are given some inputs computed by a randomized
2040 \emph{initialization procedure}~$\id{init}$. This produces as output a pair
2041 $(i_U, \mathbf{i})$; the value $i_U$ is the \emph{global input}, and is given
2042 to all the participants including the adversary. The vector $\mathbf{i}$ has
2043 $n$ components, and party $P_i$ is given $(i_U, \mathbf{i}[i])$ as input.
2044
2045 \subsubsection{Sessions}
2046 Parties don't act directly. Instead, each party runs a number of
\emph{sessions}. A session is a triple $S = (P_i, P_j, s)$, where $i, j \in
2048 \Nupto{n}$ identify the owning party and a \emph{partner}, and $s \in
2049 \Bin^{\ell_S}$ is a \emph{session-id}. (The original model includes a
r\^ole, distinguishing between initiators and responders. Our protocol
2051 is symmetrical, so this distinction isn't useful.) If $P_i$ runs a session
2052 $S = (P_i, P_j, s)$ and $P_j$ runs a session $S' = (P_j, P_i, s)$ then we say
2053 that $S$ and $S'$ are \emph{matching}, and that $P_j$ is $P_i$'s
2054 \emph{partner} for the session.
2055
2056 At most one participant in the game is \emph{active} at any given time.
2057 Initially the adversary is active. The adversary may \emph{activate} a
2058 session in one of two ways.
2059 \begin{enumerate}
2060 \item It may \emph{create a session} of a party~$P_i$, by selecting a
2061 session-id~$s \in \Bin^{\ell_S}$ and a partner $j$. There is no
2062 requirement that $P_j$ ever have a matching session. However, all sessions
2063 of a party must be distinct, i.e., sessions with the same partner must have
2064 different session-ids.
2065 \item It may \emph{deliver a message}~$\mu \in \Bin^*$, from party~$P_j$, to
2066 an existing session~$S = (P_i, P_j, s)$. There is no requirement that any
2067 party previously sent $\mu$: the adversary is free to make up messages as
2068 it sees fit.
2069 \end{enumerate}
2070 The adversary becomes inactive, and the session becomes active. The session
2071 performs some computation, according to its protocol, and may request a
2072 message~$\mu$ be delivered to the matching session running in its partner
2073 (which may not exist). The session may also \emph{terminate}. In the case
2074 we are interested in, of key-exchange protocols, a session~$S = (P_i, P_j,
2075 s)$ may terminate in one of two ways:
2076 \begin{enumerate}
2077 \item it may \emph{complete}, outputting $(i, j, s, K)$, for some
2078 \emph{session key}~$K$, or
2079 \item it may \emph{abort}, outputting $(i, j, s, \bot)$.
2080 \end{enumerate}
2081 Once it has performed these actions, the session deactivates and the
2082 adversary becomes active again. The adversary is given the message~$\mu$, if
2083 any, and informed of whether the session completed or aborted, but, in the
2084 case of completion, not of the value of the key~$K$. A session is
2085 \emph{running} if it has been created and has not yet terminated.
2086
2087 \subsubsection{Other adversarial actions}
2088 As well as activating sessions, the adversary has other capabilities, as
2089 follows.
2090 \begin{itemize}
2091 \item It may \emph{expire} any session~$S$, causing the owning party to
2092 `forget' the session key output by that session.
2093 \item It may \emph{corrupt} any party~$P_i$, at will: the adversary learns
2094 the entire state of the corrupted party, including its initial
2095 input~$\mathbf{i}[i]$, the state of any sessions it was running at the
2096 time, and the session keys of any completed but unexpired sessions. Once
2097 corrupted, a party can no longer be activated. Of course, the adversary
2098 can continue to send messages allegedly from the corrupted party.
2099 \item It may \emph{reveal the state} of a running session~$S$, learning any
2100 interesting values specific to that session, but \emph{not} the owning
2101 party's long-term secrets.
2102 \item It may \emph{reveal the session-key} of a completed session~$S$.
\item It may elect to be \emph{challenged} with a completed session~$S$,
provided that $S$ is unexposed. Challenge sessions form part of the
security notion for key-exchange protocols. See below for more details.
2106 \end{itemize}
2107 We say that a session $S = (P_i, P_j, s)$ is \emph{locally exposed} if
2108 \begin{itemize}
2109 \item it has had its state revealed,
2110 \item it has had its session-key revealed, or
2111 \item $P_i$ has been corrupted, and $S$ had not been expired when this
2112 happened.
2113 \end{itemize}
2114 A session is \emph{exposed} if it is locally exposed, or if its matching
2115 session exists and has been locally exposed.
2116
2117 At the beginning of the game, a bit $b^*$ is chosen at random. The adversary
2118 may choose to be \emph{challenged} with any completed, unexposed
2119 session;\footnote{%
2120 The original Canetti-Krawczyk definition restricts the adversary to a
2121 single challenge session, but our proof works independent of the number of
2122 challenge sessions, so we get a stronger result by relaxing the requirement
here.}
2124 the adversary is then given either the session's key -- if $b^* = 1$ -- or a
2125 string chosen at random and independently of the game so far from a
2126 protocol-specific distribution -- if $b^* = 0$. At the end of the game, the
2127 adversary outputs a single bit~$b$.
2128
2129 \subsubsection{SK-security}
2130 We've now described the game; it is time to explain the adversary's goal in
2131 it. The adversary \emph{wins} the game if either
2132 \begin{enumerate}
2133 \item two unexposed, matching sessions complete, but output different
2134 keys,\footnote{%
2135 The original Canetti-Krawczyk definition differs slightly here. It
2136 requires that `if two \emph{uncorrupted} parties complete matching
2137 sessions then they both output the same key' [original emphasis]. This
2138 can't be taken at face value, since none of the protocols they claim to
2139 be secure actually meet this requirement: they meet only the weaker
2140 requirement that parties completing matching sessions output different
2141 keys with negligible probability. We assume here that this is what they
2142 meant.}
2143 or
2144 \item the adversary correctly guesses the hidden bit~$b^*$.
2145 \end{enumerate}
2146 More formally, we make the following definition.
2147 \begin{definition}[SK-security]
2148 \label{def:sk}
2149 Let $\Pi^{H_0(\cdot), H_1(\cdot), \ldots}$ be a key-exchange protocol
2150 which makes use of random oracles $H_0(\cdot)$, $H_1(\cdot)$, \dots, and
2151 let $A$ be an adversary playing the game described previously, where $n$
2152 parties run the protocol~$\Pi$. Let $V$ be the event that any pair of
2153 matching, unexposed sessions completed, but output different session keys.
2154 Let $W$ be the event that the adversary's output bit matches the game's
2155 hidden bit~$b^*$. We define the adversary's \emph{advantage against the
2156 SK-security of the protocol~$\Pi$} to be
2157 \[ \Adv{sk}{\Pi}(A, n) = \max(\Pr[V], 2\Pr[W] - 1). \]
2158 Furthermore, we define the \emph{SK insecurity function of the
2159 protocol~$\Pi$} to be
2160 \[ \InSec{sk}(\Pi; t, n, q_S, q_M, q_{H_0}, q_{H_1}, \dots) =
2161 \max_A \Adv{sk}{\Pi}(A, n)
2162 \]
2163 where the maximum is taken over all adversaries~$A$ with total running
time~$t$ (not including time taken by the parties), which create at most $q_S$
2165 sessions, deliver at most $q_M$~messages, and (if applicable) make at most
2166 $q_{H_i}$ random-oracle queries to each random oracle $H_i(\cdot)$.
2167 \end{definition}
2168
2169
2170 \subsection{Security}
2171
2172 In order to analyse our protocol $\Wkx^{G, \E}$ properly, we must describe
exactly how it fits into the formal model described in section~\ref{sec:um}.
2174
2175 \subsubsection{Sessions and session-ids}
2176 Our formal model introduced the concept of sessions, which the informal
2177 description of section~\ref{sec:kx-overview} neglected to do. (One could
2178 argue that we described a single session only.) As we shall show in
2179 section~\ref{sec:kx-insecure}, our protocol is \emph{insecure} unless we
2180 carefully modify it to distinguish between multiple sessions.
2181
2182 In the model, distinct key-exchange sessions are given distinct partners and
2183 session-ids. In order to prevent sessions interfering with each other, we
2184 shall make explicit use of the session-ids.
2185
2186 Suppose the session-ids are $\ell_S$-bit strings. We expand the domain of
2187 the random oracle $H_I$ so that it's now
\[ H_I\colon G \times \Bin^{\ell_S} \times G \times G \to \Bin^{\ell_I}. \]
2189
2190 \subsubsection{Messages}
We split the messages of our protocol into two parts: a \emph{type}~$\tau$ and
2192 a \emph{body}~$\mu$. We assume some convenient, unambiguous encoding of
2193 pairs $(\tau, \mu)$ as bit-strings. For readability, we present message
2194 types as text strings, e.g., `\cookie{challenge}', though in practice one
2195 could use numerical codes instead.
2196
2197 The message body itself may be a tuple of values, which, again, we assume are
2198 encoded as bit-strings in some convenient and unambiguous fashion. We shall
2199 abuse the notation for the sake of readability by dropping a layer of nesting
2200 in this case: for example, we write $(\cookie{hello}, x, y, z)$ rather than
2201 $\bigl(\cookie{hello}, (x, y, z)\bigr)$.
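
One possible such encoding, sketched below in Python, length-prefixes every
field, so no two distinct messages can collide; the format itself is our
invention, purely for illustration.
\begin{verbatim}
# A simple unambiguous encoding of (tau, mu): each field carries a
# two-byte length prefix, so decoding is unique.  Illustrative only.
def encode(tau, *fields):
    out = len(tau).to_bytes(2, 'big') + tau
    for f in fields:
        out += len(f).to_bytes(2, 'big') + f
    return out

# e.g. a challenge message with placeholder R and c encodings:
msg = encode(b'challenge', bytes(32), bytes(16))
\end{verbatim}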
2202
2203 \subsubsection{The protocol}
2204 Our protocol is represented by three functions, shown in
2205 figure~\ref{fig:wkx-formal}.
2206 \begin{itemize}
2207 \item $\id{init}(n)$ is the initialization function, as described in
2208 section~\ref{sec:um}. It outputs a pair $(\mathbf{p}, \mathbf{i})$, where
2209 $\mathbf{i}[i]$ is the private key of party~$P_i$ and $\mathbf{p}[i]$ is
2210 the corresponding public key. Only $P_i$ is given $\mathbf{i}[i]$, whereas
2211 all parties and the adversary are given $\mathbf{p}$.
2212 \item $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2213 (\mathbf{p}, x, i, j, s)$ is the new-session function. This is executed by
2214 party~$P_i$ when the adversary decides to create a new session~$S = (P_i,
2215 P_j, s)$. It is also given the relevant outputs of $\id{init}$, and
2216 allowed access to the random oracles $H_I$ and $H_K$.
2217 \item $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)} (\tau,
2218 \mu)$ is the incoming-message function. This is executed by a session when
2219 the adversary decides to deliver a message $(\tau, \mu)$ to it. It makes
2220 use of the subsidiary functions $\id{msg-challenge}$ and
2221 $\id{msg-response}$ to handle the messages.
2222 \end{itemize}
2223 We observe that the protocol never aborts. We could make it do so if it
2224 receives an invalid message, but we prefer to ignore invalid messages for the
2225 sake of robustness.\footnote{%
2226 Note that this protocol would therefore require modification before it was
2227 acceptable in the simulation-based model of \cite{Shoup:1999:OFM}. There
2228 it is required that a key-exchange protocol terminate after a
2229 polynomially-bounded number of messages are delivered to it.}
2230
2231 \begin{figure}
2232 \begin{program}
2233 Function $\id{init}(n)$: \+ \\
2234 \FOR $i \in \Nupto{n}$ \DO \\ \ind
2235 $x \getsr \Nupto{|G|}$; \\
2236 $\mathbf{i}[i] \gets x$; \\
2237 $\mathbf{p}[i] \gets x P$; \- \\
2238 \RETURN $(\mathbf{p}, \mathbf{i})$;
2239 \- \\[\medskipamount]
2240 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2241 (\mathbf{p}, x, i, j, s)$: \+ \\
2242 $X \gets \mathbf{p}[i]$;
2243 $X' \gets \mathbf{p}[j]$;
2244 $C \gets \emptyset$; \\
2245 $r \getsr \Nupto{|G|}$;
2246 $R \gets r P$;
2247 $Y \gets r X'$; \\
2248 $h \gets H_I(X, s, R, Y)$;
2249 $c \gets r \xor h$; \\
2250 \SEND $(\cookie{challenge}, R, c)$;
2251 \- \\[\medskipamount]
2252 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2253 (\tau, \mu)$: \+ \\
2254 \IF $\tau = \cookie{challenge}$ \THEN $\id{msg-challenge}(\mu)$; \\
2255 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2256 \next
2257 Function $\id{msg-challenge}(\mu)$: \+ \\
2258 $(R', c') \gets \mu$; \\
2259 $Y' \gets x R'$; \\
2260 $h' \gets H_I(X', s, R', Y')$; \\
2261 $r' \gets c' \xor h'$; \\
2262 \IF $R' \ne r' P$ \THEN \RETURN; \\
$C \gets C \cup \{R'\}$; \\
2264 $Z \gets r R'$; \\
2265 $(K_0, K_1) \gets H_K(Z)$; \\
2266 $\chi \gets E_{K_0}(Y')$; \\
2267 \SEND $(\cookie{response}, R, \chi)$;
2268 \- \\[\medskipamount]
2269 Function $\id{msg-response}(\mu)$: \+ \\
2270 $(R', \chi') \gets \mu$; \\
2271 \IF $R' \notin C$ \THEN \RETURN; \\
2272 $Z \gets r R'$; \\
2273 $(K_0, K_1) \gets H_K(Z)$; \\
2274 $Y' \gets D_{K_0}(\chi')$; \\
2275 \IF $Y' \ne Y$ \THEN \RETURN; \\
2276 \OUTPUT $K_1$;
2277 \STOP;
2278 \end{program}
2279
2280 \caption{Formalization of $\Wkx$}
2281 \label{fig:wkx-formal}
2282 \end{figure}
2283
2284 \subsubsection{Session states}
2285 We must specify what the adversary obtains when it chooses to reveal a
2286 session's state. Given the program in figure~\ref{fig:wkx-formal}, we can
2287 see that the session state consists of the variables $(x, X, X', r, R, Y,
2288 C)$.
2289
2290 However, $x$ is the owning party's long-term secret, and it seems
2291 unreasonable to disclose this to an adversary who stops short of total
2292 corruption.
2293
2294 The public keys $X$ and $X'$ are part of the adversary's input anyway, so
2295 revealing them doesn't help. Similarly, the set $C$ of valid challenges
2296 could have been computed easily by the adversary, since a group element $R'
2297 \in C$ if and only if the session $S$ responded to some message
2298 $(\cookie{challenge}, R', c')$.
2299
2300 The value $R = r P$ is easily computed given $r$, and besides is sent in the
2301 clear in the session's first message. The expected response $Y = r X'$ is
2302 also easily computed from $r$. The converse is also true, since $r$ can be
2303 recovered from $R$ and $c$ in the session's challenge message and the value
2304 $Y$. Besides, $r$ is necessary for computing $Z$ in response to incoming
2305 challenges.
2306
2307 We conclude that the only `interesting' session state is $r$.
2308
2309 \subsubsection{Security}
2310 Having formally presented the protocol, we can now state our main theorem
2311 about its security. The proof is given in appendix~\ref{sec:sk-proof}.
2312 \begin{theorem}[SK-security of $\Wkx$]
2313 \label{thm:sk}
2314 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2315 encryption scheme. Then
2316 \begin{spliteqn*}
2317 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) \le
2318 2 q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
2319 \InSec{mcdh}(G; t', q_K) +
2320 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) +
2321 \frac{n (n - 1)}{|G|} +
\frac{2 q_M}{2^{\ell_I}}
2323 \end{spliteqn*}
2324 where $t' = t + O(n) + O(q_S) + O(q_M q_I) + O(q_K)$.
2325 \end{theorem}
2326
2327
2328 \subsection{Insecure protocol variants}
2329 \label{sec:kx-insecure}
2330
2331 It's important to feed the session-id and verifier's public key to the random
2332 oracle $H_I$ when constructing the check-value~$c$. Without these, the
2333 protocol is vulnerable to attack. In this section, we consider some variants
2334 of our protocol which hash less information, and show attacks against them.
2335
2336 To simplify the presentation, we shall consider Alice and Bob, and a third
2337 character Carol. We shall be attacking a pair of matching sessions $A$
2338 and~$B$, run by Alice and Bob respectively. Let Alice and Bob's private keys
2339 be $x_A$ and~$x_B$, and let their public keys be $X_A = x_A P$ and $X_B = x_B
2340 P$ respectively.
2341
2342 \subsubsection{Protocol diagram notation}
2343 In order to keep the presentation as clear as possible, we use a simple
2344 diagrammatic notation for describing protocol runs. A line of the form
2345 \protocolrun{\textit{action} \ar[r] & \PRsession{S} & \textit{result}}
2346 states that the adversary performs the given \textit{action} on session~$S$,
2347 with the stated \textit{result}. The \textit{action} may be
2348 \begin{itemize}
2349 \item \textsf{Create session $(P_i, P_j, s)$}: the session is created,
2350 running in party~$P_i$, with partner~$P_j$ and session-id~$s$.
2351 \item \textsf{Receive $\mu$}: the session is activated with an incoming message~$\mu$.
2352 \item \textsf{Session-state reveal}: The adversary requests the session's
2353 internal state.
2354 \end{itemize}
2355 The \textit{result} may be
2356 \begin{itemize}
2357 \item \textsf{Send $\mu'$}: the session requests the delivery of the
2358 message~$\mu'$.
2359 \item \textsf{Complete: $K$}: the session completes, outputting the key~$K$.
2360 \item \textsf{State: $\sigma$}: the session's state is revealed to
2361 be~$\sigma$.
2362 \item \textsf{(Ignore)}: the result of the action is unimportant.
2363 \end{itemize}
2364
2365 \subsubsection{Omitting the session-id}
2366 Consider a protocol variant where session $S$ sets $h_S = H_I(X_N, R_S,
2367 Y_S)$, where $N$ is the session's partner. That is, we've omitted the
2368 session-id from the hash. An adversary can cross over two sessions between
2369 Alice and Bob. Here's how.
2370
2371 The attack assumes that Alice and Bob set up two pairs of matching sessions
2372 with each other, thus.
2373 \protocolrun{
2374 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2375 \PRsend{challenge}{R_A, c_A} \\
2376 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2377 \PRsend{challenge}{R_B, c_B} \\
2378 \PRcreate{Alice}{Bob}{s'} & \PRsession{A'} &
2379 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2380 \PRcreate{Bob}{Alice}{s'} & \PRsession{B'} &
2381 \PRsend{challenge}{R_{B'}, c_{B'}}
2382 }
2383 Observe that the session pairs use distinct session-ids $s \ne s'$, so this
2384 is allowed. Now the adversary crosses over the challenges, using the second
2385 pair of sessions to provide responses to the challenges issued by the first
2386 pair. By revealing the state in the second pair of sessions, the adversary
2387 can work out the (probably different) session keys accepted by the first
2388 pair.
2389 \protocolrun{
2390 \PRreceive{challenge}{R_B, c_B} & \PRsession{A'} &
2391 \PRsend{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} \\
2392 \PRreceive{challenge}{R_A, c_A} & \PRsession{B'} &
2393 \PRsend{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} \\
2394 \PRreceive{challenge}{R_{A'}, c_{A'}} & \PRsession{A} & \PRignore \\
2395 \PRreceive{challenge}{R_{B'}, c_{B'}} & \PRsession{B} & \PRignore \\
2396 \PRreveal & \PRsession{A'} & r_{A'} \\
2397 \PRreveal & \PRsession{B'} & r_{B'} \\
2398 \PRreceive{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} & \PRsession{A} &
2399 \PRcomplete{K_{AB',1}} \\
2400 \PRreceive{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} & \PRsession{B} &
2401 \PRcomplete{K_{BA',1}} \\
2402 }
The adversary can now compute $K_{AB'} = H_K(r_{B'} R_A)$ and $K_{BA'} =
H_K(r_{A'} R_B)$.  Safely in possession of both keys, it can read and
impersonate freely.
2406
2407 \subsubsection{Omitting the partner's public key}
2408 Now consider a protocol variant where session $S$ sets $h_S = H_I(s, R_S,
Y_S)$, where $s$ is the session-id.  An adversary can use sessions with
2410 Carol to attack a session between Alice and Bob. Here's how the sessions are
2411 set up.
2412 \protocolrun{
2413 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2414 \PRsend{challenge}{R_A, c_A} \\
2415 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2416 \PRsend{challenge}{R_B, c_B} \\
2417 \PRcreate{Alice}{Carol}{s} & \PRsession{A'} &
2418 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2419 \PRcreate{Bob}{Carol}{s} & \PRsession{B'} &
2420 \PRsend{challenge}{R_{B'}, c_{B'}}
2421 }
Although Alice and Bob each have two sessions with session-id~$s$, this is
allowed, since the sessions have different partners.  The rest of the attack
proceeds identically to the previous case.
2425
2426
2427 \subsection{Deniability}
2428 \label{sec:denial}
2429
2430 We have claimed that the Wrestlers key-exchange protocol is \emph{deniable}.
2431 In this section, we define what we mean, explain the limits of the
deniability of the protocol as currently presented, fix the protocol with an
2433 additional pass (which can be collapsed to a single message flow by breaking
2434 the protocol's symmetry), and prove the deniability of the resulting
2435 protocol.
2436
2437 Our notion of deniability is taken from Di~Raimondo, Gennaro and Krawczyk
2438 \cite{DiRaimondo:2006:DAK}, except that, as usual, we opt for a concrete
2439 security approach.
2440
2441 \subsubsection{Discussion}
2442 Our definition for deniability is that, for any adversary interacting with
2443 participants in the protocol, a simulator exists which can compute the same
2444 things as the adversary. In particular, since an adversary could output a
2445 transcript of the interactions between itself and the parties, it would
2446 follow that a simulator could do this too. If the simulator is effective,
2447 its output is indistinguishable from that of the real adversary, and hence no
2448 `judge' (distinguisher) should be persuaded by evidence presented by someone
2449 who claims to have witnessed or participated in an interaction.
2450
We work again in the model described in section~\ref{sec:um}.  That is, our adversary
2452 has complete control over the ordering and delivery of all messages. The
2453 adversary is also able, at any time, to reveal the state of any session.
2454 However, deniability is obviously impossible against an adversary who can
2455 \emph{corrupt} other parties, since simulating such an adversary's actions
2456 would necessarily require the simulator to compute private keys corresponding
2457 to the known public keys, and this is (we believe) difficult, because an
2458 efficient algorithm for doing this could easily attack our protocol, which we
2459 already proved secure. Therefore, we forbid the adversary from corrupting
2460 parties.
2461
2462 In order to allow the adversary to participate in the protocol, rather than
2463 merely observing it, we need to give it one or more private keys. We could
2464 modify the initialization function \id{init} from figure~\ref{fig:wkx-formal}
2465 to give the adversary a private key, but there's an easier way: we can just
2466 give the adversary a number of private keys in its auxiliary input.
2467
2468 \subsubsection{Definitions}
2469 Let $\Pi$ be a key-exchange protocol, in the model described in
2470 section~\ref{sec:um}. We use the simulation framework of
2471 section~\ref{sec:sim}. We define the initialization function $I_\Pi$ to be
2472 the initialization function of $\Pi$, as before, and the corresponding
2473 world~$W_\Pi(\iota, \sigma, \tau, \mu)$ is a fairly straightforward mapping
2474 of the adversary's possible actions to the simulation model:
2475 \begin{itemize}
2476 \item The invocation $\cookie{new-session}$ with $\mu = (i, j, s)$ creates a
2477 new session on party~$P_i$, with partner~$P_j$ and session-id~$s$. The
2478 reply $\rho = (\delta, m)$ is a \emph{decision} $\delta \in
2479 \{\cookie{continue}, \cookie{abort}, \cookie{complete}\}$ and an output
2480 message $m \in \Bin^* \cup \{\bot\}$. If $m \ne \bot$ then $m$ is a
2481 message to be sent to the matching session (if any).
2482 \item The invocation $\cookie{deliver}$ with $\mu = (i, j, s, m)$ delivers
2483 message $m$ to the session $S = (P_i, P_j, s)$. The response $\rho$ is as
2484 for $\cookie{new-session}$ invocations.
2485 \item The invocation $\cookie{reveal-session-state}$ with $\mu = (i, j, s)$
2486 reveals to the adversary the state of the running session $S = (P_i, P_j,
2487 s)$. The response $\rho$ is the session's state if $S$ is indeed a running
2488 session, or $\bot$ otherwise.
2489 \item The invocation $\cookie{reveal-session-key}$ with $\mu = (i, j, s)$
2490 reveals to the adversary the session-key of the completed session~$S =
2491 (P_i, P_j, s)$. The response $\rho$ is the session key~$K$ if the session
2492 is indeed complete, or $\bot$ otherwise.
2493 \end{itemize}
2494 There are no invocations corresponding to the adversary actions of corrupting
parties (since deniability against a corrupting adversary is impossible, as
2496 discussed earlier), or session expiry or challenging (since they are useless
2497 in this context).
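
For concreteness, the dispatch performed by $W_\Pi$ might be sketched as
follows in Python; the session objects and their methods are hypothetical
scaffolding, not part of the formal definition.
\begin{verbatim}
# Toy sketch of the world W_Pi's invocation dispatch.  `parties' and
# `sessions' are assumed bookkeeping structures; BOT stands for `bottom'.
BOT = None

def world(parties, sessions, invocation, mu):
    if invocation == 'new-session':
        i, j, s = mu
        S = sessions[(i, j, s)] = parties[i].new_session(j, s)
        return S.output()                 # a (decision, message) pair
    elif invocation == 'deliver':
        i, j, s, m = mu
        return sessions[(i, j, s)].deliver(m)
    elif invocation == 'reveal-session-state':
        S = sessions.get(tuple(mu))
        return S.state if S is not None and S.running else BOT
    elif invocation == 'reveal-session-key':
        S = sessions.get(tuple(mu))
        return S.key if S is not None and S.complete else BOT
\end{verbatim}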
2498
2499 We measure the deniability of a protocol~$\Pi$, using a given simulator~$S$,
2500 by the insecurity function $\InSec{sim}(W_\Pi, I_\Pi, S; t_D, t_A,
2501 \mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U})$ of
2502 definition~\ref{def:sim}. The interaction bounds $\mathcal{R} = (q_S, q_M)$
2503 we place on the adversary are on the number ($q_S$) of \cookie{new-session}
2504 and ($q_M$) \cookie{deliver} invocations it makes.
2505
2506 We shall (informally) say that a protocol~$\Pi$ is deniable if there is a
2507 simulator~$S_\Pi$ for which the insecurity function is small for appropriate
2508 resource bounds on the adversary and distinguisher.
2509
2510 \subsubsection{The current protocol}
2511 As it stands, $\Wkx$ isn't deniable, according to our definition, for
2512 arbitrary auxiliary inputs. Let's see why.
2513
2514 Suppose that Bob is an informant for the secret police, and wants to convince
2515 a judge that Alice is involved in subversive activities in cyberspace.
2516 Unfortunately, Bob's job is difficult, because of the strong zero-knowledge
2517 nature of the Wrestlers identification protocol. However, Bob can work with
2518 the judge to bring him the evidence necessary to convict Alice. Here's how.
2519
2520 Alice's public key is $A$, and Bob's public key is $B$. The judge chooses
2521 some session-id $s$, and $r \inr \Nupto{q}$. He computes $R = r P$ and $c =
2522 r \xor H_I(B, s, R, r A)$, and gives Bob the triple $(s, R, c)$, keeping $r$
2523 secret. Bob can now persuade Alice to enter into a key-exchange with him,
2524 with session-id $s$. He uses $(R, c)$ as his challenge message. When Alice
2525 sends back her response $(R', \chi)$ (because Bob's challenge is correctly
2526 formed), Bob aborts and shows $(R', \chi)$ to the judge. The judge knows $r$
2527 and can therefore decrypt $\chi$ and check the response. If it's wrong, then
2528 Bob was cheating and gets demoted -- he can't get the right answer by himself
2529 because that would require him to impersonate Alice. If it's right, Alice is
2530 really a subversive element and `disappears' in the night.
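
In terms of the toy Python sketch of $\Wkx$ above (reusing its helpers), the
judge's computation might look like this; again, purely illustrative.
\begin{verbatim}
# The judge's rigged challenge.  He keeps r_j secret, so later he alone
# can derive the keys from Alice's response and check Bob's evidence.
r_j = secrets.randbelow(q)
R_j = pow(g, r_j, p)
c_j = r_j ^ H_I(X_b, s, R_j, pow(X_a, r_j, p))  # c = r xor H_I(B,s,R,rA)
# Bob relays (s, R_j, c_j).  Given Alice's response (Rp, chi), the judge
# computes K0, K1 = H_K(pow(Rp, r_j, p)) and accepts the evidence iff
# D(K0, chi) == pow(X_a, r_j, p), i.e. the decryption equals r_j A.
\end{verbatim}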
2531
2532 We shall show in theorem~\ref{thm:denial} below that this is basically the
2533 only attack against the deniability of the protocol. However, we can do
2534 better.
2535
2536 \subsubsection{Fixing deniability}
2537 We can fix the protocol to remove even the attack discussed above. The
2538 change is simple: we feed \emph{both} parties' challenges to the hash
2539 function~$H_I$ rather than just the sender's. We use a five-argument hash
2540 function (random oracle) $H_I\colon G^2 \times \Bin^{\ell_S} \times G^2 \to
2541 \Bin^{\ell_I}$. We introduce a new message pass at the beginning of the
2542 protocol: each session simply sends its challenge point $R = r P$ in the
clear as a `pre-challenge'.  The actual challenge is $R$ and $c = r \xor
H_I(R', X, s, R, Y)$, where $R'$ is the challenge of the matching session
and the remaining arguments are as in $\Wkx$.
2545
2546 By breaking symmetry, we can reduce the communication complexity of this
2547 variant to four messages. As before, we analyse the symmetrical version.
2548 The extra flow might seem a high price to pay, but we shall see that it has
2549 additional benefits beyond deniability.
2550
2551 A summary of the new protocol is shown in figure~\ref{fig:wdkx}, and the
2552 formal description is shown in figure~\ref{fig:wdkx-formal}.
2553
2554 \begin{figure}
2555 \begin{description}
2556 \item[Setup] Group $G = \langle P \rangle$; $|G| = q$ is prime.
$H_I(\cdot, \cdot, \cdot, \cdot, \cdot)$ and $H_K(\cdot)$ are secure
2558 hashes. $\E = (\kappa, E, D)$ is an IND-CCA2 symmetric encryption
2559 scheme.
2560 \item[Parties] $U_i$ for $0 \le i < n$.
2561 \item[Private keys] $x_i \inr \Nupto{q}$.
2562 \item[Public keys] $X_i = x_i P$.
2563 \end{description}
2564 \[ \begin{graph}
2565 !{0; <8cm, 0cm>: <0cm, 3cm>::}
2566 *+[F]\dbox{$r_i \getsr I$; $R_i \gets r_i P$} ="i-1"
2567 [d]
2568 *+[F]\dbox{$c_i \gets r_i \xor H_I(R_j, X_i, s, R_i, r_i X_j)$} ="i0"
2569 [d]
*+[F]\dbox{Check $R_j = \bigl(c_j \xor H_I(R_i, X_j, s, R_j, x_i R_j)\bigr) P$ \\
2571 $Z \gets r_i R_j$; $K \gets H_K(0, Z)$ \\
2572 $\chi_i \gets E_K(x_i R_j)$} ="i1"
2573 [d]
2574 *+[F]\dbox{Check $D_K(\chi_j) = r_i X_j$ \\
2575 Shared key is $H_K(1, Z)$} ="i2"
2576 "i-1" [r]
2577 *+[F]\dbox{$r_j \getsr I$; $R_j \gets r_j P$} ="j-1"
2578 [d]
2579 *+[F]\dbox{$c_j \gets r_j \xor H_I(R_i, X_j, s, R_j, r_j X_i)$} ="j0"
2580 [d]
*+[F]\dbox{Check $R_i = \bigl(c_i \xor H_I(R_j, X_i, s, R_i, x_j R_i)\bigr) P$ \\
2582 $Z \gets r_j R_i$; $K \gets H_K(0, Z)$ \\
2583 $\chi_j \gets E_K(x_j R_i)$} ="j1"
2584 [d]
2585 *+[F]\dbox{Check $D_K(\chi_i) = r_j X_i$ \\
2586 Shared key is $H_K(1, Z)$} ="j2"
2587 %
2588 "i-1" : |(0)/3.25cm/*+{R_i} "j0"
2589 "i0" : |(0)/3.25cm/*+{(R_i, c_i)} "j1"
2590 "i1" : |(0)/3.25cm/*+{(R_i, \chi_i)} "j2"
2591 "j-1" : |(0)/3.25cm/*+{R_j} "i0"
2592 "j0" : |(0)/3.25cm/*+{(R_j, c_j)} "i1"
2593 "j1" : |(0)/3.25cm/*+{(R_j, \chi_j)} "i2"
2594 \end{graph} \]
2595
2596 \caption{Summary of the Deniable Wrestlers Key Exchange protocol, $\Wdkx$}
2597 \label{fig:wdkx}
2598 \end{figure}
2599
2600 \begin{figure}
2601 \begin{program}
2602 Function $\id{init}(n)$: \+ \\
2603 \FOR $i \in \Nupto{n}$ \DO \\ \ind
2604 $x \getsr \Nupto{|G|}$; \\
2605 $\mathbf{i}[i] \gets x$; \\
2606 $\mathbf{p}[i] \gets x P$; \- \\
2607 \RETURN $(\mathbf{p}, \mathbf{i})$;
2608 \- \\[\medskipamount]
2609 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2610 (\mathbf{p}, x, i, j, s)$: \+ \\
2611 $X \gets \mathbf{p}[i]$;
2612 $X' \gets \mathbf{p}[j]$;
2613 $C \gets \emptyset$; \\
2614 $r \getsr \Nupto{|G|}$;
2615 $R \gets r P$;
2616 $Y \gets r X'$; \\
\SEND $(\cookie{pre-challenge}, R)$;
2618 \- \\[\medskipamount]
2619 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2620 (\tau, \mu)$: \+ \\
2621 \IF $\tau = \cookie{pre-challenge}$ \THEN
2622 $\id{msg-pre-challenge}(\mu)$; \\
2623 \ELSE \IF $\tau = \cookie{challenge}$ \THEN
2624 $\id{msg-challenge}(\mu)$; \\
2625 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2626 \next
2627 Function $\id{msg-pre-challenge}(\mu)$: \+ \\
2628 $R' \gets \mu$; \\
$h \gets H_I(R', X, s, R, Y)$; \\
2630 $c \gets r \xor h$; \\
\SEND $(\cookie{challenge}, R, c)$;
2632 \- \\[\medskipamount]
2633 Function $\id{msg-challenge}(\mu)$: \+ \\
2634 $(R', c') \gets \mu$; \\
2635 $Y' \gets x R'$; \\
2636 $h' \gets H_I(R, X', s, R', Y')$; \\
2637 $r' \gets c' \xor h'$; \\
2638 \IF $R' \ne r' P$ \THEN \RETURN; \\
$C \gets C \cup \{R'\}$; \\
2640 $Z \gets r R'$; \\
2641 $(K_0, K_1) \gets H_K(Z)$; \\
2642 $\chi \gets E_{K_0}(Y')$; \\
2643 \SEND $(\cookie{response}, R, \chi)$;
2644 \- \\[\medskipamount]
2645 Function $\id{msg-response}(\mu)$: \+ \\
2646 $(R', \chi') \gets \mu$; \\
2647 \IF $R' \notin C$ \THEN \RETURN; \\
2648 $Z \gets r R'$; \\
2649 $(K_0, K_1) \gets H_K(Z)$; \\
2650 $Y' \gets D_{K_0}(\chi')$; \\
2651 \IF $Y' \ne Y$ \THEN \RETURN; \\
2652 \OUTPUT $K_1$;
2653 \STOP;
2654 \end{program}
2655
2656 \caption{Deniable key-exchange: formalization of $\Wdkx$}
2657 \label{fig:wdkx-formal}
2658 \end{figure}
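
The change relative to the earlier toy Python sketch of $\Wkx$ is tiny:
reusing its definitions, the five-argument hash simply binds the partner's
pre-challenge point into the check-value.
\begin{verbatim}
# Wdkx delta over the Wkx sketch: the check-value also binds the
# partner's pre-challenge point, here Bob's R_b, received in the clear.
def H_I5(R_peer, X, s, R, Y):
    return H(b'I5', R_peer, X, s, R, Y)[0]

c_a5 = r_a ^ H_I5(R_b, X_a, s, R_a, Y_a)   # Alice's Wdkx check-value
\end{verbatim}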
2659
2660 The security of this variant is given by the following theorem, whose proof
2661 is in appendix~\ref{sec:sk2-proof}.
2662 \begin{theorem}[SK-security of $\Wdkx$]
2663 \label{thm:sk2}
2664 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2665 encryption scheme. Then
2666 \[ \InSec{sk}(\Wdkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) =
2667 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K)
2668 \]
2669 \end{theorem}
2670
2671 \subsubsection{Deniability of the Wrestlers protocols}
2672 In order to quantify the level of deniability our protocols provide, we shall
2673 impose a limit on the auxiliary input to the adversary. In particular, we
2674 shall use $\mathcal{U}$ of definition~\ref{def:sim} to count the number of
2675 \emph{challenges} in the auxiliary input. That is, $\mathcal{U} = n_C$ is
2676 the number of tuples $(i, j, s, R', R, c)$ for which there is an $r$ such
2677 that $R = r P$ and $c = r \xor H_I(R', X_j, s, R, r X_i)$ (or without the
2678 $R'$ for $\Wkx$).
2679
2680 With this restriction in place, we can state the following theorem about the
2681 deniability of our protocols.
2682 \begin{theorem}[Deniability of $\Wkx$ and $\Wdkx$]
2683 \label{thm:denial}
There exist simulators $S_\Wkx$ and $S_\Wdkx$ such that
2685 \[ \InSec{sim}(W_{\Wkx^{G, \E}}, I_{\Wkx^{G, \E}}, S_{\Wkx^{G, \E}};
2686 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), 0) \le
2687 \frac{q_M}{2^{\ell_I}}
2688 \]
2689 and
2690 \[ \InSec{sim}(W_{\Wdkx^{G, \E}}, I_{\Wdkx^{G, \E}}, S_{\Wdkx^{G, \E}};
2691 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), n_C) \le
2692 \frac{n_C q_S}{|G|} +
2693 \frac{q_M}{2^{\ell_I}}.
2694 \]
2695 The running time of the simulators is $O(t_A) + O(\mathcal{Q}_A q_M)$.
2696 \end{theorem}
2697 \begin{proof}
2698 The simulators $S_\Wkx$ and $S_\Wdkx$ are very similar. We describe both
2699 here. Both are fake-world simulators, working as follows.
2700 \begin{enumerate}
2701 \item Initially, it constructs simulated parties $P_i$, for $0 \le i < n$,
2702 giving each the public key $X_i$ from the common input.
2703 \item Suppose the adversary requests creation of a new session $S = (P_i,
2704 P_j, s)$. Then the simulator creates a new session, including a random
2705 value $r_S \inr \Nupto{|G|}$, and computes $R_S = r_S P$, and $Y_S = r_S
2706 X_j$. For $\Wdkx$, it sends the message $(\cookie{pre-challenge}, R_S)$;
2707 for $\Wkx$, it additionally computes $h = H_I(X_i, s, R_S, Y_S)$ and
2708 sends $(\cookie{challenge}, R_S, r_S \xor h)$.
2709 \item Suppose, for $\Wdkx$, the adversary sends a message
2710 $(\cookie{pre-challenge}, R')$ to a session~$S = (P_i, P_j, s)$. The
2711 simulator computes $h = H_I(R', X_i, s, R_S, Y_S)$, and sends
2712 $(\cookie{challenge}, R_S, r_S \xor h)$.
2713 \item Suppose the adversary sends a message $(\cookie{challenge}, R', c')$
2714 to session $S = (P_i, P_j, s)$. The simulator doesn't know $x_i$.
2715 \begin{enumerate}
2716 \item If $R' = R_T$ for some other simulated session $T$, then the
2717 simulator knows $r_T$ such that $R_T = r_T P$. Let $Y' = r_T X_i$.
2718 The simulator computes $h = H_I(X_j, s, R', Y')$ (resp.\ $h = H_I(R_S,
2719 X_j, s, R', Y')$) for $\Wkx$ (resp.\ $\Wdkx$) and checks that $r_T = c'
2720 \xor h$. If not, the simulator discards the message. Otherwise, it
2721 computes $(K_0, K_1) = H_K(r_S R')$, and sends the message
$(\cookie{response}, R_S, E_{K_0}(Y'))$.
2723 \item \label{en:simextract} Otherwise the simulator runs the extractor
2724 $T_\Wident$ on the adversary's history of queries $H_I(X_j, s, R',
2725 \cdot)$ (resp.\ $H_I(R_S, X_j, s, R', \cdot)$) for $\Wkx$ (resp.\
2726 $\Wdkx$). The extractor returns $(r', Y')$. If $Y' = \bot$ then the
2727 simulator ignores the message. Otherwise, the simulator computes
$(K_0, K_1) = H_K(r_S R')$ and sends back $(\cookie{response}, R_S,
E_{K_0}(Y'))$.
2730 \end{enumerate}
2731 \item Suppose the adversary sends a message $(\cookie{response}, R', \chi)$
2732 to session $S = (P_i, P_j, s)$. The simulator computes $(K_0, K_1) =
2733 H_K(r_S R')$, and decrypts $Y' = D_{K_0}(\chi)$. If $Y' \ne Y_S$ then
2734 the simulator discards the message. Otherwise, it makes the simulated
2735 session complete, and outputs key $K_1$.
2736 \item Finally, if the adversary reveals a session's state, the simulator
2737 reveals $r_S$ as required; if the adversary reveals a session-key, the
2738 simulator reveals the $K_1$ it output.
2739 \end{enumerate}
2740 The only point where the simulation fails to be perfect is in
step~\ref{en:simextract}.  Let $R'$ and $c'$ be the values from an incoming
2742 challenge message to session $S = (P_i, P_j, s)$. Let $r'$ be such that
2743 $R' = r' P$ and let $Y' = r' X_i$. If a random-oracle query $H_I(X_j, s,
2744 R', Y')$ (or $H_I(R_S, X_j, s, R', Y')$ for $\Wdkx$) has been issued, then
2745 there are a number of possibilities. Let $h'$ be the result of this query.
2746 \begin{itemize}
2747 \item The adversary made this query. Then the extractor will find it and
2748 return $Y'$ if $c' = h' \xor r'$, or $\bot$ otherwise.
2749 \item Some simulated session $U = (P_{i'}, P_{j'}, s')$ made this query.
2750 But simulated sessions only make $H_I$-queries when constructing
2751 challenges, so $R' = R_U$ for some session~$U$. But the simulator does
2752 something different in that case.
2753 \item In $\Wdkx$, the quadruple $(s, R_S, R', c')$ came from the
2754 adversary's auxiliary input. In this case the simulator must fail. But
2755 $R_S = r_S P$, and $r_S$ was chosen uniformly at random. If there are at
2756 most $n_C$ challenge sets in the auxiliary input then this happens with
2757 probability at most $n_C/|G|$ for any given session.
2758 \end{itemize}
2759 We conclude that the simulator fails with probability
2760 \[ \frac{q_M}{2^{\ell_I}} + \frac{q_S n_C}{|G|}. \]
2761 (Note that we only consider $n_C = 0$ for $\Wkx$.) No adversary can
2762 distinguish the simulator from a real interaction unless the simulator
2763 fails, and the simulator is a fake-world simulator. We therefore apply
2764 proposition~\ref{prop:fakesim}; the theorem follows.
2765 \end{proof}
2766
2767
2768 \subsection{Practical issues}
2769 \label{sec:practice}
2770
2771 \subsubsection{Denial of service from spoofers}
2772 The adversary we considered in~\ref{sec:um} is very powerful. Proving
2773 security against such a powerful adversary is good and useful. However,
2774 there are other useful security properties we might like against weaker
2775 adversaries.
2776
2777 Eavesdropping on the Internet is actually nontrivial. One needs control of
2778 one of the intermediate routers between two communicating parties. (There
2779 are tricks one can play involving subversion of DNS servers, but this is also
2780 nontrivial.) However, injecting packets with bogus source addresses is very
2781 easy.
2782
2783 Layering the protocol over TCP \cite{RFC0793} ameliorates this problem because
2784 an adversary needs to guess or eavesdrop in order to obtain the correct
2785 sequence numbers for a spoofed packet; but the Wrestlers protocol is
2786 sufficiently simple that we'd like to be able to run it over UDP
2787 \cite{RFC0768}, for which spoofing is trivial.
2788
2789 Therefore, it's possible for anyone on the 'net to send Alice a spurious
challenge message $(R, c)$.  She will then compute $Y = a R$ (where $a$ is
her private key), recover $r' = c \xor H_I(\ldots, R, Y)$, check that $R =
r' P$, and so on.  That's at least two
2792 scalar multiplications to respond to a spoofed packet, and even with very
2793 efficient group operations, coping with this kind of simple denial-of-service
2794 attack might be difficult.
2795
2796 A straightforward solution is to use the Deniable variant of the protocol,
2797 and require a challenge to quote its matching session's challenge $R'$ in its
2798 challenge. That is, upon receiving a $(\cookie{pre-challenge}, R')$, the
2799 session sends $(\cookie{challenge}, R', R, c)$. Alice then rejects any
2800 \emph{incoming} challenge message which doesn't quote her current challenge
2801 value. Now only eavesdroppers can force her to perform expensive
2802 computations.
2803
2804 Indeed, one need not quote the entire challenge $R'$: it suffices to send
2805 some short hard-to-guess hash of it, maybe just the bottom 128 bits or so.
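
In the same toy Python setting as before, the resulting pre-filter is cheap;
the message-handling scaffolding here is hypothetical.
\begin{verbatim}
# Anti-spoofing pre-filter: drop any incoming challenge which doesn't
# quote (a short hash of) our current challenge point R_mine.  No group
# operations are needed to reject a spoofed packet.
def quote(R):
    return hashlib.sha256(R.to_bytes(4, 'big')).digest()[:16]

def accept_challenge(msg, R_mine):
    quoted, R_in, c_in = msg
    return quoted == quote(R_mine)
\end{verbatim}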
2806
2807 This can't reduce security. Consider any adversary attacking this protocol
2808 variant. We can construct an adversary which attacks the original protocol
2809 just as efficiently. The new adversary attaches fake $R'$ values to
2810 challenges output by other parties, and strips them off on delivery,
2811 discarding messages with incorrect $R'$ values.
2812
2813 \subsubsection{Key confirmation}
2814 Consider an application which uses the Wrestlers protocol to re-exchange keys
2815 periodically. The application can be willing to \emph{receive} incoming
2816 messages using the new key as soon as the key exchange completes
2817 successfully; however, it should refrain from \emph{sending} messages under
2818 the new key until it knows that its partner has also completed. The partner
2819 may not have received the final response message, and therefore be unwilling
2820 to accept a new key; it will therefore (presumably) reject incoming messages
2821 under this new key.
2822
2823 While key confirmation is unnecessary for \emph{security}, it has
2824 \emph{practical} value, since it solves the above problem. If the
2825 application sends a \cookie{switch} message when it `completes', it can
2826 signal its partner that it is indeed ready to accept messages under the new
2827 key. Our implementation sends $(\cookie{switch-rq}, E_{K_0}(H_S(0, R, R')))$
2828 as its switch message; the exact contents aren't important. Our
2829 retransmission policy (below) makes use of an additional message
2830 \cookie{switch-ok}, which can be defined similarly.
2831
2832 It's not hard to show that this doesn't adversely affect the security of the
2833 protocol, since the encrypted message is computed only from public values.
2834 In the security proof, we modify the generation of \cookie{response}
2835 messages, so that the plaintexts are a constant string rather than the true
2836 responses, guaranteeing that the messages give no information about the
2837 actual response. To show this is unlikely to matter, we present an adversary
2838 attacking the encryption scheme by encrypting either genuine responses or
2839 fake constant strings. Since the adversary can't distinguish which is being
2840 encrypted (by the definition of IND-CCA security,
definition~\ref{def:ind-cca}), the change goes unnoticed.  To incorporate
our switch messages, we need only modify this adversary to implement the
modified protocol.  This is certainly possible, since the
2844 messages contain (hashes of) public values. We omit the details.
2845
2846 However, while the extra message doesn't affect the security of our protocol,
2847 it would be annoying if an adversary could forge the switch request message,
2848 since this would be a denial of service. In the strong adversarial model,
2849 this doesn't matter, since the adversary can deny service anyway, but it's a
2850 concern against less powerful adversaries. Most IND-CCA symmetric encryption
2851 schemes also provide integrity of plaintexts \cite{Bellare:2000:AER} (e.g.,
2852 the encrypt-then-MAC generic composition approach \cite{Bellare:2000:AER,%
2853 Krawczyk:2001:OEA}, and the authenticated-encryption modes of
2854 \cite{Rogaway:2003:OCB,Bellare:2004:EAX,McGrew:2004:SPG}), so this isn't a
2855 great imposition.
2856
2857 \subsubsection{Optimization and piggybacking}
2858 We can optimize the number of messages sent by combining them. Here's one
2859 possible collection of combined messages:
2860 \begin{description}
2861 \item [\cookie{pre-challenge}] $R$
\item [\cookie{challenge}] $R'$, $R$, $c = r \xor H_I(R', X, s, R, Y)$
2863 \item [\cookie{response}] $R'$, $R$, $c$, $E_{K_0}(x R')$
2864 \item [\cookie{switch}] $R'$, $E_{K_0}(x R', H_S(0, R, R'))$
2865 \item [\cookie{switch-ok}] $R'$, $E_{K_0}(H_S(1, R, R'))$
2866 \end{description}
2867 The combination is safe:
2868 \begin{itemize}
2869 \item the \cookie{switch} and \cookie{switch-ok} messages are safe by the
2870 argument above; and
2871 \item the other recombinations can be performed and undone in a `black box'
2872 way, by an appropriately defined SK-security adversary.
2873 \end{itemize}
2874
2875 \subsubsection{Unreliable transports}
UDP \cite{RFC0768} is a simple, unreliable Internet protocol for
2877 transmitting datagrams. However, it's very efficient, and rather attractive
2878 as a transport for datagram-based applications such as virtual private
2879 networks (VPNs). Since UDP is a best-effort rather than a reliable
2880 transport, it can occasionally drop packets. Therefore it is necessary for a
2881 UDP application to be able to retransmit messages which appear to have been
2882 lost.
2883
2884 We recommend the following simple retransmission policy for running the
Wrestlers protocol over UDP; a sketch of the policy in code follows the
list.
2886 \begin{itemize}
2887 \item Initially, send out the \cookie{pre-challenge} message every minute.
2888 \item On receipt of a \cookie{pre-challenge} message, send the corresponding
2889 full \cookie{challenge}, but don't retain any state.
2890 \item On receipt of a (valid) \cookie{challenge}, record the challenge value
2891 $R'$ in a table, together with $K = (K_0, K_1)$ and the response $Y' = x
2892 R'$. If the table is full, overwrite an existing entry at random. Send
2893 the corresponding \cookie{response} message, and retransmit it every ten
2894 seconds or so.
2895 \item On receipt of a (valid) \cookie{response}, discard any other
2896 challenges, and stop sending \cookie{pre-challenge} and \cookie{response}
2897 retransmits. At this point, the basic protocol described above would
2898 \emph{accept}, so the key $K_1$ is known to be good. Send the
2899 \cookie{switch} message, including its response to the (now known-good)
2900 sender's challenge.
2901 \item On receipt of a (valid) \cookie{switch}, send back a \cookie{switch-ok}
2902 message and stop retransmissions. It is now safe to start sending messages
2903 under $K_1$.
2904 \item On receipt of a (valid) \cookie{switch-ok}, stop retransmissions. It
2905 is now safe to start sending messages under $K_1$.
2906 \end{itemize}
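
Here is the promised sketch, written as a Python event handler.  The
transport callback \texttt{send} and the repeating-timer constructor
\texttt{every} are assumed, and message construction and validity checking
happen elsewhere; none of this is prescribed by the protocol.
\begin{verbatim}
# Sketch of the retransmission policy.  every(secs, thunk) is an assumed
# repeating timer returning a handle with a cancel() method.
import random

class KxRetransmit:
    TABLE_SIZE = 32

    def __init__(self, send, every, pre_challenge_msg):
        self.send, self.every = send, every
        self.table = {}                   # R' -> (K, Y')
        self.resp = self.switch = None
        self.pre = every(60, lambda: send(pre_challenge_msg))

    def on_pre_challenge(self, challenge_msg):
        self.send(challenge_msg)          # stateless reply

    def on_challenge(self, Rp, K, Yp, response_msg):
        if len(self.table) >= self.TABLE_SIZE:    # full: evict at random
            del self.table[random.choice(list(self.table))]
        self.table[Rp] = (K, Yp)
        self.resp = self.every(10, lambda: self.send(response_msg))

    def on_response(self, switch_msg):    # K_1 is now known to be good
        self.table.clear()
        self._cancel(self.pre, self.resp)
        self.switch = self.every(10, lambda: self.send(switch_msg))

    def on_switch(self, switch_ok_msg):   # partner has completed too
        self.send(switch_ok_msg)
        self._cancel(self.pre, self.resp, self.switch)

    def on_switch_ok(self):               # safe to send under K_1
        self._cancel(self.pre, self.resp, self.switch)

    def _cancel(self, *timers):
        for t in timers:
            if t: t.cancel()
\end{verbatim}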
2907
2908 \subsubsection{Key reuse}
2909 Suppose our symmetric encryption scheme $\E$ is not only IND-CCA secure
2910 (definition~\ref{def:ind-cca}) but also provides integrity of plaintexts
2911 \cite{Bellare:2000:AER} (or, alternatively, is an AEAD scheme
\cite{Rogaway:2002:AEA}). Then we can use it to construct a secure channel,
2913 by including message type and sequence number fields in the plaintexts, along
2914 with the message body. If we do this, we can actually get away with just the
2915 one key $K = H_K(Z)$ rather than both $K_0$ and $K_1$.
2916
2917 To do this, it is essential that the plaintext messages (or additional data)
2918 clearly distinguish between the messages sent as part of the key-exchange
2919 protocol and messages sent over the `secure channel'. Otherwise, there is a
2920 protocol-interference attack: an adversary can replay key-exchange
2921 ciphertexts to insert the corresponding plaintexts into the channel.
2922
2923 We offer a sketch proof of this claim in appendix~\ref{sec:sc-proof}.
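
As an illustration of the framing (not part of the proof), here is a sketch
assuming an AEAD scheme -- AES-GCM from the Python \texttt{cryptography}
package -- which is not something the protocol itself specifies.
\begin{verbatim}
# Single-key secure channel framing: a type byte in the associated data
# separates key-exchange messages from channel traffic, blocking the
# replay/interference attack described above.
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

TYPE_KX, TYPE_MSG = b'\x00', b'\x01'

def seal(key, mtype, seq, body):
    nonce = seq.to_bytes(12, 'big')       # sequence number as the nonce
    return AESGCM(key).encrypt(nonce, body, mtype + nonce)

def unseal(key, mtype, seq, ct):          # raises InvalidTag on forgery
    nonce = seq.to_bytes(12, 'big')
    return AESGCM(key).decrypt(nonce, ct, mtype + nonce)
\end{verbatim}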
2924
2925 %%%--------------------------------------------------------------------------
2926
2927 \section{Conclusions}
2928 \label{sec:conc}
2929
2930 We have presented new protocols for identification and authenticated
2931 key-exchange, and proven their security. We have shown them to be efficient
2932 and simple. We have also shown that our key-exchange protocol is deniable.
2933 Finally, we have shown how to modify the key-exchange protocol for practical
2934 use, and proven that this modification is still secure.
2935
2936 %%%--------------------------------------------------------------------------
2937
2938 \section{Acknowledgements}
2939
2940 The Wrestlers Protocol is named after the Wrestlers pub in Cambridge where
2941 Clive Jones and I worked out the initial design.
2942
2943 %%%--------------------------------------------------------------------------
2944
2945 \bibliography{mdw-crypto,cryptography2000,cryptography,rfc,std}
2946
2947 %%%--------------------------------------------------------------------------
2948
2949 \appendix
2950 \section{Proofs}
2951
2952 \subsection{Proof of theorem~\ref{thm:sk}}
2953 \label{sec:sk-proof}
2954
2955 Before we embark on the proof proper, let us settle on some notation. Let
2956 $P_i$ be a party. Then we write $x_i$ for $P_i$'s private key and $X_i = x_i
2957 P$ is $P_i$'s public key. Let $S = (P_i, P_j, s)$ be a session. We write
2958 $r_S$ for the random value chosen at the start of the session, and $R_S$,
2959 $c_S$ etc.\ are the corresponding derived values in that session.
2960
2961 The proof uses a sequence of games. For each game~$\G{i}$, let $V_i$ be the
2962 event that some pair of unexposed, matching sessions both complete but output
2963 different keys, and let $W_i$ be the event that the adversary's final output
2964 equals the game's hidden bit~$b^*$. To save on repetition, let us write
2965 \[ \diff{i}{j} = \max(|\Pr[V_i] - \Pr[V_j]|, |\Pr[W_i] - \Pr[W_j]|). \]
2966 Obviously,
2967 \[ \diff{i}{j} \le \sum_{i\le k<j} \diff{k}{k + 1}. \]
2968
2969 Here's a quick overview of the games we use.
2970 \begin{itemize}
2971 \item $\G0$ is the original SK-security game.
2972 \item In $\G1$, we abort the game unless all parties' public keys are
2973 distinct. Since keys are generated at random, parties are unlikely to be
2974 given the same key by accident.
2975 \item In $\G2$, we change the way sessions respond to challenge messages, by
2976 using the extractor to fake up answers to most challenges. Since the
2977 extractor is good, the adversary is unlikely to notice.
2978 \item In $\G3$, we abort the game if the adversary ever queries $H_K(\cdot)$
2979 on the Diffie-Hellman secret $r_S r_T P$ shared between two unexposed
2980 matching sessions. We show that this is unlikely to happen if the
2981 Diffie-Hellman problem is hard.
2982 \item In $\G4$, we abort the game if any unexposed session \emph{accepts} a
2983 response message which wasn't sent by a matching session.
2984 \end{itemize}
2985 Finally, we show that the adversary has no advantage in $\G4$. The theorem
2986 follows.
2987
2988 For ease of comprehension, we postpone the detailed proofs of some of the
2989 steps until after we conclude the main proof.
2990
2991 Let $A$ be a given adversary which runs in time~$t$, creates at most~$q_S$
2992 sessions, delivers at most~$q_M$ messages, and makes at most~$q_I$ queries to
2993 its $H_I(\cdot, \cdot, \cdot, \cdot)$ oracle and at most~$q_K$ queries to its
2994 $H_K(\cdot)$ oracle. Let $\G0$ be the original SK-security game of
2995 definition~\ref{def:sk}, played with adversary~$A$.
2996
2997 Game~$\G1$ is the same as game~$\G0$ except, if the initialization function
reports two parties as having the same public key (i.e., we have $X_i =
X_j$ for some $0 \le i < j < n$), we stop the game immediately and without
3000 crediting the adversary with a win. This only happens when the corresponding
3001 private keys are equal, i.e., $x_i = x_j$, and since the initialization
3002 function chooses private keys uniformly at random, this happens with
3003 probability at most $\binom{n}{2}/|G|$. Since if this doesn't happen, the
3004 game is identical to $\G0$, we can apply lemma~\ref{lem:shoup}, and see that
3005 \begin{equation}
3006 \label{eq:sk-g0-g1}
3007 \diff{0}{1} \le \frac{1}{|G|} \binom{n}{2} = \frac{n (n - 1)}{2 |G|}.
3008 \end{equation}
From game~$\G1$ onwards, we can assume that public keys for distinct
parties are themselves distinct.
3013
3014 Game~$\G2$ is the same as game~$\G1$, except that we change the way that we
3015 make parties respond to \cookie{challenge} messages $(\cookie{challenge}, R,
3016 c)$. Specifically, suppose that $S = (P_i, P_j, s)$ is a session.
3017 \begin{itemize}
3018 \item Suppose $T = (P_j, P_i, s)$ is the matching session of $S$. The game
3019 proceeds as before if $(R, c) = (R_T, c_T)$ is the challenge issued by $T$.
3020 \item Otherwise, we run the extractor $T_\Wident$ on the adversary's history
3021 so far of oracle queries $H_I(X_i, s, R, \cdot)$ to determine a pair $(r,
3022 Y)$. If $r = \bot$ then we discard the message. Otherwise, we add $R$ to
3023 the list~$C$, and return a fake response to the adversary by computing $K =
3024 H_K(r R_S)$ and handing the adversary $(\cookie{response}, R_S, E_K(Y))$.
3025 \end{itemize}
Note that the game now takes at most $O(q_I)$ times longer to process each
message delivered by the adversary, since the extractor must examine the
adversary's $H_I$-queries; this is where the $O(q_I q_M)$ term in the
theorem statement comes from.  The following lemma shows how the change
affects the adversary's probabilities of winning.
3028 \begin{lemma}
3029 \label{lem:sk-g1-g2}
3030 \begin{equation}
3031 \label{eq:sk-g1-g2}
3032 \diff{1}{2} \le \frac{q_M}{2^{\ell_I}}.
3033 \end{equation}
3034 \end{lemma}
3035
3036 Let us say that a session $S = (P_i, P_j, s)$ is \emph{ripe} if
3037 \begin{itemize}
3038 \item there is a matching session $T = (P_j, P_i, s)$, and
3039 \item $S$ is unexposed.
3040 \end{itemize}
3041 Suppose that $S$ is a ripe session, and that it has a matching session~$T$:
3042 let $Z_S = Z_T = r_S r_T P$.
3043
3044 Game~$\G3$ is the same as $\G2$, except that the game is immediately aborted
3045 if ever the adversary queries its random oracle $H_K(\cdot)$ at a value $Z_S$
3046 for any ripe session~$S$. The following lemma shows how this affects the
3047 adversary's probabilities of winning.
3048 \begin{lemma}
3049 \label{lem:sk-g2-g3}
3050 For some $t'$ within the bounds given in the theorem statement we have
3051 \begin{equation}
3052 \label{eq:sk-g2-g3}
3053 \diff{2}{3} \le q_S \InSec{mcdh}(G; t', q_K).
3054 \end{equation}
3055 \end{lemma}
3056
3057 Game~$\G4$ is the same as $\G3$ except that the game is immediately aborted
3058 if ever the adversary sends a response message to a ripe session~$S$ which
3059 wasn't output by its matching session as a response to $S$'s challenge, with
3060 the result that $S$ completes.
3061
Let's make this more precise.  Let $S$ and~$T$ be a pair of matching
sessions.  Let $C_S = (\cookie{challenge}, R_S, c_S)$ be the challenge
message sent by $S$.  Let $M_T$ be the set of messages which $T$ has sent
upon delivery of $C_S$.  Then, in $\G4$, we abort the game if, for any pair $S$
3066 and~$T$ of matching, unexposed sessions, $S$ has completed as a result of
3067 being sent a message $\mu \notin M_T$. We have the following lemma.
3068 \begin{lemma}
3069 \label{lem:sk-g3-g4}
3070 For a $t'$ within the stated bounds, we have
3071 \begin{equation}
3072 \label{eq:sk-g3-g4}
3073 \diff{3}{4} \le q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) +
3074 n \cdot \InSec{mcdh}(G; t', q_M + q_I) \bigr)
3075 \end{equation}
3076 \end{lemma}
3077
3078 Finally, let us consider the state we're in with $\G4$.
3079 \begin{itemize}
\item No ripe session completes except as a result of the adversary faithfully
3081 delivering messages between it and its matching session.
\item The adversary never queries $H_K(\cdot)$ at $Z_S$ for any ripe session~$S$.  If we set
3083 $K_S = (K_{S, 0}, K_{S, 1}) = H_K(Z_S)$, then $K_{S, 1}$ is the key output
3084 by $S$ when it completes.
3085 \item If $S$ and $T$ are matching ripe sessions, then $K_S = K_T$, since $Z_S
3086 = r_S R_T = r_T R_S = Z_T$.
3087 \item For any ripe session~$S$, $K_{S, 1}$ is uniformly distributed in
3088 $\Bin^\kappa$ and independent of the adversary's view.
3089 \item If $S = (P_i, P_j, s)$ and $T = (P_j, P_i, s)$ are matching ripe
sessions, then $Z_S$ depends only on $r_S$ and $r_T$. Hence, once $S$ and~$T$
3091 complete, and erase their states, $Z_S$ is independent of everything except
3092 the messages sent between the two sessions. In particular, $Z_S$ is
3093 independent of the long-term secrets $x_i$ and~$x_j$, so if either player
3094 is later corrupted, the key $K_{S, 1}$ remains independent of the
3095 adversary's view.
3096 \item Hence, the keys output by unexposed sessions are indistinguishable from
3097 freshly-generated random strings, and remain so indefinitely.
3098 \end{itemize}
3099 We conclude that, for any adversary $A$,
3100 \begin{equation}
3101 \label{eq:sk-g4}
3102 \Pr[V_4] = 0 \qquad \text{and} \qquad \Pr[W_4] = \frac{1}{2}.
3103 \end{equation}
3104 Putting equations~\ref{eq:sk-g0-g1}--\ref{eq:sk-g4} together, we find
3105 \begin{spliteqn}
\Adv{sk}{\Wkx^{G, \E}}(A) \le
3107 2 q_S \bigl(\InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
3108 \InSec{mcdh}(G; t', q_K) +
3109 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) + {}
3110 \frac{n (n - 1)}{|G|} +
3111 \frac{2 q_M}{2^{\ell_I}}.
3112 \end{spliteqn}
3113 The theorem follows, since $A$ was chosen arbitrarily.
3114
3115
3116 \begin{proof}[Proof of lemma~\ref{lem:sk-g1-g2}]
3117 The two games $\G1$ and~$\G2$ differ only in whether they accept or reject
3118 particular challenge messages $(\cookie{challenge}, R, c)$.
3119
3120 We claim firstly that no message is \emph{accepted} by $\G2$ which would
3121 have been rejected by $\G1$. To prove the claim, it is sufficient to note
3122 that the extractor's output, if not $\bot$, is always correct, and hence if
3123 $\G2$ accepts a message then $\G1$ would have done so too.
3124
3125 Since $\G2$ also behaves identically when the adversary submits to~$S$ the
3126 challenge from the matching session~$T$, we have nothing to prove in this
3127 case. Let $F$ be the event that the adversary submits a message
3128 $(\cookie{challenge}, R, c)$ to a session~$S$ which $S$ would have accepted
3129 in $\G1$ but would be rejected by the new rule in~$\G2$. By
3130 lemma~\ref{lem:shoup} we have $\diff{1}{2} \le \Pr[F]$. To prove the
3131 current lemma, therefore, we must show that $\Pr[F] \le q_M/2^{\ell_I}$.
3132
3133 Rather than consider individual challenge messages, we consider
3134 \emph{classes} of messages. We shall refer to a quadruple~$\Cid = (i, j,
3135 s, R)$ as a \emph{class-id}, and define some useful functions:
3136 \begin{itemize}
3137 \item the class's \emph{session} $\Csession(\Cid) = (P_i, P_j, s)$;
3138 \item the class's \emph{index} $\Cindex(\Cid)$ is $r \in I$ where $R = r
3139 P$, which is well-defined by lemma~\ref{lem:unique-dl};
3140 \item the class's \emph{query} $\Cquery(\Cid) = (X_j, s, R, x_i R)$;
3141 \item the class's \emph{hash} $\Chash(\Cid) = H_I(\Cquery(\Cid)) = H_I(X_j,
3142 s, R, x_i R)$;
3143 \item the class's \emph{check-value} $\Ccheck(\Cid) = \Chash(\Cid) \xor
3144 \Cindex(\Cid)$;
3145 \item the class's \emph{check-set} $\Ccheckset(\Cid)$ is the set of
3146 check-values $c$ such that a message $(\cookie{challenge}, R, c)$ was
3147 sent to session $S = (P_i, P_j, s)$; and
3148 \item the class's \emph{count} $\Ccount(\Cid) = |\Ccheckset(\Cid)|$.
3149 \end{itemize}
3150
3151 Consider any class-id~$\Cid = (i, j, s, R)$. A natural question which
3152 arises is: which participants have issued $\Cid$'s query, i.e., queried
3153 $H_I$ at $\Cquery(\Cid)$?
3154
3155 We can characterise the $H_I(\cdot, \cdot, \cdot, \cdot)$ queries of a
3156 session $U = (P_{i'}, P_{j'}, s')$ as follows:
3157 \begin{itemize}
3158 \item computing the check-value for the challenge $R_U$ by querying
3159 $H_I(X_{i'}, s', R_U, r_U X_{j'})$, and
3160 \item checking an incoming challenge~$R'$ by querying $H_I(X_{j'}, s', R',
3161 x_{i'} R')$.
3162 \end{itemize}
3163 The class~$\Cid$'s query $\Cquery(\Cid)$ is $U$'s check-value query if
3164 \[ (j, i, s, R) = (i', j', s', R_U) \]
3165 i.e., $U$ is the matching session of $\Csession(\Cid)$, and moreover $R =
3166 R_U$ is the challenge value issued by $U$. For any $c \in
3167 \Ccheckset(\Cid)$, if $c = \Ccheck(\Cid)$ then $(\cookie{challenge}, R, c)$
3168 is precisely the challenge message issued by~$U$ to $\Csession(\Cid)$; the
3169 rules for handling this message didn't change. However, if $c \ne
3170 \Ccheck(\Cid)$ then the message would have been rejected in $\G1$, and we
3171 have already shown that $\G2$ continues to reject all messages rejected by
3172 $\G1$.
3173
3174 Let us say that a class-id~$\Cid = (i, j, s, R)$ is \emph{bad} if
3175 \begin{enumerate}
3176 \item the value $R$ is not the challenge issued by $\Csession(\Cid)$'s
3177 matching session, and
3178 \item the adversary has not issued $\Cid$'s query $\Cquery(\Cid)$,
3179 \emph{but}
3180 \item $\Ccheck(\Cid) \in \Ccheckset(\Cid)$, so one of the check-values
3181 submitted to $S$ was actually correct.
3182 \end{enumerate}
3183 We claim that our extractor will work perfectly unless some class-id is
3184 bad. Certainly, if $R$ was issued by the matching session, there is
3185 nothing to prove; if the adversary has issued the relevant query then the
3186 extractor will recover $\Cindex(\Cid)$ just fine; and if $\Ccheck(\Cid)
3187 \notin \Ccheckset(\Cid)$ then all messages in the class would have been
3188 rejected by $\G1$ anyway.
3189
3190 Let $B(\Cid)$ be the event that the class~$\Cid$ is bad. We claim that
3191 \[ \Pr[B(\Cid)] \le \frac{\Ccount(\Cid)}{2^{\ell_I}}. \]
3192 The present lemma follows, since
3193 \[ \diff{1}{2}
3194 \le \Pr[F]
3195 \le \sum_\Cid \Pr[B(\Cid)]
3196 \le \sum_\Cid \frac{\Ccount(\Cid)}{2^{\ell_I}}
3197 = \frac{1}{2^{\ell_I}} \sum_\Cid \Ccount(\Cid)
3198 \le \frac{q_M}{2^{\ell_I}}
3199 \]
3200 as required.
3201
3202 Now observe that, in $\G2$, sessions don't actually check incoming
3203 challenges in this way any more -- instead we run the extractor. So, to
3204 prove the claim, we consider a class~$\Cid$ where properties~1 and~2 above
3205 hold. The correct hash $\Chash(\Cid)$ is then independent of the rest of
3206 the game, so the probability that $\Ccheck(\Cid) \in \Ccheckset(\Cid)$ is
3207 precisely $\Ccount(\Cid)/2^{\ell_I}$ as required.
3208
This completes the proof of the lemma.
3210 \end{proof}
3211
3212 \begin{proof}[Proof of lemma~\ref{lem:sk-g2-g3}]
3213 Let $F$ be the event that the adversary makes a query $H_K(Z_S)$ for some
ripe session~$S$.  Since $\G3$ proceeds exactly as $\G2$ did unless $F$
occurs, we apply lemma~\ref{lem:shoup}, which tells us that $\diff{2}{3}
\le \Pr[F]$.  We must therefore bound this probability.
3217
3218 To do this, we consider a new game~$\G3'$, which is the same as $\G3$,
3219 except that, at the start of the game, we choose a random number $k \inr
3220 \Nupto{q_S}$. For $0 \le i < q_S$, let $S_i$ be the $i$th session created
3221 by the adversary. We define $F'$ to be the event that the adversary
3222 queries $H_K(Z_{S_k})$ when $S_k$ is ripe.
3223
3224 The lemma now follows from these two claims.
3225
3226 \begin{claim}
3227 $\Pr[F] \le q_S \Pr[F']$.
3228 \end{claim}
3229 To see this, for any session~$S$, let $F_S$ be the event that the adversary
3230 queries~$H_K(Z_S)$ when $S$ is ripe. Then
3231 \[ \Pr[F] \le \sum_{0\le i<q_S} \Pr[F_{S_i}]. \]
3232 Hence,
3233 \[ \Pr[F'] = \Pr[F_{S_k}] = \sum_{0\le i<q_S} \Pr[F_{S_i}] \Pr[k = i]
3234 = \frac{1}{q_S} \sum_{0\le i<q_S} \Pr[F_{S_i}]
3235 \ge \frac{\Pr[F]}{q_S}
3236 \]
3237 proving the claim.
3238
3239 \begin{claim}
For some $t' = t + O(n) + O(q_S) + O(q_M q_I) + O(q_K)$, we have
3241 $\Pr[F'] \le \InSec{mcdh}(G; t', q_K).$
3242 \end{claim}
3243 To prove this claim, we construct an adversary~$B$ which solves the MCDH
3244 problem in~$G$. The adversary works as follows.
3245 \begin{enumerate}
3246 \item It is given a pair $(R^*, S^*) = (r^* P, s^* P)$ of group elements;
3247 its objective is to make a verification-oracle query $V(Z^*)$ where $Z^*
3248 = r^* s^* P$.
3249 \item It sets up a simulation of the game~$\G3'$, by running the
3250 $\id{init}$ function, and simulating all of the parties. In particular,
3251 it chooses a random~$k \in \Nupto{q_S}$.
3252 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3253 and $H_I(\cdot, \cdot, \cdot, \cdot)$, which choose random outputs for
3254 new, fresh inputs. However, whenever $A$ queries $H_K(\cdot)$ on a group
3255 element $Z$, $B$ also queries $V(Z)$.
3256 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3257 instructions faithfully, until the $k$th session-creation.
3258 \item When creating the $k$th session $S = S_k = (P_i, P_j, s)$, $B$ has
3259 party~$P_i$ choose $R^*$ as its challenge, rather than choosing $r_S$ and
3260 setting $R_S = r_S P$. Because it simulates all the parties, $B$ can
compute $Y_S = x_j R^*$, which is still correct.
3262 \item If $A$ requests the creation of a matching session $T = (P_j, P_i,
3263 s)$ then $B$ has party~$P_j$ choose $S^*$ as its challenge. Again, $B$
3264 computes $Y_T = x_i S^*$.
3265 \item If $A$ ever corrupts the parties $P_i$ or $P_j$, or reveals the
3266 session state of $S$ or $T$ then $B$ stops the simulation abruptly and
3267 halts.
3268 \end{enumerate}
3269 Adversary $B$'s running time is within the bounds required of $t'$, and $B$
3270 makes at most $q_K$ queries to $V(\cdot)$; we therefore have
3271 \[ \Pr[F'] \le \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_K) \]
3272 as required.
3273 \end{proof}
3274
3275 \begin{proof}[Proof of lemma~\ref{lem:sk-g3-g4}]
3276 Let $F_4$ be the event under which we abort the game~$\G4$. Clearly, if
$F_4$ doesn't occur, games $\G3$ and $\G4$ proceed identically, so we can
3278 apply lemma~\ref{lem:shoup} to see that $\diff{3}{4} \le \Pr[F_4]$.
3279 Bounding $\Pr[F_4]$, however, is somewhat complicated. We use a further
3280 sequence of games.
3281
3282 Firstly, let $\G5$ be like $\G4$ with the exception that we choose, at
random, an integer~$k \inr \Nupto{q_S}$.  As we did in the proof of
lemma~\ref{lem:sk-g2-g3}, let $S_i$ be the $i$th session created by the
3285 adversary. For each session~$S_i$, let $T_i$ be its matching session, if
3286 there is one. We define $F_5$ to be the event that
3287 \begin{itemize}
3288 \item $S_k$ completes immediately following delivery of a message $\mu
3289 \notin M_{T_k}$, and
3290 \item $S_k$ was ripe at this point.
3291 \end{itemize}
3292 For games~$\G{i}$, for $i > 5$, we define the event $F_i$ to be the event
3293 corresponding to $F_5$ in $\G{i}$. Note that if $S_k$ \emph{is} sent a
3294 message in $M_{T_k}$ then $S_k$ immediately completes.
3295
3296 \begin{claim}
$\Pr[F_4] \le q_S \Pr[F_5]$.
3298 \end{claim}
3299 This claim is proven exactly as we did for claim~1 of
3300 lemma~\ref{lem:sk-g2-g3}.
3301
3302 Let~$\G6$ be the same as $\G5$ except that we change the encrypted
3303 responses of session~$S_k$ and its matching session~$T_k$. Let $K^* =
3304 (K_0^*, K_1^*) = H_K(Z_S)$. Then, rather than sending $(\cookie{response},
3305 R_S, E_{K_0^*}(Y_T))$, session~$S$ sends $(\cookie{response}, R_S,
3306 E_{K_0^*}(1^{\ell_G}))$.
3307 \begin{claim}
3308 $|\Pr[F_6] - \Pr[F_5]| \le \InSec{ind-cca}(\E; t', q_M, q_M).$
3309 \end{claim}
3310 To prove this claim, we construct an adversary~$B$ which attacks the
3311 IND-CCA security of our encryption scheme $\E$. The adversary~$B$ works as
3312 follows.
3313 \begin{enumerate}
3314 \item It is given no input, but a pair of oracles $E(\cdot, \cdot)$ and
3315 $D(\cdot)$; the former encrypts either the left or right input, according
3316 to a hidden bit, and the latter decrypts ciphertexts other than those
3317 returned by $E(\cdot, \cdot)$. Its goal is to guess the hidden bit.
3318 \item It sets up a simulation of the game~$\G5$, by running the $\id{init}$
3319 function, and simulating all of the parties. In particular, it chooses a
3320 random $k \in \Nupto{q_S}$.
3321 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3322 and $H_I(\cdot, \cdot, \cdot, \cdot)$.
3323 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3324 instructions faithfully, except for the matching sessions $S_k$ and
3325 $T_k$. Let $S = S_k = (P_i, P_j, s)$, and $T = T_k = (P_j, P_i, s)$.
3326 \item Suppose $T$ is sent the message $C_S = (\cookie{challenge}, R_S,
3327 c_S)$. Rather than computing $K^* = H_K(r_T R_S)$ and performing the
3328 encryption properly, $B$ queries its left-or-right encryption oracle
$E(\cdot, \cdot)$ on the pair $(1^{\ell_G}, x_j R_S)$, and sends the resulting
3330 ciphertext~$\chi$ back to~$S$ as part of a message $(\cookie{response},
3331 R_T, \chi)$. The set $M_T$ is precisely the set of messages constructed
3332 in this fashion. (Recall that challenge messages other than $C_S$ aren't
3333 actually delivered to $T$, since we simulate the responses using the
3334 extractor, as of $\G2$.)
3335 \item Suppose $S$ is sent a message $M = (\cookie{response}, R_T, \chi) \in
3336 M_T$. We immediately stop the simulation, and $B$ returns $0$.
3337 \item Suppose, instead, that $S$ is sent some message $M' =
3338 (\cookie{response}, R, \chi) \notin M_T$. There are two cases to
3339 consider. If $R = R_T$ then we must have $\chi$ distinct from the
3340 ciphertexts returned by the $E(\cdot, \cdot)$ oracle, so we can invoke
3341 the decryption oracle $D(\cdot)$ on $\chi$ to obtain a response $Y$.
3342 Alternatively, if $R \ne R_T$, we can compute the key $K = (K_0, K_1) =
3343 H_K(r_S R)$, and recover $Y = D_{K_0}(\chi)$. In either case, if $Y =
r_S X_j$ then $S$ would complete at this point: $B$ stops the simulation
3345 and returns $1$.
3346 \item If $A$ exposes $S$ (by corrupting $P_i$ or~$P_j$, or revealing $S$ or
3347 $T$) then we stop the simulation and $B$ returns $0$.
3348 \item Finally, if the game stops, either because $A$ halts, or because of
3349 one of the special rules introduced in earlier games, $B$ returns $0$.
3350 \end{enumerate}
3351 It is clear that $B$'s oracle queries are acceptable, since $|x_j R_S| =
3352 \ell_G$ by definition, and $B$ never queries $D(\cdot)$ on a ciphertext
3353 returned by its encryption oracle. By the rules of~$\G3$, we know that the
3354 game stops immediately if $A$ ever queries $Z_S$, so the key $K^*$ is
3355 independent of everything in $A$'s view except the ciphertexts $\chi$
3356 output by $S$ and $T$. Therefore, if the hidden bit of the IND-CCA game is
3357 $1$, $B$ accurately simulates $\G5$, whereas if the bit is $0$ then $B$
accurately simulates $\G6$.  We issue no more than $q_M$ encryption or
3359 decryption queries. Finally, $B$'s running time is within the bounds
3360 allowed for $t'$. Therefore,
3361 \[ \Adv{ind-cca}{\E}(B) = \Pr[F_5] - \Pr[F_6]
3362 \le \InSec{ind-cca}(\E; t', q_M, q_M). \]
3363 We construct the adversary~$\bar{B}$ which is the same as $B$ above, except
3364 that $\bar{B}$ returns $0$ whenever $B$ returns~$1$, and \emph{vice versa}.
3365 Clearly
3366 \[ \Adv{ind-cca}{\E}(\bar{B})
3367 = (1 - \Pr[F_5]) - (1 - \Pr[F_6])
3368 = \Pr[F_6] - \Pr[F_5]
3369 \le \InSec{ind-cca}(\E; t', q_M, q_M).
3370 \]
3371 This proves the claim.
3372
3373 Let $\G7$ be the same as $\G6$, except that at the start of the game we
3374 choose a random $m \in \Nupto{n}$, and when the adversary creates the
3375 session $S = S_k = (P_i, P_j, s)$, we abort the game unless $j = m$.
3376 Clearly we have $\Pr[F_6] = n \Pr[F_7]$.
3377
Finally, we can explicitly bound $\Pr[F_7]$.  In $\G7$, the adversary's view is
independent of the correct response $Y_S = r_S X_j = x_j R_S$ to $S$'s
3380 challenge. Therefore, if $A$ manages to send any message $\mu \notin M_T$
3381 which causes $S$ to complete, then it has impersonated~$P_j$.
3382 \begin{claim}
3383 $\Pr[F_7] \le \InSec{mcdh}(G; t', q_M + q_I)$.
3384 \end{claim}
3385 The present lemma follows from this and the previous claims.
3386
3387 To prove the claim formally, we construct an adversary~$B'$, which behaves
3388 as follows.
3389 \begin{enumerate}
\item It is given as input a public key $X^*$ and a single challenge $(R^*,
  c^*)$, a random oracle $H^*_I(\cdot, \cdot)$, and an oracle $V(\cdot,
  \cdot)$, which verifies responses $(R, Y)$.  Its goal is to invoke
  $V(\cdot, \cdot)$ with a correct response to the challenge.
\item It chooses a random $k \in \Nupto{q_S}$ and $m \in \Nupto{n}$.  It
  sets up a simulation of the game~$\G7$, by running the $\id{init}$
  function, and simulating all of the parties, except that it gives party
  $P_m$ the public key $X^*$.  This makes no difference, since $P_m$
  doesn't actually need to give any `honest' responses because of the
  change we made in $\G6$.
\item It sets up accurate simulations of the random oracles $H_K(\cdot)$
  and $H_I(\cdot, \cdot, \cdot, \cdot)$, with one exception -- see below.
\item It runs $A$ in its simulated game.  It responds to all of $A$'s
  instructions faithfully, except for the session $S_k$.  Let $S = S_k =
  (P_i, P_j, s)$, and let $T = T_k = (P_j, P_i, s)$ be its matching
  session.
\item When session~$S$ is created, $B'$ checks that $j = m$, and if not
  stops the simulation and halts.  Otherwise, $B'$ uses its given
  challenge: session~$S$ sends $C_S = (\cookie{challenge}, R^*, c^*)$ as
  its challenge to~$T$.
\item When $A$ makes a query $H_I(X^*, s, R, Y)$, $B'$ answers it by
  querying its \emph{own} random oracle $H^*_I(R, Y)$.
\item When $S$ receives a message $(\cookie{response}, R, \chi)$, $B'$
  computes $(K_0, K_1) = H_K(r_S R)$, and $Y = D_{K_0}(\chi)$.  If $Y \ne
  \bot$ then $B'$ calls $V(R, Y)$.
\item If $A$ reveals $S$ or corrupts $P_i$ or $P_j$ then $B'$ stops the
  simulation immediately and halts.
\end{enumerate}
The running time of $B'$ is within the bounds required of $t'$; it makes at
most $q_I$ random-oracle and at most $q_M$ verification queries.  Clearly
$B'$ succeeds whenever $F_7$ occurs.  The claim follows from
theorem~\ref{thm:wident-sound}.
\end{proof}


\subsection{Proof of theorem~\ref{thm:sk2}}
\label{sec:sk2-proof}

The proof is almost identical to the proof of theorem~\ref{thm:sk}, in
appendix~\ref{sec:sk-proof}.  Unfortunately a black-box reduction doesn't
seem possible.

We use the games and notation of appendix~\ref{sec:sk-proof}.

The change to the check-value calculation doesn't affect key-generation at
all, so the transition to $\G1$ goes through as before.

The transition from $\G1$ to $\G2$ -- answering challenges using the
extractor -- needs a little care.  Let $S = (P_i, P_j, s)$ be a session, and
consider an incoming message $(\cookie{challenge}, R, c)$.
\begin{itemize}
\item If $T = (P_j, P_i, s)$ is the matching session to $S$, and $R = R_T$ is
  the public challenge value of $T$, and $c = r_T \xor H_I(R_S, X_j, s, R_T,
  r_T X_i)$ is the check-value output by $T$ when it received
  $(\cookie{pre-challenge}, R_S)$ as input, then $S$ replies as it did in
  $\G1$.
\item If the challenge message is any other message, then we use the
  extractor.
\end{itemize}
As in lemma~\ref{lem:sk-g1-g2}, we examine which sessions could have queried
$H_I(R_S, X_j, s, R, x_i R)$, and for the same reasons conclude that only the
matching session would have done this, and only in response to the
pre-challenge $R_S$.  It follows that $\diff{1}{2} \le q_M/2^{\ell_I}$ as
before.

The remaining game steps go through unchanged.  In particular, we conclude
that a ripe session will only complete if the adversary has faithfully
relayed the messages from its matching session, and that the session key is
independent of the adversary's view.  The theorem follows.


\subsection{Sketch proof of single-key protocol for secure channels}
\label{sec:sc-proof}

We want to show that the Wrestlers Key-Exchange protocol, followed by use of
the encryption scheme $\E$, with the \emph{same} key $K = K_0$, still
provides secure channels.

\subsubsection{Secure channels definition}
We (very briefly!) recall the \cite{Canetti:2001:AKE} definition of a secure
channels protocol.  We again play a game with the adversary.  At the
beginning, we choose a bit $b^* \inr \{0, 1\}$ at random.  We allow the
adversary the ability to establish \emph{secure channels} sessions between
the parties.  Furthermore, for any running session $S = (P_i, P_j, s)$, we
allow the adversary to request $S$ to send a message~$\mu$ through its
secure channel.  Finally, the adversary is allowed to choose one ripe
\emph{challenge} session, and ask it to send one of a \emph{pair} of
messages $(\mu_0, \mu_1)$, subject to the restriction that $|\mu_0| =
|\mu_1|$; the session sends message $\mu_{b^*}$.  The adversary may not
expose the challenge session.

The adversary wins if (a)~it can guess the bit $b^*$, or (b)~it can cause a
ripe session $S$ (i.e., an unexposed, running session) with a matching
session~$T$ to output a message other than one that the adversary requested
that $T$ send.
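(In what follows we measure the adversary's success at~(a) by its guessing
advantage $2\Pr[b = b^*] - 1$, where $b$ is its guess at~$b^*$; this is the
quantity bounded at the end of the proof sketch below.)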

\subsubsection{Protocol definition}
The protocol begins with Wrestlers key-exchange.  The encryption in the
key-exchange protocol is performed as $E_K(\cookie{kx}, \cdot)$; encryption
for secure channels is performed as $E_K(\cookie{sc}, i, o, \cdot)$, where
$i$ is a sequence number to prevent replays and $o \in \{S, T\}$ identifies
the sender.
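As a concrete, purely illustrative sketch of this domain separation (the
byte encodings and the function \texttt{enc} below are hypothetical
stand-ins, not a normative wire format):
\begin{verbatim}
import struct

def encrypt_kx(enc, payload):
    # Key-exchange messages are tagged `kx' before encryption.
    return enc(b"kx" + payload)

def encrypt_sc(enc, seq, sender, payload):
    # Secure-channel messages are tagged `sc' and carry the
    # sequence number i (to prevent replays) and the sender
    # flag o, here encoded as 0 for S and 1 for T.
    assert sender in (0, 1)
    return enc(b"sc" + struct.pack(">QB", seq, sender) + payload)
\end{verbatim}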

\subsubsection{Proof sketch}
We use the games and notation of appendix~\ref{sec:sk-proof}.

The proof goes through as far as the step between $\G5$ and $\G6$ in the
proof of lemma~\ref{lem:sk-g3-g4}.  Here we make the obvious adjustments to
our adversary against the IND-CCA security of $\E$.  (Our adversary will need
to use the left-or-right oracle for messages sent using the secure channel
built on $K^*$.  That's OK.)

In $\G4$, we know that ripe sessions agree on the correct key, and that the
adversary never queries the random oracle on $Z_S$, so the key is independent
of the adversary's view.

We define a new game $\G8$, which is like $\G4$, except that we stop the game
if the adversary ever forges a message sent over the secure channel.  That
is, if a ripe session $S$ ever announces receipt of a message not sent (at
the adversary's request) by its matching session $T$.  Let $F_8$ be the event
that a forgery occurs.  We apply lemma~\ref{lem:shoup}, which tells us that
$\diff{4}{8} \le \Pr[F_8]$.  To bound $\Pr[F_8]$, we isolate a session at
random (as in lemmata \ref{lem:sk-g2-g3} and~\ref{lem:sk-g3-g4}), which tells
us that
\begin{equation}
  \label{eq:sc-g4-g8}
  \diff{4}{8} \le q_S \cdot \InSec{int-ptxt}(\E; t', q_M, q_M).
\end{equation}
Finally, we can bound the adversary's advantage at guessing the hidden bit
$b^*$.  We isolate (we hope) the challenge session $S$ by choosing a target
session at random, as before.  Let $K^* = H_K(Z_S)$ be the key agreed by the
session (if it becomes ripe).  We define an adversary $B$ against the IND-CCA
security of $\E$.  The adversary $B$ simulates the game.  If the adversary
exposes the target session, or doesn't choose it as the challenge session,
$B$ fails (and outputs $0$); otherwise it uses the left-or-right encryption
oracle to encrypt both of the adversary's message choices, and outputs the
adversary's guess.  Let $b$ be the bit output by $B$.  Then
\begin{eqnarray*}[rl]
  \Pr[b = b^*]
  & = \Pr[b = b^* \wedge b^* = 1] + \Pr[b = b^* \wedge b^* = 0] \\
  & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
      \Pr[b = b^* \mid b^* = 0] \bigr) \\
  & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
      (1 - \Pr[b \ne b^* \mid b^* = 0]) \bigr) \\
  & = \frac{1}{2}\bigl( \Pr[b = 1 \mid b^* = 1] -
      \Pr[b = 1 \mid b^* = 0] + 1 \bigr) \\
  & = \frac{1}{2}\bigl(1 + q_S\,\Adv{ind-cca}{\E}(B) \bigr) \\
  & \le \frac{1}{2} \bigl( 1 + q_S\,\InSec{ind-cca}(\E; t', q_M, q_M) \bigr).
  \eqnumber
\end{eqnarray*}
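Rearranging, the adversary's advantage at guessing $b^*$ is
\[ 2\Pr[b = b^*] - 1 \le q_S\,\InSec{ind-cca}(\E; t', q_M, q_M); \]
together with the forgery bound~(\ref{eq:sc-g4-g8}), this completes the
sketch.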

\end{document}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End: