wrestlers.tex: Hack the fancy preamble configuration so that it works.
[doc/wrestlers] / wrestlers.tex
CommitLineData
a6e375a6
MW
1%%% -*-latex-*-
2%%%
3%%% The Wrestlers Protocol: secure, deniable key-exchange
4%%%
5%%% (c) 2006 Mark Wooding
6%%%
7
b12a993f
MW
8\makeatletter
%% \@doit expands to its argument; together with the kernel's \@gobble it
%% lets the preceding conditional either execute or discard the following
%% brace group.
9\def\@doit#1{#1}
%% If \iffancystyle was not already defined (e.g. by a wrapper file that
%% \input s this one), define it here, defaulting to ON.
10\ifx\iffancystyle\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
11{\newif\iffancystyle\fancystyletrue}
%% Likewise default \ifshort to OFF unless it was set up externally.
12\ifx\ifshort\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
13{\newif\ifshort\shortfalse}
%% Define \ifpdfing (initially false) only in the non-fancy configuration;
%% in the fancy configuration it is presumably supplied by the external
%% driver that also set \iffancystyle -- TODO confirm against the build.
14\iffancystyle\expandafter\@gobble\else\expandafter\@doit\fi{\newif\ifpdfing}
15\makeatother
32609df3
MW
16
17\typeout{Options:}
18\typeout{ Fancy style: \iffancystyle ON\else OFF\fi}
19\typeout{ Short version: \ifshort ON\else OFF\fi}
20
a6e375a6
MW
21\errorcontextlines=\maxdimen
22\showboxdepth=\maxdimen
23\showboxbreadth=\maxdimen
24
25\iffancystyle
32609df3
MW
26 \documentclass{strayman}
27 \parskip=0pt plus 1pt \parindent=1.2em
a6e375a6
MW
28 \usepackage[T1]{fontenc}
29 \usepackage[palatino, helvetica, courier, maths=cmr]{mdwfonts}
30 \usepackage[within = subsection, mdwmargin]{mdwthm}
31 \usepackage{mdwlist}
32 \usepackage{sverb}
32609df3 33 \ifpdfing\else
d8139f2c
MW
34 \PassOptionsToPackage{dvips}{xy}
35 \fi
a6e375a6 36\else
32609df3
MW
37 \PassOptionsToClass{runningheads}{llncs}
38 \documentclass{llncs}
a6e375a6
MW
39\fi
40
41\PassOptionsToPackage{show}{slowbox}
42%\PassOptionsToPackage{hide}{slowbox}
32609df3 43\usepackage{mdwtab, mdwmath, crypto}
a6e375a6
MW
44\usepackage{slowbox}
45\usepackage{amssymb, amstext}
46\usepackage{url, multicol}
47\usepackage{tabularx}
48\DeclareUrlCommand\email{\urlstyle{tt}}
49\ifslowboxshow
50 \usepackage[all]{xy}
51 \turnradius{4pt}
52\fi
32609df3 53\usepackage{mathenv}
a6e375a6 54
32609df3 55\newcommand{\Nupto}[1]{\{0, 1, \ldots, #1 - 1\}}
a6e375a6 56\iffancystyle
32609df3 57 \let\next\title
a6e375a6 58\else
32609df3 59 \def\next[#1]{\titlerunning{#1}\title}
a6e375a6
MW
60\fi
61\next
32609df3
MW
62 [The Wrestlers Protocol]
63 {The Wrestlers Protocol%
64 \ifshort\thanks{This is an extended abstract; the full version
65 \cite{Wooding:2006:WP} is available from
66 \texttt{http://eprint.iacr.org/2006/386/}.}\fi \\
a6e375a6
MW
67 A simple, practical, secure, deniable protocol for key-exchange}
68\iffancystyle
69 \author{Mark Wooding \\ \email{mdw@distorted.org.uk}}
70\else
71 \author{Mark Wooding}
72 \institute{\email{mdw@distorted.org.uk}}
73\fi
74
75\iffancystyle
76 \bibliographystyle{mdwalpha}
77 \let\epsilon\varepsilon
78 \let\emptyset\varnothing
79 \let\le\leqslant\let\leq\le
80 \let\ge\geqslant\let\geq\ge
81 \numberwithin{table}{section}
82 \numberwithin{figure}{section}
83 \numberwithin{equation}{subsection}
84 \let\random\$
85\else
32609df3 86 \bibliographystyle{splncs}
a6e375a6
MW
87 \expandafter\let\csname claim*\endcsname\claim
88 \expandafter\let\csname endclaim*\endcsname\endclaim
89\fi
90
a6e375a6
MW
91\let\Bin\Sigma
92\let\emptystring\lambda
93\edef\Pr{\expandafter\noexpand\Pr\nolimits}
94\newcommand{\bitsto}{\mathbin{..}}
95\newcommand{\E}{{\mathcal{E}}}
96\newcommand{\M}{{\mathcal{M}}}
97\iffancystyle
98 \def\description{%
99 \basedescript{%
100 \let\makelabel\textit%
101 \desclabelstyle\multilinelabel%
102 \desclabelwidth{1in}%
103 }%
104 }
105\fi
106\def\fixme#1{\marginpar{FIXME}[FIXME: #1]}
107\def\hex#1{\texttt{#1}_{x}}
108
32609df3
MW
%% longproof: in the full version, behaves exactly like `proof'; in the
%% short version, typesets #1 (e.g. a pointer to the full paper) and then
%% discards the entire proof body via \ignore.
109\newenvironment{longproof}[1]{%
110 \ifshort#1\expandafter\ignore
111 \else\proof\fi
112}{%
%% Matching end: close the proof environment only if it was opened.
113 \ifshort\else\endproof\fi
114}
115
a6e375a6
MW
%% \dbox{text}: typeset `text' in a vertical box in which \\ starts a new
%% line; each line is an \hbox carrying a \strut for even baseline spacing.
116\def\dbox#1{%
117 \vtop{%
%% Locally redefine \\: close the current line's \hbox and open a new one.
118 \def\\{\unskip\egroup\hbox\bgroup\strut\ignorespaces}%
119 \hbox{\strut#1}%
120 }%
121}
122
123\def\Wident{\Xid{W}{ident}}
124\def\Wkx{\Xid{W}{kx}}
125\def\Wdkx{\Xid{W}{dkx}}
126\def\Func#1#2{\mathcal{F}[#1\to#2]}
127\def\diff#1#2{\Delta_{#1, #2}}
128\def\Hid{H_{\textit{ID}}}
129
130%% protocol run diagrams
131\def\PRaction#1{#1\ar[r]}
132\def\PRcreatex#1{\PRaction{\textsf{Create session\space}#1}}
133\def\PRcreate#1#2#3{\PRcreatex{(\text{#1},\text{#2},#3)}}
134\def\PRreceivex#1{\PRaction{\textsf{Receive\space}#1}}
135\def\PRreceive#1#2{\PRreceivex{\msg{#1}{#2}}}
136\def\PRsession#1{\relax\mathstrut#1\ar[r]}
137\def\msg#1#2{(\cookie{#1},#2)}
138\def\PRresult#1{#1}
139\def\PRsendx#1{\PRresult{\textsf{Send\space}#1}}
140\def\PRsend#1#2{\PRsendx{\msg{#1}{#2}}}
141\def\PRcomplete#1{\textsf{Complete:\space}#1}
142\def\PRstate#1{\textsf{State:\space}#1}
143\def\PRignore{\textsf{(ignored)}}
144\def\PRreveal{\textsf{Session-state reveal}\ar[r]}
145\def\protocolrun#1{\[\xymatrix @R=0pt @C=2em {#1}\]}
146
32609df3
MW
%% `protocol' environment: a full-width two-column tabular*, with the two
%% parties' actions flushed left and right (the rubber \extracolsep glue
%% absorbs the slack between the columns).
147\def\protocol{%
148 \unskip\bigskip
149 \begin{tabular*}{\linewidth}%
150 {@{\qquad}l@{\extracolsep{0ptplus1fil}}r@{\qquad}}}
151\def\endprotocol{\end{tabular*}}
%% \send{arrow style}{message}: a full-width row (hence \noalign) holding a
%% centred XY-pic arrow of style #1, half a line wide, labelled with #2.
152\def\send#1#2{\noalign{%
153 \centerline{\xy\ar @{#1}|*+{\mathstrut#2}<.5\linewidth, 0pt>\endxy}}}
154
a6e375a6
MW
155%% class-ids for proof of extractor lemma
156\let\Cid=\Lambda
157\let\Csession=S
158\let\Cindex=r
159\let\Cquery=Q
160\let\Chash=H
161\let\Ccheck=c
162\let\Ccheckset=V
163\let\Ccount=\nu
164
dff0fad2
MW
165\def\HG#1{\mathbf{H}_{#1}}
166
32609df3
MW
167\iffancystyle\else
168 \let\xsssec\subsubsection\def\subsubsection#1{\xsssec[#1]{#1.}}
169\fi
170
a6e375a6
MW
171\begin{document}
172
173%%%--------------------------------------------------------------------------
174
175\maketitle
d8139f2c 176\iffancystyle \thispagestyle{empty} \fi
a6e375a6
MW
177
178\begin{abstract}
179 We describe and prove (in the random-oracle model) the security of a simple
180 but efficient zero-knowledge identification scheme, whose security is based
181 on the computational Diffie-Hellman problem. Unlike other recent proposals
182 for efficient identification protocols, we don't need any additional
183 assumptions, such as the Knowledge of Exponent assumption.
184
185 From this beginning, we build a simple key-exchange protocol, and prove
186 that it achieves `SK-security' -- and hence security in Canetti's Universal
187 Composability framework.
188
189 Finally, we show how to turn the simple key-exchange protocol into a
190 slightly more complex one which provides a number of valuable `real-life'
191 properties, without damaging its security.
192\end{abstract}
193
194\iffancystyle
195\newpage
d8139f2c 196\thispagestyle{empty}
a6e375a6
MW
197\columnsep=2em \columnseprule=0pt
198\tableofcontents[\begin{multicols}{2}\raggedright][\end{multicols}]
199%%\listoffigures[\begin{multicols}{2}\raggedright][\end{multicols}]
200%% \listoftables[\begin{multicols}{2}\raggedright][\end{multicols}]
201\newpage
202\fi
203
204%%%--------------------------------------------------------------------------
205
206\section{Introduction}
207
dff0fad2
MW
208This paper proposes protocols for \emph{identification} and
209\emph{authenticated key-exchange}.
a6e375a6 210
dff0fad2
MW
211An identification protocol allows one party, say Bob, to be sure that he's
212really talking to another party, say Alice. It assumes that Bob has some way
213of recognising Alice; for instance, he might know her public key. Our
214protocol requires only two messages -- a challenge and a response -- and has
215a number of useful properties. It is very similar to, though designed
32609df3
MW
216independently of, a recent protocol by Stinson and Wu
217\cite{Stinson:2006:EST}; we discuss their protocol and compare it to ours in
218\ifshort the full version of this paper. \else
219section~\ref{sec:stinson-ident}. \fi
a6e375a6 220
dff0fad2
MW
221Identification protocols are typically less useful than they sound. As Shoup
222\cite{Shoup:1999:OFM} points out, such a protocol provides a `secure ping', by which Bob
223can know that Alice is `there', but provides no guarantee that any other
224communication was sent to or reached her. However, there are situations
225where there is an authentic channel between two entities -- e.g., a device and a
226smartcard -- and a simple identification protocol can still be useful.
a6e375a6 227
dff0fad2
MW
228An authenticated key-exchange protocol lets Alice and Bob agree on a shared
229secret, known to them alone, even if there is an enemy who can read and
230intercept all of their messages, and substitute messages of her own. Once
231they have agreed on their shared secret, of course, they can use standard
232symmetric cryptography techniques to ensure the privacy and authenticity of
233their messages.
a6e375a6
MW
234
235
236\subsection{Desirable properties of our protocols}
237
238Our identification protocol has a number of desirable properties.
239\begin{itemize}
240\item It is \emph{simple} to understand and implement. In particular, it
241 requires only two messages.
242\item It is fairly \emph{efficient}, requiring two scalar multiplications by
243 each of the prover and verifier.
244\item It is provably \emph{secure} (in the random oracle model), assuming the
245 intractability of the computational Diffie-Hellman problem.
246\end{itemize}
247
dff0fad2 248Our key-exchange protocol also has a number of desirable
d8139f2c 249properties.
a6e375a6
MW
250\begin{itemize}
251\item It is fairly \emph{simple} to understand and implement, though there
252 are a few subtleties. In particular, it is \emph{symmetrical}. We have
253 implemented a virtual private network system based on this protocol.
254\item It is \emph{efficient}, requiring four scalar multiplications by each
255 participant. The communication can be reduced to three messages by
256 breaking the protocol's symmetry.
257\item It is provably \emph{secure} (again, in the random oracle model),
258 assuming the intractability of the computational Diffie-Hellman problem,
259 and the security of a symmetric encryption scheme.
260\item It provides \emph{perfect forward secrecy}. That is, even if a user's
261 long-term secrets are compromised, past sessions remain secure.
262\item It is \emph{deniable}. It is possible to construct simulated
263 transcripts of protocol executions between any number of parties without
264 knowing any of their private keys. The simulated transcripts are (almost)
265 indistinguishable from real protocol transcripts. Hence, a transcript
266 does not provide useful evidence that a given party was really involved in
267 a given protocol execution.
268\end{itemize}
269
32609df3 270\ifshort\else
dff0fad2 271\subsection{Asymptotic and concrete security results}
a6e375a6 272
dff0fad2
MW
273Most security definitions for identification (particularly zero-knowledge)
274and key-exchange in the literature are \emph{asymptotic}. That is, they
275consider a family of related protocols, indexed by a \emph{security
276parameter}~$k$; they require that any \emph{polynomially-bounded} adversary has
277only \emph{negligible} advantage.  A polynomially-bounded adversary is one whose
278running time is bounded by some polynomial~$t(k)$.  The security definition
279requires that, for any such polynomially-bounded adversary, and any
280polynomial $p(k)$, the adversary's advantage is less than $1/p(k)$ for all
281sufficiently large values of $k$.
282
283Such asymptotic notions are theoretically interesting, and have obvious
284connections to complexity theory. Unfortunately, such an asymptotic result
285tells us nothing at all about the security of a particular instance of a
286protocol, or what parameter sizes one needs to choose for a given level of
287security against a particular kind of adversary. Koblitz and Menezes
288\cite{Koblitz:2006:ALP} (among other useful observations) give examples of
289protocols, proven to meet asymptotic notions of security, whose security
290proofs guarantee nothing at all for the kinds of parameters typically used in
291practice.
292
293Since, above all, we're interested in analysing a practical and implemented
294protocol, we follow here the `practice-oriented provable security' approach
295largely inspired by Bellare and Rogaway, and exemplified by
296\cite{Bellare:1994:SCB,Bellare:1995:XMN,Bellare:1995:OAE,Bellare:1996:ESD,%
297Bellare:1996:KHF,Bellare:2000:CST}; see also \cite{Bellare:1999:POP}.
298Rather than attempting to say, formally, whether or not a protocol is
299`secure', we associate with each protocol an `insecurity function' which
300gives an upper bound on the advantage of any adversary attacking the protocol
301within given resource bounds.
32609df3 302\fi
a6e375a6 303
dff0fad2 304\subsection{Formal models for key-exchange}
a6e375a6 305
32609df3
MW
306\ifshort
307
308The first model for studying the \emph{computational} security of
309key-exchange protocols (rather than using protocol-analysis logics like that
310of \cite{Burrows:1989:LA}) was given by Bellare and Rogaway
311\cite{Bellare:1994:EAK}; the model has since been enhanced, both by the
312original authors and others, in \cite{Bellare:1995:PSS,%
313Blake-Wilson:1997:KAP,Blake-Wilson:1998:EAA}. The model defines security
314in terms of a game: key-exchange protocols are secure if an adversary can't
315distinguish the key agreed by a chosen `challenge session' from a key chosen
316independently at random. Other models for key-exchange have been proposed in
317\cite{Bellare:1998:MAD} and \cite{Shoup:1999:OFM}; these use a different
318notion of security, involving implementation of an ideal functionality.
319
320\else
321
dff0fad2
MW
322Many proposed key-exchange protocols have turned out to have subtle security
323flaws. The idea of using formal methods to analyse key-exchange protocols
324begins with the logic of Burrows, Abadi and Needham \cite{Burrows:1989:LA}.
325Their approach requires a `formalising' step, in which one expresses in the
326logic the contents of the message flows, and the \emph{beliefs} of the
327participants.
328
329Bellare and Rogaway \cite{Bellare:1994:EAK} describe a model for studying the
330computational security of authentication and key-exchange protocols in a
331concurrent setting, i.e., where multiple parties are running several
332instances of a protocol simultaneously. They define a notion of security in
333this setting, and show that several simple protocols achieve this notion.
334Their original paper dealt with pairs of parties using symmetric
335cryptography; they extended their definitions in \cite{Bellare:1995:PSS} to
336study three-party protocols involving a trusted key-distribution centre.
337
338Blake-Wilson, Johnson and Menezes \cite{Blake-Wilson:1997:KAP} applied the
339model of \cite{Bellare:1994:EAK} to key-exchange protocols using asymmetric
340cryptography, and Blake-Wilson and Menezes \cite{Blake-Wilson:1998:EAA}
341applied it to protocols based on the Diffie-Hellman protocol.
342
343The security notion of \cite{Bellare:1994:EAK} is based on a \emph{game}, in
344which an adversary nominates a \emph{challenge session}, and is given either
345the key agreed by the participants of the challenge session, or a random
346value independently sampled from an appropriate distribution. The
347adversary's advantage -- and hence the insecurity of the protocol -- is
348measured by its success probability in guessing whether the value it was
349given is really the challenge key. This challenge-session notion was also
350used by the subsequent papers described above.
351
352Bellare, Canetti and Krawczyk \cite{Bellare:1998:MAD} described a pair of
353models which they called the \textsc{am} (for `authenticated links model')
354and \textsc{um} (`unauthenticated links model'). They propose a modular
355approach to the design of key-exchange protocols, whereby one first designs a
356protocol and proves its security in the \textsc{am}, and then applies a
357authenticating `compiler' to the protocol which they prove yields a protocol
358secure in the realistic \textsc{um}. Their security notion is new. They
359define an `ideal model', in which an adversary is limited to assigning
360sessions fresh, random and unknown keys, or matching up one session with
361another, so that both have the same key. They define a protocol to be secure
362if, for any adversary~$A$ in the \textsc{am} or \textsc{um}, there is an
363ideal adversary~$I$, such that the outputs of $A$ and $I$ are computationally
364indistinguishable.
365
366In \cite{Shoup:1999:OFM}, Shoup presents a new model for key-exchange, also
367based on the idea of simulation. He analyses the previous models,
368particularly \cite{Bellare:1994:EAK} and \cite{Bellare:1998:MAD}, and
369highlights some of their inadequacies.
370
32609df3
MW
371\fi
372
dff0fad2 373Canetti and Krawczyk \cite{Canetti:2001:AKE} describe a new notion of
32609df3 374security in the model of \cite{Bellare:1998:MAD}, based on the
dff0fad2
MW
375challenge-session notion of \cite{Bellare:1994:EAK}. The security notion,
376called `SK-security', seems weaker in various ways than those of earlier
377works such as \cite{Bellare:1994:EAK} or \cite{Shoup:1999:OFM}. However, the
378authors show that their notion suffices for constructing `secure channel'
379protocols, which they also define.
380
32609df3 381\ifshort\else
d8139f2c 382In \cite{Canetti:2001:UCS}, Canetti describes the `universal composition'
dff0fad2
MW
383framework. Here, security notions are simulation-based: one defines security
384notions by presenting an `ideal functionality'. A protocol securely
385implements a particular functionality if, for any adversary interacting with
386parties who use the protocol, there is an adversary which interacts with
387parties using the ideal functionality such that no `environment' can
388distinguish the two. The environment is allowed to interact freely with the
389adversary throughout, differentiating this approach from that of
390\cite{Bellare:1998:MAD} and \cite{Shoup:1999:OFM}, where the distinguisher
391was given only transcripts of the adversary's interaction with the parties.
392With security defined in this way, it's possible to prove a `universal
393composition theorem': one can construct a protocol, based upon various ideal
394functionalities, and then `plug in' secure implementations of the ideal
395functionalities and appeal to the theorem to prove the security of the entire
396protocol. The UC framework gives rise to very strong notions of security,
397due to the interactive nature of the `environment' distinguisher.
32609df3 398\fi
dff0fad2
MW
399
400Canetti and Krawczyk \cite{Canetti:2002:UCN} show that the SK-security notion
401of \cite{Canetti:2001:AKE} is \emph{equivalent} to a `relaxed' notion of
32609df3
MW
402key-exchange security in the UC framework\ifshort\space of
403\cite{Canetti:2001:UCS}\fi, and suffices for the construction of UC secure
404channels.
dff0fad2
MW
405
406The result of \cite{Canetti:2002:UCN} gives us confidence that SK-security is
407the `right' notion of security for key-exchange protocols. Accordingly,
408SK-security is the standard against which we analyse our key-exchange
409protocol.
a6e375a6
MW
410
411
412\subsection{Outline of the paper}
413
414The remaining sections of this paper are as follows.
415\begin{itemize}
416\item Section \ref{sec:prelim} provides the essential groundwork for the rest
417 of the paper. It introduces important notation, and describes security
418 notions and intractability assumptions.
419\item Section \ref{sec:zk-ident} describes our zero-knowledge identification
420 protocol and proves its security.
421\item Section \ref{sec:kx} describes the simple version of our key-exchange
422 protocol, and proves its security and deniability. It also describes some
423 minor modifications which bring practical benefits without damaging
32609df3 424 security.
dff0fad2 425\item Finally, section \ref{sec:conc} presents our conclusions.
a6e375a6
MW
426\end{itemize}
427
32609df3
MW
428\ifshort
429The full version of this paper describes how to make our protocols
430identity-based by using bilinear pairings using the techniques introduced in
431\cite{Boneh:2003:IBE}. It also contains proofs of the various theorems
432stated here.
433\fi
434
a6e375a6
MW
435%%%--------------------------------------------------------------------------
436
437\section{Preliminaries}
438\label{sec:prelim}
439
32609df3
MW
440\ifshort
441\subsection{Basics}
442\let\prelimsec\subsubsection
443\else
444\let\prelimsec\subsection
445\fi
446
447\prelimsec{Miscellaneous notation}
a6e375a6
MW
448
449We write $\Func{D}{R}$ for the set of all functions with domain $D$ and range
450$R$.
a6e375a6 451
32609df3 452\prelimsec{Groups}
a6e375a6
MW
453
454Let $(G, +)$ be a cyclic group\footnote{
455 We find that additive group notation is easier to read. In particular, in
456 multiplicative groups, one ends up with many interesting things tucked away
457 in little superscripts.}%
458of prime order $q$, and generated by an element $P$. We shall write the
459identity of $G$ as $0_G$, or simply as $0$ when no ambiguity is likely to
460arise. Thus, we have $\langle P \rangle = G$ and $q P = 0$. Any $X \in G$
32609df3 461can be written as $X = x P$ for some $x \in \{0, 1, \ldots, q - 1\}$.
a6e375a6 462
32609df3
MW
463We consider a cyclic group of order $n$ as a $\Z/n\Z$-module, and in
464particular our group $G$ can be seen as a vector space over $\gf{q}$. This
465makes the notation slightly more convenient.
a6e375a6 466
32609df3 467\prelimsec{Bit strings and encodings}
a6e375a6
MW
468\label{sec:bitenc}
469
470Let $\Bin = \{0, 1\}$ be the set of binary digits. Then $\Bin^n$ is the set
471of $n$-bit strings, and $\Bin^*$ the set of all (finite) bit strings. If $x
472\in \Bin^n$ is a bit string, we write its length as $|x| = n$. For a bit
473string $x \in \Bin^n$, and for $0 \le i < n$, we write $x[i]$ as the $i$th
474bit of $x$. The empty string is denoted $\emptystring$.
475
476Let $x$ and $y$ be two bit strings. If $|x| = |y| = n$, we write $x \xor y$
32609df3
MW
477to mean the bitwise exclusive-or of $x$ and $y$\ifshort\else: if $z = x \xor
478y$ then $|z| = n$, and $z[i] = (x[i] + y[i]) \bmod 2$ for $0 \le i < n$\fi.
479We write $x \cat y$ to mean the concatenation of $x$ and $y$\ifshort\else: if
480$z = x \cat y$ then $|z| = |x| + |y|$ and $z[i] = x[i]$ if $0 \le i < |x|$
481and $z[i] = y[i - |x|]$ if $|x| < i \le |x| + |y|$\fi.
a6e375a6
MW
482
483Finally, we let $\bot$ be a value distinct from any bit string.
484
485We shall want to encode group elements $X \in G$ and indices $x \in I =
32609df3
MW
486\gf{q}$ as bit strings.
487\ifshort
488To this end, we shall assume the existence of efficient, unambiguous
489encodings of group elements as $\ell_G$-bit strings, and indices as
490$\ell_I$-bit strings. To reduce clutter, we shall leave encoding and
491decoding as implicit operations.
492\else
493To this end, we shall assume the existence of
a6e375a6
MW
494integers $\ell_G, \ell_I > 0$ and functions
495\[
496 e_S\colon S \to \Bin^{\ell_S}
497 \quad \textrm{and} \quad
498 d_S\colon \Bin^{\ell_S} \to S \cup \{ \bot \}
499 \qquad
32609df3 500 \textrm{for } S \in \{ G, \F \}.
a6e375a6
MW
501\]
502with the following properties.
503\begin{itemize}
504\item The functions are \emph{unique} and \emph{unambiguous}, i.e., for any
505 $t \in \Bin^{\ell_S}$, we have
506 \[ d_S(t) = \begin{cases}
507	       s    & \text{if there is some $s \in S$ such that $t = e_S(s)$, or} \\
508	       \bot & \text{if no such $s$ exists.}
509 \end{cases}
510 \]
511\item The functions should be \emph{efficient} to compute. Indeed, we shall
512 be assuming that the time taken for encoding and decoding is essentially
513 trivial.
514\end{itemize}
515Note that, as we have defined them, all encodings of group elements are the
516same length, and similarly for encodings of indices. This is necessary for
517the security of our protocols.
518
519We shall frequently abuse notation by omitting the encoding and decoding
520functions where it is obvious that they are required.
32609df3 521\fi
a6e375a6 522
32609df3
MW
523\ifshort\else
524\prelimsec{Games, adversaries, and oracles}
a6e375a6
MW
525\label{sec:games}
526
527Many of the security definitions and results given here make use of
528\emph{games}, played with an \emph{adversary}. An adversary is a
529probabilistic algorithm. In some games, the adversary is additionally
530equipped with \emph{oracles}, which perform computations with values chosen
531by the adversary and secrets chosen by the game but not revealed to the
532adversary. We impose limits on the adversary's resource usage: in
533particular, the total time it takes, and the number of queries it makes to
534its various oracles. Throughout, we include the size of the adversary's
535program as part of its `time', in order to model adversaries which contain
536large precomputed tables.
537
538The games provide models of someone trying to attack a construction or
539protocol. For security, we will either define a notion of `winning' the
540game, and require that all adversaries have only a very small probability of
541winning, or we consider two different games and require that no adversary can
32609df3 542distinguish between the two except with very small probability.
a6e375a6
MW
543
544Our proofs make frequent use of sequences of games; see
545\cite{Shoup:2004:SGT,Bellare:2004:CBG}. The presentation owes much to Shoup
546\cite{Shoup:2004:SGT}. We begin with a game $\G0$ based directly on a
547relevant security definition, and construct a sequence of games $\G1$, $\G2$,
548\dots, each slightly different from the last. We define all of the games in
549a sequence over the same underlying probability space -- the random coins
550tossed by the algorithms involved -- though different games may have slightly
551differently-defined events and random variables. Our goal in doing this is
552to bound the probability of the adversary winning the initial game $\G0$ by
553simultaneously (a) relating the probability of this event to that of
554corresponding events in subsequent games, and (b) simplifying the game until
555the probability of the corresponding event can be computed directly.
556
557The following simple lemma from \cite{Shoup:2001:OR} will be frequently
558useful.
559\begin{lemma}[Difference Lemma]
560 \label{lem:shoup}
32609df3
MW
561 Let $S$, $T$, $F$ be events. Suppose $\Pr[S \mid \bar F] =
562 \Pr[T \mid \bar F]$. Then $|\Pr[S] - \Pr[T]| \le \Pr[F]$.
a6e375a6
MW
563\end{lemma}
564\begin{proof}
565 A simple calculation:
566 \begin{eqnarray*}[rl]
567 |\Pr[S] - \Pr[T]|
32609df3
MW
568 & = |(\Pr[S \mid F]\Pr[F] + \Pr[S \mid \bar F]\Pr[\bar F]) -
569 (\Pr[T \mid F]\Pr[F] + \Pr[T \mid \bar F]\Pr[\bar F])| \\
570 & = \Pr[F] \cdot |\Pr[S \mid F] - \Pr[T \mid F]| \\
a6e375a6
MW
571 & \le \Pr[F]
572 \end{eqnarray*}
573 and we're done!
574\end{proof}
32609df3 575\fi
a6e375a6
MW
576
577
32609df3 578\prelimsec{The random oracle model}
a6e375a6
MW
579\label{sec:ro}
580
32609df3 581\ifshort\else
a6e375a6
MW
582In particular, most of our results will make use of the \emph{random oracle}
583model \cite{Bellare:1993:ROP}, in which all the participants, including the
584adversary, have access to a number of `random oracles'. A random oracle with
585domain $D$ and range $R$ is an oracle which computes a function chosen
586uniformly at random from the set of all such functions. (In the original
587paper \cite{Bellare:1993:ROP}, random oracles are considered having domain
588$\Bin^*$ and range $\Bin^\omega$; we use finite random oracles here, because
589they're easier to work with.)
590
591Given a protocol proven secure in the random oracle model, we can instantiate
592each random oracle by a supposedly-secure hash function and be fairly
593confident that either our protocol will be similarly secure, or one of the
594hash functions we chose has some unfortunate property.
595
596Proofs in the random oracle must be interpreted carefully. For example,
597Canetti, Goldreich and Halevi \cite{Canetti:2004:ROM} show that there are
598schemes which can be proven secure in the random oracle model but provably
599have no secure instantiation in the standard model.
32609df3 600\fi
a6e375a6 601
32609df3
MW
602The random oracle model \ifshort\cite{Bellare:1993:ROP} \fi is useful for
603constructing reductions and simulators for two main reasons.
a6e375a6
MW
604\begin{enumerate}
605\item One can use the transcript of an adversary's queries to random oracles
606 in order to extract knowledge from it.
607\item One can `program' a random oracle so as to avoid being bound by prior
608 `commitments', or to guide an adversary towards solving a selected instance
609 of some problem.
610\end{enumerate}
611Our proofs only make use of the first feature. This becomes particularly
612important when we consider issues of zero-knowledge and deniability in a
613concurrent setting, because we want to be able to claim that we retain these
614features when the random oracle is instantiated using a cryptographic hash
615function, and hash functions definitely aren't `programmable' in this way!
616The former property seems rather more defensible -- one would indeed hope
617that the only sensible way of working out (anything about) the hash of a
618particular string is to actually compute the hash function, and the random
619oracle model is, we hope, just giving us a `hook' into this process.
620
32609df3 621\ifshort\else
a6e375a6
MW
622(Our protocols can be modified to make use of bilinear pairings so as to
623provide identity-based identification and key-exchange, using the techniques
d8139f2c 624of \cite{Boneh:2003:IBE}. Proving the security of the modifications we
a6e375a6
MW
625discuss would involve `programming' random oracles, but this doesn't affect
626the zero-knowledge or deniability of the resulting protocols.)
32609df3 627\fi
a6e375a6
MW
628
629
32609df3
MW
630\ifshort\else
631\prelimsec{Notation for algorithms}
a6e375a6
MW
632
633We shall have occasion to describe algorithms by means of a pseudocode. Our
634choice of pseudocode is unlikely to be particularly controversial. We let $x
635\gets y$ denote the action of setting $x$ to the value $y$; similarly, $x
636\getsr Y$ denotes the action of sampling $x$ from the set $Y$ uniformly at
637random.
638
639The expression $a \gets A^{O(\cdot, x)}(y)$ means `assign to $a$ the value
640output by algorithm $A$ on input $y$, and with oracle access to the algorithm
641which, given input $z$, computes $O(z, x)$'.
642
643We make use of conditional (\IF-\ELSE) and looping (\FOR-\DO and \WHILE-\DO)
644constructions; in order to reduce the amount of space taken up, the bodies of
645such constructions are shown by indentation only.
646
647We don't declare the types of our variables explicitly, assuming that these
648will be obvious by inspection; also, we don't describe our variables' scopes
649explicitly, leaving the reader to determine these from context.
650
651Finally, the notation $\Pr[\textit{algorithm} : \textit{condition}]$ denotes
652the probability that \textit{condition} is true after running the given
653\textit{algorithm}.
32609df3 654\fi
a6e375a6 655
32609df3 656\prelimsec{Diffie-Hellman problems}
a6e375a6
MW
657\label{sec:dhp}
658
659The security of our protocols is related to the hardness of the
660computational, decisional, and gap Diffie-Hellman problems in the group $G$.
661We define these problems and what it means for them to be `hard' here.
662
663The \emph{computational} Diffie-Hellman problem (CDH) is as follows: given
664two group elements $X = x P$ and $Y = y P$, find $Z = x y P$.
32609df3 665\ifshort\else
a6e375a6
MW
666\begin{definition}[The computational Diffie-Hellman problem]
667 Let $(G, +)$ be a cyclic group generated by $P$. For any adversary $A$, we
668 say that $A$'s \emph{success probability} at solving the computational
669 Diffie-Hellman problem in $G$ is
670 \[ \Succ{cdh}{G}(A) =
32609df3 671 \Pr[ x \getsr I; y \getsr \Z/\#G\Z : A(x P, y P) = x y P ]
a6e375a6
MW
672 \]
673 where the probability is taken over the random choices of $x$ and $y$ and
674 any random decisions made by $A$. We say that the \emph{CDH insecurity
675 function} of $G$ is
676 \[ \InSec{cdh}(G; t) = \max_A \Succ{cdh}{G}(A) \]
677 where the maximum is taken over adversaries which complete in time $t$.
678\end{definition}
679Certainly, if one can compute discrete logarithms in the group $G$ (i.e.,
680given $x P$, find $x$), then one can solve the computational Diffie-Hellman
681problem. The converse is not clear, though. Shoup \cite{Shoup:1997:LBD}
682gives us some confidence in the difficulty of the problem by showing that a
683\emph{generic} adversary -- i.e., one which makes no use of the specific
32609df3 684structure of a group -- has success probability no greater than $q^2/\#G$.
a6e375a6
MW
685
686This isn't quite sufficient for our purposes. Our proofs will be able to
687come up with (possibly) a large number of guesses for the correct answer, and
688at most one of them will be correct. Unfortunately, working out which one is
689right seems, in general, to be difficult. This is the \emph{decision}
690Diffie-Hellman problem (DDH), which \cite{Shoup:1997:LBD} shows, in the
691generic group model, is about as hard as CDH. (See \cite{Boneh:1998:DDP} for
692a survey of the decision Diffie-Hellman problem.)
32609df3 693\par\fi
a6e375a6
MW
694Our reference problem will be a `multiple-guess computational Diffie-Hellman
695problem' (MCDH), which is captured by a game as follows. An adversary is
696given a pair of group elements $(x P, y P)$, and an oracle $V(\cdot)$ which
697accepts group elements as input. The adversary wins the game if it queries
698$V(x y P)$.
a6e375a6
MW
699
700\begin{figure}
701 \begin{program}
702 $\Game{mcdh}{G}(A)$: \+ \\
703 $w \gets 0$; \\
32609df3 704 $x \getsr \Z/\#G\Z$; $y \getsr \Z/\#G\Z$; \\
a6e375a6
MW
705 $A^{V(\cdot)}(x P, y P)$; \\
706 \RETURN $w$;
707 \next
708 Function $V(Z)$: \+ \\
709 \IF $Z = x y P$ \THEN \\ \ind
710 $w \gets 1$; \\
711 \RETURN $1$; \- \\
712 \RETURN $0$;
713 \end{program}
714
715 \caption{The multiple-guess computational Diffie-Hellman problem:
716 $\Game{mcdh}{G}(A)$}
717 \label{fig:mcdh}
718\end{figure}
719
32609df3
MW
720\begin{definition}[The multiple-guess computational Diffie-Hellman problem]
721 \label{def:mcdh}
722 Let $(G, +)$ be a cyclic group generated by $P$. For some adversary $A$,
723 we say that $A$'s \emph{success probability} at solving the multiple-guess
724 computational Diffie-Hellman problem in $G$ is
725 \[ \Succ{mcdh}{G}(A) = \Pr[\Game{mcdh}{G}(A) = 1] \]
726 where $\Game{mcdh}{G}(A)$ is shown in figure~\ref{fig:mcdh}. We say that
727 the \emph{MCDH insecurity function of $G$} is
728 \[ \InSec{mcdh}(G; t, q_V) = \max_A \Succ{mcdh}{G}(A) \]
729 where the maximum is taken over adversaries which complete in time $t$ and
730 make at most $q_V$ oracle queries.
731\end{definition}
732\ifshort
733We can (loosely) relate the difficulty of MCDH to the difficulty of
734the standard CDH problem, in which the adversary is allowed only a single
735guess.
736\else
a6e375a6
MW
737Note that our MCDH problem is not quite the `gap Diffie-Hellman problem'
738(GDH). The gap problem measures the intractibility of solving CDH even with
739the assistance of an oracle for solving (restricted) decision Diffie-Hellman
740problems in the group. Specifically, the adversary is given $(X, Y) = (x P,
741y P)$ and tries to find $Z = x y P$, as for CDH, but now it has access to an
742oracle $D(R, S)$ which answers $1$ if $S = x R$ and $0$ otherwise.
743
744Clearly MCDH is at least as hard as GDH, since our simple verification oracle
745$V(Z)$ can be simulated with the gap problem's DDH oracle, as $D(Y, Z)$.
746However, we can (loosely) relate the difficulty of MCDH to the difficulty of
747CDH.
32609df3 748\fi
a6e375a6
MW
749\begin{proposition}[Comparison of MCDH and CDH security]
750 For any cyclic group $(G, +)$,
32609df3
MW
751 \[ \InSec{mcdh}(G; t, q_V) \le
752 \ifshort q_V\,\InSec{mcdh}(G; t + O(q_V), 1) \else
753 q_V\,\InSec{cdh}(G; t + O(q_V)) \fi.
754 \]
a6e375a6 755\end{proposition}
32609df3
MW
756\begin{longproof}{The proof of this proposition may be found in the full
757 version of this paper.}
a6e375a6
MW
758 Let $A$ be an adversary attacking the multiple-guess computational
759 Diffie-Hellman problem in $G$, and suppose that it runs in time $t$ and
760 issues $q_V$ queries to its verification oracle.
761
762 We use a sequence of games. Game $\G0$ is the original MCDH attack game.
763 In each game $\G{i}$, we let the event $S_i$ be the probability that the
764 adversary wins the game.
765
766 Game $\G1$ is the same as $\G0$, except that we change the behaviour of the
767 verification oracle. Specifically, we make the oracle always return $0$.
768 We claim that this doesn't affect the adversary's probability of winning,
769 i.e., $\Pr[S_1] = \Pr[S_0]$. To see this, note that if none of the
770 adversary's $V(\cdot)$ queries was correct, then there is no change in the
771 game; conversely, if any query was correct, then the adversary will have
772 won regardless of its subsequent behaviour (which may differ arbitrarily
773 between the two games).
774
775 We are now ready to construct from $A$ an adversary $B$ attacking the
776 standard computational Diffie-Hellman problem.
777 \begin{program}
778 Adversary $B(X, Y)$: \+ \\
779 $n \gets 0$; \\
780 \FOR $i \in \Nupto{q_V}$ \DO $Q_i \gets 0$; \\
781 $A^{V(\cdot)}$; \\
782 $r \getsr \Nupto{n}$; \\
783 \RETURN $Q_r$;
784 \next
785 Function $D(Z')$: \+ \\
786 $Q_n \gets Z'$; \\
787 $n \gets n + 1$; \\
788 \RETURN $0$;
789 \end{program}
790 Observe that $B$ provides $A$ with an accurate simulation of game $\G1$.
32609df3
MW
791 Moreover, at the end of the algorithm, we have $0 < n \le q_V$, and the
792 values $Q_0$, $Q_1$, \dots, $Q_{n-1}$ are the values of $A$'s oracle
793 queries. Hence, with probability $\Pr[S_1]$, at least one of the $Q_i$
794 is the correct answer to the CDH problem. Let $\epsilon = \Pr[S_1] =
795 \Pr[S_0]$; we claim that $B$'s probability of success is at least
796 $\epsilon/q_V$. The proposition follows directly from this claim and that,
797 because $A$ was chosen arbitrarily, we can maximize and count resources.
a6e375a6
MW
798
799 We now prove the above claim. For $0 \le i < q_V$, let $W_i$ be the
800 event that $Q_i = x y P$, i.e., that $Q_i$ is the correct response. A
801 simple union bound shows that
802 \[ \sum_{0\le i<j} \Pr[W_i \mid n = j] \ge \epsilon. \]
803 We now perform a calculation:
804 \begin{eqnarray*}[rl]
805 \Succ{cdh}{G}(B)
806 & = \sum_{0\le i<q_V} \Pr[W_i \land r = i] \\
807 & = \sum_{0<j\le q_V} \Pr[n = j]
808 \biggl( \sum_{0\le i<j} \Pr[W_i \land r = i \mid n = j] \biggr) \\
809 & = \sum_{0<j\le q_V} \Pr[n = j]
810 \biggl( \frac{1}{j} \sum_{0\le i<j} \Pr[W_i \mid n = j] \biggr) \\
811 &\ge \sum_{0<j\le q_V} \Pr[n = j] \frac{\epsilon}{j} \\
812 &\ge \frac{\epsilon}{q_V} \sum_{0<j\le q_V} \Pr[n = j] \\
813 & = \frac{\epsilon}{q_V}.
814 \end{eqnarray*}
815 This completes the proof.
32609df3 816\end{longproof}
a6e375a6 817
32609df3
MW
818\ifshort\else
819\prelimsec{Example groups and encodings}
a6e375a6
MW
820
821For nonnegative integers $0 \le n < 2^\ell$, there is a natural binary
822encoding $N_\ell\colon \Nupto{2^\ell} \to \Bin^\ell$ which we can define
823recursively as follows.
824\[ N_0(0) = \emptystring \qquad
825 N_\ell(n) = \begin{cases}
826 N_{\ell-1}(n) \cat 0 & if $0 \le n < 2^{\ell-1}$ \\
827 N_{\ell-1}(n - 2^{\ell-1}) \cat 1 & if $2^{\ell-1} \le n < 2^\ell$.
828 \end{cases}
829\]
830Given an encoding $a = N_\ell(n)$ we can recover $n$ as
831\[ n = \sum_{0\le i<\ell} a[i] 2^i. \]
832Hence, given some limit $L \le 2^\ell$, we can encode elements of $\Nupto{L}$
833using the functions $(e, d)$:
834\[ e(L, \ell, n) = N_\ell(n) \qquad
835 d(L, \ell, a) = \begin{cases}
836 N_\ell^{-1}(a) & if $N_\ell^{-1}(a) < L$ \\
837 \bot & otherwise
838 \end{cases}
839\]
840The reader can verify that the functions $e(L, \ell, \cdot)$ and $d(L, \ell,
841\cdot)$ satisfy the requirements of section~\ref{sec:bitenc}.
842
32609df3
MW
843Given some $q$ with $q < 2^{\ell_I}$, then, we can define an encoding
844$(e_\F, d_\F)$ by $e_\F(n) = e(q, \ell_I, n)$ and $d_\F(a) = d(q, \ell_I,
845a)$.
846
847Let $p$ and $q$ be primes, with $q \mid (p - 1)$. Then there is an order-$q$
848subgroup of $\F_p^*$. In practice, an order-$q$ element can be found easily
849 by taking elements $h \in \F_p^*$ at random and computing $g = h^{(p-1)/q}$
850until $g \ne 1$; then $G = \langle g \rangle$ is a group of $q$ elements.
851Assuming that $p$ and $q$ are sufficiently large, the Diffie-Hellman problems
852seem to be difficult in $G$. Some texts recommend additional restrictions on
853$p$, in particular that $(p - 1)/2q$ be either prime or the product of large
854primes. Primes of this form protect against small-subgroup attacks; but our
855protocols are naturally immune to these attacks, so such precautions are
856unnecessary here. Elements of $G$ can be encoded readily, since each element
857$n + p\Z$ of $\F_p = \Z/p\Z$ has an obvious `representative' integer $n$ such
858that $0 \le n < p$, and given $2^{\ell_G} > p$, we can encode $n$ as $e(p,
859\ell_G, n)$, as above.
a6e375a6
MW
860
861Alternatively, let $\F = \gf{p^f}$ be a finite field, and $E$ be an elliptic
862curve defined over $\F$ such that the group $E(\F)$ of $\F$-rational points
863of $E$ has a prime-order cyclic subgroup $G$. Elements of $G$ can be
864 represented as pairs of elements of $\F$. If $f = 1$, i.e., $\F = \Z/p\Z$,
865 then field elements can be encoded as above. If $p = 2$, we can represent
866 the field as $\F_2[x]/(p(x))$ for some irreducible polynomial $p(x) \in \F_2[x]$
867of degree $f$. An element $r \in \F$ can then be represented by a polynomial
868$r(x)$ with degree less than $f$, and coefficients $c_i \in \{0, 1\}$, i.e.,
869\[ r(x) = \sum_{0\le i<f} c_i x^i \]
870and hence we can uniquely encode $r$ as an $f$-bit string $a$ such that $a[i]
871= c_i$.
32609df3 872\fi
a6e375a6
MW
873
874
32609df3 875\prelimsec{Symmetric encryption}
a6e375a6
MW
876\label{sec:sym-enc}
877
878Our key-exchange protocol requires a symmetric encryption scheme. Our
879definition is fairly standard, except that, rather than specifying a
880key-generation algorithm, we assume that key generation simply involves
881selecting a string of a given length uniformly at random.
882\begin{definition}[Symmetric encryption schemes]
883 A \emph{symmetric encryption scheme} $\E = (\kappa, E, D)$ consists of:
884 \begin{itemize}
885 \item an integer $\kappa \ge 0$,
886 \item a randomized \emph{encryption algorithm} $E$ which, on input $K \in
887 \Bin^\kappa$ and $p \in \Bin^*$ outputs some $c \in \Bin^*$, written $c
888 \gets E_K(p)$;
889 \item a \emph{decryption algorithm} $D$ which, on input $K \in \Bin^\kappa$
890 and $c \in \Bin^*$ outputs some $p' \in \Bin^* \cup \{\bot\}$, written
891 $p' \gets D_K(c)$.
892 \end{itemize}
893 Furthermore, a symmetric encryption scheme must be \emph{sound}: that is,
894 if $c \gets E_K(p)$ for some $K \in \Bin^\kappa$ and $p \in \Bin^*$, and
895 $p' \gets D_K(c)$ then $p = p'$.
896\end{definition}
897Our security notion for symmetric encryption is the standard notion of
898left-or-right indistinguishability of ciphertexts under chosen-ciphertext
899attack.
32609df3 900\begin{definition}[IND-CCA]
dff0fad2 901 \label{def:ind-cca}
a6e375a6
MW
902 Let $\E = (\kappa, E, D)$ be a symmetric encryption scheme, and $A$ be an
903 adversary. Let $\id{lr}_b(x_0, x_1) = x_b$ for $b \in \{0, 1\}$. Let
904 \[ P_b =
dff0fad2
MW
905 \Pr[K \getsr \Bin^\kappa;
906 b \gets A^{E_K(\id{lr}_b(\cdot, \cdot)), D_K(\cdot)}() :
907 b = 1]
a6e375a6
MW
908 \]
909 An adversary is \emph{valid} if
910 \begin{itemize}
911 \item for any query to its encryption oracle $E_K(\id{lr}_b(x_0, x_1))$ we
912 have $|x_0| = |x_1|$, and
913 \item no query to the decryption oracle $D_K(\cdot)$ is equal to any reply
914 from an encryption query.
915 \end{itemize}
916 If $A$ is valid, then we define its \emph{advantage} in attacking the
917 security of $\E$ as follows
918 \[ \Adv{ind-cca}{\E}(A) = P_1 - P_0. \]
919 Further, we define the \emph{IND-CCA insecurity function of $\E$} to be
920 \[ \InSec{ind-cca}(\E; t, q_E, q_D) = \max_A \Adv{ind-cca}{\E}(A) \]
921 where the maximum is taken over all valid adversaries $A$ which run in time
922 $t$, and issue at most $q_E$ encryption and $q_D$ decryption queries.
923\end{definition}
924
925
926\subsection{Simulations}
927\label{sec:sim}
928
929In section~\ref{sec:zk-ident}, we shall prove that our identification
930protocol is zero-knowledge; in section~\ref{sec:denial}, we show that our
931key-exchange protocol is deniable. In both of these proofs, we shall need to
32609df3
MW
932demonstrate \emph{simulatability}.
933
934\ifshort
935
936We consider an adversary~$A$ interacting with a `world'~$W$; we model both as
937probabilistic algorithms. Both $A$ and~$W$ are given a common input~$c$; the
938world is additionally given a private input~$w$; these are chosen by a
939randomized initialization function $I$. The adversary is additionally given
940an auxiliary input~$u$ computed from $w$ by a randomized algorithm~$U$. All
941these algorithms -- the adversary and the world, but also the initialization
942and auxiliary-input algorithms $I$ and~$U$ -- have access to a number of
943random oracles $\mathcal{H} = (H_0, H_1, \ldots, H_{n-1})$. The adversary
944eventually decides to stop interacting, and produces an output~$a$.
945
946A \emph{simulator} for $A$'s interaction with $W$ is an algorithm $S^A$ which
947attempts to produce a similar output distribution, but without interacting
948with $W$. The simulator is given the same inputs $(c, u)$ as we gave
949to~$A$, and $S$ is also allowed to query the random oracles~$\mathcal{H}$.
950
951To measure the effectiveness of a simulator, we consider a distinguisher~$D$
952which is given $(c, u, a)$, and access to $\mathcal{H}$, and returns a bit
953$b$ representing its verdict as to whether the output $a$ was produced by the
954adversary or the simulator.
955
956\else
a6e375a6
MW
957
958\subsubsection{General framework}
959Consider a game in which an adversary~$A$ interacts with some `world'~$W$,
960which we shall represent as a probabilistic algorithm. The world may in fact
961represent a number of honest parties communicating in a concurrent fashion,
962but we can consider them as a single algorithm for our present purposes.
963
964Initially the world and the adversary are both given the same \emph{common
965input}~$c$; in addition, the world is given a \emph{private input}~$w$.
966Both $c$ and~$w$ are computed by an \emph{initialization function}~$I$, which
967is considered to be part of the definition of the game. Finally, the
968adversary decides somehow that it has finished interacting, and outputs a
969value~$a$. All this we notate as
970\[ (w, c) \gets I(); a \gets A^{W(w, c)}(c). \]
971This game is \emph{simulatable} if there is an algorithm~$S$ -- the
972\emph{simulator} -- which can compute the same things as~$A$, but all by
973itself without interacting with the world. That is, we run the simulator on
974the common input~$c$, allowing it to interact in some way with the
975adversary~$A$, and finally giving us an output~$s$.
976\[ (w, c) \gets I(); s \gets S^A(c). \]
977We shall say that the simulator is \emph{effective} if it's difficult to tell
978whether a given string was output by the adversary after interacting with the
979world, or by the simulator running by itself. That is, for any algorithm~$D$
980-- a \emph{distinguisher} -- running in some bounded amount of time, its
981advantage
982\begin{spliteqn*}
983 \Pr[(w, c) \gets I(); a \gets A^{W(w, c)}(c);
984 b \gets D(c, a) : b = 1] - {} \\
985 \Pr[(w, c) \gets I(); s \gets S^A(c); b \gets D(c, s) : b = 1]
986\end{spliteqn*}
987is small. (Note that we gave the distinguisher the common input as well as
988the output of the adversary or the simulator.)
989
990It's usual to study \emph{transcripts} of interactions in these kinds of
991settings. We are considering arbitrary adversarial outputs here, so this
992certainly includes adversaries which output a transcript of their
993interactions. Indeed, for any adversary~$A$, we could construct an
994adversary~$A_T$ which performs the same computation, and outputs the same
995result, but also includes a complete transcript of $A$'s interaction with the
996world. Therefore we're just providing additional generality.
997
998\subsubsection{Random oracles}
999We shall be considering interactions in which all the parties have access to
1000several random oracles. We could simply say that the random oracles are part
1001of the world~$W$. In the setting described above, only the adversary
1002actually interacts with the world (and therefore would be able to query
1003random oracles). The simulator would be forced to `make up' its own random
1004oracle, and the distinguisher would have to study the distributions of the
1005random-oracle queries and their responses to make up its mind about which it
1006was given.
1007
1008However, this would be a poor model for the real world, since once we
1009instantiate the random oracle with a hash function, we know that everyone
1010 would actually be able to compute the hash function for themselves. Thus
1011a distinguisher in the real world would be able to tell the difference
1012immediately between a real interaction and the simulated transcript, since
1013the `random oracle' queries recorded in the latter would be wrong!
1014
1015Therefore we decide not to include the random oracles as part of the world,
1016but instead allow all the participants -- adversary, simulator and
1017distinguisher -- access to them. If we denote by~$\mathcal{H} = (H_0, H_1,
1018\ldots, H_{n-1})$ the collection of random oracles under consideration, the
1019expression for the distinguisher's advantage becomes
1020\begin{spliteqn*}
1021 \Pr[(w, c) \gets I(); a \gets A^{W(w, c), \mathcal{H}}(c);
1022 b \gets D^{\mathcal{H}}(c, a) : b = 1] - {} \\
1023 \Pr[(w, c) \gets I(); s \gets S^{A, \mathcal{H}}(c);
1024 b \gets D^{\mathcal{H}}(c, s) : b = 1].
1025\end{spliteqn*}
1026
1027\subsubsection{Auxiliary inputs}
1028If an adversary's output can be effectively simulated, then we can
1029confidently state that the adversary `learnt' very little of substance from
1030its interaction, and certainly very little it can \emph{prove} to anyone
1031else. However, as we have described the setting so far, we fix an adversary
1032before we choose inputs to the world, so our model says little about what an
1033adversary which has already acquired some knowledge might learn beyond that.
1034For example, an adversary might overhear some other conversation between
1035honest parties and be able to use this to its advantage.
1036
1037To this end, we give the adversary an \emph{auxiliary input}~$u$, computed by
1038an algorithm~$U$. We give $U$ both $c$ and $w$, in order to allow the
1039adversary to gain some (possibly partial) knowledge of the secrets of the
1040other parties. We also allow $U$ access to the random oracles~$\mathcal{H}$,
1041because clearly in the `real world' it would be ridiculous to forbid such an
1042algorithm from computing a publicly-known hash function.
1043
1044The simulator and distinguisher are also given the auxiliary input. The
1045simulator is meant to represent the adversary's ability to compute things on
1046its own, without interacting with the world, and since the adversary is given
1047the auxiliary input, the simulator must be too. The distinguisher must be
32609df3
MW
1048given the auxiliary input because otherwise the simulator could just `make
1049up' plausible-looking inputs.
1050
1051\fi
1052
1053\ifshort
1054Because we're interested in a concrete, quantitative analysis, we must
1055constrain the resource usage of the various algorithms described above.
1056Specifically, we shall be interested in
1057\else
a6e375a6
MW
1058
1059\subsubsection{Resource limits}
1060We shall not allow our algorithms to perform completely arbitrary
1061computations and interactions. Instead, we impose limits on the amount of
1062time they are allowed to take, the number of random-oracle queries they make,
1063and so on. Specifically, we are interested in
32609df3 1064\fi
a6e375a6
MW
1065\begin{itemize}
1066\item the time $t_A$ taken by the adversary and $t_D$ taken by the
1067 distinguisher,
1068\item the number of oracle queries $\mathcal{Q}_A = (q_{A,0}, q_{A,1},
1069 \ldots, q_{A,n-1})$ made by the adversary, and $\mathcal{Q}_D$ made by the
1070 distinguisher,
1071\item a number of resource bounds $\mathcal{R}$ on the adversary's
1072 interaction with the world (e.g., number of messages of various kinds sent
1073 and received), and
1074\item a number of bounds $\mathcal{U}$ on the contents of the adversary's
1075 auxiliary input~$u$.
1076\end{itemize}
1077Sometimes we shall not be interested in proving simulatability of adversaries
1078with auxiliary inputs. We write $\mathcal{U} = 0$ to indicate that auxiliary
32609df3
MW
1079input is not allowed.
1080
1081\ifshort\else
a6e375a6
MW
1082
1083\subsubsection{World syntax}
1084It will be worth our while being more precise about what a `world' actually
1085is, syntactically. We define a world to be a single, randomized algorithm
1086taking inputs $(\iota, \sigma, \tau, \mu) \in (\Bin^*)^4$; the algorithm's
1087output is a pair $(\sigma', \rho) \in (\Bin^*)^2$. We show how the
1088adversary's interaction is mapped on to this world algorithm in
1089figure~\ref{fig:sim-world}.
1090\begin{itemize}
1091\item The `input' $\iota$ is the result of the initialization function~$I$.
1092 That is, it is the pair $(w, c)$ of the world's private input and the
1093 common input.
1094\item The `state' $\sigma$ is empty on the world's first invocation; on each
1095 subsequent call, the value of the world's output $\sigma'$ is passed back.
1096 In this way, the world can maintain state.
32609df3 1097\item The `type $\tau$ is a token giving the type of invocation this is.
a6e375a6
MW
1098\item The `message' $\mu$ is any other information passed in; its form will
1099 typically depend on the type~$\tau$ of the invocation.
1100\item The `new state' $\sigma'$ is the value of $\sigma$ to pass to the next
1101 invocation of the world.
1102 \item The `reply' $\rho$ is the actual output of the invocation.
1103\end{itemize}
1104There are two special invocation types. The adversary is \emph{forbidden}
1105from making special invocations.
1106\begin{itemize}
1107\item The special invocation type $\cookie{init}$ is used to allow the world to
1108 prepare an initial state. The world is invoked as
1109 \[ W^{\mathcal{H}}(\iota, \emptystring, \cookie{init}, \emptystring) \]
1110 and should output an initial state $\sigma'$. The world's reply $\rho$ is
1111 ignored. (Recall that $\emptystring$ represents the empty string.)
1112\item The special invocation type $\cookie{random}$ is used to inform the
1113 world that the adversary has issued a random oracle query. The world is
1114 invoked as
1115 \[ W^{\mathcal{H}}(\iota, \sigma, \cookie{random}, (i, x, h)) \]
1116 to indicate that the adversary has queried its random oracle $H_i(\cdot)$
1117 on the input $x$, giving output~$h$. The world may output an updated state
1118 $\sigma'$; its reply $\rho$ is ignored.
1119\end{itemize}
1120The latter special query is a technical device used to allow the `fake-world'
1121simulators we define below to be aware of the adversary's random oracle
1122queries without being able to `program' the random oracle. Including it here
1123does little harm, and simplifies the overall exposition.
1124
1125\begin{figure}
1126 \begin{program}
1127 Interaction $A^{W(w, c), \mathcal{H}}(c, u)$: \+ \\
1128 $(\sigma, \rho) \gets
1129 W((w, c), \emptystring, \cookie{init}, \emptystring)$; \\
1130 $a \gets A^{\id{world}(\cdot, \cdot),
1131 \id{random}(\cdot, \cdot)}(c, u)$; \\
1132 \RETURN $a$;
1133 \newline
1134 Function $\id{world}(\tau, \mu)$: \+ \\
1135 \IF $\tau \in \{\cookie{init}, \cookie{random}\}$ \THEN
1136 \RETURN $\bot$; \\
1137 $(\sigma, \rho) \gets W((w, c), \sigma, \tau, \mu)$; \\
1138 \RETURN $\rho$; \-
1139 \next
1140 Function $\id{random}(i, x)$: \+ \\
1141 $h \gets H_i(x)$; \\
1142 $(\sigma, \rho) \gets
1143 W((w, c), \sigma, \cookie{random}, (i, x, h))$; \\
1144 \RETURN $h$;
1145 \end{program}
1146
1147 \caption{Interacting with a world: Interaction $A^{W, \mathcal{H}}$}
1148 \label{fig:sim-world}
1149\end{figure}
1150
1151\subsubsection{Definitions}
1152We are now ready to begin making definitions.
32609df3
MW
1153\fi
1154
a6e375a6
MW
1155\begin{definition}[Simulation security]
1156 \label{def:sim}
1157 Consider the game described above, with the initialization function~$I$,
1158 and the world~$W$: let $A$ be an adversary, and let~$U$ be an
1159 auxiliary-input function; let $S$ be a simulator, and let $D$ be a
1160 distinguisher. We define $D$'s \emph{advantage against $S$'s simulation of
1161 $A$'s interaction with~$W$ with auxiliary inputs provided by~$U$} to be
1162 \[ \Adv{sim}{W, I, S}(A, U, D) =
1163 \Pr[\Game{real}{W, I, S}(A, U, D) = 1] -
1164 \Pr[\Game{sim}{W, I, S}(A, U, D)= 1]
1165 \]
1166 where the games are as shown in figure~\ref{fig:sim}.
1167 Furthermore, we define the \emph{simulator's insecurity function} to be
1168 \[ \InSec{sim}(W, I, S;
1169 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U}) =
1170 \max_{D, A, U} \Adv{sim}{W, I, S}(A, U, D)
1171 \]
1172 where the maximum is taken over all distinguishers~$D$ running in
1173 time~$t_D$ and making at most $\mathcal{Q}_D$ random-oracle queries, and
1174 all adversaries~$A$ running in time~$t_A$, making at most $\mathcal{Q}_A$
1175 random-oracle queries, not exceeding the other stated resource
1176 bounds~$\mathcal{R}$ on its interaction with~$W$, and auxiliary-input
1177 functions producing output not exceeding the stated bounds~$\mathcal{U}$.
1178\end{definition}
1179\begin{remark}
1180 The usual definitions of zero-knowledge, for example, require the simulator
1181 to work for all choices of inputs (common, private and auxiliary), rather
1182 than for random choices. Our definition therefore looks weaker. Our proof
1183 of zero-knowledge actually carries through to the traditional
1184 stronger-looking definition. Critically, however, the standard
1185 universal quantification over inputs fails to capture deniability in the
1186 random oracle model, since the inputs can't therefore depend on the random
1187 oracle. Our formulation therefore actually gives \emph{stronger}
32609df3 1188 deniability than the usual one.
a6e375a6
MW
1189\end{remark}
1190
1191\begin{figure}
1192 \begin{program}
1193 $\Game{real}{W, I, S}(A, U, D)$: \+ \\
1194 $(w, c) \gets I()$; \\
1195 $u \gets U^{\mathcal{H}}(w, c)$; \\
1196 $a \gets A^{W(w, c), \mathcal{H}}(c, u)$; \\
1197 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1198 \RETURN $b$;
1199 \next
1200 $\Game{sim}{W, I, S}(A, U, D)$: \+ \\
1201 $(w, c) \gets I()$; \\
1202 $u \gets U^{\mathcal{H}}(w, c)$; \\
1203 $s \gets S^{A, \mathcal{H}}(c, u)$; \\
1204 $b \gets D^{\mathcal{H}}(c, u, s)$; \\
1205 \RETURN $b$;
1206 \end{program}
1207
1208 \caption{Games for simulation: $\Game{real}{W, I, S}$ and $\Game{sim}{W, I, S}$}
1209 \label{fig:sim}
1210\end{figure}
1211
32609df3 1212\ifshort\else
a6e375a6
MW
1213\subsubsection{Fake-world simulators}
1214The simulators we shall be considering in the present paper are of a specific
1215type which we call `fake-world simulators'. They work by running the
1216adversary in a fake `cardboard cut-out' world, and attempting to extract
1217enough information from the adversary's previous interactions and random
1218oracle queries to maintain a convincing illusion.
1219
1220That is, the behaviour of a fake-world simulator~$S$ is simply to allow the
1221adversary to interact with a `fake world'~$W'$, which was not given the world
1222private input. That is, there is some world $W'$ such that
1223\[ S^{A, \mathcal{H}}(c, u) \equiv A^{W'(u, c), \mathcal{H}}(c, u) \]
1224Fake-world simulators are convenient because they allow us to remove from
1225consideration the distinguisher~$D$ as the following definition shows.
1226\begin{definition}[Fake-world simulation security]
1227 \label{def:fakesim}
1228 Let $I$, $W$ and $U$ be as in definition~\ref{def:sim}. Let $A$ be an
1229 adversary which outputs a single bit. Let $S$ be a fake-world simulator.
1230 We define $A$'s \emph{advantage against $S$'s fake-world simulation of $W$
1231 with auxiliary inputs provided by~$U$} to be
1232 \begin{spliteqn*}
1233 \Adv{fw}{W, I, S}(A, U) =
1234 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1235 b \gets A^{W(w, c), \mathcal{H}}(c, u) : b = 1] - {} \\
1236 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1237 b \gets S^{A, \mathcal{H}}(c, u) : b = 1]
1238 \end{spliteqn*}
1239 Furthermore, we define the \emph{simulator's insecurity function} to be
1240 \[ \InSec{fw}(W, I, S;
1241 t, \mathcal{Q}, \mathcal{R}, \mathcal{U}) =
1242 \max_{A, U} \Adv{fw}{W, I, S}(A, U)
1243 \]
1244 where the maximum is taken over all adversaries~$A$ running in time~$t$,
1245 making at most $\mathcal{Q}$ random-oracle queries, not exceeding the other
1246 stated resource bounds~$\mathcal{R}$ on its interaction with~$W$, and
1247 auxiliary-input functions producing output not exceeding the stated
1248 bounds~$\mathcal{U}$.
1249\end{definition}
1250It remains for us to demonstrate that this is a valid way of analysing
1251simulators; the following simple proposition shows that this is indeed the
1252case.
1253\begin{proposition}[Fake-world simulation]
1254 \label{prop:fakesim}
1255 Let $I$ be an initialization function and let $W$ be a world. Then, for
1256 any fake-world simulator~$S$,
1257 \[ \InSec{sim}(W, I, S; t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A,
1258 \mathcal{R}, \mathcal{U}) \le
1259 \InSec{fw}(W, I, S; t_A + t_D, \mathcal{Q}_D + \mathcal{Q}_A,
1260 \mathcal{R}, \mathcal{U})
1261 \]
1262 (where addition of query bounds $\mathcal{Q}$ is done elementwise).
1263\end{proposition}
1264\begin{proof}
1265 Let $W$ and $I$ as in the proposition statement be given; also let a
1266 distinguisher~$D$ running in time~$t_D$ and making $\mathcal{Q}_D$
1267 random-oracle queries, an adversary~$A$ running in time~$t_A$ and making
1268 $\mathcal{Q}_A$ random-oracle queries and interacting with its world within
1269 the stated bounds~$\mathcal{R}$, an auxiliary-input function~$U$ satisfying
1270 the constraints~$\mathcal{U}$ on its output, and a fake-world simulator~$S$
1271 all be given.
1272
1273 We construct an adversary~$B$ outputting a single bit as follows
1274 \begin{program}
1275 Adversary $B^{W, \mathcal{H}}(c, u)$: \+ \\
1276 $a \gets A^{W, \mathcal{H}}(c, u)$; \\
1277 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1278 \RETURN $b$;
1279 \end{program}
1280 A glance at definitions \ref{def:sim} and~\ref{def:fakesim} and the
1281 resources used by $B$ shows that
1282 \[ \Adv{sim}{W, I, S}(A, U, D) = \Adv{fw}{W, I, S}(B, U)
1283 \le \InSec{fw}(W, I, S; t_D + t_A, \mathcal{Q}_D + \mathcal{Q}_A,
1284 \mathcal{R}, \mathcal{U})
1285 \]
1286 as required.
1287\end{proof}
32609df3 1288\fi
a6e375a6
MW
1289
1290%%%--------------------------------------------------------------------------
1291
1292\section{A zero-knowledge identification scheme}
1293\label{sec:zk-ident}
1294
1295
1296\subsection{Description}
1297
1298Here we present a simple zero-knowledge identification scheme. Fix some
32609df3
MW
1299group $G$ with prime order $q = \#G$. Suppose Alice chooses a private key $x
1300\inr \gf{q}$, and publishes the corresponding public key $X = x P$. Let
1301$H_I\colon G^2 \to \Bin^{\ell_I}$ be a secure hash function. Here's a simple
1302protocol which lets her prove her identity to Bob.
a6e375a6 1303\begin{enumerate}
32609df3
MW
1304\item Bob selects a random $r \inr \gf{q}$, and computes $R = r P$, $Y = r X$,
1305 and $c = r \xor H_I(R, Y)$. He sends the pair $(R, c)$ to Alice as his
1306 \emph{challenge}.
a6e375a6
MW
1307\item Alice receives $(R, c)$. She computes $Y' = x R$ and $r' = c \xor
1308 H_I(R, Y')$, and checks that $R = r' P$. If so, she sends $Y'$ as her
1309 \emph{response}; otherwise she sends $\bot$.
1310\item Bob receives $Y'$ from Alice. He checks that $Y' = Y$. If so, he
1311 accepts that he's talking to Alice; otherwise he becomes suspicious.
1312\end{enumerate}
1313We name this the Wrestlers Identification Protocol in~$G$, $\Wident^G$ (we
1314drop the superscript to refer to the protocol in general, or when no
1315ambiguity is likely to result). A summary is shown in
1316figure~\ref{fig:wident}.
1317
1318\begin{figure}
1319 \begin{description}
32609df3 1320 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6 1321 $H_I(\cdot, \cdot)$ is a secure hash.
32609df3 1322 \item[Private key] $x \inr \gf{q}$.
a6e375a6 1323 \item[Public key] $X = x P$.
32609df3 1324 \item[Challenge] $(R, c)$ where $r \inr \gf{q}$, $R = r P$, $c = r \xor
a6e375a6
MW
1325 H_I(R, r X)$.
1326 \item[Response] $x R = r X$ if $R = (c \xor H_I(R, x R)) P$; otherwise
1327 $\bot$.
1328 \end{description}
1329
1330 \caption{Summary of the Wrestlers Identification Protocol, $\Wident$}
1331 \label{fig:wident}
1332\end{figure}
1333
1334
1335\subsection{Security}
1336
1337In order to evaluate the security of our protocol, we present a formal
1338description of the algorithms involved in figure~\ref{fig:wident}. Here, the
1339hash function $H_I(\cdot, \cdot)$ is modelled as a random oracle.
1340
1341\begin{figure}
1342 \begin{program}
1343 Function $\id{setup}()$: \+ \\
32609df3 1344 $x \getsr \gf{q}$; \\
a6e375a6
MW
1345 $X \gets x P$; \\
1346 \RETURN $(x, X)$;
32609df3 1347 \ifshort\newline\else\next\fi
a6e375a6 1348 Function $\id{challenge}^{H_I(\cdot, \cdot)}(X)$: \+ \\
32609df3 1349 $r \getsr \gf{q}$; \\
a6e375a6
MW
1350 $R \gets r P$; $Y \gets r X$; \\
1351 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1352 \RETURN $(Y, R, c)$; \- \\[\medskipamount]
1353 Function $\id{verify}(Y, Y')$: \+ \\
1354 \IF $Y' = Y$ \THEN \RETURN $1$; \\
1355 \RETURN $0$;
1356 \next
1357 Function $\id{response}^{H_I(\cdot, \cdot)}(R, c, x)$: \+ \\
1358 $Y' \gets x R$; \\
1359 $h \gets H_I(R, Y')$; $r' \gets c \xor h$; \\
1360 \IF $R \ne r' P$ \THEN \RETURN $\bot$; \\
1361 \RETURN $Y'$;
1362 \end{program}
1363
1364 \caption{Functions implementing $\Wident$ in the random oracle model}
1365 \label{fig:wident-ro}
1366\end{figure}
1367
1368\subsubsection{Completeness}
1369Suppose that Bob really is talking to Alice. Note that $Y' = x R = x (r P) =
1370 r (x P) = r X = Y$. Hence $r' = c \xor H_I(R, Y') = c \xor H_I(R, Y) = r$,
1371so $r' P = r P = R$, so Alice returns $Y' = Y$ to Bob. Therefore $\Wident$
1372is \emph{complete}: if Bob really is communicating with Alice then he
1373accepts.
1374
1375\subsubsection{Soundness}
1376We next show that impersonating Alice is difficult. The natural way to prove
1377this would be to give an adversary a challenge and prove that its probability
1378of giving a correct response is very small. However, we prove a stronger
1379result: we show that if the adversary can respond correctly to any of a large
1380collection of challenges then it can solve the MCDH problem.
1381
1382Consider the game $\Game{imp}{\Wident}$ shown in
1383figure~\ref{fig:wident-sound}. An adversary's probability of successfully
1384impersonating Alice in our protocol, by correctly responding to any one of
1385$n$ challenges, is exactly its probability of winning the game (i.e., causing
1386it to return $1$).
1387
1388\begin{figure}
1389 \begin{program}
1390 $\Game{imp-$n$}{\Wident}(A)$: \+ \\
1391 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$; \\
1392 $(x, X) \gets \id{setup}()$; \\
1393 $\id{win} \gets 0$; \\
1394 $\Xid{R}{map} \gets \emptyset$; \\
1395 $\mathbf{c} \gets \id{challenges}(n)$; \\
1396 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1397 (X, \mathbf{c})$; \\
1398 \RETURN $\id{win}$;
1399 \newline
1400 Function $\id{challenges}(n)$: \+ \\
1401 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1402 $(Y, R, c) \gets \id{challenge}^{H_I(\cdot, \cdot)}(X)$; \\
1403 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto Y \}$; \\
1404 $\mathbf{c}[i] \gets (R, c)$; \- \\
1405 \RETURN $\mathbf{c}$; \next
1406 Function $\id{check}(R', Y')$: \+ \\
1407 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1408 $Y \gets \Xid{R}{map}(R')$; \\
1409 \IF $\id{verify}(Y, Y')$ \THEN \\ \ind
1410 $\id{win} \gets 1$; \\
1411 \RETURN $1$; \- \\
1412 \RETURN $0$;
1413 \end{program}
1414
1415 \caption{Soundness of $\Wident$: $\Game{imp-$n$}{\Wident}(A)$}
1416 \label{fig:wident-sound}
1417\end{figure}
1418
1419\begin{theorem}[Soundness of $\Wident$]
1420 \label{thm:wident-sound}
1421 Let $A$ be any adversary running in time $t$ and making $q_I$ queries to
1422 its random oracle, and $q_V$ queries to its verification oracle. Let $G$
1423 be a cyclic group. Then
1424 \[ \Pr[\Game{imp-$n$}{\Wident^G}(A) = 1] \le
1425 \InSec{mcdh}(G; t', q_I + q_V)
1426 \]
1427 where $t' = t + O(q_I) + O(q_V)$.
1428\end{theorem}
1429\begin{remark}
1430 Note that the security bound here is \emph{independent} of the value of
1431 $n$.
1432\end{remark}
32609df3
MW
1433\begin{longproof}{The proof of this theorem can be found in the full version
1434 of the paper.}
a6e375a6
MW
1435 We prove this by defining a sequence of games $\G{i}$. The first will be
1436 the same as the attack game $\Game{imp-$n$}{\Wident}(A)$ and the others
1437 will differ from it in minor ways. In each game $\G{i}$, let $S_i$ be the
1438 event that $A$ wins the game -- i.e., that it successfully impersonates the
1439 holder of the private key~$x$.
32609df3 1440
a6e375a6
MW
1441 Let game $\G0$ be the attack game $\Game{imp}{\Wident}(A)$, and let $(R',
1442 Y')$ be the output of $A$ in the game.
1443
1444 We define a new game $\G1$ which is the same as $\G0$, except that we query
1445 the random oracle $H_I$ at $(R', Y')$ whenever the adversary queries
1446 $\id{check}(R', Y')$. (We don't charge the adversary for this.) This
1447 obviously doesn't affect the adversary's probability of winning, so
1448 $\Pr[S_1] = \Pr[S_0]$.
1449
1450 Game $\G2$ is like $\G1$, except that we change the way we generate
1451 challenges and check their responses. Specifically, we define new functions
1452 $\id{challenges}_2$ and $\id{check}_2$, as shown in
1453 figure~\ref{fig:wident-sound-2}.
1454
1455 \begin{figure}
1456 \begin{program}
1457 Function $\id{challenges}_2(n)$: \+ \\
1458 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1459 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1460 $r \getsr I$; $R \gets r R^*$; $Y \gets r Y^*$; \\
1461 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1462 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1463 $\mathbf{c}[i] \gets (R, c)$; \- \\
1464 \RETURN $\mathbf{c}$;
1465 \next
1466 Function $\id{check}_2(R', Y')$: \+ \\
1467 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1468 $r \gets \Xid{R}{map}(R')$; \\
1469 \IF $\id{verify}(Y^*, Y'/r)$ \THEN \\ \ind
1470 $\id{win} \gets 1$; \\
1471 \RETURN $1$; \- \\
1472 \RETURN $0$;
1473 \end{program}
1474
1475 \caption{Soundness of $\Wident$: $\id{challenges}_2$ and $\id{check}_2$}
1476 \label{fig:wident-sound-2}
1477 \end{figure}
1478
1479 While we're generating and checking challenges in a more complicated way
1480 here, we're not actually changing the distribution of the challenges, or
1481 changing the winning condition. Hence $\Pr[S_2] = \Pr[S_1]$.
1482
1483 Now we change the rules again. Let $\G3$ be the same as $\G2$ except that
1484 we change the winning condition. Instead, we say that the adversary wins
1485 if any of the queries to its random oracle $H_I(R', Y')$ would be a correct
1486 response -- i.e., $\id{check}_2(R', Y')$ would return $1$. Since we query
1487 the oracle on $(R', Y')$ on its behalf at the end of the game, no adversary
1488 can do worse in this game than it does in $\G2$, so we have $\Pr[S_3] \ge
1489 \Pr[S_2]$. (It's not hard to see that this only helps quite stupid
1490 adversaries. We can transform any adversary into one for which equality
1491 holds here.)
1492
1493 Finally, let $\G4$ be the same as $\G3$ except that we change the way we
1494 generate challenges again: rather than computing $h$ and setting $c \gets h
1495 \xor r$, we just choose $c$ at random. Specifically, we use the new
1496 function, $\id{challenges}_4$, shown in figure~\ref{fig:wident-sound-4}.
1497
1498 \begin{figure}
1499 \begin{program}
1500 Function $\id{challenges}_4(n)$: \+ \\
1501 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1502 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1503 $r \getsr I$; $R \gets r R^*$; \\
1504 $c \getsr \Bin^{\ell_I}$; \\
1505 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1506 $\mathbf{c}[i] \gets (R, c)$; \- \\
1507 \RETURN $\mathbf{c}$;
1508 \end{program}
1509
1510 \caption{Soundness of $\Wident$: $\id{challenges}_4$}
1511 \label{fig:wident-sound-4}
1512 \end{figure}
1513
1514 Since $H_I(\cdot, \cdot)$ is a random function, the adversary can only
1515 distinguish $\G4$ from $\G3$ if it queries its random oracle at some $(R, r
1516 Y^*)$. But if it does this, then by the rule introduced in $\G3$ it has
1517 already won. Therefore we must have $\Pr[S_4] = \Pr[S_3]$.
1518
1519 Our $\id{challenges}_4$ function is interesting, since it doesn't actually
1520 make use of $r^*$ or $Y^*$ when generating its challenges. This gives us
1521 the clue we need to bound $\Pr[S_4]$: we can use adversary $A$ to solve the
1522 multiple-guess Diffie-Hellman problem in $G$ by simulating the game $\G4$.
1523 Specifically, we define the adversary $B$ as shown in
1524 figure~\ref{fig:wident-sound-cdh}. That is, for each query $A$ makes to
1525 its random oracle at a new pair $(R', Y')$, we see whether this gives us
1526 the answer we're looking for. We have $\Pr[S_0] \le \Pr[S_4] =
1527 \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_I + q_V)$ as required.
1528
1529 \begin{figure}
1530 \begin{program}
1531 Adversary $B^{V(\cdot)}(X, R^*)$: \+ \\
1532 $F \gets \emptyset$; $\Xid{R}{map} \gets \emptyset$; \\
1533 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1534 $r \getsr I$; $R \gets r R^*$; $c \getsr \Bin^{\ell_I}$; \\
1535 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1536 $\mathbf{c}[i] \gets (R, c)$; \- \\
1537 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1538 (X, \mathbf{c})$; \\
1539 \IF $Y' \neq \bot$ \THEN $H_I(R', Y')$;
1540 \next
1541 Oracle $H_I(R', Y')$: \+ \\
1542 \IF $(R', Y') \in \dom F$ \THEN \\ \quad
1543 $h \gets F(R', Y')$; \\
1544 \ELSE \\ \ind
1545 $\id{check}(R', Y')$; \\
1546 $h \getsr \Bin^{\ell_I}$;
1547 $F \gets F \cup \{ (R', Y') \mapsto h \}$; \- \\
1548 \RETURN $h$;
1549 \- \\[\medskipamount]
1550 Oracle $\id{check}(R', Y')$: \+ \\
1551 \IF $R' \in \dom \Xid{R}{map}$ \THEN
1552 $V(Y'/\Xid{R}{map}(R'))$;
1553 \end{program}
1554
1555 \caption{Soundness of $\Wident$: reduction from MCDH}
1556 \label{fig:wident-sound-cdh}
1557 \end{figure}
32609df3 1558\end{longproof}
a6e375a6
MW
1559
1560\subsubsection{Zero-knowledge}
1561Finally we must prove that $\Wident$ is (statistical) zero-knowledge -- i.e.,
1562that, except with very small probability, Bob learns nothing of use to him
1563except that he's interacting with Alice. To do this, we show that, for any
1564algorithm $B$ which Bob might use to produce his challenge to the real Alice,
1565there exists a simulator $S$ which produces transcripts distributed very
1566similarly to transcripts of real conversations between $B$ and Alice, the
1567difference being that $S$ doesn't know Alice's key. We shall show that the
1568statistical difference between the two distributions is $2^{-\ell_I}$.
1569
1570The intuition here is that Bob ought to know what answer Alice is going to
1571give him when he constructs his challenge. This is certainly true if he's
1572honest: his challenge is $R = r P$ for some $r$ he knows, so he won't learn
1573anything useful when Alice responds with $x R = r X$. However, if Bob sends
1574a challenge $R$ when he doesn't know the corresponding $r$, he learns
1575something potentially useful. The accompanying check value $c = r \xor
1576H_I(R, r X)$ keeps him honest.
1577
1578To show this, we present an \emph{extractor} which, given any challenge $(R,
1579c)$ Bob can construct, and his history of random-oracle queries, either
1580returns a pair $(r, Y)$ such that $R = r P$ and $Y = r X$, or $\bot$;
1581moreover, the probability that Alice returns a response $Y' \ne \bot$ given
1582 the challenge $(R, c)$ is $2^{-\ell_I}$. We can, of course, readily convert
1583this extractor into a simulator to prove the zero-knowledge property of our
1584protocol.
1585
1586We shall actually consider a slightly more complex setting. We grant Bob
1587access to an oracle which produces random, correctly-formed challenges. We
1588require this to model the legitimate challenges of other parties when we
32609df3 1589analyse the security of our key exchange protocol.
a6e375a6
MW
1590
1591\begin{definition}[Discrete-log extractors]
1592 Let $T$, $B$ be randomized algorithms. Define the game
1593 $\Game{dl-ext}{G}(T, B)$ as shown in figure~\ref{fig:dlext}. The
1594 \emph{success probability of $T$ as a discrete-log extractor against $B$}
1595 is defined as
1596 \[ \Succ{dl-ext}{G}(T, B) = \Pr[\Game{dl-ext}{G}(T, B) = 1]. \]
1597\end{definition}
a6e375a6
MW
1598
1599\begin{figure}
1600 \begin{program}
1601 $\Game{dl-ext}{G}(T, B):$ \+ \\
1602 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$;
1603 $Q_H \gets \emptyset$; $Q_C \gets \emptyset$; \\
1604 $(x, X) \gets \id{setup}()$; \\
1605 $(R, c) \gets B^{\Xid{H_I}{trap}(\cdot, \cdot), C()}(x, X)$; \\
1606 $(r, Y) \gets T(R, c, Q_H)$; \\
1607 $Y' \gets x R$; $h' \gets H_I(R, Y')$; $r' \gets c \xor h'$; \\
1608 \IF $r \ne \bot$ \THEN \\ \quad
1609 \IF $Y = \bot \lor R \ne r P \lor Y \ne Y'$ \THEN \RETURN $0$; \\
32609df3 1610 \IF $R = r' P$ \THEN $(r^*, Y^*) \gets (r', Y')$; \\
a6e375a6
MW
1611 \ELSE $(r^*, Y^*) \gets (\bot, \bot)$; \\
1612 \IF $(R, c) \in Q_C$ \THEN \RETURN $1$; \\
1613 \IF $(r, Y) = (r', Y')$ \THEN \RETURN $1$; \\
1614 \RETURN $0$;
1615 \next
1616 Oracle $\Xid{H_I}{trap}(R', Y')$: \+ \\
1617 $h \gets H_I(R', Y')$; \\
1618 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1619 \RETURN $h$; \- \\[\medskipamount]
1620 Oracle $C()$: \+ \\
32609df3 1621 $r \getsr \gf{q}$; \\
a6e375a6
MW
1622 $R \gets r P$; $c \gets r \xor H_I(R, r X)$; \\
1623 $Q_C \gets Q_C \cup \{(R, c)\}$; \\
32609df3 1624 \RETURN $(R, c)$
a6e375a6
MW
1625 \end{program}
1626
1627 \caption{Discrete log extraction game: $\Game{dl-ext}{G}(T, B)$}
1628 \label{fig:dlext}
1629\end{figure}
1630
32609df3
MW
1631Let's unpack this definition slightly. We make the following demands of our
1632extractor.
1633\begin{itemize}
1634\item It is given a bare `transcript' of $B$'s execution. In particular, it
1635 is given only its output and a list of $B$'s random-oracle queries in no
1636 particular order.
1637\item While the extractor is not given the private key~$x$, the adversary~$B$
1638 is given the private key.
1639\item We require that, if the extractor produces values $r, Y \ne \bot$ then
1640 $r$ and $Y$ are \emph{correct}; i.e., that $R = r P$ and $Y = x R$.
1641\item The extractor is explicitly \emph{not} given the outputs of the
1642 challenge-generation oracle $C()$, nor of the random-oracle queries issued
1643 by $C()$. However, we allow the extractor to fail (i.e., return $\bot$) if
1644 $B$ simply parrots one of its $C$-outputs.
1645\item The extractor is allowed -- indeed \emph{required} -- to fail if the
1646 challenge $(R, c)$ is \emph{invalid} (i.e., Alice would return $\bot$ given
1647 the challenge).
1648\end{itemize}
1649The resulting definition bears a striking similarity to the concept of
1650\emph{plaintext awareness} in \cite{Bellare:1998:RAN}.
1651
a6e375a6
MW
1652Such an extractor indeed exists, as the following lemma states.
1653\begin{lemma}[Effectiveness of extractor $T_\Wident$]
1654 \label{lem:dlext}
32609df3
MW
1655 There exists a \emph{universal discrete-log extractor} $T_\Wident$, shown
1656 in figure~\ref{fig:twident}, such that, for any algorithm $B$,
a6e375a6
MW
1657 \[ \Succ{dl-ext}{G}(T_\Wident, B) \ge 1 - \frac{1}{2^{\ell_I}}. \]
1658 Moreover, if $B$ issues at most $q_H$ random-oracle queries, then the
1659 running time of $T_\Wident$ is $O(q_H)$.
1660\end{lemma}
32609df3
MW
1661\ifshort
1662The proof of this lemma is given in the full version of this paper.
1663\else
a6e375a6
MW
1664We prove this result at the end of the section. For now, let us see how to
1665prove that $\Wident$ is zero-knowledge.
32609df3
MW
1666\fi
1667
1668\begin{figure}
1669 \begin{program}
1670 Extractor $T_\Wident(R, c, Q_H)$: \+ \\
1671 \FOR $(R', Y', h)$ \IN $Q_H$ \DO \\ \ind
1672 $r \gets h \xor c$; \\
1673 \IF $R = R' = r P \land Y' = r X$ \THEN \RETURN $(r, Y')$; \- \\
1674 \RETURN $(\bot, \bot)$;
1675 \end{program}
1676
1677 \caption{The discrete-log extractor $T_\Wident$}
1678 \label{fig:twident}
1679\end{figure}
a6e375a6
MW
1680
1681We use the set-up described in section~\ref{sec:sim}. Our initialization
32609df3 1682function~$I_\Wident$ just chooses a random $x \in \gf{q}$ as the world
a6e375a6
MW
1683private input and sets $X = x P$ as the common input. In the `real world',
1684the adversary is allowed to submit a (single) challenge to the prover; it is
1685given the prover's response, and must then compute its output. This is shown
1686on the left hand side of figure~\ref{fig:wident-sim}.
1687
1688The zero-knowledge property of the scheme is described by the following
1689theorem.
1690\begin{theorem}[Statistical zero-knowledge of $\Wident$]
1691 \label{thm:wident-zk}
1692 Let $I_\Wident$, $W_\Wident$ and $S_\Wident$ be the real-prover world and
1693 simulator shown in figure~\ref{fig:wident-sim}. Then, for any~$t$,
1694 $q_I$ and $q_C$,
1695 \[ \InSec{sim}(W_\Wident, I_\Wident, S_\Wident; t, q_I, q_C, 0) \le
1696 \frac{q_C}{2^{\ell_I}}.
1697 \]
1698 where $q_C$ is the maximum number of challenges allowed by the adversary.
1699\end{theorem}
32609df3 1700\begin{longproof}{}
a6e375a6
MW
1701 The simulator simply uses the extractor~$T_\Wident$ to extract the answer
1702 from the adversary's history of random oracle queries. Observe that
1703 $S_\Wident$ is a fake-world simulator. By lemma~\ref{lem:dlext}, the
1704 extractor fails with probability only $2^{-\ell_I}$. The theorem follows
1705 by a simple union bound and proposition~\ref{prop:fakesim}.
32609df3 1706\end{longproof}
a6e375a6 1707
32609df3 1708%\ifshort\else
a6e375a6
MW
1709\begin{figure}
1710 \begin{program}
1711 Initialization function $I_\Wident()$: \+ \\
32609df3 1712 $x \getsr \gf{q}$; \\
a6e375a6
MW
1713 $X \gets x P$; \\
1714 \RETURN $(x, X)$;
1715 \- \\[\medskipamount]
1716 Real-prover world $W_\Wident^{H_I(\cdot, \cdot)}
1717 ((x, X), \sigma, \tau, \mu)$: \+ \\
1718 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1719 $(R, c) \gets \mu$; \\
1720 $Y \gets \id{response}^{H_I(\cdot, \cdot)}(R, c, x)$; \\
1721 \RETURN $(1, Y)$; \- \\
1722 \ELSE \\ \ind
1723 \RETURN $(\sigma, \bot)$;
1724 \next
1725 Simulator $S_\Wident$'s fake world \\
1726 \hspace{1in} $W_{\text{sim}}^{H_I(\cdot, \cdot)}
1727 ((X, u), \sigma, \tau, \mu)$: \+ \\
1728 \IF $\tau = \cookie{init}$ \THEN \\ \ind
1729 \RETURN $(\emptyset, \emptystring)$; \- \\
1730 $Q_H \gets \sigma$; \\
1731 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1732 $(R, c) \gets \mu$; \\
1733 $(r, Y) \gets T_\Wident(R, c, Q_H)$; \\
1734 \RETURN $(Q_H, Y)$; \- \\
1735 \ELSE \IF $\tau = \cookie{random}$ \THEN \\ \ind
1736 $(i, (R', Y'), h) \gets \mu$; \\
1737 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1738 \RETURN $(Q_H, \emptystring)$; \- \\
1739 \ELSE \\ \ind
1740 \RETURN $(\sigma, \bot)$;
1741 \end{program}
1742
1743 \caption{Real-prover and simulator for zero-knowledge of $\Wident$}
1744 \label{fig:wident-sim}
1745\end{figure}
32609df3 1746%\fi
a6e375a6 1747
32609df3
MW
1748\ifshort\else
1749We now return to proving that the extractor $T_\Wident$ functions as claimed.
dff0fad2 1750The following two trivial lemmata will be useful, both now and later.
a6e375a6
MW
1751\begin{lemma}[Uniqueness of discrete-logs]
1752 \label{lem:unique-dl}
1753 Let $G = \langle P \rangle$ be a cyclic group. For any $X \in G$ there is
32609df3 1754 a unique $x \in \gf{q}$ where $X = x P$.
a6e375a6
MW
1755\end{lemma}
1756\begin{proof}
1757 Certainly such an $x$ exists, since $G$ is cyclic and finite. Suppose $X =
1758 x P = x' P$: then $0 = x P - x' P = (x - x') P$. Hence $(x - x')$ is a
32609df3 1759 multiple of $q$, i.e., $x = x'$.
a6e375a6
MW
1760\end{proof}
1761\begin{lemma}[Uniqueness of check values]
1762 \label{lem:unique-c}
32609df3
MW
1763 Let $G = \langle P \rangle$ be a cyclic group of prime order $q$; let $H_I$
1764 be a function $H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$. Fix some $x
1765 \in \gf{q}$ and define the set
a6e375a6
MW
1766 \[ V_x = \bigl\{\, (R, c) \in G \times \Bin^{\ell_I} \bigm|
1767 R = \bigl( c \xor H_I(R, x R) \bigr) P \,\bigr\}.
1768 \]
1769 Then, for any $R$, $c$, $c'$, if $(R, c) \in V_x$ and $(R, c') \in V_x$
32609df3 1770 then $c = c'$.
a6e375a6
MW
1771\end{lemma}
1772\begin{proof}
32609df3
MW
1773 From lemma~\ref{lem:unique-dl}, we see that there is a unique $r \in \gf{q}$
1774 for which $R = r P$. Now, if $(R, c) \in V_x$, we must have $r = c \xor
1775 H_I(R, x R)$. It follows that $c = r \xor H_I(R, x R)$.
a6e375a6
MW
1776\end{proof}
1777
1778\begin{proof}[Proof of lemma~\ref{lem:dlext}]
a6e375a6
MW
1779 Let $B$ be any randomized algorithm, and let $(R, c, Q_H)$ be as given to
1780 the extractor by $\Game{dl-ext}{G}(T_\Wident, B)$. Let the quantities
1781 $H_I$, $Q_C$, $r$, $r'$, $x$ and $X$ be as in that game.
1782
1783 Suppose that the extractor returns values $(r, Y) \ne (\bot, \bot)$. Let
1784 $h = r \xor c$; then there must be a query $(R, Y, h) \in Q_H$, and we have
1785 $R = r P$ and $Y = r X = r (x P) = x (r P) = x R = Y'$, so the extractor's
1786 output must be correct unless it fails.
1787
1788 Furthermore, in the case where the extractor did not fail, we have $h =
1789 H_I(R, Y) = H_I(R, Y')$ and $c = r \xor h$, so the challenge was valid.
1790 Therefore, if the challenge was invalid, the extractor will fail.
1791
1792 We now deal with the challenge-generation oracle. Suppose that $(R, c')
1793 \in Q_C$ for some $c'$. Now, if $c = c'$ then $(R, c')$ is a repeat of
1794 some challenge from the challenge-generation oracle, and the extractor is
1795 permitted to fail. On the other hand, suppose $c \ne c'$; then, the
1796 challenge $(R, c)$ must be invalid by lemma~\ref{lem:unique-c}, so the
1797 extractor is required to fail, and we have established that indeed it will.
1798 From now on, suppose that $R$ is distinct from all the $R$-values returned
1799 by $C()$.
1800
1801 Let $Y = x R$. Suppose that $B$ queried its random oracle at $(R, Y)$.
1802 Let $h = H_I(R, Y)$, so $r' = c \xor h$. If the challenge is valid then $R =
1803 r' P$; therefore $Y = x R = x r' P = r' X$, so we have $(R, Y, h) \in Q_H$
1804 with $R = r P$ and $Y = r X$. Hence the extractor returns $r = r'$ as
1805 required.
1806
1807 It remains to deal with the case where there is no random-oracle query at
1808 $(R, Y)$. But then $h = H_I(R, Y)$ is uniformly distributed, and
1809 independent of the entire game up to this point. Let $r$ be the correct
1810 discrete log of $R$; by lemma~\ref{lem:unique-dl} there is only one
1811 possible value. The extractor always fails under these circumstances, but
1812 a correct responder would reply with probability
1813 \[ \Pr[h = c \xor r] = \frac{1}{2^{\ell_I}}. \]
1814 This concludes the proof.
1815\end{proof}
1816\begin{remark}
1817 Note that the fact that the algorithm~$B$ was given the private key is
1818 irrelevant to the above argument. However, we shall need this property
1819 when we come to prove deniability for the key-exchange protocol.
1820\end{remark}
1821\begin{remark}
1822 It's easy to see from the above proof that the extractor works flawlessly
1823 on the `honest verifier' algorithm $\id{challenge}$ shown in
1824 figure~\ref{fig:wident-ro}. This shows that $\Wident$ is perfect
1825 zero-knowledge against honest verifiers. We're much more interested in
1826 dishonest verifiers, though.
1827\end{remark}
32609df3 1828\fi
a6e375a6
MW
1829
1830
32609df3 1831\ifshort\else
a6e375a6
MW
1832\subsection{An identity-based identification scheme}
1833\label{sec:wident-id}
1834
d8139f2c 1835Boneh and Franklin \cite{Boneh:2003:IBE} showed how to construct an
a6e375a6
MW
1836identity-based encryption scheme using bilinear pairings. The resulting
1837encryption scheme looks somewhat like a pairing-based version of ElGamal's
d8139f2c 1838encryption scheme \cite{ElGamal:1985:PKC}. We can easily apply their
a6e375a6
MW
1839techniques to our identification protocol, and thereby obtain an
1840identity-based identification scheme. Providing the necessary formalisms to
1841prove theorems analogous to our theorems~\ref{thm:wident-sound}
1842and~\ref{thm:wident-zk} would take us too far from our objectives; but given
1843appropriate security notions, we can readily adapt our existing proofs to the
1844new setting.
1845
1846\subsubsection{Bilinear pairings}
1847Before we describe the necessary modifications to the protocol, we first give
1848a (very brief!) summary of cryptographic pairings. (The Boneh-Franklin paper
d8139f2c 1849\cite{Boneh:2003:IBE} gives more detail; also \cite{Menezes:2005:IPB}
a6e375a6
MW
1850provides a useful introduction to the topic.)
1851
1852Let $(G, +)$, $(G', +)$ and $(G_T, \times)$ be cyclic groups with prime order
1853$q$; let $P \in G$ and $P' \in G'$ be elements of order $q$ in $G$ and $G'$
1854respectively. We say that a mapping $\hat{e}\colon G \times G' \to G_T$ is a
1855\emph{non-degenerate bilinear pairing} if it satisfies the following
1856properties.
1857\begin{description}
1858\item[Bilinearity] For all $R \in G$ and $S', T' \in G'$, we have $\hat{e}(R,
dff0fad2
MW
1859 S' + T') = \hat{e}(R, S')\,\hat{e}(R, T')$; and for all $R, S \in G$ and $T'
1860 \in G'$ we have $\hat{e}(R + S, T') = \hat{e}(R, T')\,\hat{e}(S, T')$.
a6e375a6
MW
1861\item[Non-degeneracy] $\hat{e}(P, P') \ne 1$.
1862\end{description}
1863For practical use, we also want $\hat{e}(\cdot, \cdot)$ to be efficient to
1864compute. The reader can verify that $\hat{e}(a P, b P') = \hat{e}(P,
1865P')^{ab}$. It is permitted for the two groups $G$ and $G'$ to be equal.
1866
1867We require a different intractability assumption, specifically that the
1868\emph{bilinear} Diffie-Hellman problem (BDH) -- given $(a P, b P, a P', c P')
1869\in G^2 \times G'^2$, find $\hat{e}(P, P')^{abc} \in G_T$ -- is difficult.
1870This implies the difficulty of the computational Diffie-Hellman problem in
1871all three of $G$, $G'$ and~$G_T$.
1872
1873\subsubsection{The identity-based scheme}
1874We need a trusted authority; following \cite{Schneier:1996:ACP} we shall call
32609df3 1875him Trent. Trent's private key is $t \in \gf{q}$; his public key is $T =
a6e375a6
MW
1876t P$.
1877
1878Finally, we need cryptographic hash functions $H_I\colon G \times G_T \to
1879\Bin^{\ell_I}$ and $\Hid\colon \Bin^* \to G'$; a formal security analysis
1880would model these as random oracles.
1881
1882Alice's public key is $A = \Hid(\texttt{Alice}) \in G'$. Her private key is
1883$K_A = t A \in G'$ -- she needs Trent to give this to her. Bob can interact
1884with Alice in order to verify her identity as follows.
1885\begin{enumerate}
1886\item Bob computes $\gamma_A = \hat{e}(T, A) \in G_T$. (He can do this once
1887 and store the result if he wants, but it's not that onerous to work it out
1888 each time.)
32609df3 1889\item Bob chooses $r \inr \gf{q}$, and sets $R = r P$. He also computes
a6e375a6
MW
1890 $\psi = \gamma_A^r$, $h = H_I(R, \psi)$ and $c = r \xor h$. He sends his
1891 challenge $(R, c)$ to Alice.
1892\item Alice receives $(R', c')$. She computes $\psi' = \hat{e}(R, K_A)$, $h'
1893 = H_I(R', \psi')$, and $r' = c' \xor h'$. She checks that $R' = r' P$; if
1894 so, she sends $\psi'$ back to Bob; otherwise she refuses to talk to him.
1895\item Bob receives $\psi'$. If $\psi = \psi'$ then he accepts that he's
1896 talking to Alice.
1897\end{enumerate}
1898This works because $\psi = \gamma_A^r = \hat{e}(T, A)^r = \hat{e}(t P, A)^r =
1899\hat{e}(r P, A)^t = \hat{e}(R, t A) = \psi'$.
1900
1901\subsubsection{Informal analysis}
1902An analogue to lemma~\ref{lem:dlext} can be proven to show how to extract $r$
1903from a verifier's random-oracle queries; statistical zero knowledge would
1904then follow easily, as in theorem~\ref{thm:wident-zk}. Soundness is
1905intuitively clear, since an adversary must compute $\psi = \hat{e}(P,
1906P')^{art}$ given $A = a P'$, $R = r P$ and $T = t P$, which is an instance of
1907the BDH problem. An analogue of theorem~\ref{thm:wident-sound} would have to
1908prove this for an adversary capable of making identity requests as well as
1909obtaining challenges. Finally, our key-exchange protocol can be constructed
1910out of this identity-based identification scheme, yielding an identity-based
1911authenticated key-exchange protocol. We leave it to the reader to work
1912through the details.
32609df3 1913\fi
a6e375a6
MW
1914
1915
32609df3 1916\ifshort\else
a6e375a6
MW
1917\subsection{Comparison with the protocol of Stinson and Wu}
1918\label{sec:stinson-ident}
1919
1920Our protocol is similar to a recent proposal by Stinson and Wu
1921\cite{Stinson:2006:EST}. They restrict their attention to Schnorr groups $G
32609df3
MW
1922\subset \F_p^*$. Let $\gamma$ be an element of order $q = \#G$. The
1923prover's private key is $a \inr \gf{q}$ and her public key is $\alpha =
1924\gamma^a$. In their protocol, the challenger chooses $r \inr \gf{q}$, computes
1925$\rho = \gamma^r$ and $\psi = \alpha^r$, and sends a challenge $(\rho,
1926H(\psi))$. The prover checks that $\rho^q \ne 1$, computes $\psi = \rho^a$,
1927checks the hash, and sends $\psi$ back by way of response. They prove their
1928protocol's security in the random-oracle model.
a6e375a6
MW
1929
1930Both the Wrestlers protocol and Stinson-Wu require both prover and verifier
1931to compute two exponentiations (or scalar multiplications) each. The
32609df3 1932sizes of the messages used by the two protocols are also identical.
a6e375a6
MW
1933
1934(An earlier version of the Stinson-Wu protocol used a cofactor
1935 exponentiation: if we set $f = (p - 1)/q$, then we use $\psi = \alpha^{rf} =
1936\rho^{af} = \gamma^{afr}$. This is more efficient in typical elliptic curve
1937subgroups, since the cofactor of such subgroups is usually small: indeed,
1938\cite{SEC1} recommends rejecting groups with cofactor $f > 4$. However, in
1939the Schnorr groups used by Stinson and Wu, the cofactor is much larger than
1940$q$, and their new variant is more efficient.)
1941
1942We note that the zero-knowledge property of the Stinson-Wu protocol requires
1943the Diffie-Hellman knowledge of exponent assumption (KEA). Very briefly:
1944suppose $A$ is a randomized algorithm which takes as input $X \in G$ and
1945outputs a pair $(r P, r X)$; intuitively, the KEA asserts $A$ must have done
1946this by choosing $r$ somehow and then computing its output from it.
1947Formally, it asserts the existence of an `extractor' algorithm which takes as
1948input the element $X$ and the random coins used by $A$ and outputs $r$ with
1949high probability. This is a very strong assumption, and one which is
1950unnecessary for our protocol, since we can present an \emph{explicit}
1951extractor.
1952
1953The KEA assumption as stated in \cite{Stinson:2006:EST} allows the extractor
1954to fail with some negligible probability, over and above the probability that
8b8b8839 1955a dishonest verifier managed to guess the correct $h = H(\psi)$ without
a6e375a6
MW
1956 making this random-oracle query. Not only does our protocol achieve
1957 zero-knowledge without the KEA, our extractor is, in this sense, `perfect'.
1958
1959Our protocol is just as strong as Stinson-Wu under attack from active
1960intruders: see table~\ref{tab:wident-active} for a very brief sketch of the
1961case-analysis which would be the basis of a proof of this.
1962
1963\begin{table}
1964 \begin{tabular}[C]{|*{3}{c|}p{8cm}|}
1965 \hlx{hv[1]}
1966 \multicolumn{2}{|c|}{\textbf{Challenge}} &
1967 \textbf{Response} &
1968 \textbf{Security}
1969 \\ \hlx{v[1]hv}
1970 %% unpleasant hacking to make the R and c columns the same width :-(
1971 \settowidth{\dimen0}{\textbf{Challenge}}%
32609df3 1972 \dimen0=.5\dimen0
a6e375a6
MW
1973 \advance\dimen0by-\tabcolsep
1974 \advance\dimen0by-.5\arrayrulewidth
1975 \hbox to\dimen0{\hfil$R$\hfil}
1976 & $c$ & $Y$ & Nothing to prove. \\ \hlx{v}
1977 $R$ & $c'$ & --- & Prover rejects by lemma~\ref{lem:unique-c};
1978 $Y'$ probably wrong by
1979 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
1980 $R$ & $c$ & $Y'$ & Response is incorrect. \\ \hlx{v}
1981 $R'$ & --- & $Y$ & Response is incorrect. \\ \hlx{v}
1982 $R'$ & $c$ & $Y'$ & Prover rejects with probability $1 - 2^{-\ell_I}$;
1983 $Y'$ probably wrong by
1984 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
1985 $R'$ & $c'$ & $Y'$ & Simulate prover using extractor
1986 (lemma~\ref{lem:dlext}); $Y'$ probably wrong by
1987 theorem~\ref{thm:wident-sound}. \\ \hlx{vh}
1988 \end{tabular}
32609df3 1989
a6e375a6
MW
1990 \caption{Security of $\Wident$ against active intruders (summary)}
1991 \label{tab:wident-active}
1992\end{table}
1993
1994An identity-based analogue of Stinson-Wu can be defined using a bilinear
1995pairing, just as we did in section~\ref{sec:wident-id}. However, to prove
1996the zero-knowledge property, one needs to make a bilinear analogue of the
1997knowledge of exponent assumption.
1998
1999We suspect that a key-exchange protocol like ours can be constructed using
2000Stinson-Wu rather than the Wrestlers identification scheme. We haven't,
2001however, gone through all the details, since we believe our protocol is just
2002as efficient and is based on much more conservative assumptions.
32609df3 2003\fi
a6e375a6
MW
2004
2005%%%--------------------------------------------------------------------------
2006
2007\section{A simple key-exchange protocol}
2008\label{sec:kx}
2009
2010In this section, we describe a simple key-exchange protocol built out of the
2011identification protocol shown previously.
2012
2013The key-exchange protocol arises from the following observation. If Bob
2014sends a challenge, presumably to Alice, and gets a correct response, then not
2015only did he really send the challenge to Alice but he knows that she received
2016it correctly.
2017
2018So, if Alice and Bob authenticate each other, by the end of it, they should
2019each have chosen a random private value, sent the corresponding public value
2020to the other, and been convinced that it arrived safely.
2021
2022Unfortunately, life isn't quite this kind, and we have to do some more work
2023to make this scheme secure.
2024
2025
2026Our key exchange protocol essentially consists of two parallel instances of
2027the identification protocol. If Alice receives a correct response to her
2028challenge, she will know that Bob received her challenge correctly, and
2029\emph{vice versa}. If we let Alice's challenge be $R_0 = r_0 P$ and Bob's
2030challenge be $R_1 = r_1 P$ then each can compute a shared secret $Z = r_0 R_1
2031= r_0 r_1 P = r_1 R_0$ unknown to an adversary. There are, unfortunately, a
2032few subtleties involved in turning this intuition into a secure key-exchange
2033protocol, which we now describe.
2034
2035
2036\subsection{Overview}
2037\label{sec:kx-overview}
2038
2039We present a quick, informal description of our basic key-exchange protocol.
2040In addition to our group $G$, we shall also need a secure symmetric
2041encryption scheme $\E = (\kappa, E, D)$, and two secure hash functions
2042$H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$ and $H_K\colon \Bin^{\ell_G+1}
2043\to \Bin^\kappa$.
2044
2045Suppose that Alice's and Bob's private keys are $a$ and $b$ respectively, and
2046their public keys are $A = a P$ and $B = b P$.
2047\begin{enumerate}
32609df3
MW
2048\item Alice chooses a random index $r \inr \gf{q}$. She computes $R = r P$ and
2049 $c = r \xor H_I(R, r B)$. She sends the pair $(R, c)$ to Bob.
2050\item Similarly, Bob chooses a random $s \inr \gf{q}$. He computes $S = s P$
2051 and $d = s \xor H_I(S, s A)$. He sends $(S, d)$ to Alice.
a6e375a6
MW
2052\item Alice receives $(S', d')$ from Bob. She computes $s' = d' \xor H_I(S',
2053 a S')$, and verifies that $S' = s' P$. If so, she computes $K_A = H_K(0
2054 \cat r S')$, and sends $R, E_{K_A}(a S')$ to Bob.
2055\item Similarly, Bob receives $(R', c')$ from Alice. He verifies that $R' =
2056 \bigl( c' \xor H_I(R', b R') \bigr) P$. If so, he computes $K_B = H_K(0
2057 \cat s R')$ and sends $S, E_{K_B}(b R')$ to Alice.
2058\item Alice receives a ciphertext $(S'', \chi_B)$ from Bob. She checks that
2059 $S'' = S'$, decrypts $\chi_B$, and checks that $D_{K_A}(\chi_B) = r B$. If
2060 so, she uses $H_K(1 \cat r S')$ as her shared secret.
2061\item Similarly, Bob receives $(R'', \chi_A)$ from Alice, and checks $R'' =
2062 R'$ and $D_{K_B}(\chi_A) = s A$. If so, he uses $H_K(1 \cat s R')$ as his
2063 shared secret.
2064\end{enumerate}
2065This is the Wrestlers Key Exchange protocol, $\Wkx^{G, \E}$ (again, we omit
2066the superscripts when referring to the general protocol, or when confusion is
2067unlikely). A diagrammatic summary of the protocol is shown in
2068figure~\ref{fig:wkx}.
2069
2070\begin{figure}
2071 \begin{description}
32609df3 2072 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2073 $H_I(\cdot, \cdot)$ and $H_K(\cdot)$ are secure hashes. $\E = (\kappa,
2074 E, D)$ is an IND-CCA2 symmetric encryption scheme.
2075 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2076 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2077 \item[Public keys] $X_i = x_i P$.
2078 \end{description}
32609df3
MW
2079 \begin{protocol}
2080 $r_i \getsr I$; $R_i \gets r_i P$; &
2081 $r_j \getsr I$; $R_j \gets r_j P$; \\
2082 $c_i \gets r_i \xor H_I(R_i, r_i X_j)$; &
2083 $c_j \gets r_j \xor H_I(R_j, r_j X_i)$; \\
2084 \send{->}{(R_i, c_i)}
2085 \send{<-}{(R_j, c_j)}
2086 Check $R_j = \bigl(c_j \xor H_I(R_j, x_i R_j)\bigr) P$; &
2087 Check $R_i = \bigl(c_i \xor H_I(R_i, x_j R_i)\bigr) P$; \\
2088 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2089 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2090 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2091 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2092 \send{->}{(R_i, \chi_i)}
2093 \send{<-}{(R_j, \chi_j)}
2094 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2095 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2096 Shared key is $K_1$. & Shared key is $K_1$.
2097 \end{protocol}
a6e375a6
MW
2098
2099 \caption{Summary of the Wrestlers Key Exchange protocol, $\Wkx$}
2100 \label{fig:wkx}
2101\end{figure}
2102
2103Assume, for the moment, that Alice and Bob's messages are relayed honestly.
2104Then:
2105\begin{itemize}
2106\item $a S' = a S = a (s P) = s (a P) = s A$, so $s' = d' \xor H_I(S', a S') =
2107 d \xor H_I(S, s A) = s$, and $S' = S = s P = s' P$, and therefore Alice
2108 responds to Bob's message;
2109\item similarly $b R' = r B$, so $r' = r$ and $R' = r' P$, and therefore Bob
2110 responds to Alice's message;
2111\item $b R' = b R = b (r P) = r (b P) = r B$, and $a S' = a S = a (s P) = s
2112 (a P) = s A$, and therefore both parties compute their responses correctly;
32609df3 2113 and
a6e375a6
MW
2114\item $r S' = r S = r (s P) = s (r P) = s R = s R'$, so $K_A = K_B$, and
2115 therefore they can decrypt each others' responses, and agree the same
2116 shared secret.
2117\end{itemize}
2118This shows that the protocol is basically valid, but says little about its
2119security. The remainder of this section will describe our protocol in more
2120formal detail, and prove its security in a model with multiple parties and an
2121adversary who controls the network.
2122
2123Observe that the protocol as we've presented here is \emph{symmetrical}.
2124There's no notion of `initiator' or `responder'. There are a total of four
2125messages which must be sent before both parties accept. However, this can be
2126reduced to three by breaking the symmetry of the protocol and combining one
2127or other party's challenge and response messages. We choose to analyse the
2128symmetrical version, since to do so, it suffices to consider only the two
2129different kinds of messages. Since our security model allows the messages to
2130be adversarially delayed and reordered, it is easy to show that the security
2131of an optimized, asymmetrical protocol is no worse than the symmetrical
2132version we present here.
2133
2134
2135\subsection{Security model and security definition}
2136\label{sec:um}
2137
2138Our model is very similar to that of Canetti and Krawczyk
dff0fad2 2139\cite{Canetti:2001:AKE}, though we have modified it in two ways.
a6e375a6
MW
2140\begin{enumerate}
2141\item We allow all the participants (including the adversary) in the protocol
2142 access to the various random oracles required to implement it.
2143\item Since we want to analyse a specific, practical scheme, asymptotic
2144 results are useless. We measure the adversary's resource usage carefully,
2145 and produce a quantitative bound on the adversary's advantage in the
2146 SK-security game.
2147\end{enumerate}
2148
32609df3
MW
2149\ifshort
2150
2151Readers interested in the details of the model should see Canetti and
2152Krawczyk's paper \cite{Canetti:2001:AKE}, or the full version of this paper.
2153
2154\else
2155
a6e375a6
MW
2156\subsubsection{Overview}
2157We briefly describe our modified model, pointing out the changes we have
2158made, and how they apply to our protocol. Much of Canetti and Krawczyk's
2159model (for example, the local and global outputs) is useful for proving more
2160general security properties such as demonstrating that SK-security suffices
2161for constructing secure channels, and we shall not concern ourselves with
2162such details. Other parts deal with issues such as security parameters and
2163ensuring that all the computation is polynomially bounded, which are
2164irrelevant since we are dealing with a single concrete protocol rather than a
2165family of them.
2166
2167The entities in the model are the \emph{adversary}~$A$, and a (fixed) number
2168of \emph{parties}~$P_i$, for $0 \le i < n$. If the protocol under
2169consideration makes use of random oracles, then all the participants -- the
2170adversary and the parties -- are all allowed access to the random oracles.
2171
2172The parties and the adversary play a `game'. At the beginning of the game,
2173the participants are given some inputs computed by a randomized
2174\emph{initialization procedure}~$\id{init}$. This produces as output a pair
2175$(i_U, \mathbf{i})$; the value $i_U$ is the \emph{global input}, and is given
2176to all the participants including the adversary. The vector $\mathbf{i}$ has
2177$n$ components, and party $P_i$ is given $(i_U, \mathbf{i}[i])$ as input.
2178
2179\subsubsection{Sessions}
2180Parties don't act directly. Instead, each party runs a number of
32609df3
MW
2181\emph{sessions}. A session is represented by a triple $S = (P_i, P_j, s)$,
2182where $i, j \in \Nupto{n}$ identify the owning party and a \emph{partner},
2183and $s \in \Bin^{\ell_S}$ is a \emph{session-id}. (The original model
2184includes a r\^ole, for distinguishing between initiators and responders. Our
2185protocol is symmetrical, so this distinction isn't useful.) If $P_i$ runs a
2186session $S = (P_i, P_j, s)$ and $P_j$ runs a session $S' = (P_j, P_i, s)$
2187then we say that $S$ and $S'$ are \emph{matching}, and that $P_j$ is $P_i$'s
a6e375a6
MW
2188\emph{partner} for the session.
2189
2190At most one participant in the game is \emph{active} at any given time.
2191Initially the adversary is active. The adversary may \emph{activate} a
2192session in one of two ways.
2193\begin{enumerate}
2194\item It may \emph{create a session} of a party~$P_i$, by selecting a
2195 session-id~$s \in \Bin^{\ell_S}$ and a partner $j$. There is no
2196 requirement that $P_j$ ever have a matching session. However, all sessions
2197 of a party must be distinct, i.e., sessions with the same partner must have
2198 different session-ids.
2199\item It may \emph{deliver a message}~$\mu \in \Bin^*$, from party~$P_j$, to
2200 an existing session~$S = (P_i, P_j, s)$. There is no requirement that any
2201 party previously sent $\mu$: the adversary is free to make up messages as
2202 it sees fit.
2203\end{enumerate}
2204The adversary becomes inactive, and the session becomes active. The session
2205performs some computation, according to its protocol, and may request a
2206message~$\mu$ be delivered to the matching session running in its partner
2207(which may not exist). The session may also \emph{terminate}. In the case
2208we are interested in, of key-exchange protocols, a session~$S = (P_i, P_j,
2209s)$ may terminate in one of two ways:
2210\begin{enumerate}
2211\item it may \emph{complete}, outputting $(i, j, s, K)$, for some
2212 \emph{session key}~$K$, or
2213\item it may \emph{abort}, outputting $(i, j, s, \bot)$.
2214\end{enumerate}
2215Once it has performed these actions, the session deactivates and the
2216adversary becomes active again. The adversary is given the message~$\mu$, if
2217any, and informed of whether the session completed or aborted, but, in the
2218case of completion, not of the value of the key~$K$. A session is
2219\emph{running} if it has been created and has not yet terminated.
2220
2221\subsubsection{Other adversarial actions}
2222As well as activating sessions, the adversary has other capabilities, as
2223follows.
2224\begin{itemize}
2225\item It may \emph{expire} any session~$S$, causing the owning party to
2226 `forget' the session key output by that session.
2227\item It may \emph{corrupt} any party~$P_i$, at will: the adversary learns
2228 the entire state of the corrupted party, including its initial
2229 input~$\mathbf{i}[i]$, the state of any sessions it was running at the
2230 time, and the session keys of any completed but unexpired sessions. Once
2231 corrupted, a party can no longer be activated. Of course, the adversary
2232 can continue to send messages allegedly from the corrupted party.
2233\item It may \emph{reveal the state} of a running session~$S$, learning any
2234 interesting values specific to that session, but \emph{not} the owning
2235 party's long-term secrets.
2236\item It may \emph{reveal the session-key} of a completed session~$S$.
2237\item It may elect to be \emph{challenged} with a completed session~$S$,
2238 provided that it is unexposed. Challenge sessions form part of the security notion for
2239 key-exchange protocols. See below for more details.
2240\end{itemize}
2241We say that a session $S = (P_i, P_j, s)$ is \emph{locally exposed} if
2242\begin{itemize}
2243\item it has had its state revealed,
2244\item it has had its session-key revealed, or
2245\item $P_i$ has been corrupted, and $S$ had not been expired when this
2246 happened.
2247\end{itemize}
2248A session is \emph{exposed} if it is locally exposed, or if its matching
2249session exists and has been locally exposed.
2250
2251At the beginning of the game, a bit $b^*$ is chosen at random. The adversary
2252may choose to be \emph{challenged} with any completed, unexposed
2253session;\footnote{%
2254 The original Canetti-Krawczyk definition restricts the adversary to a
2255 single challenge session, but our proof works independently of the number of
2256 challenge sessions, so we get a stronger result by relaxing the requirement
2257 here.}
2258the adversary is then given either the session's key -- if $b^* = 1$ -- or a
2259string chosen at random and independently of the game so far from a
2260protocol-specific distribution -- if $b^* = 0$. At the end of the game, the
2261adversary outputs a single bit~$b$.
2262
2263\subsubsection{SK-security}
2264We've now described the game; it is time to explain the adversary's goal in
2265it. The adversary \emph{wins} the game if either
2266\begin{enumerate}
2267\item two unexposed, matching sessions complete, but output different
2268 keys,\footnote{%
2269 The original Canetti-Krawczyk definition differs slightly here. It
2270 requires that `if two \emph{uncorrupted} parties complete matching
2271 sessions then they both output the same key' [original emphasis]. This
2272 can't be taken at face value, since none of the protocols they claim to
2273 be secure actually meet this requirement: they meet only the weaker
2274 requirement that parties completing matching sessions output different
2275 keys with negligible probability. We assume here that this is what they
2276 meant.}
2277 or
2278\item the adversary correctly guesses the hidden bit~$b^*$.
2279\end{enumerate}
2280More formally, we make the following definition.
32609df3 2281\fi
a6e375a6
MW
2282\begin{definition}[SK-security]
2283 \label{def:sk}
2284 Let $\Pi^{H_0(\cdot), H_1(\cdot), \ldots}$ be a key-exchange protocol
2285 which makes use of random oracles $H_0(\cdot)$, $H_1(\cdot)$, \dots, and
32609df3
MW
2286 let $A$ be an adversary playing the game described \ifshort in
2287 \cite{Canetti:2001:AKE}\else previously\fi, where $n$
a6e375a6
MW
2288 parties run the protocol~$\Pi$. Let $V$ be the event that any pair of
2289 matching, unexposed sessions completed, but output different session keys.
2290 Let $W$ be the event that the adversary's output bit matches the game's
2291 hidden bit~$b^*$. We define the adversary's \emph{advantage against the
2292 SK-security of the protocol~$\Pi$} to be
2293 \[ \Adv{sk}{\Pi}(A, n) = \max(\Pr[V], 2\Pr[W] - 1). \]
2294 Furthermore, we define the \emph{SK insecurity function of the
2295 protocol~$\Pi$} to be
2296 \[ \InSec{sk}(\Pi; t, n, q_S, q_M, q_{H_0}, q_{H_1}, \dots) =
2297 \max_A \Adv{sk}{\Pi}(A, n)
2298 \]
2299 where the maximum is taken over all adversaries~$A$ with total running
2300 time~$t$ (not including time taken by the parties), create at most $q_S$
2301 sessions, deliver at most $q_M$~messages, and (if applicable) make at most
2302 $q_{H_i}$ random-oracle queries to each random oracle $H_i(\cdot)$.
2303\end{definition}
2304
2305
2306\subsection{Security}
2307
2308In order to analyse our protocol $\Wkx^{G, \E}$ properly, we must describe
32609df3 2309exactly how it fits into our formal model.
a6e375a6
MW
2310
2311\subsubsection{Sessions and session-ids}
2312Our formal model introduced the concept of sessions, which the informal
2313description of section~\ref{sec:kx-overview} neglected to do. (One could
2314argue that we described a single session only.) As we shall show in
2315section~\ref{sec:kx-insecure}, our protocol is \emph{insecure} unless we
2316carefully modify it to distinguish between multiple sessions.
2317
2318In the model, distinct key-exchange sessions are given distinct partners and
2319session-ids. In order to prevent sessions interfering with each other, we
2320shall make explicit use of the session-ids.
2321
2322Suppose the session-ids are $\ell_S$-bit strings. We expand the domain of
2323the random oracle $H_I$ so that it's now
2324\[ H_I\colon G \times \Bin^{\ell_S} \times G \times G \to \Bin^{\ell_I}. \]
2325
2326\subsubsection{Messages}
2327We split the messages of our protocols into two parts: a \emph{type}~$\tau$ and
2328a \emph{body}~$\mu$. We assume some convenient, unambiguous encoding of
2329pairs $(\tau, \mu)$ as bit-strings. For readability, we present message
2330types as text strings, e.g., `\cookie{challenge}', though in practice one
2331could use numerical codes instead.
2332
2333The message body itself may be a tuple of values, which, again, we assume are
2334encoded as bit-strings in some convenient and unambiguous fashion. We shall
2335abuse the notation for the sake of readability by dropping a layer of nesting
2336in this case: for example, we write $(\cookie{hello}, x, y, z)$ rather than
2337$\bigl(\cookie{hello}, (x, y, z)\bigr)$.
2338
2339\subsubsection{The protocol}
2340Our protocol is represented by three functions, shown in
2341figure~\ref{fig:wkx-formal}.
2342\begin{itemize}
2343\item $\id{init}(n)$ is the initialization function, as described in
2344 section~\ref{sec:um}. It outputs a pair $(\mathbf{p}, \mathbf{i})$, where
2345 $\mathbf{i}[i]$ is the private key of party~$P_i$ and $\mathbf{p}[i]$ is
2346 the corresponding public key. Only $P_i$ is given $\mathbf{i}[i]$, whereas
2347 all parties and the adversary are given $\mathbf{p}$.
2348\item $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2349 (\mathbf{p}, x, i, j, s)$ is the new-session function. This is executed by
2350 party~$P_i$ when the adversary decides to create a new session~$S = (P_i,
2351 P_j, s)$. It is also given the relevant outputs of $\id{init}$, and
2352 allowed access to the random oracles $H_I$ and $H_K$.
2353\item $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)} (\tau,
2354 \mu)$ is the incoming-message function. This is executed by a session when
2355 the adversary decides to deliver a message $(\tau, \mu)$ to it. It makes
2356 use of the subsidiary functions $\id{msg-challenge}$ and
2357 $\id{msg-response}$ to handle the messages.
2358\end{itemize}
2359We observe that the protocol never aborts. We could make it do so if it
2360receives an invalid message, but we prefer to ignore invalid messages for the
2361sake of robustness.\footnote{%
2362 Note that this protocol would therefore require modification before it was
2363 acceptable in the simulation-based model of \cite{Shoup:1999:OFM}. There
2364 it is required that a key-exchange protocol terminate after a
2365 polynomially-bounded number of messages are delivered to it.}
2366
2367\begin{figure}
2368 \begin{program}
2369 Function $\id{init}(n)$: \+ \\
2370 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2371 $x \getsr \gf{q}$; \\
a6e375a6
MW
2372 $\mathbf{i}[i] \gets x$; \\
2373 $\mathbf{p}[i] \gets x P$; \- \\
2374 \RETURN $(\mathbf{p}, \mathbf{i})$;
2375 \- \\[\medskipamount]
2376 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2377 (\mathbf{p}, x, i, j, s)$: \+ \\
2378 $X \gets \mathbf{p}[i]$;
2379 $X' \gets \mathbf{p}[j]$;
2380 $C \gets \emptyset$; \\
32609df3 2381 $r \getsr \gf{q}$;
a6e375a6
MW
2382 $R \gets r P$;
2383 $Y \gets r X'$; \\
2384 $h \gets H_I(X, s, R, Y)$;
2385 $c \gets r \xor h$; \\
2386 \SEND $(\cookie{challenge}, R, c)$;
2387 \- \\[\medskipamount]
2388 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2389 (\tau, \mu)$: \+ \\
2390 \IF $\tau = \cookie{challenge}$ \THEN $\id{msg-challenge}(\mu)$; \\
2391 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2392 \next
2393 Function $\id{msg-challenge}(\mu)$: \+ \\
2394 $(R', c') \gets \mu$; \\
2395 $Y' \gets x R'$; \\
2396 $h' \gets H_I(X', s, R', Y')$; \\
2397 $r' \gets c' \xor h'$; \\
2398 \IF $R' \ne r' P$ \THEN \RETURN; \\
2399 $C \gets C \cup \{R'\}$; \\
2400 $Z \gets r R'$; \\
2401 $(K_0, K_1) \gets H_K(Z)$; \\
2402 $\chi \gets E_{K_0}(Y')$; \\
2403 \SEND $(\cookie{response}, R, \chi)$;
2404 \- \\[\medskipamount]
2405 Function $\id{msg-response}(\mu)$: \+ \\
2406 $(R', \chi') \gets \mu$; \\
2407 \IF $R' \notin C$ \THEN \RETURN; \\
2408 $Z \gets r R'$; \\
2409 $(K_0, K_1) \gets H_K(Z)$; \\
2410 $Y' \gets D_{K_0}(\chi')$; \\
2411 \IF $Y' \ne Y$ \THEN \RETURN; \\
2412 \OUTPUT $K_1$;
2413 \STOP;
2414 \end{program}
32609df3 2415
a6e375a6
MW
2416 \caption{Formalization of $\Wkx$}
2417 \label{fig:wkx-formal}
2418\end{figure}
2419
2420\subsubsection{Session states}
2421We must specify what the adversary obtains when it chooses to reveal a
2422session's state. Given the program in figure~\ref{fig:wkx-formal}, we can
2423see that the session state consists of the variables $(x, X, X', r, R, Y,
2424C)$.
2425
2426However, $x$ is the owning party's long-term secret, and it seems
2427unreasonable to disclose this to an adversary who stops short of total
2428corruption.
2429
2430The public keys $X$ and $X'$ are part of the adversary's input anyway, so
2431revealing them doesn't help. Similarly, the set $C$ of valid challenges
2432could have been computed easily by the adversary, since a group element $R'
2433\in C$ if and only if the session $S$ responded to some message
2434$(\cookie{challenge}, R', c')$.
2435
2436The value $R = r P$ is easily computed given $r$, and besides is sent in the
2437clear in the session's first message. The expected response $Y = r X'$ is
2438also easily computed from $r$. The converse is also true, since $r$ can be
2439recovered from $R$ and $c$ in the session's challenge message and the value
2440$Y$. Besides, $r$ is necessary for computing $Z$ in response to incoming
2441challenges.
2442
2443We conclude that the only `interesting' session state is $r$.
2444
2445\subsubsection{Security}
2446Having formally presented the protocol, we can now state our main theorem
32609df3
MW
2447about its security. The proof is given in \ifshort the full version of the
2448paper\else appendix~\ref{sec:sk-proof}\fi.
a6e375a6
MW
2449\begin{theorem}[SK-security of $\Wkx$]
2450 \label{thm:sk}
2451 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2452 encryption scheme. Then
2453 \begin{spliteqn*}
2454 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) \le
dff0fad2
MW
2455 2 q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
2456 \InSec{mcdh}(G; t', q_K) +
2457 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) +
32609df3 2458 \frac{n (n - 1)}{q} +
dff0fad2 2459 \frac{2 q_M}{2^{\ell_I}}.
a6e375a6
MW
2460 \end{spliteqn*}
2461 where $t' = t + O(n) + O(q_S) + O(q_M q_I) + O(q_K)$.
2462\end{theorem}
2463
2464
32609df3 2465\ifshort\else
a6e375a6
MW
2466\subsection{Insecure protocol variants}
2467\label{sec:kx-insecure}
2468
2469It's important to feed the session-id and verifier's public key to the random
2470oracle $H_I$ when constructing the check-value~$c$. Without these, the
2471protocol is vulnerable to attack. In this section, we consider some variants
2472of our protocol which hash less information, and show attacks against them.
2473
2474To simplify the presentation, we shall consider Alice and Bob, and a third
2475character Carol. We shall be attacking a pair of matching sessions $A$
2476and~$B$, run by Alice and Bob respectively. Let Alice and Bob's private keys
2477be $x_A$ and~$x_B$, and let their public keys be $X_A = x_A P$ and $X_B = x_B
2478P$ respectively.
2479
2480\subsubsection{Protocol diagram notation}
2481In order to keep the presentation as clear as possible, we use a simple
2482diagrammatic notation for describing protocol runs. A line of the form
2483\protocolrun{\textit{action} \ar[r] & \PRsession{S} & \textit{result}}
2484states that the adversary performs the given \textit{action} on session~$S$,
2485with the stated \textit{result}. The \textit{action} may be
2486\begin{itemize}
2487\item \textsf{Create session $(P_i, P_j, s)$}: the session is created,
2488 running in party~$P_i$, with partner~$P_j$ and session-id~$s$.
2489\item \textsf{Receive $\mu$}: the session is activated with an incoming message~$\mu$.
2490\item \textsf{Session-state reveal}: The adversary requests the session's
2491 internal state.
2492\end{itemize}
2493The \textit{result} may be
2494\begin{itemize}
2495\item \textsf{Send $\mu'$}: the session requests the delivery of the
2496 message~$\mu'$.
2497\item \textsf{Complete: $K$}: the session completes, outputting the key~$K$.
2498\item \textsf{State: $\sigma$}: the session's state is revealed to
2499 be~$\sigma$.
2500\item \textsf{(Ignore)}: the result of the action is unimportant.
2501\end{itemize}
2502
2503\subsubsection{Omitting the session-id}
2504Consider a protocol variant where session $S$ sets $h_S = H_I(X_N, R_S,
2505Y_S)$, where $N$ is the session's partner. That is, we've omitted the
2506session-id from the hash. An adversary can cross over two sessions between
2507Alice and Bob. Here's how.
2508
2509The attack assumes that Alice and Bob set up two pairs of matching sessions
2510with each other, thus.
2511\protocolrun{
2512 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2513 \PRsend{challenge}{R_A, c_A} \\
2514 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2515 \PRsend{challenge}{R_B, c_B} \\
2516 \PRcreate{Alice}{Bob}{s'} & \PRsession{A'} &
2517 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2518 \PRcreate{Bob}{Alice}{s'} & \PRsession{B'} &
2519 \PRsend{challenge}{R_{B'}, c_{B'}}
2520}
2521Observe that the session pairs use distinct session-ids $s \ne s'$, so this
2522is allowed. Now the adversary crosses over the challenges, using the second
2523pair of sessions to provide responses to the challenges issued by the first
2524pair. By revealing the state in the second pair of sessions, the adversary
2525can work out the (probably different) session keys accepted by the first
2526pair.
2527\protocolrun{
2528 \PRreceive{challenge}{R_B, c_B} & \PRsession{A'} &
2529 \PRsend{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} \\
2530 \PRreceive{challenge}{R_A, c_A} & \PRsession{B'} &
2531 \PRsend{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} \\
2532 \PRreceive{challenge}{R_{A'}, c_{A'}} & \PRsession{A} & \PRignore \\
2533 \PRreceive{challenge}{R_{B'}, c_{B'}} & \PRsession{B} & \PRignore \\
2534 \PRreveal & \PRsession{A'} & r_{A'} \\
2535 \PRreveal & \PRsession{B'} & r_{B'} \\
2536 \PRreceive{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} & \PRsession{A} &
2537 \PRcomplete{K_{AB',1}} \\
2538 \PRreceive{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} & \PRsession{B} &
2539 \PRcomplete{K_{BA',1}} \\
2540}
2541The adversary can now compute $K_{AB'} = H_K(r_{B'} R_A)$ and $K_{B'A} =
2542H_K(r_{A'} R_B)$. Safely in possession of both keys, the adversary can now
2543read and impersonate freely.
2544
2545\subsubsection{Omitting the partner's public key}
2546Now consider a protocol variant where session $S$ sets $h_S = H_I(s, R_S,
2547Y_S)$, where $s$ is the session-id. An adversary can use sessions with
2548Carol to attack a session between Alice and Bob. Here's how the sessions are
2549set up.
2550\protocolrun{
2551 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2552 \PRsend{challenge}{R_A, c_A} \\
2553 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2554 \PRsend{challenge}{R_B, c_B} \\
2555 \PRcreate{Alice}{Carol}{s} & \PRsession{A'} &
2556 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2557 \PRcreate{Bob}{Carol}{s} & \PRsession{B'} &
2558 \PRsend{challenge}{R_{B'}, c_{B'}}
2559}
2560Although each of Alice and Bob have two sessions with session-id~$s$, this is
2561allowed, since they are with different partners. The rest of the attack in
2562fact proceeds identically to the previous case.
32609df3 2563\fi
a6e375a6
MW
2564
2565\subsection{Deniability}
2566\label{sec:denial}
2567
2568We have claimed that the Wrestlers key-exchange protocol is \emph{deniable}.
2569In this section, we define what we mean, explain the limits of the
2570deniablility of the protocol as currently presented, fix the protocol with an
2571additional pass (which can be collapsed to a single message flow by breaking
2572the protocol's symmetry), and prove the deniability of the resulting
2573protocol.
2574
2575Our notion of deniability is taken from Di~Raimondo, Gennaro and Krawczyk
2576\cite{DiRaimondo:2006:DAK}, except that, as usual, we opt for a concrete
2577security approach.
2578
2579\subsubsection{Discussion}
2580Our definition for deniability is that, for any adversary interacting with
2581participants in the protocol, a simulator exists which can compute the same
2582things as the adversary. In particular, since an adversary could output a
2583transcript of the interactions between itself and the parties, it would
2584follow that a simulator could do this too. If the simulator is effective,
2585its output is indistinguishable from that of the real adversary, and hence no
2586`judge' (distinguisher) should be persuaded by evidence presented by someone
2587who claims to have witnessed or participated in an interaction.
2588
2589We work again in the model described in section~\ref{sec:um}. That is, our adversary
2590has complete control over the ordering and delivery of all messages. The
2591adversary is also able, at any time, to reveal the state of any session.
2592However, deniability is obviously impossible against an adversary who can
2593\emph{corrupt} other parties, since simulating such an adversary's actions
2594would necessarily require the simulator to compute private keys corresponding
2595to the known public keys, and this is (we believe) difficult, because an
2596efficient algorithm for doing this could easily attack our protocol, which we
2597already proved secure. Therefore, we forbid the adversary from corrupting
2598parties.
2599
2600In order to allow the adversary to participate in the protocol, rather than
2601merely observing it, we need to give it one or more private keys. We could
2602modify the initialization function \id{init} from figure~\ref{fig:wkx-formal}
2603to give the adversary a private key, but there's an easier way: we can just
2604give the adversary a number of private keys in its auxiliary input.
2605
2606\subsubsection{Definitions}
2607Let $\Pi$ be a key-exchange protocol, in the model described in
2608section~\ref{sec:um}. We use the simulation framework of
2609section~\ref{sec:sim}. We define the initialization function $I_\Pi$ to be
2610the initialization function of $\Pi$, as before, and the corresponding
2611world~$W_\Pi(\iota, \sigma, \tau, \mu)$ is a fairly straightforward mapping
2612of the adversary's possible actions to the simulation model:
2613\begin{itemize}
2614\item The invocation $\cookie{new-session}$ with $\mu = (i, j, s)$ creates a
2615 new session on party~$P_i$, with partner~$P_j$ and session-id~$s$. The
2616 reply $\rho = (\delta, m)$ is a \emph{decision} $\delta \in
2617 \{\cookie{continue}, \cookie{abort}, \cookie{complete}\}$ and an output
2618 message $m \in \Bin^* \cup \{\bot\}$. If $m \ne \bot$ then $m$ is a
2619 message to be sent to the matching session (if any).
2620\item The invocation $\cookie{deliver}$ with $\mu = (i, j, s, m)$ delivers
2621 message $m$ to the session $S = (P_i, P_j, s)$. The response $\rho$ is as
2622 for $\cookie{new-session}$ invocations.
2623\item The invocation $\cookie{reveal-session-state}$ with $\mu = (i, j, s)$
2624 reveals to the adversary the state of the running session $S = (P_i, P_j,
2625 s)$. The response $\rho$ is the session's state if $S$ is indeed a running
2626 session, or $\bot$ otherwise.
2627\item The invocation $\cookie{reveal-session-key}$ with $\mu = (i, j, s)$
2628 reveals to the adversary the session-key of the completed session~$S =
2629 (P_i, P_j, s)$. The response $\rho$ is the session key~$K$ if the session
2630 is indeed complete, or $\bot$ otherwise.
2631\end{itemize}
2632There are no invocations corresponding to the adversary actions of corrupting
2633parties (since deniability against a corrupting adversary is impossible, as
2634discussed earlier), or session expiry or challenging (since they are useless
2635in this context).
2636
2637We measure the deniability of a protocol~$\Pi$, using a given simulator~$S$,
2638by the insecurity function $\InSec{sim}(W_\Pi, I_\Pi, S; t_D, t_A,
2639\mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U})$ of
2640definition~\ref{def:sim}. The interaction bounds $\mathcal{R} = (q_S, q_M)$
2641we place on the adversary are on the number ($q_S$) of \cookie{new-session}
2642and ($q_M$) \cookie{deliver} invocations it makes.
2643
2644We shall (informally) say that a protocol~$\Pi$ is deniable if there is a
2645simulator~$S_\Pi$ for which the insecurity function is small for appropriate
2646resource bounds on the adversary and distinguisher.
2647
2648\subsubsection{The current protocol}
2649As it stands, $\Wkx$ isn't deniable, according to our definition, for
2650arbitrary auxiliary inputs. Let's see why.
2651
2652Suppose that Bob is an informant for the secret police, and wants to convince
2653a judge that Alice is involved in subversive activities in cyberspace.
2654Unfortunately, Bob's job is difficult, because of the strong zero-knowledge
2655nature of the Wrestlers identification protocol. However, Bob can work with
2656the judge to bring him the evidence necessary to convict Alice. Here's how.
2657
2658Alice's public key is $A$, and Bob's public key is $B$. The judge chooses
32609df3 2659some session-id $s$, and $r \inr \gf{q}$. He computes $R = r P$ and $c =
a6e375a6
MW
2660r \xor H_I(B, s, R, r A)$, and gives Bob the triple $(s, R, c)$, keeping $r$
2661secret. Bob can now persuade Alice to enter into a key-exchange with him,
2662with session-id $s$. He uses $(R, c)$ as his challenge message. When Alice
2663sends back her response $(R', \chi)$ (because Bob's challenge is correctly
2664formed), Bob aborts and shows $(R', \chi)$ to the judge. The judge knows $r$
2665and can therefore decrypt $\chi$ and check the response. If it's wrong, then
2666Bob was cheating and gets demoted -- he can't get the right answer by himself
2667because that would require him to impersonate Alice. If it's right, Alice is
2668really a subversive element and `disappears' in the night.
2669
2670We shall show in theorem~\ref{thm:denial} below that this is basically the
2671only attack against the deniability of the protocol. However, we can do
2672better.
2673
2674\subsubsection{Fixing deniability}
2675We can fix the protocol to remove even the attack discussed above. The
2676change is simple: we feed \emph{both} parties' challenges to the hash
2677function~$H_I$ rather than just the sender's. We use a five-argument hash
2678function (random oracle) $H_I\colon G^2 \times \Bin^{\ell_S} \times G^2 \to
2679\Bin^{\ell_I}$. We introduce a new message pass at the beginning of the
2680protocol: each session simply sends its challenge point $R = r P$ in the
2681clear as a `pre-challenge'. The actual challenge is $R$ and $c = r \xor
2682H_I(R', X, s, R, r X')$, where $R'$ is the challenge of the matching session
and $X'$ is the partner's public key.
2683
2684By breaking symmetry, we can reduce the communication complexity of this
2685variant to four messages. As before, we analyse the symmetrical version.
2686The extra flow might seem a high price to pay, but we shall see that it has
2687additional benefits beyond deniability.
2688
2689A summary of the new protocol is shown in figure~\ref{fig:wdkx}, and the
2690formal description is shown in figure~\ref{fig:wdkx-formal}.
2691
2692\begin{figure}
2693 \begin{description}
32609df3 2694 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2695 $H_I(\cdot, \cdot, \cdot, \cdot, \cdot)$ and $H_K(\cdot)$ are secure
2696 hashes. $\E = (\kappa, E, D)$ is an IND-CCA2 symmetric encryption
2697 scheme.
2698 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2699 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2700 \item[Public keys] $X_i = x_i P$.
2701 \end{description}
32609df3
MW
2702
2703 \begin{protocol}
2704 $r_i \getsr I$; $R_i \gets r_i P$; &
2705 $r_j \getsr I$; $R_j \gets r_j P$; \\
2706 \send{->}{R_i}
2707 \send{<-}{R_j}
2708 $c_i \gets r_i \xor H_I(R_j, X_i, s, R_i, r_i X_j)$; &
2709 $c_j \gets r_j \xor H_I(R_i, X_j, s, R_j, r_j X_i)$; \\
2710 \send{->}{(R_i, c_i)}
2711 \send{<-}{(R_j, c_j)}
2712 Check $R_j = \bigl(c_j \xor H_I(R_i, X_j, s, R_j, x_i R_j)\bigr) P$; &
2713 Check $R_i = \bigl(c_i \xor H_I(R_j, X_i, s, R_i, x_j R_i)\bigr) P$; \\
2714 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2715 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2716 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2717 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2718 \send{->}{(R_i, \chi_i)}
2719 \send{<-}{(R_j, \chi_j)}
2720 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2721 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2722 Shared key is $K_1$. & Shared key is $K_1$.
2723 \end{protocol}
a6e375a6
MW
2724
2725 \caption{Summary of the Deniable Wrestlers Key Exchange protocol, $\Wdkx$}
2726 \label{fig:wdkx}
2727\end{figure}
2728
2729\begin{figure}
2730 \begin{program}
2731 Function $\id{init}(n)$: \+ \\
2732 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2733 $x \getsr \gf{q}$; \\
a6e375a6
MW
2734 $\mathbf{i}[i] \gets x$; \\
2735 $\mathbf{p}[i] \gets x P$; \- \\
2736 \RETURN $(\mathbf{p}, \mathbf{i})$;
2737 \- \\[\medskipamount]
2738 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2739 (\mathbf{p}, x, i, j, s)$: \+ \\
2740 $X \gets \mathbf{p}[i]$;
2741 $X' \gets \mathbf{p}[j]$;
2742 $C \gets \emptyset$; \\
32609df3 2743 $r \getsr \gf{q}$;
a6e375a6
MW
2744 $R \gets r P$;
2745 $Y \gets r X'$; \\
2746 \SEND $(\cookie{pre-challenge}, R)$;
2747 \- \\[\medskipamount]
2748 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2749 (\tau, \mu)$: \+ \\
2750 \IF $\tau = \cookie{pre-challenge}$ \THEN
2751 $\id{msg-pre-challenge}(\mu)$; \\
2752 \ELSE \IF $\tau = \cookie{challenge}$ \THEN
2753 $\id{msg-challenge}(\mu)$; \\
2754 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2755 \next
2756 Function $\id{msg-pre-challenge}(\mu)$: \+ \\
2757 $R' \gets \mu$; \\
2758 $h \gets H_I(R', X, s, R, Y)$; \\
2759 $c \gets r \xor h$; \\
2760 \SEND $(\cookie{challenge}, R, c)$;
2761 \- \\[\medskipamount]
2762 Function $\id{msg-challenge}(\mu)$: \+ \\
2763 $(R', c') \gets \mu$; \\
2764 $Y' \gets x R'$; \\
2765 $h' \gets H_I(R, X', s, R', Y')$; \\
2766 $r' \gets c' \xor h'$; \\
2767 \IF $R' \ne r' P$ \THEN \RETURN; \\
2768 $C \gets C \cup \{R'\}$; \\
2769 $Z \gets r R'$; \\
2770 $(K_0, K_1) \gets H_K(Z)$; \\
2771 $\chi \gets E_{K_0}(Y')$; \\
2772 \SEND $(\cookie{response}, R, \chi)$;
2773 \- \\[\medskipamount]
2774 Function $\id{msg-response}(\mu)$: \+ \\
2775 $(R', \chi') \gets \mu$; \\
2776 \IF $R' \notin C$ \THEN \RETURN; \\
2777 $Z \gets r R'$; \\
2778 $(K_0, K_1) \gets H_K(Z)$; \\
2779 $Y' \gets D_{K_0}(\chi')$; \\
2780 \IF $Y' \ne Y$ \THEN \RETURN; \\
2781 \OUTPUT $K_1$;
2782 \STOP;
2783 \end{program}
32609df3 2784
a6e375a6
MW
2785 \caption{Deniable key-exchange: formalization of $\Wdkx$}
2786 \label{fig:wdkx-formal}
2787\end{figure}
2788
2789The security of this variant is given by the following theorem, whose proof
32609df3
MW
2790is \ifshort given in the full version of this paper\else in
2791appendix~\ref{sec:sk2-proof}\fi.
a6e375a6
MW
2792\begin{theorem}[SK-security of $\Wdkx$]
2793 \label{thm:sk2}
2794 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2795 encryption scheme. Then
2796 \[ \InSec{sk}(\Wdkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) =
2797 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K)
2798 \]
2799\end{theorem}
2800
2801\subsubsection{Deniability of the Wrestlers protocols}
2802In order to quantify the level of deniability our protocols provide, we shall
2803impose a limit on the auxiliary input to the adversary. In particular, we
2804shall use $\mathcal{U}$ of definition~\ref{def:sim} to count the number of
2805\emph{challenges} in the auxiliary input. That is, $\mathcal{U} = n_C$ is
2806the number of tuples $(i, j, s, R', R, c)$ for which there is an $r$ such
2807that $R = r P$ and $c = r \xor H_I(R', X_j, s, R, r X_i)$ (or without the
2808$R'$ for $\Wkx$).
2809
2810With this restriction in place, we can state the following theorem about the
2811deniability of our protocols.
2812\begin{theorem}[Deniability of $\Wkx$ and $\Wdkx$]
2813 \label{thm:denial}
2814 There exist simulators $S_\Wkx$ and $S_\Wdkx$ such that
2815 \[ \InSec{sim}(W_{\Wkx^{G, \E}}, I_{\Wkx^{G, \E}}, S_{\Wkx^{G, \E}};
2816 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), 0) \le
2817 \frac{q_M}{2^{\ell_I}}
2818 \]
2819 and
32609df3
MW
2820 \iffancystyle\[\else\begin{spliteqn*}\fi
2821 \InSec{sim}(W_{\Wdkx^{G, \E}}, I_{\Wdkx^{G, \E}}, S_{\Wdkx^{G, \E}};
2822 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), n_C) \le
2823 \iffancystyle\else\\\fi
2824 \frac{n_C q_S}{\#G} +
a6e375a6 2825 \frac{q_M}{2^{\ell_I}}.
32609df3 2826 \iffancystyle\]\else\end{spliteqn*}\fi
a6e375a6
MW
2827 The running time of the simulators is $O(t_A) + O(\mathcal{Q}_A q_M)$.
2828\end{theorem}
32609df3
MW
2829\begin{longproof}{The proof of this theorem can be found in the full version
2830 of this paper.}
a6e375a6
MW
2831 The simulators $S_\Wkx$ and $S_\Wdkx$ are very similar. We describe both
2832 here. Both are fake-world simulators, working as follows.
2833 \begin{enumerate}
2834 \item Initially, it constructs simulated parties $P_i$, for $0 \le i < n$,
2835 giving each the public key $X_i$ from the common input.
2836 \item Suppose the adversary requests creation of a new session $S = (P_i,
2837 P_j, s)$. Then the simulator creates a new session, including a random
32609df3 2838 value $r_S \inr \gf{q}$, and computes $R_S = r_S P$, and $Y_S = r_S
a6e375a6
MW
2839 X_j$. For $\Wdkx$, it sends the message $(\cookie{pre-challenge}, R_S)$;
2840 for $\Wkx$, it additionally computes $h = H_I(X_i, s, R_S, Y_S)$ and
2841 sends $(\cookie{challenge}, R_S, r_S \xor h)$.
2842 \item Suppose, for $\Wdkx$, the adversary sends a message
2843 $(\cookie{pre-challenge}, R')$ to a session~$S = (P_i, P_j, s)$. The
2844 simulator computes $h = H_I(R', X_i, s, R_S, Y_S)$, and sends
2845 $(\cookie{challenge}, R_S, r_S \xor h)$.
2846 \item Suppose the adversary sends a message $(\cookie{challenge}, R', c')$
2847 to session $S = (P_i, P_j, s)$. The simulator doesn't know $x_i$.
2848 \begin{enumerate}
2849 \item If $R' = R_T$ for some other simulated session $T$, then the
2850 simulator knows $r_T$ such that $R_T = r_T P$. Let $Y' = r_T X_i$.
2851 The simulator computes $h = H_I(X_j, s, R', Y')$ (resp.\ $h = H_I(R_S,
2852 X_j, s, R', Y')$) for $\Wkx$ (resp.\ $\Wdkx$) and checks that $r_T = c'
2853 \xor h$. If not, the simulator discards the message. Otherwise, it
2854 computes $(K_0, K_1) = H_K(r_S R')$, and sends the message
2855 $(\cookie{response}, R_S, E_{K_0}(Y'))$.
2856 \item \label{en:simextract} Otherwise the simulator runs the extractor
2857 $T_\Wident$ on the adversary's history of queries $H_I(X_j, s, R',
2858 \cdot)$ (resp.\ $H_I(R_S, X_j, s, R', \cdot)$) for $\Wkx$ (resp.\
2859 $\Wdkx$). The extractor returns $(r', Y')$. If $Y' = \bot$ then the
2860 simulator ignores the message. Otherwise, the simulator computes
2861 $(K_0, K_1) = H_K(r_S R')$ and sends back $(\cookie{response}, R_S,
2862 E_{K_0}(Y'))$.
2863 \end{enumerate}
2864 \item Suppose the adversary sends a message $(\cookie{response}, R', \chi)$
2865 to session $S = (P_i, P_j, s)$. The simulator computes $(K_0, K_1) =
2866 H_K(r_S R')$, and decrypts $Y' = D_{K_0}(\chi)$. If $Y' \ne Y_S$ then
2867 the simulator discards the message. Otherwise, it makes the simulated
2868 session complete, and outputs key $K_1$.
2869 \item Finally, if the adversary reveals a session's state, the simulator
2870 reveals $r_S$ as required; if the adversary reveals a session-key, the
2871 simulator reveals the $K_1$ it output.
2872 \end{enumerate}
2873 The only point where the simulation fails to be perfect is in
2874 \ref{en:simextract}. Let $R'$ and $c'$ be the values from an incoming
2875 challenge message to session $S = (P_i, P_j, s)$. Let $r'$ be such that
2876 $R' = r' P$ and let $Y' = r' X_i$. If a random-oracle query $H_I(X_j, s,
2877 R', Y')$ (or $H_I(R_S, X_j, s, R', Y')$ for $\Wdkx$) has been issued, then
2878 there are a number of possibilities. Let $h'$ be the result of this query.
2879 \begin{itemize}
2880 \item The adversary made this query. Then the extractor will find it and
2881 return $Y'$ if $c' = h' \xor r'$, or $\bot$ otherwise.
2882 \item Some simulated session $U = (P_{i'}, P_{j'}, s')$ made this query.
2883 But simulated sessions only make $H_I$-queries when constructing
2884 challenges, so $R' = R_U$ for some session~$U$. But the simulator does
2885 something different in that case.
2886 \item In $\Wdkx$, the quadruple $(s, R_S, R', c')$ came from the
2887 adversary's auxiliary input. In this case the simulator must fail. But
2888 $R_S = r_S P$, and $r_S$ was chosen uniformly at random. If there are at
2889 most $n_C$ challenge sets in the auxiliary input then this happens with
32609df3 2890 probability at most $n_C/\#G$ for any given session.
a6e375a6
MW
2891 \end{itemize}
2892 We conclude that the simulator fails with probability
32609df3 2893 \[ \frac{q_M}{2^{\ell_I}} + \frac{q_S n_C}{\#G}. \]
a6e375a6
MW
2894 (Note that we only consider $n_C = 0$ for $\Wkx$.) No adversary can
2895 distinguish the simulator from a real interaction unless the simulator
2896 fails, and the simulator is a fake-world simulator. We therefore apply
2897 proposition~\ref{prop:fakesim}; the theorem follows.
32609df3 2898\end{longproof}
a6e375a6 2899
32609df3 2900\ifshort\else
a6e375a6
MW
2901\subsection{Practical issues}
2902\label{sec:practice}
2903
2904\subsubsection{Denial of service from spoofers}
2905The adversary we considered in~\ref{sec:um} is very powerful. Proving
2906security against such a powerful adversary is good and useful. However,
2907there are other useful security properties we might like against weaker
2908adversaries.
2909
2910Eavesdropping on the Internet is actually nontrivial. One needs control of
2911one of the intermediate routers between two communicating parties. (There
2912are tricks one can play involving subversion of DNS servers, but this is also
2913nontrivial.) However, injecting packets with bogus source addresses is very
2914easy.
2915
dff0fad2 2916Layering the protocol over TCP \cite{RFC0793} ameliorates this problem because
a6e375a6
MW
2917an adversary needs to guess or eavesdrop in order to obtain the correct
2918sequence numbers for a spoofed packet; but the Wrestlers protocol is
2919sufficiently simple that we'd like to be able to run it over UDP
dff0fad2 2920\cite{RFC0768}, for which spoofing is trivial.
a6e375a6
MW
2921
2922Therefore, it's possible for anyone on the 'net to send Alice a spurious
2923challenge message $(R, c)$. She will then compute $Y = a R$, recover $r' = c
2924\xor H_I(\ldots, R, Y)$, check that $R = r' P$, and so on. That's at least two
2925scalar multiplications to respond to a spoofed packet, and even with very
2926efficient group operations, coping with this kind of simple denial-of-service
2927attack might be difficult.
2928
2929A straightforward solution is to use the Deniable variant of the protocol,
2930and require a challenge to quote its matching session's challenge $R'$ in its
2931challenge. That is, upon receiving a $(\cookie{pre-challenge}, R')$, the
2932session sends $(\cookie{challenge}, R', R, c)$. Alice then rejects any
2933\emph{incoming} challenge message which doesn't quote her current challenge
2934value. Now only eavesdroppers can force her to perform expensive
2935computations.
2936
2937Indeed, one need not quote the entire challenge $R'$: it suffices to send
2938some short hard-to-guess hash of it, maybe just the bottom 128 bits or so.
2939
2940This can't reduce security. Consider any adversary attacking this protocol
2941variant. We can construct an adversary which attacks the original protocol
2942just as efficiently. The new adversary attaches fake $R'$ values to
2943challenges output by other parties, and strips them off on delivery,
2944discarding messages with incorrect $R'$ values.
2945
a6e375a6
MW
2946\subsubsection{Key confirmation}
2947Consider an application which uses the Wrestlers protocol to re-exchange keys
2948periodically. The application can be willing to \emph{receive} incoming
2949messages using the new key as soon as the key exchange completes
2950successfully; however, it should refrain from \emph{sending} messages under
2951the new key until it knows that its partner has also completed. The partner
2952may not have received the final response message, and therefore be unwilling
2953to accept a new key; it will therefore (presumably) reject incoming messages
2954under this new key.
2955
2956While key confirmation is unnecessary for \emph{security}, it has
2957\emph{practical} value, since it solves the above problem. If the
2958application sends a \cookie{switch} message when it `completes', it can
2959signal its partner that it is indeed ready to accept messages under the new
dff0fad2
MW
2960key. Our implementation sends $(\cookie{switch-rq}, E_{K_0}(H_S(0, R, R')))$
2961as its switch message; the exact contents aren't important. Our
2962retransmission policy (below) makes use of an additional message
2963\cookie{switch-ok}, which can be defined similarly.
2964
2965It's not hard to show that this doesn't adversely affect the security of the
2966protocol, since the encrypted message is computed only from public values.
2967In the security proof, we modify the generation of \cookie{response}
2968messages, so that the plaintexts are a constant string rather than the true
2969responses, guaranteeing that the messages give no information about the
2970actual response. To show this is unlikely to matter, we present an adversary
2971attacking the encryption scheme by encrypting either genuine responses or
2972fake constant strings. Since the adversary can't distinguish which is being
2973encrypted (by the definition of IND-CCA security,
2974definition~\ref{def:ind-cca}), the change goes unnoticed. In order to
2975incorporate our switch messages, we need only modify this adversary, to
2976implement the modified protocol. This is certainly possible, since the
2977messages contain (hashes of) public values. We omit the details.
2978
d8139f2c
MW
2979However, while the extra message doesn't affect the security of our protocol,
2980it would be annoying if an adversary could forge the switch request message,
2981since this would be a denial of service. In the strong adversarial model,
2982this doesn't matter, since the adversary can deny service anyway, but it's a
2983concern against less powerful adversaries. Most IND-CCA symmetric encryption
2984schemes also provide integrity of plaintexts \cite{Bellare:2000:AER} (e.g.,
2985the encrypt-then-MAC generic composition approach \cite{Bellare:2000:AER,%
2986Krawczyk:2001:OEA}, and the authenticated-encryption modes of
2987\cite{Rogaway:2003:OCB,Bellare:2004:EAX,McGrew:2004:SPG}), so this isn't a
2988great imposition.
2989
dff0fad2
MW
2990\subsubsection{Optimization and piggybacking}
2991We can optimize the number of messages sent by combining them. Here's one
2992possible collection of combined messages:
2993\begin{description}
2994\item [\cookie{pre-challenge}] $R$
2995\item [\cookie{challenge}] $R'$, $R$, $c = H_I(R', X, s, R, r X') \xor r$
2996\item [\cookie{response}] $R'$, $R$, $c$, $E_{K_0}(x R')$
2997\item [\cookie{switch}] $R'$, $E_{K_0}(x R', H_S(0, R, R'))$
2998\item [\cookie{switch-ok}] $R'$, $E_{K_0}(H_S(1, R, R'))$
2999\end{description}
3000The combination is safe:
3001\begin{itemize}
3002\item the \cookie{switch} and \cookie{switch-ok} messages are safe by the
3003 argument above; and
3004\item the other recombinations can be performed and undone in a `black box'
3005 way, by an appropriately defined SK-security adversary.
3006\end{itemize}
a6e375a6
MW
3007
3008\subsubsection{Unreliable transports}
dff0fad2 3009The Internet UDP \cite{RFC0768} is a simple, unreliable protocol for
a6e375a6
MW
3010transmitting datagrams. However, it's very efficient, and rather attractive
3011as a transport for datagram-based applications such as virtual private
dff0fad2
MW
3012networks (VPNs). Since UDP is a best-effort rather than a reliable
3013transport, it can occasionally drop packets. Therefore it is necessary for a
3014UDP application to be able to retransmit messages which appear to have been
3015lost.
a6e375a6 3016
dff0fad2
MW
3017We recommend the following simple retransmission policy for running the
3018Wrestlers protocol over UDP.
3019\begin{itemize}
3020\item Initially, send out the \cookie{pre-challenge} message every minute.
3021\item On receipt of a \cookie{pre-challenge} message, send the corresponding
3022 full \cookie{challenge}, but don't retain any state.
3023\item On receipt of a (valid) \cookie{challenge}, record the challenge value
3024 $R'$ in a table, together with $K = (K_0, K_1)$ and the response $Y' = x
3025 R'$. If the table is full, overwrite an existing entry at random. Send
3026 the corresponding \cookie{response} message, and retransmit it every ten
3027 seconds or so.
3028\item On receipt of a (valid) \cookie{response}, discard any other
3029 challenges, and stop sending \cookie{pre-challenge} and \cookie{response}
3030 retransmits. At this point, the basic protocol described above would
3031 \emph{accept}, so the key $K_1$ is known to be good. Send the
3032 \cookie{switch} message, including its response to the (now known-good)
3033 sender's challenge.
3034\item On receipt of a (valid) \cookie{switch}, send back a \cookie{switch-ok}
3035 message and stop retransmissions. It is now safe to start sending messages
3036 under $K_1$.
3037\item On receipt of a (valid) \cookie{switch-ok}, stop retransmissions. It
3038 is now safe to start sending messages under $K_1$.
3039\end{itemize}
a6e375a6
MW
3040
3041\subsubsection{Key reuse}
dff0fad2
MW
3042Suppose our symmetric encryption scheme $\E$ is not only IND-CCA secure
3043(definition~\ref{def:ind-cca}) but also provides integrity of plaintexts
d8139f2c
MW
3044\cite{Bellare:2000:AER} (or, alternatively, is an AEAD scheme
3045\cite{Rogaway:2002:AEA}). Then we can use it to construct a secure channel,
3046by including message type and sequence number fields in the plaintexts, along
3047with the message body. If we do this, we can actually get away with just the
3048one key $K = H_K(Z)$ rather than both $K_0$ and $K_1$.
dff0fad2
MW
3049
3050To do this, it is essential that the plaintext messages (or additional data)
3051clearly distinguish between the messages sent as part of the key-exchange
3052protocol and messages sent over the `secure channel'. Otherwise, there is a
d8139f2c
MW
3053protocol-interference attack: an adversary can replay key-exchange
3054ciphertexts to insert the corresponding plaintexts into the channel.
dff0fad2
MW
3055
3056We offer a sketch proof of this claim in appendix~\ref{sec:sc-proof}.
32609df3 3057\fi
dff0fad2
MW
3058
3059%%%--------------------------------------------------------------------------
a6e375a6 3060
dff0fad2
MW
3061\section{Conclusions}
3062\label{sec:conc}
3063
3064We have presented new protocols for identification and authenticated
3065key-exchange, and proven their security. We have shown them to be efficient
3066and simple. We have also shown that our key-exchange protocol is deniable.
3067Finally, we have shown how to modify the key-exchange protocol for practical
3068use, and proven that this modification is still secure.
a6e375a6
MW
3069
3070%%%--------------------------------------------------------------------------
3071
3072\section{Acknowledgements}
3073
3074The Wrestlers Protocol is named after the Wrestlers pub in Cambridge where
3075Clive Jones and I worked out the initial design.
3076
3077%%%--------------------------------------------------------------------------
3078
d8139f2c 3079\bibliography{mdw-crypto,cryptography2000,cryptography,rfc,std}
a6e375a6
MW
3080
3081%%%--------------------------------------------------------------------------
3082
32609df3 3083\ifshort\def\next{\end{document}}\expandafter\next\fi
a6e375a6
MW
3084\appendix
3085\section{Proofs}
3086
3087\subsection{Proof of theorem~\ref{thm:sk}}
3088\label{sec:sk-proof}
3089
3090Before we embark on the proof proper, let us settle on some notation. Let
3091$P_i$ be a party. Then we write $x_i$ for $P_i$'s private key and $X_i = x_i
3092P$ is $P_i$'s public key. Let $S = (P_i, P_j, s)$ be a session. We write
3093$r_S$ for the random value chosen at the start of the session, and $R_S$,
3094$c_S$ etc.\ are the corresponding derived values in that session.
3095
a6e375a6
MW
3096The proof uses a sequence of games. For each game~$\G{i}$, let $V_i$ be the
3097event that some pair of unexposed, matching sessions both complete but output
3098different keys, and let $W_i$ be the event that the adversary's final output
3099equals the game's hidden bit~$b^*$. To save on repetition, let us write
3100\[ \diff{i}{j} = \max(|\Pr[V_i] - \Pr[V_j]|, |\Pr[W_i] - \Pr[W_j]|). \]
3101Obviously,
3102\[ \diff{i}{j} \le \sum_{i\le k<j} \diff{k}{k + 1}. \]
3103
3104Here's a quick overview of the games we use.
3105\begin{itemize}
3106\item $\G0$ is the original SK-security game.
3107\item In $\G1$, we abort the game unless all parties' public keys are
3108 distinct. Since keys are generated at random, parties are unlikely to be
3109 given the same key by accident.
3110\item In $\G2$, we change the way sessions respond to challenge messages, by
3111 using the extractor to fake up answers to most challenges. Since the
3112 extractor is good, the adversary is unlikely to notice.
3113\item In $\G3$, we abort the game if the adversary ever queries $H_K(\cdot)$
3114 on the Diffie-Hellman secret $r_S r_T P$ shared between two unexposed
3115 matching sessions. We show that this is unlikely to happen if the
3116 Diffie-Hellman problem is hard.
3117\item In $\G4$, we abort the game if any unexposed session \emph{accepts} a
3118 response message which wasn't sent by a matching session.
3119\end{itemize}
3120Finally, we show that the adversary has no advantage in $\G4$. The theorem
3121follows.
3122
3123For ease of comprehension, we postpone the detailed proofs of some of the
3124steps until after we conclude the main proof.
3125
3126Let $A$ be a given adversary which runs in time~$t$, creates at most~$q_S$
3127sessions, delivers at most~$q_M$ messages, and makes at most~$q_I$ queries to
3128its $H_I(\cdot, \cdot, \cdot, \cdot)$ oracle and at most~$q_K$ queries to its
3129$H_K(\cdot)$ oracle. Let $\G0$ be the original SK-security game of
3130definition~\ref{def:sk}, played with adversary~$A$.
3131
3132Game~$\G1$ is the same as game~$\G0$ except, if the initialization function
3133reports two parties as having the same public key (i.e., we have $X_i =
3134X_j$ for some $0 \le i < j < n$), we stop the game immediately and without
3135crediting the adversary with a win. This only happens when the corresponding
3136private keys are equal, i.e., $x_i = x_j$, and since the initialization
3137function chooses private keys uniformly at random, this happens with
32609df3 3138probability at most $\binom{n}{2}/\#G$. Since if this doesn't happen, the
a6e375a6
MW
3139game is identical to $\G0$, we can apply lemma~\ref{lem:shoup}, and see that
3140\begin{equation}
3141 \label{eq:sk-g0-g1}
32609df3 3142 \diff{0}{1} \le \frac{1}{\#G} \binom{n}{2} = \frac{n (n - 1)}{2 \#G}.
a6e375a6
MW
3143\end{equation}
3144In game~$\G1$ and onwards, we can assume that public keys for distinct
dff0fad2
MW
3145parties are themselves distinct. Note that the game now takes at most
3146$O(q_I)$ times longer to process each message delivered by the adversary.
3147This is where the $O(q_I q_M)$ term comes from in the theorem statement.
a6e375a6
MW
3148
3149Game~$\G2$ is the same as game~$\G1$, except that we change the way that we
3150make parties respond to \cookie{challenge} messages $(\cookie{challenge}, R,
3151c)$. Specifically, suppose that $S = (P_i, P_j, s)$ is a session.
3152\begin{itemize}
3153\item Suppose $T = (P_j, P_i, s)$ is the matching session of $S$. The game
3154 proceeds as before if $(R, c) = (R_T, c_T)$ is the challenge issued by $T$.
3155\item Otherwise, we run the extractor $T_\Wident$ on the adversary's history
3156 so far of oracle queries $H_I(X_i, s, R, \cdot)$ to determine a pair $(r,
3157 Y)$. If $r = \bot$ then we discard the message. Otherwise, we add $R$ to
3158 the list~$C$, and return a fake response to the adversary by computing $K =
3159 H_K(r R_S)$ and handing the adversary $(\cookie{response}, R_S, E_K(Y))$.
3160\end{itemize}
3161The following lemma shows how this affects the adversary's probabilities of
3162winning.
3163\begin{lemma}
3164 \label{lem:sk-g1-g2}
3165 \begin{equation}
3166 \label{eq:sk-g1-g2}
3167 \diff{1}{2} \le \frac{q_M}{2^{\ell_I}}.
3168 \end{equation}
3169\end{lemma}
3170
3171Let us say that a session $S = (P_i, P_j, s)$ is \emph{ripe} if
3172\begin{itemize}
3173\item there is a matching session $T = (P_j, P_i, s)$, and
3174\item $S$ is unexposed.
3175\end{itemize}
3176Suppose that $S$ is a ripe session, and that it has a matching session~$T$:
3177let $Z_S = Z_T = r_S r_T P$.
3178
3179Game~$\G3$ is the same as $\G2$, except that the game is immediately aborted
3180if ever the adversary queries its random oracle $H_K(\cdot)$ at a value $Z_S$
3181for any ripe session~$S$. The following lemma shows how this affects the
3182adversary's probabilities of winning.
3183\begin{lemma}
3184 \label{lem:sk-g2-g3}
dff0fad2 3185 For some $t'$ within the bounds given in the theorem statement we have
a6e375a6
MW
3186 \begin{equation}
3187 \label{eq:sk-g2-g3}
3188 \diff{2}{3} \le q_S \InSec{mcdh}(G; t', q_K).
3189 \end{equation}
3190\end{lemma}
3191
3192Game~$\G4$ is the same as $\G3$ except that the game is immediately aborted
3193if ever the adversary sends a response message to a ripe session~$S$ which
3194wasn't output by its matching session as a response to $S$'s challenge, with
3195the result that $S$ completes.
3196
3197Let's make this more precise. Let $U$ and $T$ be a pair of matching
3198sessions. Let $C_U = (\cookie{challenge}, R_U, c_U)$ be the challenge message
3199sent by $U$. Let $M_T$ be the set of messages which $T$ has sent upon
3200delivery of $C_U$. Then, in $\G4$, we abort the game if, for any pair $S$
3201and~$T$ of matching, unexposed sessions, $S$ has completed as a result of
3202being sent a message $\mu \notin M_T$. We have the following lemma.
3203\begin{lemma}
3204 \label{lem:sk-g3-g4}
dff0fad2 3205 For a $t'$ within the stated bounds, we have
a6e375a6
MW
3206 \begin{equation}
3207 \label{eq:sk-g3-g4}
3208 \diff{3}{4} \le q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) +
3209 n \cdot \InSec{mcdh}(G; t', q_M + q_I) \bigr)
3210 \end{equation}
3211\end{lemma}
3212
3213Finally, let us consider the state we're in with $\G4$.
3214\begin{itemize}
3215\item No ripe session completes except as a result of the adversary faithfully
3216 delivering messages between it and its matching session.
3217\item The adversary never queries $Z_S$ for any ripe session~$S$. If we set
3218 $K_S = (K_{S, 0}, K_{S, 1}) = H_K(Z_S)$, then $K_{S, 1}$ is the key output
3219 by $S$ when it completes.
3220\item If $S$ and $T$ are matching ripe sessions, then $K_S = K_T$, since $Z_S
3221 = r_S R_T = r_T R_S = Z_T$.
3222\item For any ripe session~$S$, $K_{S, 1}$ is uniformly distributed in
3223 $\Bin^\kappa$ and independent of the adversary's view.
3224\item If $S = (P_i, P_j, s)$ and $T = (P_j, P_i, s)$ are matching ripe
3225 sessions, then $Z_S$ depends only on $r_S$ and $r_T$. Hence, once $S$ and~$T$
3226 complete, and erase their states, $Z_S$ is independent of everything except
3227 the messages sent between the two sessions. In particular, $Z_S$ is
3228 independent of the long-term secrets $x_i$ and~$x_j$, so if either player
3229 is later corrupted, the key $K_{S, 1}$ remains independent of the
3230 adversary's view.
3231\item Hence, the keys output by unexposed sessions are indistinguishable from
3232 freshly-generated random strings, and remain so indefinitely.
3233\end{itemize}
3234We conclude that, for any adversary $A$,
3235\begin{equation}
3236 \label{eq:sk-g4}
3237 \Pr[V_4] = 0 \qquad \text{and} \qquad \Pr[W_4] = \frac{1}{2}.
3238\end{equation}
3239Putting equations~\ref{eq:sk-g0-g1}--\ref{eq:sk-g4} together, we find
32609df3 3240\begingroup \splitright=4em minus 4em
a6e375a6 3241\begin{spliteqn}
dff0fad2
MW
3242 \Adv{sk}{\Wident^{G, \E}}(A) \le
3243 2 q_S \bigl(\InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
3244 \InSec{mcdh}(G; t', q_K) +
3245 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) + {}
32609df3 3246 \frac{n (n - 1)}{\#G} +
a6e375a6 3247 \frac{2 q_M}{2^{\ell_I}}.
32609df3 3248\end{spliteqn} \endgroup
a6e375a6
MW
3249The theorem follows, since $A$ was chosen arbitrarily.
3250
3251
3252\begin{proof}[Proof of lemma~\ref{lem:sk-g1-g2}]
3253 The two games $\G1$ and~$\G2$ differ only in whether they accept or reject
3254 particular challenge messages $(\cookie{challenge}, R, c)$.
3255
3256 We claim firstly that no message is \emph{accepted} by $\G2$ which would
3257 have been rejected by $\G1$. To prove the claim, it is sufficient to note
3258 that the extractor's output, if not $\bot$, is always correct, and hence if
3259 $\G2$ accepts a message then $\G1$ would have done so too.
3260
3261 Since $\G2$ also behaves identically when the adversary submits to~$S$ the
3262 challenge from the matching session~$T$, we have nothing to prove in this
3263 case. Let $F$ be the event that the adversary submits a message
3264 $(\cookie{challenge}, R, c)$ to a session~$S$ which $S$ would have accepted
3265 in $\G1$ but would be rejected by the new rule in~$\G2$. By
3266 lemma~\ref{lem:shoup} we have $\diff{1}{2} \le \Pr[F]$. To prove the
3267 current lemma, therefore, we must show that $\Pr[F] \le q_M/2^{\ell_I}$.
3268
3269 Rather than consider individual challenge messages, we consider
3270 \emph{classes} of messages. We shall refer to a quadruple~$\Cid = (i, j,
3271 s, R)$ as a \emph{class-id}, and define some useful functions:
3272 \begin{itemize}
3273 \item the class's \emph{session} $\Csession(\Cid) = (P_i, P_j, s)$;
3274 \item the class's \emph{index} $\Cindex(\Cid)$ is $r \in I$ where $R = r
3275 P$, which is well-defined by lemma~\ref{lem:unique-dl};
3276 \item the class's \emph{query} $\Cquery(\Cid) = (X_j, s, R, x_i R)$;
3277 \item the class's \emph{hash} $\Chash(\Cid) = H_I(\Cquery(\Cid)) = H_I(X_j,
3278 s, R, x_i R)$;
3279 \item the class's \emph{check-value} $\Ccheck(\Cid) = \Chash(\Cid) \xor
3280 \Cindex(\Cid)$;
3281 \item the class's \emph{check-set} $\Ccheckset(\Cid)$ is the set of
3282 check-values $c$ such that a message $(\cookie{challenge}, R, c)$ was
3283 sent to session $S = (P_i, P_j, s)$; and
3284 \item the class's \emph{count} $\Ccount(\Cid) = |\Ccheckset(\Cid)|$.
3285 \end{itemize}
3286
3287 Consider any class-id~$\Cid = (i, j, s, R)$. A natural question which
3288 arises is: which participants have issued $\Cid$'s query, i.e., queried
3289 $H_I$ at $\Cquery(\Cid)$?
3290
3291 We can characterise the $H_I(\cdot, \cdot, \cdot, \cdot)$ queries of a
3292 session $U = (P_{i'}, P_{j'}, s')$ as follows:
3293 \begin{itemize}
3294 \item computing the check-value for the challenge $R_U$ by querying
3295 $H_I(X_{i'}, s', R_U, r_U X_{j'})$, and
3296 \item checking an incoming challenge~$R'$ by querying $H_I(X_{j'}, s', R',
3297 x_{i'} R')$.
3298 \end{itemize}
3299 The class~$\Cid$'s query $\Cquery(\Cid)$ is $U$'s check-value query if
3300 \[ (j, i, s, R) = (i', j', s', R_U) \]
3301 i.e., $U$ is the matching session of $\Csession(\Cid)$, and moreover $R =
3302 R_U$ is the challenge value issued by $U$. For any $c \in
3303 \Ccheckset(\Cid)$, if $c = \Ccheck(\Cid)$ then $(\cookie{challenge}, R, c)$
3304 is precisely the challenge message issued by~$U$ to $\Csession(\Cid)$; the
3305 rules for handling this message didn't change. However, if $c \ne
3306 \Ccheck(\Cid)$ then the message would have been rejected in $\G1$, and we
3307 have already shown that $\G2$ continues to reject all messages rejected by
3308 $\G1$.
3309
3310 Let us say that a class-id~$\Cid = (i, j, s, R)$ is \emph{bad} if
3311 \begin{enumerate}
3312 \item the value $R$ is not the challenge issued by $\Csession(\Cid)$'s
3313 matching session, and
3314 \item the adversary has not issued $\Cid$'s query $\Cquery(\Cid)$,
3315 \emph{but}
3316 \item $\Ccheck(\Cid) \in \Ccheckset(\Cid)$, so one of the check-values
3317 submitted to $S$ was actually correct.
3318 \end{enumerate}
3319 We claim that our extractor will work perfectly unless some class-id is
3320 bad. Certainly, if $R$ was issued by the matching session, there is
3321 nothing to prove; if the adversary has issued the relevant query then the
3322 extractor will recover $\Cindex(\Cid)$ just fine; and if $\Ccheck(\Cid)
3323 \notin \Ccheckset(\Cid)$ then all messages in the class would have been
3324 rejected by $\G1$ anyway.
3325
3326 Let $B(\Cid)$ be the event that the class~$\Cid$ is bad. We claim that
3327 \[ \Pr[B(\Cid)] \le \frac{\Ccount(\Cid)}{2^{\ell_I}}. \]
3328 The present lemma follows, since
3329 \[ \diff{1}{2}
3330 \le \Pr[F]
3331 \le \sum_\Cid \Pr[B(\Cid)]
3332 \le \sum_\Cid \frac{\Ccount(\Cid)}{2^{\ell_I}}
3333 = \frac{1}{2^{\ell_I}} \sum_\Cid \Ccount(\Cid)
3334 \le \frac{q_M}{2^{\ell_I}}
3335 \]
32609df3 3336 as required.
a6e375a6
MW
3337
3338 Now observe that, in $\G2$, sessions don't actually check incoming
3339 challenges in this way any more -- instead we run the extractor. So, to
3340 prove the claim, we consider a class~$\Cid$ where properties~1 and~2 above
3341 hold. The correct hash $\Chash(\Cid)$ is then independent of the rest of
3342 the game, so the probability that $\Ccheck(\Cid) \in \Ccheckset(\Cid)$ is
3343 precisely $\Ccount(\Cid)/2^{\ell_I}$ as required.
3344
3345 This completes the proof of the lemma.
3346\end{proof}
3347
3348\begin{proof}[Proof of lemma~\ref{lem:sk-g2-g3}]
3349 Let $F$ be the event that the adversary makes a query $H_K(Z_S)$ for some
3350 ripe session~$S$. Since $\G3$ proceeds exactly as $\G2$ did unless $F$
3351 occurs, we apply lemma~\ref{lem:shoup}, which tells us that $\diff{2}{3}
3352 \le \Pr[F]$. We must therefore bound this probability.
3353
3354 To do this, we consider a new game~$\G3'$, which is the same as $\G3$,
3355 except that, at the start of the game, we choose a random number $k \inr
3356 \Nupto{q_S}$. For $0 \le i < q_S$, let $S_i$ be the $i$th session created
3357 by the adversary. We define $F'$ to be the event that the adversary
3358 queries $H_K(Z_{S_k})$ when $S_k$ is ripe.
3359
3360 The lemma now follows from these two claims.
3361
3362 \begin{claim}
3363 $\Pr[F] \le q_S \Pr[F']$.
3364 \end{claim}
3365 To see this, for any session~$S$, let $F_S$ be the event that the adversary
3366 queries~$H_K(Z_S)$ when $S$ is ripe. Then
3367 \[ \Pr[F] \le \sum_{0\le i<q_S} \Pr[F_{S_i}]. \]
3368 Hence,
3369 \[ \Pr[F'] = \Pr[F_{S_k}] = \sum_{0\le i<q_S} \Pr[F_{S_i}] \Pr[k = i]
3370 = \frac{1}{q_S} \sum_{0\le i<q_S} \Pr[F_{S_i}]
3371 \ge \frac{\Pr[F]}{q_S}
3372 \]
3373 proving the claim.
3374
3375 \begin{claim}
3376 For some $t' = t + O(n) + O(q_S q_M) + O(q_I) + O(q_K)$, we have
3377 $\Pr[F'] \le \InSec{mcdh}(G; t', q_K).$
3378 \end{claim}
3379 To prove this claim, we construct an adversary~$B$ which solves the MCDH
3380 problem in~$G$. The adversary works as follows.
3381 \begin{enumerate}
3382 \item It is given a pair $(R^*, S^*) = (r^* P, s^* P)$ of group elements;
3383 its objective is to make a verification-oracle query $V(Z^*)$ where $Z^*
3384 = r^* s^* P$.
3385 \item It sets up a simulation of the game~$\G3'$, by running the
3386 $\id{init}$ function, and simulating all of the parties. In particular,
3387 it chooses a random~$k \in \Nupto{q_S}$.
3388 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3389 and $H_I(\cdot, \cdot, \cdot, \cdot)$, which choose random outputs for
3390 new, fresh inputs. However, whenever $A$ queries $H_K(\cdot)$ on a group
3391 element $Z$, $B$ also queries $V(Z)$.
3392 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3393 instructions faithfully, until the $k$th session-creation.
3394 \item When creating the $k$th session $S = S_k = (P_i, P_j, s)$, $B$ has
3395 party~$P_i$ choose $R^*$ as its challenge, rather than choosing $r_S$ and
3396 setting $R_S = r_S P$. Because it simulates all the parties, $B$ can
3397 compute $Y_S = x_j R^*$, which is still correct.
3398 \item If $A$ requests the creation of a matching session $T = (P_j, P_i,
3399 s)$ then $B$ has party~$P_j$ choose $S^*$ as its challenge. Again, $B$
3400 computes $Y_T = x_i S^*$.
3401 \item If $A$ ever corrupts the parties $P_i$ or $P_j$, or reveals the
3402 session state of $S$ or $T$ then $B$ stops the simulation abruptly and
3403 halts.
3404 \end{enumerate}
dff0fad2
MW
3405 Adversary $B$'s running time is within the bounds required of $t'$, and $B$
3406 makes at most $q_K$ queries to $V(\cdot)$; we therefore have
a6e375a6
MW
3407 \[ \Pr[F'] \le \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_K) \]
3408 as required.
3409\end{proof}
3410
3411\begin{proof}[Proof of lemma~\ref{lem:sk-g3-g4}]
3412 Let $F_4$ be the event under which we abort the game~$\G4$. Clearly, if
3413 $F_4$ doesn't occur, games $\G3$ and $\G4$ proceed identically, so we can
3414 apply lemma~\ref{lem:shoup} to see that $\diff{3}{4} \le \Pr[F_4]$.
3415 Bounding $\Pr[F_4]$, however, is somewhat complicated. We use a further
3416 sequence of games.
3417
3418 Firstly, let $\G5$ be like $\G4$ with the exception that we choose, at
3419 random, an integer~$k \inr \Nupto{q_S}$. As we did in the proof for
3420 lemma~\ref{lem:sk-g2-g3}, let $S_i$ be the $i$th session created by the
3421 adversary. For each session~$S_i$, let $T_i$ be its matching session, if
3422 there is one. We define $F_5$ to be the event that
3423 \begin{itemize}
3424 \item $S_k$ completes immediately following delivery of a message $\mu
3425 \notin M_{T_k}$, and
3426 \item $S_k$ was ripe at this point.
3427 \end{itemize}
3428 For games~$\G{i}$, for $i > 5$, we define the event $F_i$ to be the event
3429 corresponding to $F_5$ in $\G{i}$. Note that if $S_k$ \emph{is} sent a
3430 message in $M_{T_k}$ then $S_k$ immediately completes.
3431
3432 \begin{claim}
3433 $\Pr[F_4] \le \Pr[F_5]/q_S$.
3434 \end{claim}
3435 This claim is proven exactly as we did for claim~1 of
3436 lemma~\ref{lem:sk-g2-g3}.
3437
3438 Let~$\G6$ be the same as $\G5$ except that we change the encrypted
3439 responses of session~$S_k$ and its matching session~$T_k$. Let $K^* =
3440 (K_0^*, K_1^*) = H_K(Z_S)$. Then, rather than sending $(\cookie{response},
3441 R_S, E_{K_0^*}(Y_T))$, session~$S$ sends $(\cookie{response}, R_S,
3442 E_{K_0^*}(1^{\ell_G}))$.
3443 \begin{claim}
3444 $|\Pr[F_6] - \Pr[F_5]| \le \InSec{ind-cca}(\E; t', q_M, q_M).$
3445 \end{claim}
3446 To prove this claim, we construct an adversary~$B$ which attacks the
3447 IND-CCA security of our encryption scheme $\E$. The adversary~$B$ works as
3448 follows.
3449 \begin{enumerate}
3450 \item It is given no input, but a pair of oracles $E(\cdot, \cdot)$ and
3451 $D(\cdot)$; the former encrypts either the left or right input, according
3452 to a hidden bit, and the latter decrypts ciphertexts other than those
3453 returned by $E(\cdot, \cdot)$. Its goal is to guess the hidden bit.
3454 \item It sets up a simulation of the game~$\G5$, by running the $\id{init}$
3455 function, and simulating all of the parties. In particular, it chooses a
3456 random $k \in \Nupto{q_S}$.
3457 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3458 and $H_I(\cdot, \cdot, \cdot, \cdot)$.
3459 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3460 instructions faithfully, except for the matching sessions $S_k$ and
3461 $T_k$. Let $S = S_k = (P_i, P_j, s)$, and $T = T_k = (P_j, P_i, s)$.
3462 \item Suppose $T$ is sent the message $C_S = (\cookie{challenge}, R_S,
3463 c_S)$. Rather than computing $K^* = H_K(r_T R_S)$ and performing the
3464 encryption properly, $B$ queries its left-or-right encryption oracle
3465 $E(\cdot, \cdot)$ on $E(1^{\ell_G}, x_j R_S)$, and sends the resulting
3466 ciphertext~$\chi$ back to~$S$ as part of a message $(\cookie{response},
3467 R_T, \chi)$. The set $M_T$ is precisely the set of messages constructed
3468 in this fashion. (Recall that challenge messages other than $C_S$ aren't
3469 actually delivered to $T$, since we simulate the responses using the
3470 extractor, as of $\G2$.)
3471 \item Suppose $S$ is sent a message $M = (\cookie{response}, R_T, \chi) \in
3472 M_T$. We immediately stop the simulation, and $B$ returns $0$.
3473 \item Suppose, instead, that $S$ is sent some message $M' =
3474 (\cookie{response}, R, \chi) \notin M_T$. There are two cases to
3475 consider. If $R = R_T$ then we must have $\chi$ distinct from the
3476 ciphertexts returned by the $E(\cdot, \cdot)$ oracle, so we can invoke
3477 the decryption oracle $D(\cdot)$ on $\chi$ to obtain a response $Y$.
3478 Alternatively, if $R \ne R_T$, we can compute the key $K = (K_0, K_1) =
3479 H_K(r_S R)$, and recover $Y = D_{K_0}(\chi)$. In either case, if $Y =
3480 r_S X_j$ then $S$ would complete at this point: $B$ stops the simulation
3481 and returns $1$.
3482 \item If $A$ exposes $S$ (by corrupting $P_i$ or~$P_j$, or revealing $S$ or
3483 $T$) then we stop the simulation and $B$ returns $0$.
3484 \item Finally, if the game stops, either because $A$ halts, or because of
3485 one of the special rules introduced in earlier games, $B$ returns $0$.
3486 \end{enumerate}
3487 It is clear that $B$'s oracle queries are acceptable, since $|x_j R_S| =
3488 \ell_G$ by definition, and $B$ never queries $D(\cdot)$ on a ciphertext
3489 returned by its encryption oracle. By the rules of~$\G3$, we know that the
3490 game stops immediately if $A$ ever queries $Z_S$, so the key $K^*$ is
3491 independent of everything in $A$'s view except the ciphertexts $\chi$
3492 output by $S$ and $T$. Therefore, if the hidden bit of the IND-CCA game is
3493 $1$, $B$ accurately simulates $\G5$, whereas if the bit is $0$ then $B$
dff0fad2
MW
3494 accurately simulates $\G6$. We issue no more than $q_M$ encryption or
3495 decryption queries. Finally, $B$'s running time is within the bounds
3496 allowed for $t'$. Therefore,
a6e375a6
MW
3497 \[ \Adv{ind-cca}{\E}(B) = \Pr[F_5] - \Pr[F_6]
3498 \le \InSec{ind-cca}(\E; t', q_M, q_M). \]
3499 We construct the adversary~$\bar{B}$ which is the same as $B$ above, except
3500 that $\bar{B}$ returns $0$ whenever $B$ returns~$1$, and \emph{vice versa}.
3501 Clearly
3502 \[ \Adv{ind-cca}{\E}(\bar{B})
3503 = (1 - \Pr[F_5]) - (1 - \Pr[F_6])
3504 = \Pr[F_6] - \Pr[F_5]
3505 \le \InSec{ind-cca}(\E; t', q_M, q_M).
3506 \]
3507 This proves the claim.
3508
3509 Let $\G7$ be the same as $\G6$, except that at the start of the game we
3510 choose a random $m \in \Nupto{n}$, and when the adversary creates the
3511 session $S = S_k = (P_i, P_j, s)$, we abort the game unless $j = m$.
3512 Clearly we have $\Pr[F_6] = n \Pr[F_7]$.
32609df3 3513
a6e375a6
MW
3514 Finally, we can explicitly bound $F_7$. In $\G7$, the adversary's view is
3515 independent of the correct response $Y_S = r_S X_j = x_j R_S$ to $S$'s
3516 challenge. Therefore, if $A$ manages to send any message $\mu \notin M_T$
3517 which causes $S$ to complete, then it has impersonated~$P_j$.
3518 \begin{claim}
3519 $\Pr[F_7] \le \InSec{mcdh}(G; t', q_M + q_I)$.
3520 \end{claim}
3521 The present lemma follows from this and the previous claims.
3522
3523 To prove the claim formally, we construct an adversary~$B'$, which behaves
3524 as follows.
3525 \begin{enumerate}
3526 \item It is given as input a public key $X^*$ and a single challenge $(R^*,
3527 c^*)$, a random oracle $H^*_I(\cdot, \cdot)$, and an oracle $V(\cdot,
3528 \cdot)$, which verifies responses $(R, Y)$. Its goal is to invoke
3529 $V(\cdot, \cdot)$ with a correct response to the challenge.
3530 \item It chooses a random $k \in \Nupto{q_S}$ and $m \in \Nupto{n}$. It
3531 sets up a simulation of the game~$\G7$, by running the $\id{init}$
3532 function, and simulating all of the parties, except that it gives party
3533 $P_m$ the public key $X^*$. This makes no difference, since $P_m$
3534 doesn't actually need to give any `honest' responses because of the
3535 change we made in $\G6$.
3536 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3537 and $H_I(\cdot, \cdot, \cdot, \cdot)$, with one exception -- see below.
3538 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3539 instructions faithfully, except for the session $S_k$. Let $S = S_k =
3540 (P_i, P_j, s)$, and let $T = T_k = (P_j, P_i, s)$ be its matching
3541 session.
3542 \item When session~$S$ is created, $B'$ checks that $j = m$, and if not
3543 stops the simulation and halts. Otherwise, $B'$ uses the challenge pair
3544 $(R^*, c^*)$ given to it as input. Session~$S$ sends $C_S =
3545 (\cookie{challenge}, R^*, c^*)$ as its challenge to~$T$.
3546 \item When $A$ makes a query $H_I(X^*, s, R, Y)$, $B'$ answers it by
3547 querying its \emph{own} random oracle $H^*_I(R, Y)$.
3548 \item When $S$ receives a message $(\cookie{response}, R, \chi)$, we
3549 compute $(K_0, K_1) = H_K(r_S R)$, and $Y = D_{K_0}(\chi)$. If $Y \ne \bot$
3550 then $B'$ calls $V(R, Y)$.
3551 \item If $A$ reveals $S$ or corrupts $P_i$ or $P_j$ then $B'$ stops the
3552 simulation immediately and halts.
3553 \end{enumerate}
dff0fad2
MW
3554 The running time of $B'$ is within the bounds required of $t'$; it makes at
3555 most $q_I$ random-oracle and at most $q_M$ verification queries. Clearly
3556 $B'$ succeeds whenever $F_7$ occurs. The claim follows from
3557 theorem~\ref{thm:wident-sound}.
a6e375a6
MW
3558\end{proof}
3559
3560
3561\subsection{Proof of theorem~\ref{thm:sk2}}
3562\label{sec:sk2-proof}
3563
3564The proof is almost identical to the proof of theorem~\ref{thm:sk}, in
3565appendix~\ref{sec:sk-proof}. Unfortunately a black-box reduction doesn't
32609df3 3566seem possible.
a6e375a6
MW
3567
3568We use the games and notation of section~\ref{sec:sk-proof}.
3569
3570The change to the check-value calculation doesn't affect key-generation at
3571all, so the transition to $\G1$ goes through as before.
3572
3573The transition from $\G1$ to $\G2$ -- answering challenges using the
3574extractor -- needs a little care. Let $S = (P_i, P_j, s)$ be a session, and
3575consider an incoming message $(\cookie{challenge}, R, c)$.
3576\begin{itemize}
3577\item If $T = (P_j, P_i, s)$ is the matching session to $S$, and $R = R_T$ is
3578 the public challenge value of $T$, and $c = r_T \xor H_I(R_S, X_j, s, R_T,
3579 r_T X_i)$ is the check-value output by $T$ when it received
3580 $(\cookie{pre-challenge}, R_S)$ as input, then $S$ replies as it did in
3581 $\G1$.
3582\item If the challenge message is any other message, then we use the
3583 extractor.
3584\end{itemize}
3585As in lemma~\ref{lem:sk-g1-g2}, we examine which sessions could have queried
3586$H_I(R_S, X_j, s, R, x_i R)$, and for the same reasons conclude that only the
3587matching session would have done this, and only in response to the
3588pre-challenge $R_S$. It follows that $\diff{1}{2} \le q_M/2^{\ell_I}$ as
3589before.
3590
3591The remaining game steps go through unchanged. In particular, we conclude
3592that a ripe session will only complete if the adversary has transmitted
3593messages from its matching session correctly, and the session key is
3594independent of the adversary's view. The theorem follows.
3595
dff0fad2
MW
3596
3597\subsection{Sketch proof of single-key protocol for secure channels}
3598\label{sec:sc-proof}
3599
3600We want to show that the Wrestlers Key-Exchange protocol, followed by use of
3601the encryption scheme $\E$, with the \emph{same} key $K = K_0$, still
3602provides secure channels.
3603
3604\subsubsection{Secure channels definition}
3605We (very briefly!) recall the \cite{Canetti:2001:AKE} definition of a secure
3606channels protocol. We again play a game with the adversary. At the
3607beginning, we choose a bit $b^* \inr \{0, 1\}$ at random. We allow the
3608adversary the ability to establish \emph{secure channels} sessions within the
3609parties. Furthermore, for any running session $S = (P_i, P_j, s)$, we allow
3610the adversary to request $S$ to send a message~$\mu$ through its secure
3611channel. Finally, the adversary is allowed to choose one ripe
3612\emph{challenge} session, and ask for it to send one of a \emph{pair} of
3613messages $(\mu_0, \mu_1)$, subject to the restriction that $|\mu_0| =
3614|\mu_1|$; the session sends message $\mu_{b^*}$. The adversary may not
3615expose the challenge session.
3616
3617The adversary wins if (a)~it can guess the bit $b^*$, or (b)~it can cause a
3618ripe session $S$ (i.e., an unexposed, running session), with a matching
3619session~$T$ to output a message other than one that it requested that $T$
3620send.
3621
3622\subsubsection{Protocol definition}
3623The protocol begins with Wrestlers key-exchange. The encryption in the
3624key-exchange protocol is performed as $E_K(\cookie{kx}, \cdot)$; encryption
3625for secure channels is performed as $E_K(\cookie{sc}, i, o, \cdot)$, where
3626$i$ is a sequence number to prevent replays and $o \in \{S, T\}$ identifies
3627the sender.
3628
3629\subsubsection{Proof sketch}
3630We use the games and notation of appendix~\ref{sec:sk-proof}.
3631
3632The proof goes through as far as the step between $\G5$ and $\G6$ in the
3633proof of lemma~\ref{lem:sk-g3-g4}. Here we make the obvious adjustments to
3634our adversary against the IND-CCA security of $\E$. (Our adversary will need
3635to use the left-or-right oracle for messages sent using the secure channel
3636built on $K^*$. That's OK.)
3637
3638In $\G4$, we know that ripe sessions agree the correct key, and the adversary
3639never queries the random oracle, so the key is independent of the adversary's
3640view.
3641
3642We define a new game $\G8$, which is like $\G4$, except that we stop the game
3643if the adversary ever forges a message sent over the secure channel. That
3644is, if a ripe session $S$ ever announces receipt of a message not sent (at
3645the adversary's request) by its matching session $T$. Let $F_8$ be the event
3646that a forgery occurs. We apply lemma~\ref{lem:shoup}, which tells us that
3647$\diff{4}{8} \le \Pr[F_8]$. To bound $F_8$, we isolate a session at random
3648(as in lemmata \ref{lem:sk-g2-g3} and~\ref{lem:sk-g3-g4}), which tells us
3649that
3650\begin{equation}
3651 \label{eq:sc-g4-g8}
3652 \diff{4}{8} \le q_S \cdot \InSec{int-ptxt}(\E; t', q_M, q_M)
3653\end{equation}
3654Finally, we can bound the adversary's advantage at guessing the hidden bit
3655$b^*$. We isolate (we hope) the challenge session $S$ by choosing a target
32609df3 3656session at random, as before. Let $K^* = H_K(Z_S)$ be the key agreed by the
dff0fad2
MW
3657session (if it becomes ripe). We define an adversary $B$ against the IND-CCA
3658security of $\E$. The adversary $B$ simulates the game. If the adversary
3659exposes the target session, or doesn't choose it as the challenge session,
3660$B$ fails (and exits 0); otherwise it uses the left-or-right encryption
3661oracle to encrypt both of the adversary's message choices, and outputs the
3662adversary's choice. Let $b$ be the adversary's output, and let $\epsilon$ be
3663the advantage of our IND-CCA distinguisher. Then
3664\begin{eqnarray*}[rl]
3665 \Pr[b = b^*]
3666 & = \Pr[b = b^* \wedge b^* = 1] + \Pr[b = b^* \wedge b^* = 0] \\
3667 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3668 \Pr[b = b^* \mid b^* = 0] \bigr) \\
3669 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3670 (1 - \Pr[b \ne b^* \mid b^* = 0]) \bigr) \\
3671 & = \frac{1}{2}\bigl( \Pr[b = 1 \mid b^* = 1] -
3672 \Pr[b = 1 \mid b^* = 0] + 1 \bigr) \\
3673 & = \frac{1}{2}\bigl(1 + q_S\,\Adv{ind-cca}{\E}(B) \bigr) \\
3674 & \le \frac{1}{2} \bigl( 1 + q_S\,\InSec{ind-cca}(\E; t', q_M, q_M) \bigr).
3675 \eqnumber
3676\end{eqnarray*}
3677
a6e375a6
MW
3678\end{document}
3679
32609df3 3680%%% Local Variables:
a6e375a6
MW
3681%%% mode: latex
3682%%% TeX-master: t
32609df3 3683%%% End: