34292b1d |
1 | /* |
2 | * winhandl.c: Module to give Windows front ends the general |
3 | * ability to deal with consoles, pipes, serial ports, or any other |
4 | * type of data stream accessed through a Windows API HANDLE rather |
5 | * than a WinSock SOCKET. |
6 | * |
7 | * We do this by spawning a subthread to continuously try to read |
8 | * from the handle. Every time a read successfully returns some |
9 | * data, the subthread sets an event object which is picked up by |
10 | * the main thread, and the main thread then sets an event in |
11 | * return to instruct the subthread to resume reading. |
12 | * |
13 | * Output works precisely the other way round, in a second |
14 | * subthread. The output subthread should not be attempting to |
15 | * write all the time, because it hasn't always got data _to_ |
16 | * write; so the output thread waits for an event object notifying |
17 | * it to _attempt_ a write, and then it sets an event in return |
18 | * when one completes. |
19 | */ |
20 | |
34292b1d |
21 | #include <assert.h> |
22 | |
23 | #include "putty.h" |
24 | |
25 | /* ---------------------------------------------------------------------- |
26 | * Generic definitions. |
27 | */ |
28 | |
/*
 * Largest backlog we will let accumulate on an input handle before
 * we stop issuing reads on it.
 */
#define MAX_BACKLOG 32768
34 | |
35 | struct handle_generic { |
36 | /* |
37 | * Initial fields common to both handle_input and handle_output |
38 | * structures. |
39 | * |
40 | * The three HANDLEs are set up at initialisation time and are |
41 | * thereafter read-only to both main thread and subthread. |
42 | * `moribund' is only used by the main thread; `done' is |
43 | * written by the main thread before signalling to the |
44 | * subthread. `defunct' and `busy' are used only by the main |
45 | * thread. |
46 | */ |
47 | HANDLE h; /* the handle itself */ |
48 | HANDLE ev_to_main; /* event used to signal main thread */ |
49 | HANDLE ev_from_main; /* event used to signal back to us */ |
50 | int moribund; /* are we going to kill this soon? */ |
51 | int done; /* request subthread to terminate */ |
52 | int defunct; /* has the subthread already gone? */ |
53 | int busy; /* operation currently in progress? */ |
0e03ceff |
54 | void *privdata; /* for client to remember who they are */ |
34292b1d |
55 | }; |
56 | |
57 | /* ---------------------------------------------------------------------- |
58 | * Input threads. |
59 | */ |
60 | |
61 | /* |
62 | * Data required by an input thread. |
63 | */ |
64 | struct handle_input { |
65 | /* |
66 | * Copy of the handle_generic structure. |
67 | */ |
68 | HANDLE h; /* the handle itself */ |
69 | HANDLE ev_to_main; /* event used to signal main thread */ |
70 | HANDLE ev_from_main; /* event used to signal back to us */ |
71 | int moribund; /* are we going to kill this soon? */ |
72 | int done; /* request subthread to terminate */ |
73 | int defunct; /* has the subthread already gone? */ |
74 | int busy; /* operation currently in progress? */ |
0e03ceff |
75 | void *privdata; /* for client to remember who they are */ |
34292b1d |
76 | |
77 | /* |
bdebd7e9 |
78 | * Data set at initialisation and then read-only. |
79 | */ |
80 | int flags; |
81 | |
82 | /* |
34292b1d |
83 | * Data set by the input thread before signalling ev_to_main, |
84 | * and read by the main thread after receiving that signal. |
85 | */ |
86 | char buffer[4096]; /* the data read from the handle */ |
87 | DWORD len; /* how much data that was */ |
88 | int readret; /* lets us know about read errors */ |
89 | |
90 | /* |
91 | * Callback function called by this module when data arrives on |
92 | * an input handle. |
93 | */ |
94 | handle_inputfn_t gotdata; |
95 | }; |
96 | |
97 | /* |
98 | * The actual thread procedure for an input thread. |
99 | */ |
100 | static DWORD WINAPI handle_input_threadfunc(void *param) |
101 | { |
102 | struct handle_input *ctx = (struct handle_input *) param; |
bdebd7e9 |
103 | OVERLAPPED ovl, *povl; |
758a1377 |
104 | HANDLE oev; |
c606abbc |
105 | int readlen; |
bdebd7e9 |
106 | |
758a1377 |
107 | if (ctx->flags & HANDLE_FLAG_OVERLAPPED) { |
bdebd7e9 |
108 | povl = &ovl; |
758a1377 |
109 | oev = CreateEvent(NULL, TRUE, FALSE, NULL); |
110 | } else { |
bdebd7e9 |
111 | povl = NULL; |
758a1377 |
112 | } |
34292b1d |
113 | |
c606abbc |
114 | if (ctx->flags & HANDLE_FLAG_UNITBUFFER) |
115 | readlen = 1; |
116 | else |
117 | readlen = sizeof(ctx->buffer); |
118 | |
34292b1d |
119 | while (1) { |
758a1377 |
120 | if (povl) { |
bdebd7e9 |
121 | memset(povl, 0, sizeof(OVERLAPPED)); |
758a1377 |
122 | povl->hEvent = oev; |
123 | } |
c606abbc |
124 | ctx->readret = ReadFile(ctx->h, ctx->buffer, readlen, |
bdebd7e9 |
125 | &ctx->len, povl); |
758a1377 |
126 | if (povl && !ctx->readret && GetLastError() == ERROR_IO_PENDING) { |
127 | WaitForSingleObject(povl->hEvent, INFINITE); |
128 | ctx->readret = GetOverlappedResult(ctx->h, povl, &ctx->len, FALSE); |
129 | } |
bdebd7e9 |
130 | |
34292b1d |
131 | if (!ctx->readret) |
132 | ctx->len = 0; |
133 | |
bdebd7e9 |
134 | if (ctx->readret && ctx->len == 0 && |
135 | (ctx->flags & HANDLE_FLAG_IGNOREEOF)) |
136 | continue; |
137 | |
34292b1d |
138 | SetEvent(ctx->ev_to_main); |
139 | |
140 | if (!ctx->len) |
141 | break; |
142 | |
143 | WaitForSingleObject(ctx->ev_from_main, INFINITE); |
144 | if (ctx->done) |
145 | break; /* main thread told us to shut down */ |
146 | } |
147 | |
758a1377 |
148 | if (povl) |
149 | CloseHandle(oev); |
150 | |
34292b1d |
151 | return 0; |
152 | } |
153 | |
154 | /* |
155 | * This is called after a succcessful read, or from the |
156 | * `unthrottle' function. It decides whether or not to begin a new |
157 | * read operation. |
158 | */ |
159 | static void handle_throttle(struct handle_input *ctx, int backlog) |
160 | { |
50ab783a |
161 | if (ctx->defunct) |
162 | return; |
34292b1d |
163 | |
164 | /* |
165 | * If there's a read operation already in progress, do nothing: |
166 | * when that completes, we'll come back here and be in a |
167 | * position to make a better decision. |
168 | */ |
169 | if (ctx->busy) |
170 | return; |
171 | |
172 | /* |
173 | * Otherwise, we must decide whether to start a new read based |
174 | * on the size of the backlog. |
175 | */ |
176 | if (backlog < MAX_BACKLOG) { |
177 | SetEvent(ctx->ev_from_main); |
178 | ctx->busy = TRUE; |
179 | } |
180 | } |
181 | |
182 | /* ---------------------------------------------------------------------- |
183 | * Output threads. |
184 | */ |
185 | |
186 | /* |
187 | * Data required by an output thread. |
188 | */ |
189 | struct handle_output { |
190 | /* |
191 | * Copy of the handle_generic structure. |
192 | */ |
193 | HANDLE h; /* the handle itself */ |
194 | HANDLE ev_to_main; /* event used to signal main thread */ |
195 | HANDLE ev_from_main; /* event used to signal back to us */ |
196 | int moribund; /* are we going to kill this soon? */ |
197 | int done; /* request subthread to terminate */ |
198 | int defunct; /* has the subthread already gone? */ |
199 | int busy; /* operation currently in progress? */ |
0e03ceff |
200 | void *privdata; /* for client to remember who they are */ |
34292b1d |
201 | |
202 | /* |
bdebd7e9 |
203 | * Data set at initialisation and then read-only. |
204 | */ |
205 | int flags; |
206 | |
207 | /* |
34292b1d |
208 | * Data set by the main thread before signalling ev_from_main, |
209 | * and read by the input thread after receiving that signal. |
210 | */ |
211 | char *buffer; /* the data to write */ |
212 | DWORD len; /* how much data there is */ |
213 | |
214 | /* |
215 | * Data set by the input thread before signalling ev_to_main, |
216 | * and read by the main thread after receiving that signal. |
217 | */ |
218 | DWORD lenwritten; /* how much data we actually wrote */ |
219 | int writeret; /* return value from WriteFile */ |
220 | |
221 | /* |
222 | * Data only ever read or written by the main thread. |
223 | */ |
224 | bufchain queued_data; /* data still waiting to be written */ |
225 | |
226 | /* |
227 | * Callback function called when the backlog in the bufchain |
228 | * drops. |
229 | */ |
230 | handle_outputfn_t sentdata; |
231 | }; |
232 | |
233 | static DWORD WINAPI handle_output_threadfunc(void *param) |
234 | { |
235 | struct handle_output *ctx = (struct handle_output *) param; |
bdebd7e9 |
236 | OVERLAPPED ovl, *povl; |
237 | |
238 | if (ctx->flags & HANDLE_FLAG_OVERLAPPED) |
239 | povl = &ovl; |
240 | else |
241 | povl = NULL; |
34292b1d |
242 | |
243 | while (1) { |
244 | WaitForSingleObject(ctx->ev_from_main, INFINITE); |
245 | if (ctx->done) { |
246 | SetEvent(ctx->ev_to_main); |
247 | break; |
248 | } |
bdebd7e9 |
249 | if (povl) |
250 | memset(povl, 0, sizeof(OVERLAPPED)); |
34292b1d |
251 | ctx->writeret = WriteFile(ctx->h, ctx->buffer, ctx->len, |
bdebd7e9 |
252 | &ctx->lenwritten, povl); |
253 | if (povl && !ctx->writeret && GetLastError() == ERROR_IO_PENDING) |
254 | ctx->writeret = GetOverlappedResult(ctx->h, povl, |
255 | &ctx->lenwritten, TRUE); |
256 | |
34292b1d |
257 | SetEvent(ctx->ev_to_main); |
258 | if (!ctx->writeret) |
259 | break; |
260 | } |
261 | |
262 | return 0; |
263 | } |
264 | |
265 | static void handle_try_output(struct handle_output *ctx) |
266 | { |
267 | void *senddata; |
268 | int sendlen; |
269 | |
270 | if (!ctx->busy && bufchain_size(&ctx->queued_data)) { |
271 | bufchain_prefix(&ctx->queued_data, &senddata, &sendlen); |
272 | ctx->buffer = senddata; |
273 | ctx->len = sendlen; |
274 | SetEvent(ctx->ev_from_main); |
275 | ctx->busy = TRUE; |
276 | } |
277 | } |
278 | |
279 | /* ---------------------------------------------------------------------- |
280 | * Unified code handling both input and output threads. |
281 | */ |
282 | |
283 | struct handle { |
284 | int output; |
285 | union { |
286 | struct handle_generic g; |
287 | struct handle_input i; |
288 | struct handle_output o; |
289 | } u; |
290 | }; |
291 | |
292 | static tree234 *handles_by_evtomain; |
293 | |
294 | static int handle_cmp_evtomain(void *av, void *bv) |
295 | { |
296 | struct handle *a = (struct handle *)av; |
297 | struct handle *b = (struct handle *)bv; |
298 | |
299 | if ((unsigned)a->u.g.ev_to_main < (unsigned)b->u.g.ev_to_main) |
300 | return -1; |
301 | else if ((unsigned)a->u.g.ev_to_main > (unsigned)b->u.g.ev_to_main) |
302 | return +1; |
303 | else |
304 | return 0; |
305 | } |
306 | |
307 | static int handle_find_evtomain(void *av, void *bv) |
308 | { |
309 | HANDLE *a = (HANDLE *)av; |
310 | struct handle *b = (struct handle *)bv; |
311 | |
312 | if ((unsigned)*a < (unsigned)b->u.g.ev_to_main) |
313 | return -1; |
314 | else if ((unsigned)*a > (unsigned)b->u.g.ev_to_main) |
315 | return +1; |
316 | else |
317 | return 0; |
318 | } |
319 | |
0e03ceff |
320 | struct handle *handle_input_new(HANDLE handle, handle_inputfn_t gotdata, |
bdebd7e9 |
321 | void *privdata, int flags) |
34292b1d |
322 | { |
323 | struct handle *h = snew(struct handle); |
324 | |
325 | h->output = FALSE; |
326 | h->u.i.h = handle; |
327 | h->u.i.ev_to_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
328 | h->u.i.ev_from_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
329 | h->u.i.gotdata = gotdata; |
34292b1d |
330 | h->u.i.defunct = FALSE; |
331 | h->u.i.moribund = FALSE; |
332 | h->u.i.done = FALSE; |
0e03ceff |
333 | h->u.i.privdata = privdata; |
bdebd7e9 |
334 | h->u.i.flags = flags; |
34292b1d |
335 | |
336 | if (!handles_by_evtomain) |
337 | handles_by_evtomain = newtree234(handle_cmp_evtomain); |
338 | add234(handles_by_evtomain, h); |
339 | |
340 | CreateThread(NULL, 0, handle_input_threadfunc, |
341 | &h->u.i, 0, NULL); |
2ceabd36 |
342 | h->u.i.busy = TRUE; |
34292b1d |
343 | |
344 | return h; |
345 | } |
346 | |
0e03ceff |
347 | struct handle *handle_output_new(HANDLE handle, handle_outputfn_t sentdata, |
bdebd7e9 |
348 | void *privdata, int flags) |
34292b1d |
349 | { |
350 | struct handle *h = snew(struct handle); |
351 | |
352 | h->output = TRUE; |
353 | h->u.o.h = handle; |
354 | h->u.o.ev_to_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
355 | h->u.o.ev_from_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
356 | h->u.o.busy = FALSE; |
357 | h->u.o.defunct = FALSE; |
358 | h->u.o.moribund = FALSE; |
359 | h->u.o.done = FALSE; |
0e03ceff |
360 | h->u.o.privdata = privdata; |
34292b1d |
361 | bufchain_init(&h->u.o.queued_data); |
362 | h->u.o.sentdata = sentdata; |
bdebd7e9 |
363 | h->u.o.flags = flags; |
34292b1d |
364 | |
365 | if (!handles_by_evtomain) |
366 | handles_by_evtomain = newtree234(handle_cmp_evtomain); |
367 | add234(handles_by_evtomain, h); |
368 | |
369 | CreateThread(NULL, 0, handle_output_threadfunc, |
370 | &h->u.i, 0, NULL); |
371 | |
372 | return h; |
373 | } |
374 | |
375 | int handle_write(struct handle *h, const void *data, int len) |
376 | { |
377 | assert(h->output); |
378 | bufchain_add(&h->u.o.queued_data, data, len); |
379 | handle_try_output(&h->u.o); |
380 | return bufchain_size(&h->u.o.queued_data); |
381 | } |
382 | |
383 | HANDLE *handle_get_events(int *nevents) |
384 | { |
385 | HANDLE *ret; |
386 | struct handle *h; |
387 | int i, n, size; |
388 | |
389 | /* |
390 | * Go through our tree counting the handle objects currently |
391 | * engaged in useful activity. |
392 | */ |
393 | ret = NULL; |
394 | n = size = 0; |
395 | if (handles_by_evtomain) { |
396 | for (i = 0; (h = index234(handles_by_evtomain, i)) != NULL; i++) { |
397 | if (h->u.g.busy) { |
398 | if (n >= size) { |
399 | size += 32; |
400 | ret = sresize(ret, size, HANDLE); |
401 | } |
402 | ret[n++] = h->u.g.ev_to_main; |
403 | } |
404 | } |
405 | } |
406 | |
407 | *nevents = n; |
408 | return ret; |
409 | } |
410 | |
411 | static void handle_destroy(struct handle *h) |
412 | { |
413 | if (h->output) |
414 | bufchain_clear(&h->u.o.queued_data); |
415 | CloseHandle(h->u.g.ev_from_main); |
416 | CloseHandle(h->u.g.ev_to_main); |
417 | del234(handles_by_evtomain, h); |
418 | sfree(h); |
419 | } |
420 | |
421 | void handle_free(struct handle *h) |
422 | { |
423 | /* |
424 | * If the handle is currently busy, we cannot immediately free |
425 | * it. Instead we must wait until it's finished its current |
426 | * operation, because otherwise the subthread will write to |
427 | * invalid memory after we free its context from under it. |
428 | */ |
429 | assert(h && !h->u.g.moribund); |
430 | if (h->u.g.busy) { |
431 | /* |
432 | * Just set the moribund flag, which will be noticed next |
433 | * time an operation completes. |
434 | */ |
435 | h->u.g.moribund = TRUE; |
436 | } else if (h->u.g.defunct) { |
437 | /* |
438 | * There isn't even a subthread; we can go straight to |
439 | * handle_destroy. |
440 | */ |
441 | handle_destroy(h); |
442 | } else { |
443 | /* |
444 | * The subthread is alive but not busy, so we now signal it |
445 | * to die. Set the moribund flag to indicate that it will |
446 | * want destroying after that. |
447 | */ |
448 | h->u.g.moribund = TRUE; |
449 | h->u.g.done = TRUE; |
c969e831 |
450 | h->u.g.busy = TRUE; |
34292b1d |
451 | SetEvent(h->u.g.ev_from_main); |
452 | } |
453 | } |
454 | |
455 | void handle_got_event(HANDLE event) |
456 | { |
457 | struct handle *h; |
458 | |
459 | assert(handles_by_evtomain); |
460 | h = find234(handles_by_evtomain, &event, handle_find_evtomain); |
461 | if (!h) { |
462 | /* |
463 | * This isn't an error condition. If two or more event |
464 | * objects were signalled during the same select operation, |
465 | * and processing of the first caused the second handle to |
466 | * be closed, then it will sometimes happen that we receive |
467 | * an event notification here for a handle which is already |
468 | * deceased. In that situation we simply do nothing. |
469 | */ |
470 | return; |
471 | } |
472 | |
473 | if (h->u.g.moribund) { |
474 | /* |
475 | * A moribund handle is already treated as dead from the |
476 | * external user's point of view, so do nothing with the |
477 | * actual event. Just signal the thread to die if |
478 | * necessary, or destroy the handle if not. |
479 | */ |
480 | if (h->u.g.done) { |
481 | handle_destroy(h); |
482 | } else { |
483 | h->u.g.done = TRUE; |
c969e831 |
484 | h->u.g.busy = TRUE; |
34292b1d |
485 | SetEvent(h->u.g.ev_from_main); |
486 | } |
487 | return; |
488 | } |
489 | |
490 | if (!h->output) { |
491 | int backlog; |
492 | |
493 | h->u.i.busy = FALSE; |
494 | |
495 | /* |
496 | * A signal on an input handle means data has arrived. |
497 | */ |
498 | if (h->u.i.len == 0) { |
499 | /* |
500 | * EOF, or (nearly equivalently) read error. |
501 | */ |
502 | h->u.i.gotdata(h, NULL, (h->u.i.readret ? 0 : -1)); |
503 | h->u.i.defunct = TRUE; |
504 | } else { |
505 | backlog = h->u.i.gotdata(h, h->u.i.buffer, h->u.i.len); |
506 | handle_throttle(&h->u.i, backlog); |
507 | } |
508 | } else { |
509 | h->u.o.busy = FALSE; |
510 | |
511 | /* |
512 | * A signal on an output handle means we have completed a |
513 | * write. Call the callback to indicate that the output |
514 | * buffer size has decreased, or to indicate an error. |
515 | */ |
516 | if (!h->u.o.writeret) { |
517 | /* |
518 | * Write error. Send a negative value to the callback, |
519 | * and mark the thread as defunct (because the output |
520 | * thread is terminating by now). |
521 | */ |
522 | h->u.o.sentdata(h, -1); |
523 | h->u.o.defunct = TRUE; |
524 | } else { |
525 | bufchain_consume(&h->u.o.queued_data, h->u.o.lenwritten); |
526 | h->u.o.sentdata(h, bufchain_size(&h->u.o.queued_data)); |
527 | handle_try_output(&h->u.o); |
528 | } |
529 | } |
530 | } |
531 | |
532 | void handle_unthrottle(struct handle *h, int backlog) |
533 | { |
534 | assert(!h->output); |
535 | handle_throttle(&h->u.i, backlog); |
536 | } |
537 | |
538 | int handle_backlog(struct handle *h) |
539 | { |
540 | assert(h->output); |
541 | return bufchain_size(&h->u.o.queued_data); |
542 | } |
0e03ceff |
543 | |
544 | void *handle_get_privdata(struct handle *h) |
545 | { |
546 | return h->u.g.privdata; |
547 | } |