34292b1d |
1 | /* |
2 | * winhandl.c: Module to give Windows front ends the general |
3 | * ability to deal with consoles, pipes, serial ports, or any other |
4 | * type of data stream accessed through a Windows API HANDLE rather |
5 | * than a WinSock SOCKET. |
6 | * |
7 | * We do this by spawning a subthread to continuously try to read |
8 | * from the handle. Every time a read successfully returns some |
9 | * data, the subthread sets an event object which is picked up by |
10 | * the main thread, and the main thread then sets an event in |
11 | * return to instruct the subthread to resume reading. |
12 | * |
13 | * Output works precisely the other way round, in a second |
14 | * subthread. The output subthread should not be attempting to |
15 | * write all the time, because it hasn't always got data _to_ |
16 | * write; so the output thread waits for an event object notifying |
17 | * it to _attempt_ a write, and then it sets an event in return |
18 | * when one completes. |
8e32bfe0 |
19 | * |
20 | * (It's terribly annoying having to spawn a subthread for each |
21 | * direction of each handle. Technically it isn't necessary for |
22 | * serial ports, since we could use overlapped I/O within the main |
23 | * thread and wait directly on the event objects in the OVERLAPPED |
24 | * structures. However, we can't use this trick for some types of |
25 | * file handle at all - for some reason Windows restricts use of |
26 | * OVERLAPPED to files which were opened with the overlapped flag - |
27 | * and so we must use threads for those. This being the case, it's |
28 | * simplest just to use threads for everything rather than trying |
29 | * to keep track of multiple completely separate mechanisms.) |
34292b1d |
30 | */ |
31 | |
34292b1d |
#include <assert.h>
#include <stdint.h>

#include "putty.h"
35 | |
36 | /* ---------------------------------------------------------------------- |
37 | * Generic definitions. |
38 | */ |
39 | |
40 | /* |
41 | * Maximum amount of backlog we will allow to build up on an input |
42 | * handle before we stop reading from it. |
43 | */ |
44 | #define MAX_BACKLOG 32768 |
45 | |
46 | struct handle_generic { |
47 | /* |
48 | * Initial fields common to both handle_input and handle_output |
49 | * structures. |
50 | * |
51 | * The three HANDLEs are set up at initialisation time and are |
52 | * thereafter read-only to both main thread and subthread. |
53 | * `moribund' is only used by the main thread; `done' is |
54 | * written by the main thread before signalling to the |
55 | * subthread. `defunct' and `busy' are used only by the main |
56 | * thread. |
57 | */ |
58 | HANDLE h; /* the handle itself */ |
59 | HANDLE ev_to_main; /* event used to signal main thread */ |
60 | HANDLE ev_from_main; /* event used to signal back to us */ |
61 | int moribund; /* are we going to kill this soon? */ |
62 | int done; /* request subthread to terminate */ |
63 | int defunct; /* has the subthread already gone? */ |
64 | int busy; /* operation currently in progress? */ |
0e03ceff |
65 | void *privdata; /* for client to remember who they are */ |
34292b1d |
66 | }; |
67 | |
68 | /* ---------------------------------------------------------------------- |
69 | * Input threads. |
70 | */ |
71 | |
72 | /* |
73 | * Data required by an input thread. |
74 | */ |
75 | struct handle_input { |
76 | /* |
77 | * Copy of the handle_generic structure. |
78 | */ |
79 | HANDLE h; /* the handle itself */ |
80 | HANDLE ev_to_main; /* event used to signal main thread */ |
81 | HANDLE ev_from_main; /* event used to signal back to us */ |
82 | int moribund; /* are we going to kill this soon? */ |
83 | int done; /* request subthread to terminate */ |
84 | int defunct; /* has the subthread already gone? */ |
85 | int busy; /* operation currently in progress? */ |
0e03ceff |
86 | void *privdata; /* for client to remember who they are */ |
34292b1d |
87 | |
88 | /* |
bdebd7e9 |
89 | * Data set at initialisation and then read-only. |
90 | */ |
91 | int flags; |
92 | |
93 | /* |
34292b1d |
94 | * Data set by the input thread before signalling ev_to_main, |
95 | * and read by the main thread after receiving that signal. |
96 | */ |
97 | char buffer[4096]; /* the data read from the handle */ |
98 | DWORD len; /* how much data that was */ |
99 | int readret; /* lets us know about read errors */ |
100 | |
101 | /* |
102 | * Callback function called by this module when data arrives on |
103 | * an input handle. |
104 | */ |
105 | handle_inputfn_t gotdata; |
106 | }; |
107 | |
108 | /* |
109 | * The actual thread procedure for an input thread. |
110 | */ |
111 | static DWORD WINAPI handle_input_threadfunc(void *param) |
112 | { |
113 | struct handle_input *ctx = (struct handle_input *) param; |
bdebd7e9 |
114 | OVERLAPPED ovl, *povl; |
758a1377 |
115 | HANDLE oev; |
c606abbc |
116 | int readlen; |
bdebd7e9 |
117 | |
758a1377 |
118 | if (ctx->flags & HANDLE_FLAG_OVERLAPPED) { |
bdebd7e9 |
119 | povl = &ovl; |
758a1377 |
120 | oev = CreateEvent(NULL, TRUE, FALSE, NULL); |
121 | } else { |
bdebd7e9 |
122 | povl = NULL; |
758a1377 |
123 | } |
34292b1d |
124 | |
c606abbc |
125 | if (ctx->flags & HANDLE_FLAG_UNITBUFFER) |
126 | readlen = 1; |
127 | else |
128 | readlen = sizeof(ctx->buffer); |
129 | |
34292b1d |
130 | while (1) { |
758a1377 |
131 | if (povl) { |
bdebd7e9 |
132 | memset(povl, 0, sizeof(OVERLAPPED)); |
758a1377 |
133 | povl->hEvent = oev; |
134 | } |
c606abbc |
135 | ctx->readret = ReadFile(ctx->h, ctx->buffer, readlen, |
bdebd7e9 |
136 | &ctx->len, povl); |
758a1377 |
137 | if (povl && !ctx->readret && GetLastError() == ERROR_IO_PENDING) { |
138 | WaitForSingleObject(povl->hEvent, INFINITE); |
139 | ctx->readret = GetOverlappedResult(ctx->h, povl, &ctx->len, FALSE); |
140 | } |
bdebd7e9 |
141 | |
34292b1d |
142 | if (!ctx->readret) |
143 | ctx->len = 0; |
144 | |
bdebd7e9 |
145 | if (ctx->readret && ctx->len == 0 && |
146 | (ctx->flags & HANDLE_FLAG_IGNOREEOF)) |
147 | continue; |
148 | |
34292b1d |
149 | SetEvent(ctx->ev_to_main); |
150 | |
151 | if (!ctx->len) |
152 | break; |
153 | |
154 | WaitForSingleObject(ctx->ev_from_main, INFINITE); |
155 | if (ctx->done) |
156 | break; /* main thread told us to shut down */ |
157 | } |
158 | |
758a1377 |
159 | if (povl) |
160 | CloseHandle(oev); |
161 | |
34292b1d |
162 | return 0; |
163 | } |
164 | |
165 | /* |
166 | * This is called after a succcessful read, or from the |
167 | * `unthrottle' function. It decides whether or not to begin a new |
168 | * read operation. |
169 | */ |
170 | static void handle_throttle(struct handle_input *ctx, int backlog) |
171 | { |
50ab783a |
172 | if (ctx->defunct) |
173 | return; |
34292b1d |
174 | |
175 | /* |
176 | * If there's a read operation already in progress, do nothing: |
177 | * when that completes, we'll come back here and be in a |
178 | * position to make a better decision. |
179 | */ |
180 | if (ctx->busy) |
181 | return; |
182 | |
183 | /* |
184 | * Otherwise, we must decide whether to start a new read based |
185 | * on the size of the backlog. |
186 | */ |
187 | if (backlog < MAX_BACKLOG) { |
188 | SetEvent(ctx->ev_from_main); |
189 | ctx->busy = TRUE; |
190 | } |
191 | } |
192 | |
193 | /* ---------------------------------------------------------------------- |
194 | * Output threads. |
195 | */ |
196 | |
197 | /* |
198 | * Data required by an output thread. |
199 | */ |
200 | struct handle_output { |
201 | /* |
202 | * Copy of the handle_generic structure. |
203 | */ |
204 | HANDLE h; /* the handle itself */ |
205 | HANDLE ev_to_main; /* event used to signal main thread */ |
206 | HANDLE ev_from_main; /* event used to signal back to us */ |
207 | int moribund; /* are we going to kill this soon? */ |
208 | int done; /* request subthread to terminate */ |
209 | int defunct; /* has the subthread already gone? */ |
210 | int busy; /* operation currently in progress? */ |
0e03ceff |
211 | void *privdata; /* for client to remember who they are */ |
34292b1d |
212 | |
213 | /* |
bdebd7e9 |
214 | * Data set at initialisation and then read-only. |
215 | */ |
216 | int flags; |
217 | |
218 | /* |
34292b1d |
219 | * Data set by the main thread before signalling ev_from_main, |
220 | * and read by the input thread after receiving that signal. |
221 | */ |
222 | char *buffer; /* the data to write */ |
223 | DWORD len; /* how much data there is */ |
224 | |
225 | /* |
226 | * Data set by the input thread before signalling ev_to_main, |
227 | * and read by the main thread after receiving that signal. |
228 | */ |
229 | DWORD lenwritten; /* how much data we actually wrote */ |
230 | int writeret; /* return value from WriteFile */ |
231 | |
232 | /* |
233 | * Data only ever read or written by the main thread. |
234 | */ |
235 | bufchain queued_data; /* data still waiting to be written */ |
236 | |
237 | /* |
238 | * Callback function called when the backlog in the bufchain |
239 | * drops. |
240 | */ |
241 | handle_outputfn_t sentdata; |
242 | }; |
243 | |
244 | static DWORD WINAPI handle_output_threadfunc(void *param) |
245 | { |
246 | struct handle_output *ctx = (struct handle_output *) param; |
bdebd7e9 |
247 | OVERLAPPED ovl, *povl; |
248 | |
249 | if (ctx->flags & HANDLE_FLAG_OVERLAPPED) |
250 | povl = &ovl; |
251 | else |
252 | povl = NULL; |
34292b1d |
253 | |
254 | while (1) { |
255 | WaitForSingleObject(ctx->ev_from_main, INFINITE); |
256 | if (ctx->done) { |
257 | SetEvent(ctx->ev_to_main); |
258 | break; |
259 | } |
bdebd7e9 |
260 | if (povl) |
261 | memset(povl, 0, sizeof(OVERLAPPED)); |
34292b1d |
262 | ctx->writeret = WriteFile(ctx->h, ctx->buffer, ctx->len, |
bdebd7e9 |
263 | &ctx->lenwritten, povl); |
264 | if (povl && !ctx->writeret && GetLastError() == ERROR_IO_PENDING) |
265 | ctx->writeret = GetOverlappedResult(ctx->h, povl, |
266 | &ctx->lenwritten, TRUE); |
267 | |
34292b1d |
268 | SetEvent(ctx->ev_to_main); |
269 | if (!ctx->writeret) |
270 | break; |
271 | } |
272 | |
273 | return 0; |
274 | } |
275 | |
276 | static void handle_try_output(struct handle_output *ctx) |
277 | { |
278 | void *senddata; |
279 | int sendlen; |
280 | |
281 | if (!ctx->busy && bufchain_size(&ctx->queued_data)) { |
282 | bufchain_prefix(&ctx->queued_data, &senddata, &sendlen); |
283 | ctx->buffer = senddata; |
284 | ctx->len = sendlen; |
285 | SetEvent(ctx->ev_from_main); |
286 | ctx->busy = TRUE; |
287 | } |
288 | } |
289 | |
290 | /* ---------------------------------------------------------------------- |
291 | * Unified code handling both input and output threads. |
292 | */ |
293 | |
294 | struct handle { |
295 | int output; |
296 | union { |
297 | struct handle_generic g; |
298 | struct handle_input i; |
299 | struct handle_output o; |
300 | } u; |
301 | }; |
302 | |
303 | static tree234 *handles_by_evtomain; |
304 | |
305 | static int handle_cmp_evtomain(void *av, void *bv) |
306 | { |
307 | struct handle *a = (struct handle *)av; |
308 | struct handle *b = (struct handle *)bv; |
309 | |
310 | if ((unsigned)a->u.g.ev_to_main < (unsigned)b->u.g.ev_to_main) |
311 | return -1; |
312 | else if ((unsigned)a->u.g.ev_to_main > (unsigned)b->u.g.ev_to_main) |
313 | return +1; |
314 | else |
315 | return 0; |
316 | } |
317 | |
318 | static int handle_find_evtomain(void *av, void *bv) |
319 | { |
320 | HANDLE *a = (HANDLE *)av; |
321 | struct handle *b = (struct handle *)bv; |
322 | |
323 | if ((unsigned)*a < (unsigned)b->u.g.ev_to_main) |
324 | return -1; |
325 | else if ((unsigned)*a > (unsigned)b->u.g.ev_to_main) |
326 | return +1; |
327 | else |
328 | return 0; |
329 | } |
330 | |
0e03ceff |
331 | struct handle *handle_input_new(HANDLE handle, handle_inputfn_t gotdata, |
bdebd7e9 |
332 | void *privdata, int flags) |
34292b1d |
333 | { |
334 | struct handle *h = snew(struct handle); |
335 | |
336 | h->output = FALSE; |
337 | h->u.i.h = handle; |
338 | h->u.i.ev_to_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
339 | h->u.i.ev_from_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
340 | h->u.i.gotdata = gotdata; |
34292b1d |
341 | h->u.i.defunct = FALSE; |
342 | h->u.i.moribund = FALSE; |
343 | h->u.i.done = FALSE; |
0e03ceff |
344 | h->u.i.privdata = privdata; |
bdebd7e9 |
345 | h->u.i.flags = flags; |
34292b1d |
346 | |
347 | if (!handles_by_evtomain) |
348 | handles_by_evtomain = newtree234(handle_cmp_evtomain); |
349 | add234(handles_by_evtomain, h); |
350 | |
351 | CreateThread(NULL, 0, handle_input_threadfunc, |
352 | &h->u.i, 0, NULL); |
2ceabd36 |
353 | h->u.i.busy = TRUE; |
34292b1d |
354 | |
355 | return h; |
356 | } |
357 | |
0e03ceff |
358 | struct handle *handle_output_new(HANDLE handle, handle_outputfn_t sentdata, |
bdebd7e9 |
359 | void *privdata, int flags) |
34292b1d |
360 | { |
361 | struct handle *h = snew(struct handle); |
362 | |
363 | h->output = TRUE; |
364 | h->u.o.h = handle; |
365 | h->u.o.ev_to_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
366 | h->u.o.ev_from_main = CreateEvent(NULL, FALSE, FALSE, NULL); |
367 | h->u.o.busy = FALSE; |
368 | h->u.o.defunct = FALSE; |
369 | h->u.o.moribund = FALSE; |
370 | h->u.o.done = FALSE; |
0e03ceff |
371 | h->u.o.privdata = privdata; |
34292b1d |
372 | bufchain_init(&h->u.o.queued_data); |
373 | h->u.o.sentdata = sentdata; |
bdebd7e9 |
374 | h->u.o.flags = flags; |
34292b1d |
375 | |
376 | if (!handles_by_evtomain) |
377 | handles_by_evtomain = newtree234(handle_cmp_evtomain); |
378 | add234(handles_by_evtomain, h); |
379 | |
380 | CreateThread(NULL, 0, handle_output_threadfunc, |
381 | &h->u.i, 0, NULL); |
382 | |
383 | return h; |
384 | } |
385 | |
386 | int handle_write(struct handle *h, const void *data, int len) |
387 | { |
388 | assert(h->output); |
389 | bufchain_add(&h->u.o.queued_data, data, len); |
390 | handle_try_output(&h->u.o); |
391 | return bufchain_size(&h->u.o.queued_data); |
392 | } |
393 | |
394 | HANDLE *handle_get_events(int *nevents) |
395 | { |
396 | HANDLE *ret; |
397 | struct handle *h; |
398 | int i, n, size; |
399 | |
400 | /* |
401 | * Go through our tree counting the handle objects currently |
402 | * engaged in useful activity. |
403 | */ |
404 | ret = NULL; |
405 | n = size = 0; |
406 | if (handles_by_evtomain) { |
407 | for (i = 0; (h = index234(handles_by_evtomain, i)) != NULL; i++) { |
408 | if (h->u.g.busy) { |
409 | if (n >= size) { |
410 | size += 32; |
411 | ret = sresize(ret, size, HANDLE); |
412 | } |
413 | ret[n++] = h->u.g.ev_to_main; |
414 | } |
415 | } |
416 | } |
417 | |
418 | *nevents = n; |
419 | return ret; |
420 | } |
421 | |
422 | static void handle_destroy(struct handle *h) |
423 | { |
424 | if (h->output) |
425 | bufchain_clear(&h->u.o.queued_data); |
426 | CloseHandle(h->u.g.ev_from_main); |
427 | CloseHandle(h->u.g.ev_to_main); |
428 | del234(handles_by_evtomain, h); |
429 | sfree(h); |
430 | } |
431 | |
432 | void handle_free(struct handle *h) |
433 | { |
434 | /* |
435 | * If the handle is currently busy, we cannot immediately free |
436 | * it. Instead we must wait until it's finished its current |
437 | * operation, because otherwise the subthread will write to |
438 | * invalid memory after we free its context from under it. |
439 | */ |
440 | assert(h && !h->u.g.moribund); |
441 | if (h->u.g.busy) { |
442 | /* |
443 | * Just set the moribund flag, which will be noticed next |
444 | * time an operation completes. |
445 | */ |
446 | h->u.g.moribund = TRUE; |
447 | } else if (h->u.g.defunct) { |
448 | /* |
449 | * There isn't even a subthread; we can go straight to |
450 | * handle_destroy. |
451 | */ |
452 | handle_destroy(h); |
453 | } else { |
454 | /* |
455 | * The subthread is alive but not busy, so we now signal it |
456 | * to die. Set the moribund flag to indicate that it will |
457 | * want destroying after that. |
458 | */ |
459 | h->u.g.moribund = TRUE; |
460 | h->u.g.done = TRUE; |
c969e831 |
461 | h->u.g.busy = TRUE; |
34292b1d |
462 | SetEvent(h->u.g.ev_from_main); |
463 | } |
464 | } |
465 | |
466 | void handle_got_event(HANDLE event) |
467 | { |
468 | struct handle *h; |
469 | |
470 | assert(handles_by_evtomain); |
471 | h = find234(handles_by_evtomain, &event, handle_find_evtomain); |
472 | if (!h) { |
473 | /* |
474 | * This isn't an error condition. If two or more event |
475 | * objects were signalled during the same select operation, |
476 | * and processing of the first caused the second handle to |
477 | * be closed, then it will sometimes happen that we receive |
478 | * an event notification here for a handle which is already |
479 | * deceased. In that situation we simply do nothing. |
480 | */ |
481 | return; |
482 | } |
483 | |
484 | if (h->u.g.moribund) { |
485 | /* |
486 | * A moribund handle is already treated as dead from the |
487 | * external user's point of view, so do nothing with the |
488 | * actual event. Just signal the thread to die if |
489 | * necessary, or destroy the handle if not. |
490 | */ |
491 | if (h->u.g.done) { |
492 | handle_destroy(h); |
493 | } else { |
494 | h->u.g.done = TRUE; |
c969e831 |
495 | h->u.g.busy = TRUE; |
34292b1d |
496 | SetEvent(h->u.g.ev_from_main); |
497 | } |
498 | return; |
499 | } |
500 | |
501 | if (!h->output) { |
502 | int backlog; |
503 | |
504 | h->u.i.busy = FALSE; |
505 | |
506 | /* |
507 | * A signal on an input handle means data has arrived. |
508 | */ |
509 | if (h->u.i.len == 0) { |
510 | /* |
511 | * EOF, or (nearly equivalently) read error. |
512 | */ |
513 | h->u.i.gotdata(h, NULL, (h->u.i.readret ? 0 : -1)); |
514 | h->u.i.defunct = TRUE; |
515 | } else { |
516 | backlog = h->u.i.gotdata(h, h->u.i.buffer, h->u.i.len); |
517 | handle_throttle(&h->u.i, backlog); |
518 | } |
519 | } else { |
520 | h->u.o.busy = FALSE; |
521 | |
522 | /* |
523 | * A signal on an output handle means we have completed a |
524 | * write. Call the callback to indicate that the output |
525 | * buffer size has decreased, or to indicate an error. |
526 | */ |
527 | if (!h->u.o.writeret) { |
528 | /* |
529 | * Write error. Send a negative value to the callback, |
530 | * and mark the thread as defunct (because the output |
531 | * thread is terminating by now). |
532 | */ |
533 | h->u.o.sentdata(h, -1); |
534 | h->u.o.defunct = TRUE; |
535 | } else { |
536 | bufchain_consume(&h->u.o.queued_data, h->u.o.lenwritten); |
537 | h->u.o.sentdata(h, bufchain_size(&h->u.o.queued_data)); |
538 | handle_try_output(&h->u.o); |
539 | } |
540 | } |
541 | } |
542 | |
543 | void handle_unthrottle(struct handle *h, int backlog) |
544 | { |
545 | assert(!h->output); |
546 | handle_throttle(&h->u.i, backlog); |
547 | } |
548 | |
549 | int handle_backlog(struct handle *h) |
550 | { |
551 | assert(h->output); |
552 | return bufchain_size(&h->u.o.queued_data); |
553 | } |
0e03ceff |
554 | |
555 | void *handle_get_privdata(struct handle *h) |
556 | { |
557 | return h->u.g.privdata; |
558 | } |