/**@file
 * This file is part of the I/O library of the Lely core libraries (1.9.2); it
 * contains the implementation of the I/O polling interface.
 *
 * @see lely/io/poll.h
 */

#include "io.h"
#include <lely/io/poll.h>
#include <lely/util/cmp.h>
#include <lely/util/errnum.h>
#include <lely/util/rbtree.h>
#ifdef _WIN32
#include <lely/io/sock.h>
#else
#include <lely/io/pipe.h>
#endif
#include "handle.h"

#include <assert.h>
#include <stdlib.h>

#if _POSIX_C_SOURCE >= 200112L
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
#include <sys/epoll.h>
#else
#include <poll.h>
#endif
#endif

/// An I/O polling interface.
struct __io_poll {
#ifndef LELY_NO_THREADS
	/// The mutex protecting #tree.
	mtx_t mtx;
#endif
	/// The tree containing the I/O device handles being watched.
	struct rbtree tree;
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	/// A self-pipe used to generate signal events.
	io_handle_t pipe[2];
#endif
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	/// The epoll file descriptor.
	int epfd;
#endif
};

/// The attributes of an I/O device handle being watched.
struct io_watch {
	/// The node in the tree of file descriptors.
	struct rbnode node;
	/// A pointer to the I/O device handle.
	struct io_handle *handle;
	/// The events being watched.
	struct io_event event;
	/**
	 * A flag indicating whether to keep watching the file descriptor after
	 * an event occurs.
	 */
	int keep;
};

#ifdef LELY_NO_THREADS
#define io_poll_lock(poll)
#define io_poll_unlock(poll)
#else
static void io_poll_lock(io_poll_t *poll);
static void io_poll_unlock(io_poll_t *poll);
#endif

static struct io_watch *io_poll_insert(
		io_poll_t *poll, struct io_handle *handle);
static void io_poll_remove(io_poll_t *poll, struct io_watch *watch);

#if _POSIX_C_SOURCE >= 200112L \
		&& !(defined(__linux__) && defined(HAVE_SYS_EPOLL_H))
static int _poll(struct pollfd *fds, nfds_t nfds, int timeout);
#endif

void *
__io_poll_alloc(void)
{
	void *ptr = malloc(sizeof(struct __io_poll));
	if (__unlikely(!ptr))
		set_errc(errno2c(errno));
	return ptr;
}

void
__io_poll_free(void *ptr)
{
	free(ptr);
}

struct __io_poll *
__io_poll_init(struct __io_poll *poll)
{
	assert(poll);

	int errc = 0;

#ifndef LELY_NO_THREADS
	mtx_init(&poll->mtx, mtx_plain);
#endif

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	// Track attributes with the I/O device handle.
	rbtree_init(&poll->tree, ptr_cmp);
#else
	// Track attributes with the native file descriptor.
#ifdef _WIN32
	rbtree_init(&poll->tree, ptr_cmp);
#else
	rbtree_init(&poll->tree, int_cmp);
#endif
#endif

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	// Create a self-pipe for signal events.
#ifdef _WIN32
	// clang-format off
	if (__unlikely(io_open_socketpair(IO_SOCK_IPV4, IO_SOCK_STREAM,
			poll->pipe) == -1)) {
	// clang-format on
#else
	if (__unlikely(io_open_pipe(poll->pipe) == -1)) {
#endif
		errc = get_errc();
		goto error_open_pipe;
	}

	// Make both ends of the self-pipe non-blocking.
	if (__unlikely(io_set_flags(poll->pipe[0], IO_FLAG_NONBLOCK) == -1)) {
		errc = get_errc();
		goto error_set_flags;
	}
	if (__unlikely(io_set_flags(poll->pipe[1], IO_FLAG_NONBLOCK) == -1)) {
		errc = get_errc();
		goto error_set_flags;
	}
#endif

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	poll->epfd = epoll_create1(EPOLL_CLOEXEC);
	if (__unlikely(poll->epfd == -1)) {
		errc = get_errc();
		goto error_epoll_create1;
	}

	// Register the read end of the self-pipe with epoll.
	struct epoll_event ev = { .events = EPOLLIN,
		.data.ptr = poll->pipe[0] };
	// clang-format off
	if (__unlikely(epoll_ctl(poll->epfd, EPOLL_CTL_ADD, poll->pipe[0]->fd,
			&ev) == -1)) {
	// clang-format on
		errc = get_errc();
		goto error_epoll_ctl;
	}
#endif

	return poll;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
error_epoll_ctl:
	close(poll->epfd);
error_epoll_create1:
#endif
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
error_set_flags:
	io_close(poll->pipe[1]);
	io_close(poll->pipe[0]);
error_open_pipe:
#endif
	set_errc(errc);
	return NULL;
}

void
__io_poll_fini(struct __io_poll *poll)
{
	assert(poll);

	rbtree_foreach (&poll->tree, node)
		io_poll_remove(poll, structof(node, struct io_watch, node));

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	close(poll->epfd);
#endif

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	io_close(poll->pipe[1]);
	io_close(poll->pipe[0]);
#endif

#ifndef LELY_NO_THREADS
	mtx_destroy(&poll->mtx);
#endif
}

io_poll_t *
io_poll_create(void)
{
	int errc = 0;

	io_poll_t *poll = __io_poll_alloc();
	if (__unlikely(!poll)) {
		errc = get_errc();
		goto error_alloc_poll;
	}

	if (__unlikely(!__io_poll_init(poll))) {
		errc = get_errc();
		goto error_init_poll;
	}

	return poll;

error_init_poll:
	__io_poll_free(poll);
error_alloc_poll:
	set_errc(errc);
	return NULL;
}

void
io_poll_destroy(io_poll_t *poll)
{
	if (poll) {
		__io_poll_fini(poll);
		__io_poll_free(poll);
	}
}

int
io_poll_watch(io_poll_t *poll, io_handle_t handle, struct io_event *event,
		int keep)
{
	assert(poll);

	if (__unlikely(!handle)) {
		set_errnum(ERRNUM_BADF);
		return -1;
	}

	assert(handle->vtab);
	switch (handle->vtab->type) {
#if defined(__linux__) && defined(HAVE_LINUX_CAN_H)
	case IO_TYPE_CAN:
#endif
#if _POSIX_C_SOURCE >= 200112L
	case IO_TYPE_FILE:
	case IO_TYPE_PIPE:
	case IO_TYPE_SERIAL:
#endif
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	case IO_TYPE_SOCK:
#endif
		break;
	default: set_errnum(ERRNUM_INVAL); return -1;
	}

	int errc = 0;

	io_poll_lock(poll);

	// Check if the I/O device has already been registered.
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	struct rbnode *node = rbtree_find(&poll->tree, handle);
#else
	struct rbnode *node = rbtree_find(&poll->tree, &handle->fd);
#endif
	struct io_watch *watch =
			node ? structof(node, struct io_watch, node) : NULL;
	// If event is not NULL, register the device or update the events being
	// watched. If event is NULL, remove the device.
	if (event) {
		if (!watch) {
			watch = io_poll_insert(poll, handle);
			if (__unlikely(!watch)) {
				errc = get_errc();
				goto error_watch;
			}
		}

		// Update the events being watched.
		watch->event = *event;
		watch->keep = keep;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
		// Modify or add the event to the epoll instance depending on
		// whether the file descriptor is already registered.
		int op = node ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

		struct epoll_event ev = { 0, { NULL } };
		if (event->events & IO_EVENT_READ)
			ev.events |= EPOLLIN | EPOLLRDHUP | EPOLLPRI;
		if (event->events & IO_EVENT_WRITE)
			ev.events |= EPOLLOUT;
		ev.data.ptr = watch->handle;

		// clang-format off
		if (__unlikely(epoll_ctl(poll->epfd, op, watch->handle->fd, &ev)
				== -1)) {
		// clang-format on
			errc = get_errc();
			goto error_epoll_ctl;
		}
#endif
	} else {
		if (__unlikely(!watch)) {
			errc = errnum2c(ERRNUM_INVAL);
			goto error_watch;
		}

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
		// Delete the event from the epoll instance.
		epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd, NULL);
#endif
		io_poll_remove(poll, watch);
	}

	io_poll_unlock(poll);

	return 0;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
error_epoll_ctl:
	epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd, NULL);
#endif
	io_poll_remove(poll, watch);
error_watch:
	io_poll_unlock(poll);
	set_errc(errc);
	return -1;
}
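
/*
 * Usage sketch (illustrative, not part of the original file): registering a
 * device handle with the polling interface. `poll` is assumed to come from
 * io_poll_create() and `hdl` from one of the io_open_*() functions; both
 * names are hypothetical.
 *
 *	struct io_event event = { .events = IO_EVENT_READ };
 *	// keep != 0: keep watching the handle after an event triggers.
 *	if (io_poll_watch(poll, hdl, &event, 1) == -1)
 *		abort(); // inspect get_errc() for the actual error
 *	// ...
 *	// A NULL event deregisters the handle again.
 *	io_poll_watch(poll, hdl, NULL, 0);
 */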

int
io_poll_wait(io_poll_t *poll, int maxevents, struct io_event *events,
		int timeout)
{
	assert(poll);

	if (__unlikely(maxevents < 0)) {
		set_errnum(ERRNUM_INVAL);
		return -1;
	}

	if (__unlikely(!maxevents || !events))
		return 0;

	int nevents = 0;
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	unsigned char sig = 0;
#endif

#ifdef _WIN32
	fd_set readfds;
	FD_ZERO(&readfds);

	int nwritefds = 0;
	fd_set writefds;
	FD_ZERO(&writefds);

	fd_set errorfds;
	FD_ZERO(&errorfds);

	FD_SET((SOCKET)poll->pipe[0]->fd, &readfds);

	io_poll_lock(poll);
	struct rbnode *node = rbtree_first(&poll->tree);
	while (node) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		SOCKET fd = (SOCKET)watch->handle->fd;
		if (watch->event.events & IO_EVENT_READ)
			FD_SET(fd, &readfds);
		if (watch->event.events & IO_EVENT_WRITE) {
			nwritefds++;
			FD_SET(fd, &writefds);
		}
		FD_SET(fd, &errorfds);
	}
	io_poll_unlock(poll);

	struct timeval tv = { .tv_sec = timeout / 1000,
		.tv_usec = (timeout % 1000) * 1000 };
	int result = select(0, &readfds, nwritefds ? &writefds : NULL,
			&errorfds, timeout >= 0 ? &tv : NULL);
	if (__unlikely(result == -1))
		return -1;

	// Check the read end of the self-pipe.
	if (FD_ISSET((SOCKET)poll->pipe[0]->fd, &readfds))
		sig = 1;

	io_poll_lock(poll);
	node = rbtree_first(&poll->tree);
	while (node && nevents < maxevents) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		events[nevents].events = 0;
		if (FD_ISSET((SOCKET)watch->handle->fd, &readfds)
				&& (watch->event.events & IO_EVENT_READ))
			events[nevents].events |= IO_EVENT_READ;
		if (FD_ISSET((SOCKET)watch->handle->fd, &writefds)
				&& (watch->event.events & IO_EVENT_WRITE))
			events[nevents].events |= IO_EVENT_WRITE;
		if (FD_ISSET((SOCKET)watch->handle->fd, &errorfds))
			events[nevents].events |= IO_EVENT_ERROR;
		// Ignore non-events.
		if (!events[nevents].events)
			continue;

		events[nevents].u = watch->event.u;
		nevents++;

		if (!watch->keep)
			io_poll_remove(poll, watch);
	}
	io_poll_unlock(poll);
#elif _POSIX_C_SOURCE >= 200112L
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	struct epoll_event ev[maxevents];
	int nfds;
	int errsv = errno;
	do {
		errno = errsv;
		nfds = epoll_wait(poll->epfd, ev, maxevents,
				timeout >= 0 ? timeout : -1);
	} while (__unlikely(nfds == -1 && errno == EINTR));
	if (__unlikely(nfds == -1))
		return -1;

	io_poll_lock(poll);
	for (int i = 0; i < nfds; i++) {
		// Ignore signal events; they are handled below.
		if (ev[i].data.ptr == poll->pipe[0]) {
			sig = 1;
			continue;
		}

		struct rbnode *node = rbtree_find(&poll->tree, ev[i].data.ptr);
		if (__unlikely(!node))
			continue;
		struct io_watch *watch = structof(node, struct io_watch, node);

		if (!io_handle_unique(watch->handle)) {
			events[nevents].events = 0;
			// We consider hang up and high-priority (OOB) data an
			// error.
			// clang-format off
			if (ev[i].events & (EPOLLRDHUP | EPOLLPRI | EPOLLERR
					| EPOLLHUP))
			// clang-format on
				events[nevents].events |= IO_EVENT_ERROR;
			if (ev[i].events & EPOLLIN)
				events[nevents].events |= IO_EVENT_READ;
			if (ev[i].events & EPOLLOUT)
				events[nevents].events |= IO_EVENT_WRITE;
			events[nevents].u = watch->event.u;
			nevents++;
		}

		if (io_handle_unique(watch->handle) || !watch->keep) {
			epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd,
					NULL);
			io_poll_remove(poll, watch);
		}
	}
	io_poll_unlock(poll);
#else
	io_poll_lock(poll);
	struct pollfd fds[rbtree_size(&poll->tree) + 1];
	nfds_t nfds = 0;
	// Watch the read end of the self-pipe.
	fds[nfds].fd = poll->pipe[0]->fd;
	fds[nfds].events = POLLIN;
	nfds++;
	struct rbnode *node = rbtree_first(&poll->tree);
	while (node) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		fds[nfds].fd = watch->handle->fd;
		fds[nfds].events = 0;
		if (watch->event.events & IO_EVENT_READ)
			fds[nfds].events |= POLLIN | POLLPRI;
		if (watch->event.events & IO_EVENT_WRITE)
			fds[nfds].events |= POLLOUT;
		nfds++;
	}
	io_poll_unlock(poll);

	int n;
	int errsv = errno;
	do {
		errno = errsv;
		n = _poll(fds, nfds, timeout >= 0 ? timeout : -1);
	} while (__unlikely(n == -1 && errno == EINTR));
	if (__unlikely(n == -1))
		return -1;
	maxevents = MIN(n, maxevents);

	io_poll_lock(poll);
	for (nfds_t nfd = 0; nfd < nfds && nevents < maxevents; nfd++) {
		// Ignore signal events; they are handled below.
		if (fds[nfd].fd == poll->pipe[0]->fd) {
			sig = 1;
			continue;
		}

		events[nevents].events = 0;
		// We consider hang up and high-priority (OOB) data an error.
		if (fds[nfd].revents & (POLLPRI | POLLERR | POLLHUP | POLLNVAL))
			events[nevents].events |= IO_EVENT_ERROR;
		// We don't distinguish between normal and high-priority data.
		if (fds[nfd].revents & POLLIN)
			events[nevents].events |= IO_EVENT_READ;
		if (fds[nfd].revents & POLLOUT)
			events[nevents].events |= IO_EVENT_WRITE;
		// Ignore non-events.
		if (!events[nevents].events)
			continue;

		struct rbnode *node = rbtree_find(&poll->tree, &fds[nfd].fd);
		if (__unlikely(!node))
			continue;
		struct io_watch *watch = structof(node, struct io_watch, node);

		if (!io_handle_unique(watch->handle)) {
			events[nevents].u = watch->event.u;
			nevents++;
		}

		if (io_handle_unique(watch->handle) || !watch->keep)
			io_poll_remove(poll, watch);
	}
	io_poll_unlock(poll);
#endif // __linux__ && HAVE_SYS_EPOLL_H
#else
	(void)timeout;
#endif // _WIN32

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	if (sig) {
		// If one or more signals were received, generate the
		// corresponding events.
		while (nevents < maxevents
				&& io_read(poll->pipe[0], &sig, 1) == 1) {
			events[nevents].events = IO_EVENT_SIGNAL;
			events[nevents].u.sig = sig;
			nevents++;
		}
	}
#endif

	return nevents;
}
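
/*
 * Usage sketch (illustrative assumption): a minimal event loop built on
 * io_poll_wait(). The array size and the 100 ms timeout are arbitrary, and
 * handle_signal()/handle_io() are hypothetical callbacks.
 *
 *	struct io_event events[16];
 *	for (;;) {
 *		int n = io_poll_wait(poll, 16, events, 100);
 *		if (n == -1)
 *			break; // inspect get_errc() for the actual error
 *		for (int i = 0; i < n; i++) {
 *			if (events[i].events == IO_EVENT_SIGNAL)
 *				handle_signal(events[i].u.sig);
 *			else
 *				handle_io(events[i].events, &events[i].u);
 *		}
 *	}
 *
 * Note that events[i].u is the user-supplied union copied verbatim from the
 * corresponding io_poll_watch() registration, except for signal events, where
 * u.sig holds the signal number.
 */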

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
int
io_poll_signal(io_poll_t *poll, unsigned char sig)
{
	assert(poll);

	return io_write(poll->pipe[1], &sig, 1) == 1 ? 0 : -1;
}
#endif
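
/*
 * Usage sketch (assumption): waking up a blocking io_poll_wait() from a
 * signal handler. io_poll_signal() writes the single byte sig to the write
 * end of the self-pipe (the classic self-pipe trick), and io_poll_wait()
 * reports it as an IO_EVENT_SIGNAL event with u.sig set to that byte. On
 * POSIX this boils down to a single write(), which is async-signal-safe;
 * treat that property as an assumption here.
 *
 *	static io_poll_t *poll; // hypothetical global, set at startup
 *
 *	static void
 *	on_sigint(int signum)
 *	{
 *		io_poll_signal(poll, signum);
 *	}
 */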

#ifndef LELY_NO_THREADS

static void
io_poll_lock(io_poll_t *poll)
{
	assert(poll);

	mtx_lock(&poll->mtx);
}

static void
io_poll_unlock(io_poll_t *poll)
{
	assert(poll);

	mtx_unlock(&poll->mtx);
}

#endif // !LELY_NO_THREADS

static struct io_watch *
io_poll_insert(io_poll_t *poll, struct io_handle *handle)
{
	assert(poll);
	assert(handle);

	struct io_watch *watch = malloc(sizeof(*watch));
	if (__unlikely(!watch))
		return NULL;

	watch->handle = io_handle_acquire(handle);
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	watch->node.key = watch->handle;
#else
	watch->node.key = &watch->handle->fd;
#endif
	rbtree_insert(&poll->tree, &watch->node);

	return watch;
}

static void
io_poll_remove(io_poll_t *poll, struct io_watch *watch)
{
	assert(poll);
	assert(watch);

	struct io_handle *handle = watch->handle;
	rbtree_remove(&poll->tree, &watch->node);
	free(watch);
	io_handle_release(handle);
}

#if _POSIX_C_SOURCE >= 200112L \
		&& !(defined(__linux__) && defined(HAVE_SYS_EPOLL_H))
// Wrapper for the POSIX poll() function. Inside io_poll_wait() the name
// `poll` refers to the io_poll_t parameter, so the system call cannot be
// invoked there directly; this file-scope wrapper avoids the shadowing.
static int
_poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
	return poll(fds, nfds, timeout);
}
#endif