-rw-r--r-- | Changes     |  4
-rw-r--r-- | ev.c        | 13
-rw-r--r-- | ev.h        | 10
-rw-r--r-- | ev.pod      | 10
-rw-r--r-- | ev_epoll.c  |  6
-rw-r--r-- | ev_kqueue.c |  2
-rw-r--r-- | ev_poll.c   |  6
-rw-r--r-- | ev_port.c   |  2
-rw-r--r-- | ev_select.c |  2
-rw-r--r-- | ev_vars.h   |  5
-rw-r--r-- | ev_wrap.h   |  8
11 files changed, 52 insertions, 16 deletions
diff --git a/Changes b/Changes
--- a/Changes
+++ b/Changes
@@ -2,6 +2,7 @@ Revision history for libev, a high-performance and full-featured event loop.
 
 TODO: ev_walk
 TODO: remain
+TODO: on_call_pending, on_suspend_resume ev_invoke_pending (EV_P)
  - ev_unloop and ev_loop wrongly used a global variable to exit loops,
    instead of using a per-loop variable (bug caught by accident...).
  - Denis F. Latypoff corrected many typos in example code snippets.
@@ -11,7 +12,8 @@ TODO: remain
    use it (reported by ry@tinyclouds).
  - use GetSystemTimeAsFileTime instead of _timeb on windows, for
    slightly higher accuracy.
- - properly declare ev_loop_verify even when !EV_MULTIPLICITY.
+ - properly declare ev_loop_verify and ev_now_update even when
+   !EV_MULTIPLICITY.
  - implement ev_loop_depth.
  - do not compile in any priority code when EV_MAXPRI == EV_MINPRI.
 
diff --git a/ev.c b/ev.c
--- a/ev.c
+++ b/ev.c
@@ -1413,6 +1413,7 @@ loop_init (EV_P_ unsigned int flags)
       mn_now    = get_clock ();
       now_floor = mn_now;
       rtmn_diff = ev_rt_now - mn_now;
+      invoke_cb = ev_invoke_pending;
 
       io_blocktime      = 0.;
       timeout_blocktime = 0.;
@@ -1787,8 +1788,8 @@ ev_invoke (EV_P_ void *w, int revents)
   EV_CB_INVOKE ((W)w, revents);
 }
 
-inline_speed void
-call_pending (EV_P)
+void
+ev_invoke_pending (EV_P)
 {
   int pri;
 
@@ -2040,7 +2041,7 @@ ev_loop (EV_P_ int flags)
 
   loop_done = EVUNLOOP_CANCEL;
 
-  call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
+  invoke_cb (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
 
   do
     {
@@ -2063,7 +2064,7 @@ ev_loop (EV_P_ int flags)
       if (forkcnt)
         {
           queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
-          call_pending (EV_A);
+          invoke_cb (EV_A);
         }
 #endif
 
@@ -2071,7 +2072,7 @@ ev_loop (EV_P_ int flags)
       if (expect_false (preparecnt))
        {
           queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
-          call_pending (EV_A);
+          invoke_cb (EV_A);
         }
 
       /* we might have forked, so reify kernel state if necessary */
@@ -2152,7 +2153,7 @@ ev_loop (EV_P_ int flags)
       if (expect_false (checkcnt))
         queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
 
-      call_pending (EV_A);
+      invoke_cb (EV_A);
     }
   while (expect_true (
     activecnt
diff --git a/ev.h b/ev.h
--- a/ev.h
+++ b/ev.h
@@ -469,7 +469,6 @@ void ev_loop_destroy (EV_P);
 void ev_loop_fork (EV_P);
 
 ev_tstamp ev_now (EV_P); /* time w.r.t. timers and the eventloop, updated after each poll */
-void ev_now_update (EV_P);
 
 #else
 
@@ -503,11 +502,14 @@ void ev_default_destroy (void); /* destroy the default loop */
 /* you can actually call it at any time, anywhere :) */
 void ev_default_fork (void);
 
-unsigned int ev_backend (EV_P); /* backend in use by loop */
-unsigned int ev_loop_count (EV_P); /* number of loop iterations */
-unsigned int ev_loop_depth (EV_P); /* #ev_loop enters - #ev_loop leaves */
+unsigned int ev_backend (EV_P);    /* backend in use by loop */
+unsigned int ev_loop_count (EV_P); /* number of loop iterations */
+unsigned int ev_loop_depth (EV_P); /* #ev_loop enters - #ev_loop leaves */
 void ev_loop_verify (EV_P);        /* abort if loop data corrupted */
 
+void ev_now_update (EV_P);         /* update event loop time */
+void ev_invoke_pending (EV_P);     /* invoke all pending watchers */
+
 #if EV_WALK_ENABLE
 /* walk (almost) all watchers in the loop of a given type, invoking the */
 /* callback on every such watcher. The callback might stop the watcher, */
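The ev.c/ev.h changes above promote the internal call_pending helper to the public ev_invoke_pending and route every internal invocation through the new per-loop invoke_cb pointer, so an embedder can interpose on how (or under which lock) pending watcher callbacks are run. Below is a minimal sketch of such an interposer: ev_invoke_pending comes from this diff, but the mutex and the wiring that would install the wrapper into invoke_cb are illustrative assumptions, since the diff adds only the slot, not a public setter.

    #include <ev.h>
    #include <pthread.h>

    /* hypothetical application lock guarding state shared with watcher callbacks */
    static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;

    /* candidate invoke_cb replacement: ev_invoke_pending still performs the
       actual dispatch of all pending watchers; this wrapper only brackets it */
    static void
    locked_invoke_pending (struct ev_loop *loop)
    {
      pthread_mutex_lock (&cb_lock);
      ev_invoke_pending (loop);
      pthread_mutex_unlock (&cb_lock);
    }

The on_call_pending TODO entry in Changes suggests the same hook could also be used to hand invocation off to a different thread entirely, which would then call ev_invoke_pending itself.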
diff --git a/ev.pod b/ev.pod
--- a/ev.pod
+++ b/ev.pod
@@ -3673,9 +3673,13 @@ defined to be C<0>, then they are not.
 
 =item EV_MINIMAL
 
 If you need to shave off some kilobytes of code at the expense of some
-speed, define this symbol to C<1>. Currently this is used to override some
-inlining decisions, saves roughly 30% code size on amd64. It also selects a
-much smaller 2-heap for timer management over the default 4-heap.
+speed (but with the full API), define this symbol to C<1>. Currently this
+is used to override some inlining decisions, saves roughly 30% code size
+on amd64. It also selects a much smaller 2-heap for timer management over
+the default 4-heap.
+
+You can save even more by disabling watcher types you do not need and
+setting C<EV_MAXPRI> == C<EV_MINPRI>.
 
 =item EV_PID_HASHSIZE
diff --git a/ev_epoll.c b/ev_epoll.c
--- a/ev_epoll.c
+++ b/ev_epoll.c
@@ -119,7 +119,11 @@ static void
 epoll_poll (EV_P_ ev_tstamp timeout)
 {
   int i;
-  int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+  int eventcnt;
+
+  if (expect_false (suspend_cb)) suspend_cb (EV_A);
+  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+  if (expect_false (resume_cb)) resume_cb (EV_A);
 
   if (expect_false (eventcnt < 0))
     {
diff --git a/ev_kqueue.c b/ev_kqueue.c
index 63828d6..3a4824f 100644
--- a/ev_kqueue.c
+++ b/ev_kqueue.c
@@ -93,9 +93,11 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
       kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
     }
 
+  if (expect_false (suspend_cb)) suspend_cb (EV_A);
   ts.tv_sec  = (time_t)timeout;
   ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
   res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
+  if (expect_false (resume_cb)) resume_cb (EV_A);
   kqueue_changecnt = 0;
 
   if (expect_false (res < 0))
diff --git a/ev_poll.c b/ev_poll.c
--- a/ev_poll.c
+++ b/ev_poll.c
@@ -89,7 +89,11 @@ static void
 poll_poll (EV_P_ ev_tstamp timeout)
 {
   struct pollfd *p;
-  int res = poll (polls, pollcnt, (int)ceil (timeout * 1000.));
+  int res;
+
+  if (expect_false (suspend_cb)) suspend_cb (EV_A);
+  res = poll (polls, pollcnt, (int)ceil (timeout * 1000.));
+  if (expect_false (resume_cb)) resume_cb (EV_A);
 
   if (expect_false (res < 0))
     {
diff --git a/ev_port.c b/ev_port.c
--- a/ev_port.c
+++ b/ev_port.c
@@ -85,9 +85,11 @@ port_poll (EV_P_ ev_tstamp timeout)
   struct timespec ts;
   uint_t nget = 1;
 
+  if (expect_false (suspend_cb)) suspend_cb (EV_A);
   ts.tv_sec  = (time_t)timeout;
   ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9;
   res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts);
+  if (expect_false (resume_cb)) resume_cb (EV_A);
 
   if (res == -1)
     {
diff --git a/ev_select.c b/ev_select.c
index 173c286..ffb4d30 100644
--- a/ev_select.c
+++ b/ev_select.c
@@ -140,6 +140,7 @@ select_poll (EV_P_ ev_tstamp timeout)
   int res;
   int fd_setsize;
 
+  if (expect_false (suspend_cb)) suspend_cb (EV_A);
   tv.tv_sec  = (long)timeout;
   tv.tv_usec = (long)((timeout - (ev_tstamp)tv.tv_sec) * 1e6);
 
@@ -166,6 +167,7 @@ select_poll (EV_P_ ev_tstamp timeout)
 #else
   res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
 #endif
+  if (expect_false (resume_cb)) resume_cb (EV_A);
 
   if (expect_false (res < 0))
     {
diff --git a/ev_vars.h b/ev_vars.h
--- a/ev_vars.h
+++ b/ev_vars.h
@@ -168,5 +168,10 @@ VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */
 VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE])
 #endif
 
+VARx(void *, userdata)
+VAR (suspend_cb, void (*suspend_cb)(EV_P))
+VAR (resume_cb , void (*resume_cb) (EV_P))
+VAR (invoke_cb , void (*invoke_cb) (EV_P))
+
 #undef VARx
diff --git a/ev_wrap.h b/ev_wrap.h
--- a/ev_wrap.h
+++ b/ev_wrap.h
@@ -79,6 +79,10 @@
 #define fs_w ((loop)->fs_w)
 #define fs_2625 ((loop)->fs_2625)
 #define fs_hash ((loop)->fs_hash)
+#define userdata ((loop)->userdata)
+#define suspend_cb ((loop)->suspend_cb)
+#define resume_cb ((loop)->resume_cb)
+#define invoke_cb ((loop)->invoke_cb)
 #else
 #undef EV_WRAP_H
 #undef now_floor
@@ -159,4 +163,8 @@
 #undef fs_w
 #undef fs_2625
 #undef fs_hash
+#undef userdata
+#undef suspend_cb
+#undef resume_cb
+#undef invoke_cb
 #endif
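The backend changes all follow the same pattern: the new suspend_cb hook runs immediately before the blocking kernel call (epoll_wait, kevent, poll, port_getn, select) and resume_cb immediately after it returns. Together with the new per-loop userdata slot, the obvious use is thread locking: drop a mutex that guards the loop while it sleeps in the kernel, and re-acquire it before any loop data is touched again. A sketch of such a callback pair follows; the names, the mutex, and the way the callbacks get attached to the loop's suspend_cb/resume_cb slots are assumptions for illustration, since this diff adds only the slots themselves.

    #include <ev.h>
    #include <pthread.h>

    /* hypothetical mutex serializing all access to the loop between threads */
    static pthread_mutex_t loop_lock = PTHREAD_MUTEX_INITIALIZER;

    /* candidate suspend_cb: called just before the backend blocks in the
       kernel; release the lock so other threads may start/stop watchers
       (they must wake the loop afterwards, e.g. via an ev_async watcher) */
    static void
    loop_suspend (struct ev_loop *loop)
    {
      pthread_mutex_unlock (&loop_lock);
    }

    /* candidate resume_cb: called right after the blocking call returns;
       re-acquire the lock before any pending watchers are invoked */
    static void
    loop_resume (struct ev_loop *loop)
    {
      pthread_mutex_lock (&loop_lock);
    }

A thread other than the loop thread would then hold loop_lock around calls such as ev_io_start and nudge the loop with ev_async_send so the change is picked up on the next iteration.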