From db2ba1d67df543c8e0dbfc578005b065983bdc94 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 22 Dec 2007 05:47:56 +0000 Subject: *** empty log message *** --- Symbols.ev | 3 +++ ev.3 | 48 +++++++++++++++++++++++++++++++-- ev.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++------- ev.h | 8 ++++-- ev.pod | 45 +++++++++++++++++++++++++++++++ ev_vars.h | 6 ++++- ev_wrap.h | 8 ++++-- libev.m4 | 8 ++++++ 8 files changed, 200 insertions(+), 17 deletions(-) diff --git a/Symbols.ev b/Symbols.ev index 501722b..d95926a 100644 --- a/Symbols.ev +++ b/Symbols.ev @@ -37,9 +37,12 @@ ev_prepare_stop ev_recommended_backends ev_ref ev_set_allocator +ev_set_io_collect_interval ev_set_syserr_cb +ev_set_timeout_collect_interval ev_signal_start ev_signal_stop +ev_sleep ev_stat_start ev_stat_stat ev_stat_stop diff --git a/ev.3 b/ev.3 index 371d846..7689fc0 100644 --- a/ev.3 +++ b/ev.3 @@ -129,7 +129,7 @@ .\" ======================================================================== .\" .IX Title "EV 1" -.TH EV 1 "2007-12-21" "perl v5.8.8" "User Contributed Perl Documentation" +.TH EV 1 "2007-12-22" "perl v5.8.8" "User Contributed Perl Documentation" .SH "NAME" libev \- a high performance full\-featured event loop written in C .SH "SYNOPSIS" @@ -257,6 +257,11 @@ library in any way. Returns the current time as libev would use it. Please note that the \&\f(CW\*(C`ev_now\*(C'\fR function is usually faster and also often returns the timestamp you actually want to know. +.IP "void ev_sleep (ev_tstamp interval)" 4 +.IX Item "void ev_sleep (ev_tstamp interval)" +Sleep for the given interval: The current thread will be blocked until +either it is interrupted or the given time interval has passed. Basically +this is a subsecond-resolution \f(CW\*(C`sleep ()\*(C'\fR. .IP "int ev_version_major ()" 4 .IX Item "int ev_version_major ()" .PD 0 @@ -465,7 +470,7 @@ but it scales phenomenally better. 
While poll and select usually scale like O(total_fds) where n is the total
number of fds (or the highest fd), epoll scales either O(1) or
O(active_fds). The epoll design has a number of shortcomings, such as
silently dropping events in some hard-to-detect
-cases and rewuiring a syscall per fd change, no fork support and bad
+cases and requiring a syscall per fd change, no fork support and bad
support for dup:
.Sp
While stopping, setting and starting an I/O watcher in the same iteration
@@ -726,6 +731,41 @@ Example: For some weird reason, unregister the above signal handler again.
\& ev_ref (loop);
\& ev_signal_stop (loop, &exitsig);
.Ve
+.IP "ev_set_io_collect_interval (ev_tstamp interval)" 4
+.IX Item "ev_set_io_collect_interval (ev_tstamp interval)"
+.PD 0
+.IP "ev_set_timeout_collect_interval (ev_tstamp interval)" 4
+.IX Item "ev_set_timeout_collect_interval (ev_tstamp interval)"
+.PD
+These advanced functions influence the time that libev will spend waiting
+for events. Both are by default \f(CW0\fR, meaning that libev will try to
+invoke timer/periodic callbacks and I/O callbacks with minimum latency.
+.Sp
+Setting these to a higher value (the \f(CW\*(C`interval\*(C'\fR \fImust\fR be >= \f(CW0\fR)
+allows libev to delay invocation of I/O and timer/periodic callbacks to
+increase efficiency of loop iterations.
+.Sp
+The background is that sometimes your program runs just fast enough to
+handle one (or very few) event(s) per loop iteration. While this makes
+the program responsive, it also wastes a lot of \s-1CPU\s0 time to poll for new
+events, especially with backends like \f(CW\*(C`select ()\*(C'\fR which have a high
+overhead for the actual polling but can deliver many events at once.
+.Sp
+By setting a higher \fIio collect interval\fR you allow libev to spend more
+time collecting I/O events, so you can handle more events per iteration,
+at the cost of increasing latency. 
Timeouts (both \f(CW\*(C`ev_periodic\*(C'\fR and
+\&\f(CW\*(C`ev_timer\*(C'\fR) will not be affected.
+.Sp
+Likewise, by setting a higher \fItimeout collect interval\fR you allow libev
+to spend more time collecting timeouts, at the expense of increased
+latency (the watcher callback will be called later). \f(CW\*(C`ev_io\*(C'\fR watchers
+will not be affected.
+.Sp
+Many programs can usually benefit by setting the io collect interval to
+a value near \f(CW0.1\fR or so, which is often enough for interactive servers
+(of course not for games), likewise for timeouts. It usually doesn't make
+much sense to set it to a lower value than \f(CW0.01\fR, as this approaches
+the timing granularity of most systems.
.SH "ANATOMY OF A WATCHER"
.IX Header "ANATOMY OF A WATCHER"
A watcher is a structure that you create and register to record your
@@ -2503,6 +2543,10 @@ runtime if successful). Otherwise no use of the realtime clock option will
be attempted. This effectively replaces \f(CW\*(C`gettimeofday\*(C'\fR by \f(CW\*(C`clock_get
(CLOCK_REALTIME, ...)\*(C'\fR and will not normally affect correctness. See the
note about libraries in the description of \f(CW\*(C`EV_USE_MONOTONIC\*(C'\fR, though.
+.IP "\s-1EV_USE_NANOSLEEP\s0" 4
+.IX Item "EV_USE_NANOSLEEP"
+If defined to be \f(CW1\fR, libev will assume that \f(CW\*(C`nanosleep ()\*(C'\fR is available
+and will use it for delays. Otherwise it will use \f(CW\*(C`select ()\*(C'\fR. 
.IP "\s-1EV_USE_SELECT\s0" 4 .IX Item "EV_USE_SELECT" If undefined or defined to be \f(CW1\fR, libev will compile in support for the diff --git a/ev.c b/ev.c index 3f9fe2b..14213df 100644 --- a/ev.c +++ b/ev.c @@ -56,6 +56,14 @@ extern "C" { # endif # endif +# ifndef EV_USE_NANOSLEEP +# if HAVE_NANOSLEEP +# define EV_USE_NANOSLEEP 1 +# else +# define EV_USE_NANOSLEEP 0 +# endif +# endif + # ifndef EV_USE_SELECT # if HAVE_SELECT && HAVE_SYS_SELECT_H # define EV_USE_SELECT 1 @@ -148,6 +156,10 @@ extern "C" { # define EV_USE_REALTIME 0 #endif +#ifndef EV_USE_NANOSLEEP +# define EV_USE_NANOSLEEP 0 +#endif + #ifndef EV_USE_SELECT # define EV_USE_SELECT 1 #endif @@ -209,6 +221,12 @@ extern "C" { # define EV_USE_INOTIFY 0 #endif +#if !EV_USE_NANOSLEEP +# ifndef _WIN32 +# include +# endif +#endif + #if EV_USE_INOTIFY # include #endif @@ -410,6 +428,33 @@ ev_now (EV_P) } #endif +void +ev_sleep (ev_tstamp delay) +{ + if (delay > 0.) + { +#if EV_USE_NANOSLEEP + struct timespec ts; + + ts.tv_sec = (time_t)delay; + ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9); + + nanosleep (&ts, 0); +#elif defined(_WIN32) + Sleep (delay * 1e3); +#else + struct timeval tv; + + tv.tv_sec = (time_t)delay; + tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6); + + select (0, 0, 0, 0, &tv); +#endif + } +} + +/*****************************************************************************/ + int inline_size array_nextsize (int elem, int cur, int cnt) { @@ -944,6 +989,18 @@ ev_loop_count (EV_P) return loop_count; } +void +ev_set_io_collect_interval (EV_P_ ev_tstamp interval) +{ + io_blocktime = interval; +} + +void +ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) +{ + timeout_blocktime = interval; +} + static void noinline loop_init (EV_P_ unsigned int flags) { @@ -962,6 +1019,9 @@ loop_init (EV_P_ unsigned int flags) now_floor = mn_now; rtmn_diff = ev_rt_now - mn_now; + io_blocktime = 0.; + timeout_blocktime = 0.; + /* pid check not overridable via env */ #ifndef 
_WIN32 if (flags & EVFLAG_FORKCHECK) @@ -1458,39 +1518,50 @@ ev_loop (EV_P_ int flags) /* calculate blocking time */ { - ev_tstamp block; + ev_tstamp waittime = 0.; + ev_tstamp sleeptime = 0.; - if (expect_false (flags & EVLOOP_NONBLOCK || idleall || !activecnt)) - block = 0.; /* do not block at all */ - else + if (expect_true (!(flags & EVLOOP_NONBLOCK || idleall || !activecnt))) { /* update time to cancel out callback processing overhead */ time_update (EV_A_ 1e100); - block = MAX_BLOCKTIME; + waittime = MAX_BLOCKTIME; if (timercnt) { ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; - if (block > to) block = to; + if (waittime > to) waittime = to; } #if EV_PERIODIC_ENABLE if (periodiccnt) { ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; - if (block > to) block = to; + if (waittime > to) waittime = to; } #endif - if (expect_false (block < 0.)) block = 0.; + if (expect_false (waittime < timeout_blocktime)) + waittime = timeout_blocktime; + + sleeptime = waittime - backend_fudge; + + if (expect_true (sleeptime > io_blocktime)) + sleeptime = io_blocktime; + + if (sleeptime) + { + ev_sleep (sleeptime); + waittime -= sleeptime; + } } ++loop_count; - backend_poll (EV_A_ block); + backend_poll (EV_A_ waittime); /* update ev_rt_now, do magic */ - time_update (EV_A_ block); + time_update (EV_A_ waittime + sleeptime); } /* queue pending timers and reschedule them */ diff --git a/ev.h b/ev.h index 2b59019..f28d981 100644 --- a/ev.h +++ b/ev.h @@ -346,6 +346,7 @@ unsigned int ev_recommended_backends (void); unsigned int ev_embeddable_backends (void); ev_tstamp ev_time (void); +void ev_sleep (ev_tstamp delay); /* sleep for a while */ /* Sets the allocation function to use, works like realloc. * It is used to allocate and free memory. 
@@ -403,8 +404,8 @@ void ev_default_destroy (void); /* destroy the default loop */
/* you can actually call it at any time, anywhere :) */
void ev_default_fork (void);
-unsigned int ev_backend (EV_P);
-unsigned int ev_loop_count (EV_P);
+unsigned int ev_backend (EV_P); /* backend in use by loop */
+unsigned int ev_loop_count (EV_P); /* number of loop iterations */
#endif
#define EVLOOP_NONBLOCK 1 /* do not block/wait */
@@ -417,6 +418,9 @@ unsigned int ev_loop_count (EV_P);
void ev_loop (EV_P_ int flags);
void ev_unloop (EV_P_ int how); /* set to 1 to break out of event loop, set to 2 to break out of all event loops */
+void ev_set_io_collect_interval (EV_P_ ev_tstamp interval); /* sleep at least this time, default 0 */
+void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval); /* sleep at least this time, default 0 */
+
/*
* ref/unref can be used to add or remove a refcount on the mainloop. every watcher
* keeps one reference. if you have a long-runing watcher you never unregister that
diff --git a/ev.pod b/ev.pod index 1a53dca..d1d8348 100644 --- a/ev.pod +++ b/ev.pod
@@ -117,6 +117,12 @@ Returns the current time as libev would use it. Please note that the
C<ev_now> function is usually faster and also often returns the timestamp
you actually want to know.
+=item ev_sleep (ev_tstamp interval)
+
+Sleep for the given interval: The current thread will be blocked until
+either it is interrupted or the given time interval has passed. Basically
+this is a subsecond-resolution C<sleep ()>.
+
=item int ev_version_major ()
=item int ev_version_minor ()
@@ -571,6 +577,40 @@ Example: For some weird reason, unregister the above signal handler again.
ev_ref (loop);
ev_signal_stop (loop, &exitsig);
+=item ev_set_io_collect_interval (loop, ev_tstamp interval)
+
+=item ev_set_timeout_collect_interval (loop, ev_tstamp interval)
+
+These advanced functions influence the time that libev will spend waiting
+for events. 
Both are by default C<0>, meaning that libev will try to
+invoke timer/periodic callbacks and I/O callbacks with minimum latency.
+
+Setting these to a higher value (the C<interval> I<must> be >= C<0>)
+allows libev to delay invocation of I/O and timer/periodic callbacks to
+increase efficiency of loop iterations.
+
+The background is that sometimes your program runs just fast enough to
+handle one (or very few) event(s) per loop iteration. While this makes
+the program responsive, it also wastes a lot of CPU time to poll for new
+events, especially with backends like C<select ()> which have a high
+overhead for the actual polling but can deliver many events at once.
+
=item EV_USE_SELECT
If undefined or defined to be C<1>, libev will compile in support for the
diff --git a/ev_vars.h b/ev_vars.h index 36e7485..d653213 100644 --- a/ev_vars.h +++ b/ev_vars.h
@@ -3,14 +3,18 @@
VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */
VARx(ev_tstamp, mn_now) /* monotonic clock "now" */
VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */
+
+VARx(ev_tstamp, io_blocktime);
+VARx(ev_tstamp, timeout_blocktime);
+
VARx(int, backend)
VARx(int, activecnt) /* total number of active events ("refcount") */
VARx(unsigned int, loop_count); /* total number of loop iterations/blocks */
+VARx(int, backend_fd)
VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev))
VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout))
-VARx(int, backend_fd)
#if !defined(_WIN32) || EV_GENWRAP
VARx(pid_t, curpid)
diff --git a/ev_wrap.h b/ev_wrap.h index ba028ae..f9bc61e 100644 --- a/ev_wrap.h +++ b/ev_wrap.h
@@ -4,13 +4,15 @@
#define now_floor ((loop)->now_floor)
#define mn_now ((loop)->mn_now)
#define rtmn_diff ((loop)->rtmn_diff)
+#define io_blocktime ((loop)->io_blocktime)
+#define timeout_blocktime ((loop)->timeout_blocktime)
#define backend ((loop)->backend)
#define activecnt ((loop)->activecnt)
#define loop_count ((loop)->loop_count)
+#define backend_fd ((loop)->backend_fd)
#define 
backend_fudge ((loop)->backend_fudge)
#define backend_modify ((loop)->backend_modify)
#define backend_poll ((loop)->backend_poll)
-#define backend_fd ((loop)->backend_fd)
#define curpid ((loop)->curpid)
#define postfork ((loop)->postfork)
#define vec_ri ((loop)->vec_ri)
@@ -67,13 +69,15 @@
#undef now_floor
#undef mn_now
#undef rtmn_diff
+#undef io_blocktime
+#undef timeout_blocktime
#undef backend
#undef activecnt
#undef loop_count
+#undef backend_fd
#undef backend_fudge
#undef backend_modify
#undef backend_poll
-#undef backend_fd
#undef curpid
#undef postfork
#undef vec_ri
diff --git a/libev.m4 b/libev.m4 index 322b659..8099d3b 100644 --- a/libev.m4 +++ b/libev.m4
@@ -14,6 +14,14 @@ AC_CHECK_FUNC(clock_gettime, [], [
fi
])
+AC_CHECK_FUNC(nanosleep, [], [
+ if test -z "$LIBEV_M4_AVOID_LIBRT"; then
+ AC_CHECK_LIB(rt, nanosleep)
+ unset ac_cv_func_nanosleep
+ AC_CHECK_FUNCS(nanosleep)
+ fi
+])
+
AC_CHECK_LIB(m, ceil)
-- cgit v1.2.3