author     Alberto Gonzalez Iniesta <agi@inittab.org>    2016-12-27 18:25:47 +0100
committer  Alberto Gonzalez Iniesta <agi@inittab.org>    2016-12-27 18:25:47 +0100
commit     79f3537f69e125f19f59c36aa090120a63186a54 (patch)
tree       2089a3b7dac990841dbc2e4d9b2f535b82dbb0af /src/openvpn/multi.h
parent     f2137fedb30cb87448eb03b2f288920df6187571 (diff)
parent     3a2bbdb05ca6a6996e424c9fb225cb0d53804125 (diff)
Merge tag 'upstream/2.4.0'
Upstream version 2.4.0
Diffstat (limited to 'src/openvpn/multi.h')
-rw-r--r--  src/openvpn/multi.h  |  445
1 file changed, 237 insertions, 208 deletions
diff --git a/src/openvpn/multi.h b/src/openvpn/multi.h
index 0d369f3..b4ffd69 100644
--- a/src/openvpn/multi.h
+++ b/src/openvpn/multi.h
@@ -5,7 +5,7 @@
* packet encryption, packet authentication, and
* packet compression.
*
- * Copyright (C) 2002-2010 OpenVPN Technologies, Inc. <sales@openvpn.net>
+ * Copyright (C) 2002-2017 OpenVPN Technologies, Inc. <sales@openvpn.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -51,17 +51,17 @@
*/
struct multi_reap
{
- int bucket_base;
- int buckets_per_pass;
- time_t last_call;
+ int bucket_base;
+ int buckets_per_pass;
+ time_t last_call;
};
struct deferred_signal_schedule_entry
{
- struct schedule_entry se;
- int signal_received;
- struct timeval wakeup;
+ struct schedule_entry se;
+ int signal_received;
+ struct timeval wakeup;
};
/**
@@ -75,46 +75,46 @@ struct deferred_signal_schedule_entry
* server-mode.
*/
struct multi_instance {
- struct schedule_entry se; /* this must be the first element of the structure */
- struct gc_arena gc;
- bool defined;
- bool halt;
- int refcount;
- int route_count; /* number of routes (including cached routes) owned by this instance */
- time_t created; /**< Time at which a VPN tunnel instance
+ struct schedule_entry se; /* this must be the first element of the structure */
+ struct gc_arena gc;
+ bool defined;
+ bool halt;
+ int refcount;
+ int route_count; /* number of routes (including cached routes) owned by this instance */
+ time_t created; /**< Time at which a VPN tunnel instance
* was created. This parameter is set
* by the \c multi_create_instance()
* function. */
- struct timeval wakeup; /* absolute time */
- struct mroute_addr real; /**< External network address of the
+ struct timeval wakeup; /* absolute time */
+ struct mroute_addr real; /**< External network address of the
* remote peer. */
- ifconfig_pool_handle vaddr_handle;
- char msg_prefix[MULTI_PREFIX_MAX_LENGTH];
+ ifconfig_pool_handle vaddr_handle;
+ char msg_prefix[MULTI_PREFIX_MAX_LENGTH];
- /* queued outgoing data in Server/TCP mode */
- unsigned int tcp_rwflags;
- struct mbuf_set *tcp_link_out_deferred;
- bool socket_set_called;
+ /* queued outgoing data in Server/TCP mode */
+ unsigned int tcp_rwflags;
+ struct mbuf_set *tcp_link_out_deferred;
+ bool socket_set_called;
- in_addr_t reporting_addr; /* IP address shown in status listing */
- struct in6_addr reporting_addr_ipv6; /* IPv6 address in status listing */
+ in_addr_t reporting_addr; /* IP address shown in status listing */
+ struct in6_addr reporting_addr_ipv6; /* IPv6 address in status listing */
- bool did_open_context;
- bool did_real_hash;
- bool did_iter;
+ bool did_open_context;
+ bool did_real_hash;
+ bool did_iter;
#ifdef MANAGEMENT_DEF_AUTH
- bool did_cid_hash;
- struct buffer_list *cc_config;
+ bool did_cid_hash;
+ struct buffer_list *cc_config;
#endif
- bool connection_established_flag;
- bool did_iroutes;
- int n_clients_delta; /* added to multi_context.n_clients when instance is closed */
+ bool connection_established_flag;
+ bool did_iroutes;
+ int n_clients_delta; /* added to multi_context.n_clients when instance is closed */
- struct context context; /**< The context structure storing state
+ struct context context; /**< The context structure storing state
* for this VPN tunnel. */
#ifdef ENABLE_ASYNC_PUSH
- int inotify_watch; /* watch descriptor for acf */
+ int inotify_watch; /* watch descriptor for acf */
#endif
};
@@ -130,66 +130,66 @@ struct multi_instance {
* server-mode.
*/
struct multi_context {
-# define MC_UNDEF 0
-# define MC_SINGLE_THREADED (1<<0)
-# define MC_MULTI_THREADED_MASTER (1<<1)
-# define MC_MULTI_THREADED_WORKER (1<<2)
-# define MC_MULTI_THREADED_SCHEDULER (1<<3)
-# define MC_WORK_THREAD (MC_MULTI_THREADED_WORKER|MC_MULTI_THREADED_SCHEDULER)
- int thread_mode;
-
- struct multi_instance** instances; /**< Array of multi_instances. An instance can be
+#define MC_UNDEF 0
+#define MC_SINGLE_THREADED (1<<0)
+#define MC_MULTI_THREADED_MASTER (1<<1)
+#define MC_MULTI_THREADED_WORKER (1<<2)
+#define MC_MULTI_THREADED_SCHEDULER (1<<3)
+#define MC_WORK_THREAD (MC_MULTI_THREADED_WORKER|MC_MULTI_THREADED_SCHEDULER)
+ int thread_mode;
+
+ struct multi_instance **instances; /**< Array of multi_instances. An instance can be
* accessed using peer-id as an index. */
- struct hash *hash; /**< VPN tunnel instances indexed by real
+ struct hash *hash; /**< VPN tunnel instances indexed by real
* address of the remote peer. */
- struct hash *vhash; /**< VPN tunnel instances indexed by
+ struct hash *vhash; /**< VPN tunnel instances indexed by
* virtual address of remote hosts. */
- struct hash *iter; /**< VPN tunnel instances indexed by real
+ struct hash *iter; /**< VPN tunnel instances indexed by real
* address of the remote peer, optimized
* for iteration. */
- struct schedule *schedule;
- struct mbuf_set *mbuf; /**< Set of buffers for passing data
+ struct schedule *schedule;
+ struct mbuf_set *mbuf; /**< Set of buffers for passing data
* channel packets between VPN tunnel
* instances. */
- struct multi_tcp *mtcp; /**< State specific to OpenVPN using TCP
+ struct multi_tcp *mtcp; /**< State specific to OpenVPN using TCP
* as external transport. */
- struct ifconfig_pool *ifconfig_pool;
- struct frequency_limit *new_connection_limiter;
- struct mroute_helper *route_helper;
- struct multi_reap *reaper;
- struct mroute_addr local;
- bool enable_c2c;
- int max_clients;
- int tcp_queue_limit;
- int status_file_version;
- int n_clients; /* current number of authenticated clients */
+ struct ifconfig_pool *ifconfig_pool;
+ struct frequency_limit *new_connection_limiter;
+ struct mroute_helper *route_helper;
+ struct multi_reap *reaper;
+ struct mroute_addr local;
+ bool enable_c2c;
+ int max_clients;
+ int tcp_queue_limit;
+ int status_file_version;
+ int n_clients; /* current number of authenticated clients */
#ifdef MANAGEMENT_DEF_AUTH
- struct hash *cid_hash;
- unsigned long cid_counter;
+ struct hash *cid_hash;
+ unsigned long cid_counter;
#endif
- struct multi_instance *pending;
- struct multi_instance *earliest_wakeup;
- struct multi_instance **mpp_touched;
- struct context_buffers *context_buffers;
- time_t per_second_trigger;
+ struct multi_instance *pending;
+ struct multi_instance *earliest_wakeup;
+ struct multi_instance **mpp_touched;
+ struct context_buffers *context_buffers;
+ time_t per_second_trigger;
- struct context top; /**< Storage structure for process-wide
+ struct context top; /**< Storage structure for process-wide
* configuration. */
- /*
- * Timer object for stale route check
- */
- struct event_timeout stale_routes_check_et;
+ /*
+ * Timer object for stale route check
+ */
+ struct event_timeout stale_routes_check_et;
#ifdef ENABLE_ASYNC_PUSH
- /* mapping between inotify watch descriptors and multi_instances */
- struct hash *inotify_watchers;
+ /* mapping between inotify watch descriptors and multi_instances */
+ struct hash *inotify_watchers;
#endif
- struct deferred_signal_schedule_entry deferred_shutdown_signal;
+ struct deferred_signal_schedule_entry deferred_shutdown_signal;
};
/*
@@ -197,15 +197,15 @@ struct multi_context {
*/
struct multi_route
{
- struct mroute_addr addr;
- struct multi_instance *instance;
+ struct mroute_addr addr;
+ struct multi_instance *instance;
-# define MULTI_ROUTE_CACHE (1<<0)
-# define MULTI_ROUTE_AGEABLE (1<<1)
- unsigned int flags;
+#define MULTI_ROUTE_CACHE (1<<0)
+#define MULTI_ROUTE_AGEABLE (1<<1)
+ unsigned int flags;
- unsigned int cache_generation;
- time_t last_reference;
+ unsigned int cache_generation;
+ time_t last_reference;
};
@@ -221,25 +221,28 @@ struct multi_route
*
* @param top - Top-level context structure.
*/
-void tunnel_server (struct context *top);
+void tunnel_server(struct context *top);
-const char *multi_instance_string (const struct multi_instance *mi, bool null, struct gc_arena *gc);
+const char *multi_instance_string(const struct multi_instance *mi, bool null, struct gc_arena *gc);
/*
* Called by mtcp.c, mudp.c, or other (to be written) protocol drivers
*/
-void multi_init (struct multi_context *m, struct context *t, bool tcp_mode, int thread_mode);
-void multi_uninit (struct multi_context *m);
+void multi_init(struct multi_context *m, struct context *t, bool tcp_mode, int thread_mode);
-void multi_top_init (struct multi_context *m, const struct context *top);
-void multi_top_free (struct multi_context *m);
+void multi_uninit(struct multi_context *m);
-struct multi_instance *multi_create_instance (struct multi_context *m, const struct mroute_addr *real);
-void multi_close_instance (struct multi_context *m, struct multi_instance *mi, bool shutdown);
+void multi_top_init(struct multi_context *m, const struct context *top);
-bool multi_process_timeout (struct multi_context *m, const unsigned int mpp_flags);
+void multi_top_free(struct multi_context *m);
+
+struct multi_instance *multi_create_instance(struct multi_context *m, const struct mroute_addr *real);
+
+void multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool shutdown);
+
+bool multi_process_timeout(struct multi_context *m, const unsigned int mpp_flags);
/**
* Handles peer floating.
@@ -249,7 +252,7 @@ bool multi_process_timeout (struct multi_context *m, const unsigned int mpp_flag
* existing peer. Updates multi_instance with new address,
* updates hashtables in multi_context.
*/
-void multi_process_float (struct multi_context* m, struct multi_instance* mi);
+void multi_process_float(struct multi_context *m, struct multi_instance *mi);
#define MPP_PRE_SELECT (1<<0)
#define MPP_CONDITIONAL_PRE_SELECT (1<<1)
@@ -279,7 +282,7 @@ void multi_process_float (struct multi_context* m, struct multi_instance* mi);
* signal during processing.
* - False, if the VPN tunnel instance \a mi was closed.
*/
-bool multi_process_post (struct multi_context *m, struct multi_instance *mi, const unsigned int flags);
+bool multi_process_post(struct multi_context *m, struct multi_instance *mi, const unsigned int flags);
/**************************************************************************/
@@ -305,7 +308,7 @@ bool multi_process_post (struct multi_context *m, struct multi_instance *mi, con
* the case when using UDP transport.
* @param mpp_flags - Fast I/O optimization flags.
*/
-bool multi_process_incoming_link (struct multi_context *m, struct multi_instance *instance, const unsigned int mpp_flags);
+bool multi_process_incoming_link(struct multi_context *m, struct multi_instance *instance, const unsigned int mpp_flags);
/**
@@ -323,27 +326,28 @@ bool multi_process_incoming_link (struct multi_context *m, struct multi_instance
* @param m - The single \c multi_context structure.
* @param mpp_flags - Fast I/O optimization flags.
*/
-bool multi_process_incoming_tun (struct multi_context *m, const unsigned int mpp_flags);
+bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags);
+
+void multi_process_drop_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags);
-void multi_process_drop_outgoing_tun (struct multi_context *m, const unsigned int mpp_flags);
+void multi_print_status(struct multi_context *m, struct status_output *so, const int version);
-void multi_print_status (struct multi_context *m, struct status_output *so, const int version);
+struct multi_instance *multi_get_queue(struct mbuf_set *ms);
-struct multi_instance *multi_get_queue (struct mbuf_set *ms);
+void multi_add_mbuf(struct multi_context *m,
+ struct multi_instance *mi,
+ struct mbuf_buffer *mb);
-void multi_add_mbuf (struct multi_context *m,
- struct multi_instance *mi,
- struct mbuf_buffer *mb);
+void multi_ifconfig_pool_persist(struct multi_context *m, bool force);
-void multi_ifconfig_pool_persist (struct multi_context *m, bool force);
+bool multi_process_signal(struct multi_context *m);
-bool multi_process_signal (struct multi_context *m);
+void multi_close_instance_on_signal(struct multi_context *m, struct multi_instance *mi);
-void multi_close_instance_on_signal (struct multi_context *m, struct multi_instance *mi);
+void init_management_callback_multi(struct multi_context *m);
-void init_management_callback_multi (struct multi_context *m);
-void uninit_management_callback_multi (struct multi_context *m);
+void uninit_management_callback_multi(struct multi_context *m);
#ifdef ENABLE_ASYNC_PUSH
@@ -354,20 +358,25 @@ void uninit_management_callback_multi (struct multi_context *m);
* @param m multi_context
* @param mpp_flags
*/
-void multi_process_file_closed (struct multi_context *m, const unsigned int mpp_flags);
+void multi_process_file_closed(struct multi_context *m, const unsigned int mpp_flags);
+
#endif
/*
* Return true if our output queue is not full
*/
static inline bool
-multi_output_queue_ready (const struct multi_context *m,
- const struct multi_instance *mi)
+multi_output_queue_ready(const struct multi_context *m,
+ const struct multi_instance *mi)
{
- if (mi->tcp_link_out_deferred)
- return mbuf_len (mi->tcp_link_out_deferred) <= m->tcp_queue_limit;
- else
- return true;
+ if (mi->tcp_link_out_deferred)
+ {
+ return mbuf_len(mi->tcp_link_out_deferred) <= m->tcp_queue_limit;
+ }
+ else
+ {
+ return true;
+ }
}
/*
@@ -376,46 +385,52 @@ multi_output_queue_ready (const struct multi_context *m,
* the to_link buffer.
*/
static inline struct multi_instance *
-multi_process_outgoing_link_pre (struct multi_context *m)
+multi_process_outgoing_link_pre(struct multi_context *m)
{
- struct multi_instance *mi = NULL;
+ struct multi_instance *mi = NULL;
- if (m->pending)
- mi = m->pending;
- else if (mbuf_defined (m->mbuf))
- mi = multi_get_queue (m->mbuf);
- return mi;
+ if (m->pending)
+ {
+ mi = m->pending;
+ }
+ else if (mbuf_defined(m->mbuf))
+ {
+ mi = multi_get_queue(m->mbuf);
+ }
+ return mi;
}
/*
* Per-client route quota management
*/
-void route_quota_exceeded (const struct multi_context *m, const struct multi_instance *mi);
+void route_quota_exceeded(const struct multi_context *m, const struct multi_instance *mi);
static inline void
-route_quota_inc (struct multi_instance *mi)
+route_quota_inc(struct multi_instance *mi)
{
- ++mi->route_count;
+ ++mi->route_count;
}
static inline void
-route_quota_dec (struct multi_instance *mi)
+route_quota_dec(struct multi_instance *mi)
{
- --mi->route_count;
+ --mi->route_count;
}
/* can we add a new route? */
static inline bool
-route_quota_test (const struct multi_context *m, const struct multi_instance *mi)
+route_quota_test(const struct multi_context *m, const struct multi_instance *mi)
{
- if (mi->route_count >= mi->context.options.max_routes_per_client)
+ if (mi->route_count >= mi->context.options.max_routes_per_client)
+ {
+ route_quota_exceeded(m, mi);
+ return false;
+ }
+ else
{
- route_quota_exceeded (m, mi);
- return false;
+ return true;
}
- else
- return true;
}
/*
@@ -423,73 +438,83 @@ route_quota_test (const struct multi_context *m, const struct multi_instance *mi
*/
static inline void
-multi_instance_inc_refcount (struct multi_instance *mi)
+multi_instance_inc_refcount(struct multi_instance *mi)
{
- ++mi->refcount;
+ ++mi->refcount;
}
static inline void
-multi_instance_dec_refcount (struct multi_instance *mi)
+multi_instance_dec_refcount(struct multi_instance *mi)
{
- if (--mi->refcount <= 0)
+ if (--mi->refcount <= 0)
{
- gc_free (&mi->gc);
- free (mi);
+ gc_free(&mi->gc);
+ free(mi);
}
}
static inline void
-multi_route_del (struct multi_route *route)
+multi_route_del(struct multi_route *route)
{
- struct multi_instance *mi = route->instance;
- route_quota_dec (mi);
- multi_instance_dec_refcount (mi);
- free (route);
+ struct multi_instance *mi = route->instance;
+ route_quota_dec(mi);
+ multi_instance_dec_refcount(mi);
+ free(route);
}
static inline bool
-multi_route_defined (const struct multi_context *m,
- const struct multi_route *r)
+multi_route_defined(const struct multi_context *m,
+ const struct multi_route *r)
{
- if (r->instance->halt)
- return false;
- else if ((r->flags & MULTI_ROUTE_CACHE)
- && r->cache_generation != m->route_helper->cache_generation)
- return false;
- else if ((r->flags & MULTI_ROUTE_AGEABLE)
- && r->last_reference + m->route_helper->ageable_ttl_secs < now)
- return false;
- else
- return true;
+ if (r->instance->halt)
+ {
+ return false;
+ }
+ else if ((r->flags & MULTI_ROUTE_CACHE)
+ && r->cache_generation != m->route_helper->cache_generation)
+ {
+ return false;
+ }
+ else if ((r->flags & MULTI_ROUTE_AGEABLE)
+ && r->last_reference + m->route_helper->ageable_ttl_secs < now)
+ {
+ return false;
+ }
+ else
+ {
+ return true;
+ }
}
/*
* Takes prefix away from multi_instance.
*/
void
-ungenerate_prefix (struct multi_instance *mi);
+ungenerate_prefix(struct multi_instance *mi);
/*
* Set a msg() function prefix with our current client instance ID.
*/
static inline void
-set_prefix (struct multi_instance *mi)
+set_prefix(struct multi_instance *mi)
{
#ifdef MULTI_DEBUG_EVENT_LOOP
- if (mi->msg_prefix[0])
- printf ("[%s]\n", mi->msg_prefix);
+ if (mi->msg_prefix[0])
+ {
+ printf("[%s]\n", mi->msg_prefix);
+ }
#endif
- msg_set_prefix (mi->msg_prefix[0] ? mi->msg_prefix : NULL);
+ msg_set_prefix(mi->msg_prefix[0] ? mi->msg_prefix : NULL);
}
static inline void
-clear_prefix (void)
+clear_prefix(void)
{
#ifdef MULTI_DEBUG_EVENT_LOOP
- printf ("[NULL]\n");
+ printf("[NULL]\n");
#endif
- msg_set_prefix (NULL);
+ msg_set_prefix(NULL);
}
/*
@@ -513,21 +538,25 @@ clear_prefix (void)
#define MULTI_CACHE_ROUTE_TTL 60
static inline void
-multi_reap_process (const struct multi_context *m)
+multi_reap_process(const struct multi_context *m)
{
- void multi_reap_process_dowork (const struct multi_context *m);
- if (m->reaper->last_call != now)
- multi_reap_process_dowork (m);
+ void multi_reap_process_dowork(const struct multi_context *m);
+
+ if (m->reaper->last_call != now)
+ {
+ multi_reap_process_dowork(m);
+ }
}
static inline void
-multi_process_per_second_timers (struct multi_context *m)
+multi_process_per_second_timers(struct multi_context *m)
{
- if (m->per_second_trigger != now)
+ if (m->per_second_trigger != now)
{
- void multi_process_per_second_timers_dowork (struct multi_context *m);
- multi_process_per_second_timers_dowork (m);
- m->per_second_trigger = now;
+ void multi_process_per_second_timers_dowork(struct multi_context *m);
+
+ multi_process_per_second_timers_dowork(m);
+ m->per_second_trigger = now;
}
}
@@ -540,27 +569,27 @@ multi_process_per_second_timers (struct multi_context *m)
* to current time.
*/
static inline void
-multi_get_timeout (struct multi_context *m, struct timeval *dest)
+multi_get_timeout(struct multi_context *m, struct timeval *dest)
{
- struct timeval tv, current;
+ struct timeval tv, current;
- CLEAR (tv);
- m->earliest_wakeup = (struct multi_instance *) schedule_get_earliest_wakeup (m->schedule, &tv);
- if (m->earliest_wakeup)
+ CLEAR(tv);
+ m->earliest_wakeup = (struct multi_instance *) schedule_get_earliest_wakeup(m->schedule, &tv);
+ if (m->earliest_wakeup)
{
- ASSERT (!openvpn_gettimeofday (&current, NULL));
- tv_delta (dest, &current, &tv);
- if (dest->tv_sec >= REAP_MAX_WAKEUP)
- {
- m->earliest_wakeup = NULL;
- dest->tv_sec = REAP_MAX_WAKEUP;
- dest->tv_usec = 0;
- }
+ ASSERT(!openvpn_gettimeofday(&current, NULL));
+ tv_delta(dest, &current, &tv);
+ if (dest->tv_sec >= REAP_MAX_WAKEUP)
+ {
+ m->earliest_wakeup = NULL;
+ dest->tv_sec = REAP_MAX_WAKEUP;
+ dest->tv_usec = 0;
+ }
}
- else
+ else
{
- dest->tv_sec = REAP_MAX_WAKEUP;
- dest->tv_usec = 0;
+ dest->tv_sec = REAP_MAX_WAKEUP;
+ dest->tv_usec = 0;
}
}
@@ -583,46 +612,46 @@ multi_get_timeout (struct multi_context *m, struct timeval *dest)
* - False, if the \c multi_instance was closed.
*/
static inline bool
-multi_process_outgoing_tun (struct multi_context *m, const unsigned int mpp_flags)
+multi_process_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags)
{
- struct multi_instance *mi = m->pending;
- bool ret = true;
+ struct multi_instance *mi = m->pending;
+ bool ret = true;
- ASSERT (mi);
+ ASSERT(mi);
#ifdef MULTI_DEBUG_EVENT_LOOP
- printf ("%s -> TUN len=%d\n",
- id(mi),
- mi->context.c2.to_tun.len);
+ printf("%s -> TUN len=%d\n",
+ id(mi),
+ mi->context.c2.to_tun.len);
#endif
- set_prefix (mi);
- process_outgoing_tun (&mi->context);
- ret = multi_process_post (m, mi, mpp_flags);
- clear_prefix ();
- return ret;
+ set_prefix(mi);
+ process_outgoing_tun(&mi->context);
+ ret = multi_process_post(m, mi, mpp_flags);
+ clear_prefix();
+ return ret;
}
static inline bool
-multi_process_outgoing_link_dowork (struct multi_context *m, struct multi_instance *mi, const unsigned int mpp_flags)
+multi_process_outgoing_link_dowork(struct multi_context *m, struct multi_instance *mi, const unsigned int mpp_flags)
{
- bool ret = true;
- set_prefix (mi);
- process_outgoing_link (&mi->context);
- ret = multi_process_post (m, mi, mpp_flags);
- clear_prefix ();
- return ret;
+ bool ret = true;
+ set_prefix(mi);
+ process_outgoing_link(&mi->context);
+ ret = multi_process_post(m, mi, mpp_flags);
+ clear_prefix();
+ return ret;
}
/*
* Check for signals.
*/
-#define MULTI_CHECK_SIG(m) EVENT_LOOP_CHECK_SIGNAL (&(m)->top, multi_process_signal, (m))
+#define MULTI_CHECK_SIG(m) EVENT_LOOP_CHECK_SIGNAL(&(m)->top, multi_process_signal, (m))
static inline void
-multi_set_pending (struct multi_context *m, struct multi_instance *mi)
+multi_set_pending(struct multi_context *m, struct multi_instance *mi)
{
- m->pending = mi;
+ m->pending = mi;
}
#endif /* P2MP_SERVER */
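
As a usage illustration of the inline helpers declared above, the sketch below shows how route_quota_test(), route_quota_inc(), multi_instance_inc_refcount() and multi_route_del() are meant to be combined when a per-client route is learned and later discarded. It is not code from the OpenVPN tree: the function name example_learn_route() and the plain calloc() allocation are assumptions made for this example; the real learning logic lives in multi.c.

#include <stdlib.h>
#include "multi.h"

/* Sketch: learn a route for client 'mi', respecting the per-client quota.
 * Returns the new route, or NULL if the quota is hit or allocation fails. */
static struct multi_route *
example_learn_route(struct multi_context *m,
                    struct multi_instance *mi,
                    const struct mroute_addr *addr)
{
    struct multi_route *route = NULL;

    /* route_quota_test() calls route_quota_exceeded() and returns false
     * once the client already owns max_routes_per_client routes */
    if (route_quota_test(m, mi))
    {
        route = calloc(1, sizeof(*route));
        if (route)
        {
            route->addr = *addr;
            route->instance = mi;
            route->flags = MULTI_ROUTE_CACHE | MULTI_ROUTE_AGEABLE;
            route->cache_generation = m->route_helper->cache_generation;
            route->last_reference = now;

            multi_instance_inc_refcount(mi); /* route holds a ref on its owner   */
            route_quota_inc(mi);             /* and counts against owner's quota */
        }
    }
    return route;
}

/* Tear-down is the mirror image: multi_route_del() (defined above) decrements
 * the quota, drops the instance reference and frees the route, while
 * multi_route_defined() tells the caller when a cached/ageable route like this
 * one has gone stale and should be removed. */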