/*
 *  OpenVPN -- An application to securely tunnel IP networks
 *             over a single TCP/UDP port, with support for SSL/TLS-based
 *             session authentication and key exchange,
 *             packet encryption, packet authentication, and
 *             packet compression.
 *
 *  Copyright (C) 2002-2018 OpenVPN Inc <sales@openvpn.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file Header file for server-mode related structures and functions.
 */

#ifndef MULTI_H
#define MULTI_H

#if P2MP_SERVER

#include "init.h"
#include "forward.h"
#include "mroute.h"
#include "mbuf.h"
#include "list.h"
#include "schedule.h"
#include "pool.h"
#include "mudp.h"
#include "mtcp.h"
#include "perf.h"

#define MULTI_PREFIX_MAX_LENGTH 256

/*
 * Walk (don't run) through the routing table,
 * deleting old entries as well as any multi_instance
 * structs that have been marked for deletion.
 */
struct multi_reap
{
    int bucket_base;
    int buckets_per_pass;
    time_t last_call;
};
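
/*
 * Illustrative reaping pass (a sketch of how these fields are meant to
 * be used, not the actual implementation; n_buckets is assumed here to
 * be the size of the hash table being reaped): each pass only covers a
 * slice of the table, then advances the base so that the next pass
 * continues where this one left off:
 *
 *     start = reaper->bucket_base;
 *     reap buckets [start, start + reaper->buckets_per_pass);
 *     reaper->bucket_base = start + reaper->buckets_per_pass;
 *     if (reaper->bucket_base >= n_buckets)
 *     {
 *         reaper->bucket_base = 0;
 *     }
 */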


struct deferred_signal_schedule_entry
{
    struct schedule_entry se;
    int signal_received;
    struct timeval wakeup;
};

/**
 * Server-mode state structure for one single VPN tunnel.
 *
 * This structure is used by OpenVPN processes running in server-mode to
 * store state information related to one single VPN tunnel.
 *
 * The @ref tunnel_state "Structure of VPN tunnel state storage" related
 * page describes the role the structure plays when OpenVPN is running in
 * server-mode.
 */
struct multi_instance {
    struct schedule_entry se;  /* this must be the first element of the structure */
    struct gc_arena gc;
    bool defined;
    bool halt;
    int refcount;
    int route_count;           /* number of routes (including cached routes) owned by this instance */
    time_t created;             /**< Time at which a VPN tunnel instance
                                 *   was created.  This parameter is set
                                 *   by the \c multi_create_instance()
                                 *   function. */
    struct timeval wakeup;     /* absolute time */
    struct mroute_addr real;    /**< External network address of the
                                 *   remote peer. */
    ifconfig_pool_handle vaddr_handle;
    char msg_prefix[MULTI_PREFIX_MAX_LENGTH];

    /* queued outgoing data in Server/TCP mode */
    unsigned int tcp_rwflags;
    struct mbuf_set *tcp_link_out_deferred;
    bool socket_set_called;

    in_addr_t reporting_addr;     /* IP address shown in status listing */
    struct in6_addr reporting_addr_ipv6; /* IPv6 address in status listing */

    bool did_open_context;
    bool did_real_hash;
    bool did_iter;
#ifdef MANAGEMENT_DEF_AUTH
    bool did_cid_hash;
    struct buffer_list *cc_config;
#endif
    bool connection_established_flag;
    bool did_iroutes;
    int n_clients_delta; /* added to multi_context.n_clients when instance is closed */

    struct context context;     /**< The context structure storing state
                                 *   for this VPN tunnel. */

#ifdef ENABLE_ASYNC_PUSH
    int inotify_watch; /* watch descriptor for acf */
#endif
};


/**
 * Main OpenVPN server state structure.
 *
 * This structure is used by OpenVPN processes running in server-mode to
 * store all the VPN tunnel and process-wide state.
 *
 * The @ref tunnel_state "Structure of VPN tunnel state storage" related
 * page describes the role the structure plays when OpenVPN is running in
 * server-mode.
 */
struct multi_context {
#define MC_UNDEF                      0
#define MC_SINGLE_THREADED            (1<<0)
#define MC_MULTI_THREADED_MASTER      (1<<1)
#define MC_MULTI_THREADED_WORKER      (1<<2)
#define MC_MULTI_THREADED_SCHEDULER   (1<<3)
#define MC_WORK_THREAD                (MC_MULTI_THREADED_WORKER|MC_MULTI_THREADED_SCHEDULER)
    int thread_mode;

    struct multi_instance **instances;  /**< Array of multi_instances. An instance can be
                                         * accessed using peer-id as an index. */

    struct hash *hash;          /**< VPN tunnel instances indexed by real
                                 *   address of the remote peer. */
    struct hash *vhash;         /**< VPN tunnel instances indexed by
                                 *   virtual address of remote hosts. */
    struct hash *iter;          /**< VPN tunnel instances indexed by real
                                 *   address of the remote peer, optimized
                                 *   for iteration. */
    struct schedule *schedule;
    struct mbuf_set *mbuf;      /**< Set of buffers for passing data
                                 *   channel packets between VPN tunnel
                                 *   instances. */
    struct multi_tcp *mtcp;     /**< State specific to OpenVPN using TCP
                                 *   as external transport. */
    struct ifconfig_pool *ifconfig_pool;
    struct frequency_limit *new_connection_limiter;
    struct mroute_helper *route_helper;
    struct multi_reap *reaper;
    struct mroute_addr local;
    bool enable_c2c;
    int max_clients;
    int tcp_queue_limit;
    int status_file_version;
    int n_clients; /* current number of authenticated clients */

#ifdef MANAGEMENT_DEF_AUTH
    struct hash *cid_hash;
    unsigned long cid_counter;
#endif

    struct multi_instance *pending;
    struct multi_instance *earliest_wakeup;
    struct multi_instance **mpp_touched;
    struct context_buffers *context_buffers;
    time_t per_second_trigger;

    struct context top;         /**< Storage structure for process-wide
                                 *   configuration. */

    /*
     * Timer object for stale route check
     */
    struct event_timeout stale_routes_check_et;

#ifdef ENABLE_ASYNC_PUSH
    /* mapping between inotify watch descriptors and multi_instances */
    struct hash *inotify_watchers;
#endif

    struct deferred_signal_schedule_entry deferred_shutdown_signal;
};

/*
 * Host route
 */
struct multi_route
{
    struct mroute_addr addr;
    struct multi_instance *instance;

#define MULTI_ROUTE_CACHE   (1<<0)
#define MULTI_ROUTE_AGEABLE (1<<1)
    unsigned int flags;

    unsigned int cache_generation;
    time_t last_reference;
};


/**************************************************************************/
/**
 * Main event loop for OpenVPN in server mode.
 * @ingroup eventloop
 *
 * This function calls the appropriate main event loop function depending
 * on the transport protocol used:
 *  - \c tunnel_server_udp()
 *  - \c tunnel_server_tcp()
 *
 * @param top          - Top-level context structure.
 */
void tunnel_server(struct context *top);
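
/*
 * Illustrative sketch (not part of this API): in server mode the
 * top-level initialization code is expected to hand the fully
 * initialized top-level context to tunnel_server(), which then
 * dispatches to tunnel_server_udp() or tunnel_server_tcp() depending
 * on the configured transport.  The option names used below are
 * assumptions for illustration only:
 *
 *     if (top->options.mode == MODE_SERVER)
 *     {
 *         tunnel_server(top);
 *     }
 */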


const char *multi_instance_string(const struct multi_instance *mi, bool null, struct gc_arena *gc);

/*
 * Called by mtcp.c, mudp.c, or other (to be written) protocol drivers
 */

void multi_init(struct multi_context *m, struct context *t, bool tcp_mode, int thread_mode);

void multi_uninit(struct multi_context *m);

void multi_top_init(struct multi_context *m, const struct context *top);

void multi_top_free(struct multi_context *m);

struct multi_instance *multi_create_instance(struct multi_context *m, const struct mroute_addr *real);

void multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool shutdown);

bool multi_process_timeout(struct multi_context *m, const unsigned int mpp_flags);

/**
 * Handles peer floating.
 *
 * If the peer floats to an address that is already taken, either drops
 * the packet (if the peer owning that address has a different CN) or
 * disconnects the existing peer.  Updates the multi_instance with the
 * new address and updates the hashtables in multi_context.
 */
void multi_process_float(struct multi_context *m, struct multi_instance *mi);

#define MPP_PRE_SELECT             (1<<0)
#define MPP_CONDITIONAL_PRE_SELECT (1<<1)
#define MPP_CLOSE_ON_SIGNAL        (1<<2)
#define MPP_RECORD_TOUCH           (1<<3)


/**************************************************************************/
/**
 * Perform postprocessing of a VPN tunnel instance.
 *
 * After some VPN tunnel activity has taken place, the VPN tunnel's state
 * may need updating and some follow-up action may be required.  This
 * function controls the necessary postprocessing.  It is called by many
 * other functions that handle VPN tunnel related activity, such as \c
 * multi_process_incoming_link(), \c multi_process_outgoing_link(), \c
 * multi_process_incoming_tun(), \c multi_process_outgoing_tun(), and \c
 * multi_process_timeout(), among others.
 *
 * @param m            - The single \c multi_context structure.
 * @param mi           - The \c multi_instance of the VPN tunnel to be
 *                       postprocessed.
 * @param flags        - Fast I/O optimization flags.
 *
 * @return
 *  - True, if the VPN tunnel instance \a mi was not closed due to a
 *    signal during processing.
 *  - False, if the VPN tunnel instance \a mi was closed.
 */
bool multi_process_post(struct multi_context *m, struct multi_instance *mi, const unsigned int flags);
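
/*
 * Typical caller pattern (a sketch, assuming the documented return
 * value semantics above): a false return means the instance was closed
 * during postprocessing, for example on a signal, so the caller must
 * stop using it:
 *
 *     if (!multi_process_post(m, mi, mpp_flags))
 *     {
 *         mi = NULL;
 *     }
 */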


/**************************************************************************/
/**
 * Demultiplex and process a packet received over the external network
 * interface.
 * @ingroup external_multiplexer
 *
 * This function determines which VPN tunnel instance the incoming packet
 * is associated with, and then calls \c process_incoming_link() to handle
 * it.  Afterwards, if the packet is destined for a broadcast/multicast
 * address or a remote host reachable through a different VPN tunnel, this
 * function takes care of sending it there.
 *
 * @note This function is only used by OpenVPN processes which are running
 *     in server mode, and can therefore sustain multiple active VPN
 *     tunnels.
 *
 * @param m            - The single \c multi_context structure.
 * @param instance     - The VPN tunnel state structure associated with
 *                       the incoming packet, if known, as is the case
 *                       when using TCP transport. Otherwise NULL, as is
 *                       the case when using UDP transport.
 * @param mpp_flags    - Fast I/O optimization flags.
 */
bool multi_process_incoming_link(struct multi_context *m, struct multi_instance *instance, const unsigned int mpp_flags);


/**
 * Determine the destination VPN tunnel of a packet received over the
 * virtual tun/tap network interface and then process it accordingly.
 * @ingroup internal_multiplexer
 *
 * This function determines which VPN tunnel instance the packet is
 * destined for, and then calls \c process_outgoing_tun() to handle it.
 *
 * @note This function is only used by OpenVPN processes which are running
 *     in server mode, and can therefore sustain multiple active VPN
 *     tunnels.
 *
 * @param m            - The single \c multi_context structure.
 * @param mpp_flags    - Fast I/O optimization flags.
 */
bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags);


void multi_process_drop_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags);

void multi_print_status(struct multi_context *m, struct status_output *so, const int version);

struct multi_instance *multi_get_queue(struct mbuf_set *ms);

void multi_add_mbuf(struct multi_context *m,
                    struct multi_instance *mi,
                    struct mbuf_buffer *mb);

void multi_ifconfig_pool_persist(struct multi_context *m, bool force);

bool multi_process_signal(struct multi_context *m);

void multi_close_instance_on_signal(struct multi_context *m, struct multi_instance *mi);

void init_management_callback_multi(struct multi_context *m);

void uninit_management_callback_multi(struct multi_context *m);


#ifdef ENABLE_ASYNC_PUSH
/**
 * Called when an inotify event fires, which happens when the acf (auth
 * control) file is closed or deleted.  Continues authentication and
 * sends a push_reply.
 *
 * @param m            - The single \c multi_context structure.
 * @param mpp_flags    - Fast I/O optimization flags.
 */
void multi_process_file_closed(struct multi_context *m, const unsigned int mpp_flags);

#endif

/*
 * Return true if our output queue is not full
 */
static inline bool
multi_output_queue_ready(const struct multi_context *m,
                         const struct multi_instance *mi)
{
    if (mi->tcp_link_out_deferred)
    {
        return mbuf_len(mi->tcp_link_out_deferred) <= m->tcp_queue_limit;
    }
    else
    {
        return true;
    }
}
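
/*
 * Illustrative use (an assumption about the intended caller, not a
 * definitive one): in Server/TCP mode a sender would check the deferred
 * output queue before queuing more data for this instance, so that an
 * instance whose queue has reached tcp_queue_limit is skipped; mb is a
 * hypothetical mbuf_buffer prepared by the caller:
 *
 *     if (multi_output_queue_ready(m, mi))
 *     {
 *         multi_add_mbuf(m, mi, mb);
 *     }
 */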

/*
 * Determine which instance has pending output
 * and prepare the output for sending in
 * the to_link buffer.
 */
static inline struct multi_instance *
multi_process_outgoing_link_pre(struct multi_context *m)
{
    struct multi_instance *mi = NULL;

    if (m->pending)
    {
        mi = m->pending;
    }
    else if (mbuf_defined(m->mbuf))
    {
        mi = multi_get_queue(m->mbuf);
    }
    return mi;
}

/*
 * Per-client route quota management
 */

void route_quota_exceeded(const struct multi_context *m, const struct multi_instance *mi);

static inline void
route_quota_inc(struct multi_instance *mi)
{
    ++mi->route_count;
}

static inline void
route_quota_dec(struct multi_instance *mi)
{
    --mi->route_count;
}

/* can we add a new route? */
static inline bool
route_quota_test(const struct multi_context *m, const struct multi_instance *mi)
{
    if (mi->route_count >= mi->context.options.max_routes_per_client)
    {
        route_quota_exceeded(m, mi);
        return false;
    }
    else
    {
        return true;
    }
}
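
/*
 * Sketch of the intended add-route flow (illustrative only): check the
 * per-client quota before learning a new route for an instance, and
 * bump the counter once the route has actually been created and added
 * to the routing hash table:
 *
 *     if (route_quota_test(m, mi))
 *     {
 *         route_quota_inc(mi);
 *     }
 */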

/*
 * Instance reference counting
 */

static inline void
multi_instance_inc_refcount(struct multi_instance *mi)
{
    ++mi->refcount;
}

static inline void
multi_instance_dec_refcount(struct multi_instance *mi)
{
    if (--mi->refcount <= 0)
    {
        gc_free(&mi->gc);
        free(mi);
    }
}
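
/*
 * Reference-counting sketch (an assumption about intended use, not a
 * prescribed pattern): any object that stores a pointer to a
 * multi_instance, such as a cached multi_route, holds a reference for
 * as long as it keeps the pointer; the instance is only freed once the
 * last reference is dropped, e.g. by multi_route_del() below:
 *
 *     route->instance = mi;
 *     multi_instance_inc_refcount(mi);
 */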

static inline void
multi_route_del(struct multi_route *route)
{
    struct multi_instance *mi = route->instance;
    route_quota_dec(mi);
    multi_instance_dec_refcount(mi);
    free(route);
}

static inline bool
multi_route_defined(const struct multi_context *m,
                    const struct multi_route *r)
{
    if (r->instance->halt)
    {
        return false;
    }
    else if ((r->flags & MULTI_ROUTE_CACHE)
             && r->cache_generation != m->route_helper->cache_generation)
    {
        return false;
    }
    else if ((r->flags & MULTI_ROUTE_AGEABLE)
             && r->last_reference + m->route_helper->ageable_ttl_secs < now)
    {
        return false;
    }
    else
    {
        return true;
    }
}

/*
 * Takes prefix away from multi_instance.
 */
void
ungenerate_prefix(struct multi_instance *mi);

/*
 * Set a msg() function prefix with our current client instance ID.
 */

static inline void
set_prefix(struct multi_instance *mi)
{
#ifdef MULTI_DEBUG_EVENT_LOOP
    if (mi->msg_prefix[0])
    {
        printf("[%s]\n", mi->msg_prefix);
    }
#endif
    msg_set_prefix(mi->msg_prefix[0] ? mi->msg_prefix : NULL);
}

static inline void
clear_prefix(void)
{
#ifdef MULTI_DEBUG_EVENT_LOOP
    printf("[NULL]\n");
#endif
    msg_set_prefix(NULL);
}
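
/*
 * Usage sketch: per-instance work is bracketed by set_prefix() and
 * clear_prefix() so that any msg() output emitted while servicing the
 * instance carries that client's prefix, as multi_process_outgoing_tun()
 * below does:
 *
 *     set_prefix(mi);
 *     process_outgoing_tun(&mi->context);
 *     clear_prefix();
 */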

/*
 * Instance Reaper
 *
 * Reaper constants.  The reaper is the process where the virtual address
 * and virtual route hash table is scanned for dead entries which are
 * then removed.  The hash table could potentially be quite large, so we
 * don't want to reap in a single pass.
 */

#define REAP_MAX_WAKEUP   10  /* Do reap pass at least once per n seconds */
#define REAP_DIVISOR     256  /* How many passes to cover whole hash table */
#define REAP_MIN          16  /* Minimum number of buckets per pass */
#define REAP_MAX        1024  /* Maximum number of buckets per pass */
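
/*
 * Worked example (assuming buckets_per_pass is derived as
 * n_buckets / REAP_DIVISOR, clamped to [REAP_MIN, REAP_MAX]): a hash
 * table with 4096 buckets would be reaped 4096 / 256 = 16 buckets per
 * pass, i.e. 256 passes to cover the whole table.
 */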

/*
 * Mark a cached host route for deletion after this
 * many seconds without any references.
 */
#define MULTI_CACHE_ROUTE_TTL 60

static inline void
multi_reap_process(const struct multi_context *m)
{
    void multi_reap_process_dowork(const struct multi_context *m);

    if (m->reaper->last_call != now)
    {
        multi_reap_process_dowork(m);
    }
}

static inline void
multi_process_per_second_timers(struct multi_context *m)
{
    if (m->per_second_trigger != now)
    {
        void multi_process_per_second_timers_dowork(struct multi_context *m);

        multi_process_per_second_timers_dowork(m);
        m->per_second_trigger = now;
    }
}

/*
 * Compute earliest timeout expiry from the set of
 * all instances.  Output:
 *
 * m->earliest_wakeup : instance needing the earliest service.
 * dest               : earliest timeout as a delta in relation
 *                      to current time.
 */
static inline void
multi_get_timeout(struct multi_context *m, struct timeval *dest)
{
    struct timeval tv, current;

    CLEAR(tv);
    m->earliest_wakeup = (struct multi_instance *) schedule_get_earliest_wakeup(m->schedule, &tv);
    if (m->earliest_wakeup)
    {
        ASSERT(!openvpn_gettimeofday(&current, NULL));
        tv_delta(dest, &current, &tv);
        if (dest->tv_sec >= REAP_MAX_WAKEUP)
        {
            m->earliest_wakeup = NULL;
            dest->tv_sec = REAP_MAX_WAKEUP;
            dest->tv_usec = 0;
        }
    }
    else
    {
        dest->tv_sec = REAP_MAX_WAKEUP;
        dest->tv_usec = 0;
    }
}
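
/*
 * Illustrative event-loop usage (a sketch, not the actual driver code):
 * before blocking for I/O, the server event loop would compute the
 * earliest per-instance timeout and use it as its wait interval, so
 * that no instance sleeps past its scheduled wakeup and the reaper
 * still runs at least every REAP_MAX_WAKEUP seconds:
 *
 *     struct timeval timeout;
 *     multi_get_timeout(m, &timeout);
 */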


/**
 * Send a packet over the virtual tun/tap network interface to its locally
 * reachable destination.
 * @ingroup internal_multiplexer
 *
 * This function calls \c process_outgoing_tun() to perform the actual
 * sending of the packet.  Afterwards, it calls \c multi_process_post() to
 * perform server-mode postprocessing.
 *
 * @param m            - The single \c multi_context structure.
 * @param mpp_flags    - Fast I/O optimization flags.
 *
 * @return
 *  - True, if the \c multi_instance associated with the packet sent was
 *    not closed due to a signal during processing.
 *  - False, if the \c multi_instance was closed.
 */
static inline bool
multi_process_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags)
{
    struct multi_instance *mi = m->pending;
    bool ret = true;

    ASSERT(mi);
#ifdef MULTI_DEBUG_EVENT_LOOP
    printf("%s -> TUN len=%d\n",
           id(mi),
           mi->context.c2.to_tun.len);
#endif
    set_prefix(mi);
    process_outgoing_tun(&mi->context);
    ret = multi_process_post(m, mi, mpp_flags);
    clear_prefix();
    return ret;
}



static inline bool
multi_process_outgoing_link_dowork(struct multi_context *m, struct multi_instance *mi, const unsigned int mpp_flags)
{
    bool ret = true;
    set_prefix(mi);
    process_outgoing_link(&mi->context);
    ret = multi_process_post(m, mi, mpp_flags);
    clear_prefix();
    return ret;
}

/*
 * Check for signals.
 */
#define MULTI_CHECK_SIG(m) EVENT_LOOP_CHECK_SIGNAL(&(m)->top, multi_process_signal, (m))

static inline void
multi_set_pending(struct multi_context *m, struct multi_instance *mi)
{
    m->pending = mi;
}

#endif /* P2MP_SERVER */
#endif /* MULTI_H */