@@ -46,6 +46,9 @@
#include "mstats.h"
+#include <sys/select.h>
+#include <sys/time.h>
+
counter_type link_read_bytes_global; /* GLOBAL */
counter_type link_write_bytes_global; /* GLOBAL */
@@ -78,6 +81,32 @@ show_wait_status(struct context *c)
#endif /* ifdef ENABLE_DEBUG */
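+/* Bulk (batch) mode is active when --bulk-mode has set frame.bulk_size and the tun/tap device and context buffers exist. */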
+bool check_bulk_mode(struct context *c)
+{
+ if ((c->c2.frame.bulk_size > 0) && (c->c1.tuntap != NULL) && (c->c2.buffers != NULL))
+ {
+ return true;
+ }
+ return false;
+}
+
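+/* Hand the batch of tun packets collected in context b (the top-level/multi context) over to context c (the client instance) and reset b's index. */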
+void xfer_io(struct context *c, struct context *b)
+{
+ int plen = 0;
+ if (check_bulk_mode(b))
+ {
+ int leng = (b->c2.buffers->bufs_indx + 1);
+ for (int x = 0; x < leng; ++x)
+ {
+ plen = BLEN(&b->c2.bufs[x]);
+ if (plen < 1) { c->c2.bufs[x].len = 0; }
+ else { c->c2.bufs[x] = b->c2.bufs[x]; }
+ }
+ c->c2.buffers->bufs_indx = b->c2.buffers->bufs_indx;
+ b->c2.buffers->bufs_indx = -1;
+ }
+}
+
static void
check_tls_errors_co(struct context *c)
{
@@ -605,6 +634,21 @@ buffer_turnover(const uint8_t *orig_buf, struct buffer *dest_stub, struct buffer
}
}
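+/* Store (prepsize) or fetch (postsize) a 16-bit big-endian packet length at the current buffer position and return the pointer advanced past the two length bytes. */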
+uint8_t *buff_prepsize(uint8_t *buff, int *size)
+{
+ buff[0] = ((*size >> 8) & 0xff);
+ buff[1] = ((*size >> 0) & 0xff);
+ buff += 2;
+ return buff;
+}
+
+uint8_t *buff_postsize(uint8_t *buff, int *size)
+{
+ *size = ((buff[0] << 8) + (buff[1] << 0));
+ buff += 2;
+ return buff;
+}
+
/*
* Compress, fragment, encrypt and HMAC-sign an outgoing packet.
* Input: c->c2.buf
@@ -1031,6 +1075,7 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo
fprintf(stderr, "R");
}
#endif
+
msg(D_LINK_RW, "%s READ [%d] from %s: %s", proto2ascii(lsi->proto,
lsi->af, true),
BLEN(&c->c2.buf), print_link_socket_actual(&c->c2.from, &gc),
PROTO_DUMP(&c->c2.buf, &gc));
@@ -1211,6 +1256,28 @@ process_incoming_link_part2(struct context *c, struct link_socket_info *lsi,
}
}
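+/* Bulk mode, link side: after decryption, if the payload in to_tun starts with the 0xff 0x13 0x37 0xff marker, stash the whole aggregate in send_tun_max for process_outgoing_tun_part3() and step past the marker and the first length field. */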
+void process_incoming_link_part3(struct context *c)
+{
+ int leng = BLEN(&c->c2.to_tun);
+ if (leng > 0)
+ {
+ if (check_bulk_mode(c))
+ {
+ c->c2.buffers->send_tun_max.len = 0;
+ uint8_t *temp = BPTR(&c->c2.to_tun);
+ if ((temp[0] == 0xff) && (temp[1] == 0x13) && (temp[2] == 0x37) && (temp[3] == 0xff))
+ {
+ c->c2.buffers->send_tun_max.offset = TUN_BAT_OFF;
+ c->c2.buffers->send_tun_max.len = leng;
+ bcopy(BPTR(&c->c2.to_tun), BPTR(&c->c2.buffers->send_tun_max), leng);
+ //dmsg(M_INFO, "FWD BAT LINK 0 [%d] [%d] [%d] [%d] [%d]",
BLEN(&c->c2.buf), BLEN(&c->c2.to_tun), BLEN(&c->c2.buffers->read_link_buf),
BLEN(&c->c2.buffers->read_link_buf), BLEN(&c->c2.buffers->send_tun_max));
+ }
+ c->c2.to_tun.offset += 6;
+ c->c2.buf.offset += 6;
+ }
+ }
+}
+
static void
process_incoming_link(struct context *c, struct link_socket *sock)
{
@@ -1221,6 +1288,7 @@ process_incoming_link(struct context *c, struct link_socket *sock)
process_incoming_link_part1(c, lsi, false);
process_incoming_link_part2(c, lsi, orig_buf);
+ process_incoming_link_part3(c);
perf_pop();
}
@@ -1321,7 +1389,7 @@ process_incoming_dco(struct context *c)
*/
void
-read_incoming_tun(struct context *c)
+read_incoming_tun_part2(struct context *c)
{
/*
* Setup for read() call on TUN/TAP device.
@@ -1382,6 +1450,54 @@ read_incoming_tun(struct context *c)
perf_pop();
}
+void read_incoming_tun_part3(struct context *c)
+{
+ fd_set rfds;
+ struct timeval timo;
+ if (check_bulk_mode(c))
+ {
+ int plen = 0;
+ int fdno = c->c1.tuntap->fd;
+ while ((c->c2.buffers->bufs_indx + 1) < TUN_BAT_MIN)
+ {
+ int leng = plen;
+ int indx = (c->c2.buffers->bufs_indx + 1);
+ if (leng < 1)
+ {
+ FD_ZERO(&rfds);
+ FD_SET(fdno, &rfds);
+ timo.tv_sec = 0;
+ timo.tv_usec = 0;
+ select(fdno+1, &rfds, NULL, NULL, &timo);
+ if (FD_ISSET(fdno, &rfds))
+ {
+ read_incoming_tun_part2(c);
+ plen = BLEN(&c->c2.buf);
+ } else { break; }
+ }
+ //dmsg(M_INFO, "FWD BAT READ 0 [%d] [%d] [%d] [%d] [%d]",
c->c2.buffers->bufs_indx + 1, fdno, BLEN(&c->c2.buf),
BLEN(&c->c2.buffers->read_tun_buf), BLEN(&c->c2.buffers->read_tun_max));
+ leng = plen;
+ if (leng > 0)
+ {
+ c->c2.buffers->read_tun_bufs[indx].offset = TUN_BAT_OFF;
+ c->c2.buffers->read_tun_bufs[indx].len = leng;
+ bcopy(BPTR(&c->c2.buf), BPTR(&c->c2.buffers->read_tun_bufs[indx]), leng);
+ c->c2.bufs[indx] = c->c2.buffers->read_tun_bufs[indx];
+ c->c2.buffers->bufs_indx = indx;
+ } else { break; }
+ plen = 0;
+ }
+ }
+}
+
+void read_incoming_tun(struct context *c)
+{
+ if (c->c2.frame.bulk_size <= 0) {
+ read_incoming_tun_part2(c);
+ }
+ read_incoming_tun_part3(c);
+}
+
/**
* Drops UDP packets which OS decided to route via tun.
*
@@ -1469,7 +1585,7 @@ drop_if_recursive_routing(struct context *c, struct buffer *buf)
*/
void
-process_incoming_tun(struct context *c, struct link_socket *out_sock)
+process_incoming_tun_part2(struct context *c, struct link_socket *out_sock)
{
struct gc_arena gc = gc_new();
@@ -1488,7 +1604,7 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock)
#endif
/* Show packet content */
- dmsg(D_TUN_RW, "TUN READ [%d]", BLEN(&c->c2.buf));
+ dmsg(D_TUN_RW, "TUN READ [%d] [%d]", BLEN(&c->c2.buf),
c->c2.frame.buf.payload_size);
if (c->c2.buf.len > 0)
{
@@ -1512,7 +1628,9 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock)
}
if (c->c2.buf.len > 0)
{
+ if ((c->c2.buffers == NULL) || (c->c2.buffers->flag_ciph != -2)) {
encrypt_sign(c, true);
+ }
}
else
{
@@ -1522,6 +1640,65 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock)
gc_free(&gc);
}
+void process_incoming_tun_part3(struct context *c, struct link_socket *out_sock)
+{
+ if (check_bulk_mode(c))
+ {
+ c->c2.buffers->flag_ciph = -2;
+ c->c2.buffers->read_tun_max.offset = TUN_BAT_OFF;
+ c->c2.buffers->read_tun_max.len = 0;
+ uint8_t *temp = BPTR(&c->c2.buffers->read_tun_max);
+ int plen = 0, fdno = c->c1.tuntap->fd;
+ int maxl = 0, leng = (c->c2.buffers->bufs_indx + 1);
+ if ((fdno > 0) && (leng > 0))
+ {
+ for (int x = 0; x < leng; ++x)
+ {
+ c->c2.buf = c->c2.bufs[x];
+ //dmsg(M_INFO, "FWD BAT INPT 0 [%d] [%d] [%d] [%d] [%d]",
x, fdno, BLEN(&c->c2.buf), BLEN(&c->c2.buffers->read_tun_buf),
BLEN(&c->c2.bufs[x]));
+ process_incoming_tun_part2(c, out_sock);
+ if (BLEN(&c->c2.buf) < 1)
+ {
+ c->c2.bufs[x].len = 0;
+ }
+ }
+ for (int x = 0; x < leng; ++x)
+ {
+ plen = c->c2.bufs[x].len;
+ if (plen > 0)
+ {
+ if (maxl < 1)
+ {
+ temp[0] = 0xff; temp[1] = 0x13; temp[2] = 0x37; temp[3] = 0xff;
+ temp += 4; maxl += 4;
+ }
+ temp = buff_prepsize(temp, &plen);
+ bcopy(BPTR(&c->c2.bufs[x]), temp, plen);
+ temp += plen; maxl += (plen + 2);
+ }
+ }
+ if (maxl > 0)
+ {
+ c->c2.buffers->read_tun_max.offset = TUN_BAT_OFF;
+ c->c2.buffers->read_tun_max.len = maxl;
+ c->c2.buf = c->c2.buffers->read_tun_max;
+ //dmsg(M_INFO, "FWD BAT INPT 1 [%d] [%d] [%d] [%d] [%d]",
maxl, fdno, BLEN(&c->c2.buf), BLEN(&c->c2.buffers->read_tun_buf),
BLEN(&c->c2.buffers->read_tun_max));
+ encrypt_sign(c, true);
+ }
+ }
+ c->c2.buffers->bufs_indx = -1;
+ c->c2.buffers->flag_ciph = -1;
+ }
+}
+
+void process_incoming_tun(struct context *c, struct link_socket *out_sock)
+{
+ if (c->c2.frame.bulk_size <= 0) {
+ process_incoming_tun_part2(c, out_sock);
+ }
+ process_incoming_tun_part3(c, out_sock);
+}
+
/**
* Forges a IPv6 ICMP packet with a no route to host error code from the
* IPv6 packet in buf and sends it directly back to the client via the tun
@@ -1748,7 +1925,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock)
perf_push(PERF_PROC_OUT_LINK);
- if (c->c2.to_link.len > 0 && c->c2.to_link.len <= c->c2.frame.buf.payload_size)
+ if (c->c2.to_link.len > 0 && (c->c2.to_link.len <= c->c2.frame.buf.payload_size || c->c2.frame.bulk_size > 0))
{
/*
* Setup for call to send/sendto which will send
@@ -1793,6 +1970,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock)
fprintf(stderr, "W");
}
#endif
+
msg(D_LINK_RW, "%s WRITE [%d] to %s: %s",
proto2ascii(sock->info.proto, sock->info.af, true),
BLEN(&c->c2.to_link),
print_link_socket_actual(c->c2.to_link_addr, &gc),
PROTO_DUMP(&c->c2.to_link, &gc));
@@ -1892,7 +2070,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock)
*/
void
-process_outgoing_tun(struct context *c, struct link_socket *in_sock)
+process_outgoing_tun_part2(struct context *c, struct link_socket *in_sock)
{
/*
* Set up for write() call to TUN/TAP
@@ -1912,7 +2090,7 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock)
process_ip_header(c, PIP_MSSFIX | PIPV4_EXTRACT_DHCP_ROUTER | PIPV4_CLIENT_NAT | PIP_OUTGOING,
    &c->c2.to_tun, in_sock);
- if (c->c2.to_tun.len <= c->c2.frame.buf.payload_size)
+ if (c->c2.to_tun.len <= c->c2.frame.buf.payload_size || c->c2.frame.bulk_size > 0)
{
/*
* Write to TUN/TAP device.
@@ -1925,7 +2103,8 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock)
fprintf(stderr, "w");
}
#endif
- dmsg(D_TUN_RW, "TUN WRITE [%d]", BLEN(&c->c2.to_tun));
+
+ dmsg(D_TUN_RW, "TUN WRITE [%d] [%d]", BLEN(&c->c2.to_tun),
c->c2.frame.buf.payload_size);
#ifdef PACKET_TRUNCATION_CHECK
ipv4_packet_size_verify(BPTR(&c->c2.to_tun), BLEN(&c->c2.to_tun), TUNNEL_TYPE(c->c1.tuntap),
@@ -1981,6 +2160,39 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock)
perf_pop();
}
+void process_outgoing_tun_part3(struct context *c, struct link_socket *in_sock)
+{
+ if (check_bulk_mode(c))
+ {
+ int maxl = 0, plen = 0;
+ int leng = BLEN(&c->c2.buffers->send_tun_max);
+ uint8_t *temp = BPTR(&c->c2.buffers->send_tun_max);
+ temp += 4; maxl += 4;
+ for (int x = 0; x < TUN_BAT_MAX; ++x)
+ {
+ temp = buff_postsize(temp, &plen);
+ if ((leng > 0) && (plen > 0) && ((maxl + plen) < leng))
+ {
+ c->c2.to_tun = c->c2.buffers->to_tun_max;
+ c->c2.to_tun.offset = TUN_BAT_OFF;
+ c->c2.to_tun.len = plen;
+ bcopy(temp, BPTR(&c->c2.to_tun), plen);
+ temp += plen; maxl += (plen + 2);
+ //dmsg(M_INFO, "FWD BAT OUTP 1 [%d] [%d] [%d] [%d]", x,
BLEN(&c->c2.buf), BLEN(&c->c2.to_tun), BLEN(&c->c2.buffers->read_link_buf));
+ process_outgoing_tun_part2(c, in_sock);
+ } else { break; }
+ }
+ }
+}
+
+void process_outgoing_tun(struct context *c, struct link_socket *in_sock)
+{
+ if (c->c2.frame.bulk_size <= 0) {
+ process_outgoing_tun_part2(c, in_sock);
+ }
+ process_outgoing_tun_part3(c, in_sock);
+}
+
void
pre_select(struct context *c)
{
@@ -79,6 +79,8 @@ void pre_select(struct context *c);
void process_io(struct context *c, struct link_socket *sock);
+void xfer_io(struct context *c, struct context *b);
+
/**********************************************************************/
/**
@@ -196,6 +198,8 @@ bool process_incoming_link_part1(struct context *c, struct link_socket_info *lsi
void process_incoming_link_part2(struct context *c, struct link_socket_info *lsi,
    const uint8_t *orig_buf);
+void process_incoming_link_part3(struct context *c);
+
/**
* Transfers \c float_sa data extracted from an incoming DCO
* PEER_FLOAT_NTF to \c out_osaddr for later processing.
@@ -2971,6 +2971,10 @@ frame_finalize_options(struct context *c, const struct options *o)
tailroom += COMP_EXTRA_BUFFER(payload_size);
#endif
+ if (frame->bulk_size > 0) {
+ payload_size = frame->tun_mtu;
+ }
+
frame->buf.payload_size = payload_size;
frame->buf.headroom = headroom;
frame->buf.tailroom = tailroom;
@@ -3473,6 +3477,9 @@ do_init_frame_tls(struct context *c)
if (c->c2.tls_multi)
{
tls_multi_init_finalize(c->c2.tls_multi, c->options.ce.tls_mtu);
+ if (c->c2.frame.bulk_size > 0) {
+ c->c2.tls_multi->opt.frame.buf.payload_size = c->c2.frame.tun_mtu;
+ }
ASSERT(c->c2.tls_multi->opt.frame.buf.payload_size <= c->c2.frame.buf.payload_size);
frame_print(&c->c2.tls_multi->opt.frame, D_MTU_INFO, "Control Channel MTU parms");
@@ -3536,6 +3543,14 @@ do_init_frame(struct context *c)
c->c2.frame.extra_tun += c->options.ce.tun_mtu_extra;
}
+ /*
+ * Adjust bulk size based on the --bulk-mode parameter.
+ */
+ if (c->options.ce.bulk_mode)
+ {
+ c->c2.frame.bulk_size = c->options.ce.tun_mtu;
+ }
+
/*
* Fill in the blanks in the frame parameters structure,
* make sure values are rational, etc.
@@ -3676,9 +3691,40 @@ init_context_buffers(const struct frame *frame)
size_t buf_size = BUF_SIZE(frame);
+ if (frame->bulk_size > 0) {
+ buf_size = BAT_SIZE(TUN_BAT_MAX, frame->tun_mtu, frame->buf.headroom + frame->buf.tailroom);
+ }
+
+ dmsg(M_INFO, "MEM NEW [%ld] [%d+%d+%d]", buf_size,
frame->buf.headroom, frame->buf.payload_size, frame->buf.tailroom);
+
b->read_link_buf = alloc_buf(buf_size);
b->read_tun_buf = alloc_buf(buf_size);
+ if (frame->bulk_size > 0) {
+ for (int x = 0; x < TUN_BAT_MAX; ++x)
+ {
+ size_t part_size = BUF_SIZE(frame);
+ b->read_tun_bufs[x] = alloc_buf(part_size);
+ b->read_tun_bufs[x].offset = TUN_BAT_OFF;
+ b->read_tun_bufs[x].len = 0;
+ }
+
+ b->read_tun_max = alloc_buf(buf_size);
+ b->read_tun_max.offset = TUN_BAT_OFF;
+ b->read_tun_max.len = 0;
+
+ b->send_tun_max = alloc_buf(buf_size);
+ b->send_tun_max.offset = TUN_BAT_OFF;
+ b->send_tun_max.len = 0;
+
+ b->to_tun_max = alloc_buf(buf_size);
+ b->to_tun_max.offset = TUN_BAT_OFF;
+ b->to_tun_max.len = 0;
+ }
+
+ b->bufs_indx = -1;
+ b->flag_ciph = -1;
+
b->aux_buf = alloc_buf(buf_size);
b->encrypt_buf = alloc_buf(buf_size);
@@ -41,9 +41,15 @@ void
alloc_buf_sock_tun(struct buffer *buf, const struct frame *frame)
{
/* allocate buffer for overlapped I/O */
- *buf = alloc_buf(BUF_SIZE(frame));
+ size_t alen = BUF_SIZE(frame);
+ size_t blen = frame->buf.payload_size;
+ if (frame->bulk_size > 0) {
+ alen = BAT_SIZE(TUN_BAT_MAX, frame->tun_mtu, TUN_BAT_OFF);
+ blen = BAT_SIZE(TUN_BAT_MAX, frame->tun_mtu, TUN_BAT_NOP);
+ }
+ *buf = alloc_buf(alen);
ASSERT(buf_init(buf, frame->buf.headroom));
- buf->len = frame->buf.payload_size;
+ buf->len = blen;
ASSERT(buf_safe(buf, 0));
}
@@ -58,6 +58,14 @@
*/
#define TUN_MTU_MIN 100
+/*
+ * Bulk mode static define values:
+ *   TUN_BAT_MIN - target number of queued tun reads to collect per batch
+ *   TUN_BAT_MAX - maximum number of packets carried in one bulk frame
+ *   TUN_BAT_OFF - offset (headroom) reserved in the bulk staging buffers
+ *   TUN_BAT_NOP - no extra bytes added in a BAT_SIZE() calculation
+ */
+#define TUN_BAT_MIN 6
+#define TUN_BAT_MAX 9
+#define TUN_BAT_OFF 256
+#define TUN_BAT_NOP 0
+
/*
* Default MTU of network over which tunnel data will pass by TCP/UDP.
*/
@@ -152,6 +160,10 @@ struct frame
* which defaults to 0 for tun and 32
* (\c TAP_MTU_EXTRA_DEFAULT) for tap.
* */
+
+ int bulk_size; /**< Signal to the init frame function
+ * to allow for bulk mode TCP transfers.
+ * */
};
/* Forward declarations, to prevent includes */
@@ -171,6 +183,7 @@ struct options;
* larger than the headroom.
*/
#define BUF_SIZE(f) ((f)->buf.headroom + (f)->buf.payload_size + (f)->buf.tailroom)
+/* total size of a bulk buffer holding 'a' packets of 'b' bytes plus 'c' extra bytes */
+#define BAT_SIZE(a, b, c) (((a) * (b)) + (c))
/*
* Function prototypes.
@@ -3414,6 +3414,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst
}
process_incoming_link_part2(c, lsi, orig_buf);
+ process_incoming_link_part3(c);
}
perf_pop();
@@ -3558,9 +3559,7 @@ multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags
const int dev_type = TUNNEL_TYPE(m->top.c1.tuntap);
int16_t vid = 0;
-#ifdef MULTI_DEBUG_EVENT_LOOP
- printf("TUN -> TCP/UDP [%d]\n", BLEN(&m->top.c2.buf));
-#endif
+ msg(D_MULTI_DEBUG, "TUN -> TCP/UDP [%d]", BLEN(&m->top.c2.buf));
if (m->pending)
{
@@ -3610,6 +3609,8 @@ multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags
{
/* transfer packet pointer from top-level context buffer to instance */
c->c2.buf = m->top.c2.buf;
+ /* todo determine if to call this (multi_process_incoming_tun) for each bulk item read? */
+ xfer_io(c, &m->top);
}
else
{
@@ -112,6 +112,14 @@ struct context_buffers
*/
struct buffer read_link_buf;
struct buffer read_tun_buf;
+
+ struct buffer read_tun_bufs[TUN_BAT_MAX];
+ struct buffer read_tun_max;
+ struct buffer send_tun_max;
+ struct buffer to_tun_max;
+
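+ /* bufs_indx: index of the last filled entry in c2.bufs[] (-1 when empty);
+  * flag_ciph: set to -2 while a bulk batch is being aggregated to suppress per-packet encrypt_sign() */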
+ int bufs_indx;
+ int flag_ciph;
};
/*
@@ -376,6 +384,8 @@ struct context_2
struct buffer to_tun;
struct buffer to_link;
+ struct buffer bufs[TUN_BAT_MAX];
+
/* should we print R|W|r|w to console on packet transfers? */
bool log_rw;
@@ -304,6 +304,7 @@ static const char usage_message[] =
" 'maybe' -- Use per-route hints\n"
" 'yes' -- Always DF (Don't Fragment)\n"
"--mtu-test : Empirically measure and report MTU.\n"
+ "--bulk-mode : Use bulk TUN/TCP reads/writes.\n"
#ifdef ENABLE_FRAGMENT
"--fragment max : Enable internal datagram fragmentation so that no
UDP\n"
" datagrams are sent which are larger than max
bytes.\n"
@@ -3005,6 +3006,9 @@ options_postprocess_mutate_ce(struct options *o, struct connection_entry *ce)
ce->tun_mtu_extra_defined = true;
ce->tun_mtu_extra = TAP_MTU_EXTRA_DEFAULT;
}
+ if (ce->proto != PROTO_TCP && ce->proto != PROTO_TCP_SERVER && ce->proto != PROTO_TCP_CLIENT) {
+ ce->bulk_mode = false;
+ }
}
/*
@@ -9926,6 +9930,10 @@ add_option(struct options *options, char *p[], bool is_inline, const char *file,
goto err;
}
}
+ else if (streq(p[0], "bulk-mode"))
+ {
+ options->ce.bulk_mode = true;
+ }
else
{
int i;
@@ -174,6 +174,9 @@ struct connection_entry
/* Allow only client that support resending the wrapped client key */
bool tls_crypt_v2_force_cookie;
+
+ /* Bulk mode allows for multiple tun reads + larger tcp writes */
+ bool bulk_mode;
};
struct remote_entry
I was still working on getting some (potentially limited) access to Gerrit. In the meantime I manually ported the diff from the 2.6 source code I was able to compile and run with over to the generalized master branch; that tree doesn't contain any configure/makefile setup, so I am not able to build or run that version at the moment (untested). I made some changes to try to optimize some parts and cover more edge cases, and I put the feature behind a command-line option, so the rest of the code base should not be affected when the option is omitted.

I created an initial example pull request to gauge whether anyone is actually interested in this work, and to collect any initial thoughts or feedback on code quality, since I am actually using this right now personally. If anyone is interested in such a change I will paste the generated patch diff below; otherwise I will continue to run this experimental build at home and see what happens! :)

Thanks,
Jon C

https://github.com/OpenVPN/openvpn/pull/814/files

$ cat 0001-bulk-mode.patch
From 1b15b4aed623e7490d72ed7e21c3873a05630dd1 Mon Sep 17 00:00:00 2001
From: Jon Chiappetta <root@fossjon.com>
Date: Wed, 6 Aug 2025 16:33:18 -0400
Subject: [PATCH] bulk mode

---
 src/openvpn/forward.c | 226 ++++++++++++++++++++++++++++++++++++++++--
 src/openvpn/forward.h |   4 +
 src/openvpn/init.c    |  46 +++++++++
 src/openvpn/mtu.c     |  10 +-
 src/openvpn/mtu.h     |  13 +++
 src/openvpn/multi.c   |   7 +-
 src/openvpn/openvpn.h |  10 ++
 src/openvpn/options.c |   8 ++
 src/openvpn/options.h |   3 +
 9 files changed, 315 insertions(+), 12 deletions(-)

--
2.39.5 (Apple Git-154)
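For anyone skimming the diff: the aggregate that bulk mode carries inside a single TCP payload is just a 4-byte marker (0xff 0x13 0x37 0xff) followed by each tun packet as a 2-byte big-endian length plus the packet bytes (see buff_prepsize()/buff_postsize() and process_incoming_tun_part3() above). As a rough standalone sketch of that packing step, not code from the patch, and with the bulk_pack() name and parameters made up purely for illustration:

#include <stdint.h>
#include <string.h>

/* Illustrative only: pack 'count' packets into 'out' using the bulk framing
 * described above, i.e. a 4-byte marker, then for every packet a 2-byte
 * big-endian length followed by the packet data.  Returns the number of
 * bytes written, or 0 if nothing was packed or 'out' is too small. */
static size_t bulk_pack(uint8_t *out, size_t out_cap,
                        const uint8_t *const *pkts, const int *lens, int count)
{
    size_t used = 4;
    if (out_cap < 4)
    {
        return 0;
    }
    out[0] = 0xff; out[1] = 0x13; out[2] = 0x37; out[3] = 0xff;
    for (int i = 0; i < count; ++i)
    {
        if (lens[i] <= 0)
        {
            continue;   /* empty slots are skipped, as in the patch */
        }
        if (used + 2 + (size_t)lens[i] > out_cap)
        {
            return 0;   /* aggregate would not fit into the staging buffer */
        }
        out[used++] = (uint8_t)((lens[i] >> 8) & 0xff);   /* like buff_prepsize() */
        out[used++] = (uint8_t)(lens[i] & 0xff);
        memcpy(out + used, pkts[i], (size_t)lens[i]);
        used += (size_t)lens[i];
    }
    return (used > 4) ? used : 0;   /* only report a frame when at least one packet was packed */
}

The receiving side does the reverse: process_incoming_link_part3() looks for the marker after decryption and process_outgoing_tun_part3() walks the length-prefixed packets and writes each one out to the tun device. Also note the option only takes effect for the TCP protocols (options_postprocess_mutate_ce() clears bulk_mode otherwise), and presumably it has to be enabled on both ends since the peer needs to unpack the aggregate.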