bgpd: pimp imsg pipes

classic Classic list List threaded Threaded
8 messages Options
Reply | Threaded
Open this post in threaded view
|

bgpd: pimp imsg pipes

Claudio Jeker
I noticed that by default the send and recv socket buffers for
socketpair(2) is rather low (4k IIRC). The result is a fairly inefficient
write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
SO_RCVBUF seems to help increase the data sent and received per syscall.

Another option would be to make the default socketbuffer watermarks for
socketpair(2) a bit less limited. Then all imsg users would benefit at the
same time.
--
:wq Claudio

Index: bgpd.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/bgpd.c,v
retrieving revision 1.219
diff -u -p -r1.219 bgpd.c
--- bgpd.c 29 May 2019 08:48:00 -0000 1.219
+++ bgpd.c 4 Jun 2019 09:47:18 -0000
@@ -46,6 +46,7 @@ int send_filterset(struct imsgbuf *, st
 int reconfigure(char *, struct bgpd_config *);
 int dispatch_imsg(struct imsgbuf *, int, struct bgpd_config *);
 int control_setup(struct bgpd_config *);
+static void getsockpair(int [2]);
 int imsg_send_sockets(struct imsgbuf *, struct imsgbuf *);
 
 int cflags;
@@ -203,12 +204,8 @@ main(int argc, char *argv[])
 
  log_info("startup");
 
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-    PF_UNSPEC, pipe_m2s) == -1)
- fatal("socketpair");
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-    PF_UNSPEC, pipe_m2r) == -1)
- fatal("socketpair");
+ getsockpair(pipe_m2s);
+ getsockpair(pipe_m2r);
 
  /* fork children */
  rde_pid = start_child(PROC_RDE, saved_argv0, pipe_m2r[1], debug,
@@ -1073,18 +1070,54 @@ handle_pollfd(struct pollfd *pfd, struct
  return (0);
 }
 
+static void
+getsockpair(int pipe[2])
+{
+ int bsize, i;
+
+ if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
+    PF_UNSPEC, pipe) == -1)
+ fatal("socketpair");
+
+ /* increase socketpair buffers */
+ for (i = 0; i < 2; i++) {
+ for (bsize = MAX_SOCK_BUF; bsize >= 16 * 1024; bsize /= 2) {
+ if (setsockopt(pipe[i], SOL_SOCKET, SO_RCVBUF,
+    &bsize, sizeof(bsize)) == -1) {
+ if (errno != ENOBUFS)
+ fatal("setsockopt(SO_RCVBUF, %d)",
+    bsize);
+ continue;
+ }
+ break;
+ }
+ if (bsize != MAX_SOCK_BUF)
+ log_warn("non optimal SO_RCVBUF size of %d", bsize);
+ }
+ for (i = 0; i < 2; i++) {
+ for (bsize = MAX_SOCK_BUF; bsize >= 16 * 1024; bsize /= 2) {
+ if (setsockopt(pipe[i], SOL_SOCKET, SO_SNDBUF,
+    &bsize, sizeof(bsize)) == -1) {
+ if (errno != ENOBUFS)
+ fatal("setsockopt(SO_SNDBUF, %d)",
+    bsize);
+ continue;
+ }
+ break;
+ }
+ if (bsize != MAX_SOCK_BUF)
+ log_warn("non optimal SO_SNDBUF size of %d", bsize);
+ }
+}
+
 int
 imsg_send_sockets(struct imsgbuf *se, struct imsgbuf *rde)
 {
  int pipe_s2r[2];
  int pipe_s2r_ctl[2];
 
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-     PF_UNSPEC, pipe_s2r) == -1)
- return (-1);
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-     PF_UNSPEC, pipe_s2r_ctl) == -1)
- return (-1);
+ getsockpair(pipe_s2r);
+ getsockpair(pipe_s2r_ctl);
 
  if (imsg_compose(se, IMSG_SOCKET_CONN, 0, 0, pipe_s2r[0],
     NULL, 0) == -1)
Index: bgpd.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/bgpd.h,v
retrieving revision 1.386
diff -u -p -r1.386 bgpd.h
--- bgpd.h 17 Jun 2019 13:35:42 -0000 1.386
+++ bgpd.h 17 Jun 2019 13:56:15 -0000
@@ -49,7 +49,7 @@
 #define MIN_HOLDTIME 3
 #define READ_BUF_SIZE 65535
 #define RT_BUF_SIZE 16384
-#define MAX_RTSOCK_BUF (2 * 1024 * 1024)
+#define MAX_SOCK_BUF (2 * 1024 * 1024)
 #define MAX_COMM_MATCH 3
 
 #define BGPD_OPT_VERBOSE 0x0001
Index: kroute.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/kroute.c,v
retrieving revision 1.236
diff -u -p -r1.236 kroute.c
--- kroute.c 6 May 2019 09:49:26 -0000 1.236
+++ kroute.c 3 Jun 2019 20:16:59 -0000
@@ -236,7 +236,7 @@ kr_init(void)
     &default_rcvbuf, &optlen) == -1)
  log_warn("%s: getsockopt SOL_SOCKET SO_RCVBUF", __func__);
  else
- for (rcvbuf = MAX_RTSOCK_BUF;
+ for (rcvbuf = MAX_SOCK_BUF;
     rcvbuf > default_rcvbuf &&
     setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
     &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;

Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Martin Pieuchot
On 17/06/19(Mon) 21:43, Claudio Jeker wrote:
> I noticed that by default the send and recv socket buffers for
> socketpair(2) is rather low (4k IIRC). The result is a fairly inefficent
> write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
> SO_RCVBUF seems to help increase the data sent and received per syscall.
>
> Another option would be to make the default socketbuffer watermarks for
> socketpair(2) a bit less limited. Then all imsg users would benefit at the
> same time.

What's the downside of making the default socketbuffer watermarks
bigger?  Wasting resources?  How did you figure out that the socket
buffers were too small?  Is that something we could apply to other
daemons?

Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Claudio Jeker
On Mon, Jun 17, 2019 at 05:00:32PM -0300, Martin Pieuchot wrote:

> On 17/06/19(Mon) 21:43, Claudio Jeker wrote:
> > I noticed that by default the send and recv socket buffers for
> > socketpair(2) is rather low (4k IIRC). The result is a fairly inefficent
> > write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
> > SO_RCVBUF seems to help increase the data sent and received per syscall.
> >
> > Another option would be to make the default socketbuffer watermarks for
> > socketpair(2) a bit less limited. Then all imsg users would benefit at the
> > same time.
>
> What's the downside of making the default socketbuffer watermarks
> bigger?  Wasting resources?  How did you figure out that the socket
> buffers were too small?  Is that something we could apply to other
> daemons?
>

First about the how I found it. Looking at ktrace output and noticing that
write is only pushing 4k of data and read is also only pulling 4k data.
Afterwards I used netstat -vP to verify the socketbuffer limits.

Bigger watermarks could cause higher pressure on the mbuf/mcluster pools.
Now as long as the receiver is processing the data there should not be a
problem but once the reader stops more data queues up in the socketbuffer.
It would make sense to use something similar to pipe(2)'s method of big
and small buffers for socketpair(2). It would also be possible to auto
scale the buffers but that is a fair bit harder to implement.

I guess a lot of processes could benefit from increased buffers. For
example firefox and chrome are heavy users of unix sockets. I remember
that we already added something in Xorg to bump the buffer size.
A quick check tells me that most unix sockets have sb_hiwat set to 4096.

Makes me wonder what the other BSD use for buffer sizes for the different
UNIX socket types.
--
:wq Claudio

Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Sebastian Benoit-3
Claudio Jeker([hidden email]) on 2019.06.17 22:38:00 +0200:

> On Mon, Jun 17, 2019 at 05:00:32PM -0300, Martin Pieuchot wrote:
> > On 17/06/19(Mon) 21:43, Claudio Jeker wrote:
> > > I noticed that by default the send and recv socket buffers for
> > > socketpair(2) is rather low (4k IIRC). The result is a fairly inefficent
> > > write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
> > > SO_RCVBUF seems to help increase the data sent and received per syscall.
> > >
> > > Another option would be to make the default socketbuffer watermarks for
> > > socketpair(2) a bit less limited. Then all imsg users would benefit at the
> > > same time.
> >
> > What's the downside of making the default socketbuffer watermarks
> > bigger?  Wasting resources?  How did you figure out that the socket
> > buffers were too small?  Is that something we could apply to other
> > daemons?
> >
>
> First about the how I found it. Looking at ktrace output and noticing that
> write is only pushing 4k of data and read is also only pulling 4k data.
> Afterwards I used netstat -vP to verify the socketbuffer limits.
>
> Bigger watermarks could cause higher pressure on the mbuf/mcluster pools.
> Now as long as the receiver is processing the data there should not be a
> problem but once the reader stops more data queues up in the socketbuffer.
> It would make sense to use something similar to pipe(2)'s method of big
> and small buffers for socketpair(2). It would also be possible to auto
> scale the buffers but that is a fair bit harder to implement.
>
> I guess a lot of processes could benefit from increased buffers. For
> example firefox and chrome are heavy users of unix sockets. I remember
> that we already added something in Xorg to bump the buffer size.
> A quick check tells me that most unix sockets have sb_hiwat set to 4096.
>
> Makes me wonder what the other BSD use for buffer sizes for the different
> UNIX socket types.

FreeBSD: sysctl
net.local.stream.recvspace: 8192
net.local.dgram.recvspace: 4096
net.local.seqpacket.recvspace: 8192
net.local.stream.sendspace: 8192

Linux: 212992 (/proc/sys/net/core/[rw]mem_default)


> --
> :wq Claudio
>

Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Theo de Raadt-2
In reply to this post by Martin Pieuchot
Martin Pieuchot <[hidden email]> wrote:

> On 17/06/19(Mon) 21:43, Claudio Jeker wrote:
> > I noticed that by default the send and recv socket buffers for
> > socketpair(2) is rather low (4k IIRC). The result is a fairly inefficent
> > write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
> > SO_RCVBUF seems to help increase the data sent and received per syscall.
> >
> > Another option would be to make the default socketbuffer watermarks for
> > socketpair(2) a bit less limited. Then all imsg users would benefit at the
> > same time.
>
> What's the downside of making the default socketbuffer watermarks
> bigger?

It is kernel memory.

> Wasting resources?

People in the past did assessments to decide what numbers made
reasonable sense for kernel memory availability at the time.  They did
this to avoid the potential of denial of service due to resource
failure, including the worst case of exhaustion, and failure to recover.

Until someone does a new assessment, those should stand.

The alternative is deadlock, for our most heavy users.

Is it unreasonable to prefer low performance over potential of
deadlock or crashing?

> How did you figure out that the socket buffers were too small?

He profiled.

> Is that something we could apply to other daemons?

See a previous point.


Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Theo de Raadt-2
In reply to this post by Claudio Jeker
Claudio Jeker <[hidden email]> wrote:

> I guess a lot of processes could benefit from increased buffers. For
> example firefox and chrome are heavy users of unix sockets. I remember
> that we already added something in Xorg to bump the buffer size.
> A quick check tells me that most unix sockets have sb_hiwat set to 4096.

OK, so let's crank it to 1G, and see what happens when people run chrome.

NOT.

Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Theo de Raadt-2
In reply to this post by Claudio Jeker
Not without further measurement of all applications.

Imagine if this gets commited, and causes kernel memory shortage
and deadlock.

Shall we all then point fingers at you for experimenting live?
I know I will.

Claudio Jeker <[hidden email]> wrote:

> I noticed that by default the send and recv socket buffers for
> socketpair(2) is rather low (4k IIRC). The result is a fairly inefficent
> write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
> SO_RCVBUF seems to help increase the data sent and received per syscall.
>
> Another option would be to make the default socketbuffer watermarks for
> socketpair(2) a bit less limited. Then all imsg users would benefit at the
> same time.
> --
> :wq Claudio
>
> Index: bgpd.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/bgpd/bgpd.c,v
> retrieving revision 1.219
> diff -u -p -r1.219 bgpd.c
> --- bgpd.c 29 May 2019 08:48:00 -0000 1.219
> +++ bgpd.c 4 Jun 2019 09:47:18 -0000
> @@ -46,6 +46,7 @@ int send_filterset(struct imsgbuf *, st
>  int reconfigure(char *, struct bgpd_config *);
>  int dispatch_imsg(struct imsgbuf *, int, struct bgpd_config *);
>  int control_setup(struct bgpd_config *);
> +static void getsockpair(int [2]);
>  int imsg_send_sockets(struct imsgbuf *, struct imsgbuf *);
>  
>  int cflags;
> @@ -203,12 +204,8 @@ main(int argc, char *argv[])
>  
>   log_info("startup");
>  
> - if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
> -    PF_UNSPEC, pipe_m2s) == -1)
> - fatal("socketpair");
> - if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
> -    PF_UNSPEC, pipe_m2r) == -1)
> - fatal("socketpair");
> + getsockpair(pipe_m2s);
> + getsockpair(pipe_m2r);
>  
>   /* fork children */
>   rde_pid = start_child(PROC_RDE, saved_argv0, pipe_m2r[1], debug,
> @@ -1073,18 +1070,54 @@ handle_pollfd(struct pollfd *pfd, struct
>   return (0);
>  }
>  
> +static void
> +getsockpair(int pipe[2])
> +{
> + int bsize, i;
> +
> + if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
> +    PF_UNSPEC, pipe) == -1)
> + fatal("socketpair");
> +
> + /* increase socketpair buffers */
> + for (i = 0; i < 2; i++) {
> + for (bsize = MAX_SOCK_BUF; bsize >= 16 * 1024; bsize /= 2) {
> + if (setsockopt(pipe[i], SOL_SOCKET, SO_RCVBUF,
> +    &bsize, sizeof(bsize)) == -1) {
> + if (errno != ENOBUFS)
> + fatal("setsockopt(SO_RCVBUF, %d)",
> +    bsize);
> + continue;
> + }
> + break;
> + }
> + if (bsize != MAX_SOCK_BUF)
> + log_warn("non optimal SO_RCVBUF size of %d", bsize);
> + }
> + for (i = 0; i < 2; i++) {
> + for (bsize = MAX_SOCK_BUF; bsize >= 16 * 1024; bsize /= 2) {
> + if (setsockopt(pipe[i], SOL_SOCKET, SO_SNDBUF,
> +    &bsize, sizeof(bsize)) == -1) {
> + if (errno != ENOBUFS)
> + fatal("setsockopt(SO_SNDBUF, %d)",
> +    bsize);
> + continue;
> + }
> + break;
> + }
> + if (bsize != MAX_SOCK_BUF)
> + log_warn("non optimal SO_SNDBUF size of %d", bsize);
> + }
> +}
> +
>  int
>  imsg_send_sockets(struct imsgbuf *se, struct imsgbuf *rde)
>  {
>   int pipe_s2r[2];
>   int pipe_s2r_ctl[2];
>  
> - if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
> -     PF_UNSPEC, pipe_s2r) == -1)
> - return (-1);
> - if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
> -     PF_UNSPEC, pipe_s2r_ctl) == -1)
> - return (-1);
> + getsockpair(pipe_s2r);
> + getsockpair(pipe_s2r_ctl);
>  
>   if (imsg_compose(se, IMSG_SOCKET_CONN, 0, 0, pipe_s2r[0],
>      NULL, 0) == -1)
> Index: bgpd.h
> ===================================================================
> RCS file: /cvs/src/usr.sbin/bgpd/bgpd.h,v
> retrieving revision 1.386
> diff -u -p -r1.386 bgpd.h
> --- bgpd.h 17 Jun 2019 13:35:42 -0000 1.386
> +++ bgpd.h 17 Jun 2019 13:56:15 -0000
> @@ -49,7 +49,7 @@
>  #define MIN_HOLDTIME 3
>  #define READ_BUF_SIZE 65535
>  #define RT_BUF_SIZE 16384
> -#define MAX_RTSOCK_BUF (2 * 1024 * 1024)
> +#define MAX_SOCK_BUF (2 * 1024 * 1024)
>  #define MAX_COMM_MATCH 3
>  
>  #define BGPD_OPT_VERBOSE 0x0001
> Index: kroute.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/bgpd/kroute.c,v
> retrieving revision 1.236
> diff -u -p -r1.236 kroute.c
> --- kroute.c 6 May 2019 09:49:26 -0000 1.236
> +++ kroute.c 3 Jun 2019 20:16:59 -0000
> @@ -236,7 +236,7 @@ kr_init(void)
>      &default_rcvbuf, &optlen) == -1)
>   log_warn("%s: getsockopt SOL_SOCKET SO_RCVBUF", __func__);
>   else
> - for (rcvbuf = MAX_RTSOCK_BUF;
> + for (rcvbuf = MAX_SOCK_BUF;
>      rcvbuf > default_rcvbuf &&
>      setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
>      &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
>

Reply | Threaded
Open this post in threaded view
|

Re: bgpd: pimp imsg pipes

Claudio Jeker
In reply to this post by Claudio Jeker
On Mon, Jun 17, 2019 at 09:43:00PM +0200, Claudio Jeker wrote:
> I noticed that by default the send and recv socket buffers for
> socketpair(2) is rather low (4k IIRC). The result is a fairly inefficent
> write/read behaviour on the imsg sockets. Increasing SO_SNDBUF and
> SO_RCVBUF seems to help increase the data sent and received per syscall.
>
> Another option would be to make the default socketbuffer watermarks for
> socketpair(2) a bit less limited. Then all imsg users would benefit at the
> same time.

Here is an updated version of this diff. It does not blow up the socket
buffer to the maximum size possible but instead uses 4 times the
READ_BUF_SIZE (or 256kB). This seems to be enough to make both the
sender and receiver run as efficiently as possible.

Additionally tune the session message low and high values. msgbuf_write()
can send up to 1000 messages in one go. So a low water mark of 50 is too
low to be efficient.  In my testing this makes bgpd a bit quicker at
sending updates out.
--
:wq Claudio

Index: bgpd.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/bgpd.c,v
retrieving revision 1.219
diff -u -p -r1.219 bgpd.c
--- bgpd.c 29 May 2019 08:48:00 -0000 1.219
+++ bgpd.c 10 Jul 2019 19:36:51 -0000
@@ -46,6 +46,7 @@ int send_filterset(struct imsgbuf *, st
 int reconfigure(char *, struct bgpd_config *);
 int dispatch_imsg(struct imsgbuf *, int, struct bgpd_config *);
 int control_setup(struct bgpd_config *);
+static void getsockpair(int [2]);
 int imsg_send_sockets(struct imsgbuf *, struct imsgbuf *);
 
 int cflags;
@@ -203,12 +204,8 @@ main(int argc, char *argv[])
 
  log_info("startup");
 
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-    PF_UNSPEC, pipe_m2s) == -1)
- fatal("socketpair");
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-    PF_UNSPEC, pipe_m2r) == -1)
- fatal("socketpair");
+ getsockpair(pipe_m2s);
+ getsockpair(pipe_m2r);
 
  /* fork children */
  rde_pid = start_child(PROC_RDE, saved_argv0, pipe_m2r[1], debug,
@@ -1073,18 +1070,51 @@ handle_pollfd(struct pollfd *pfd, struct
  return (0);
 }
 
+static void
+getsockpair(int pipe[2])
+{
+ int bsize, i;
+
+ if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
+    PF_UNSPEC, pipe) == -1)
+ fatal("socketpair");
+
+ for (i = 0; i < 2; i++) {
+ for (bsize = MAX_SOCK_BUF; bsize >= 16 * 1024; bsize /= 2) {
+ if (setsockopt(pipe[i], SOL_SOCKET, SO_RCVBUF,
+    &bsize, sizeof(bsize)) == -1) {
+ if (errno != ENOBUFS)
+ fatal("setsockopt(SO_RCVBUF, %d)",
+    bsize);
+ log_warn("setsockopt(SO_RCVBUF, %d)", bsize);
+ continue;
+ }
+ break;
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ for (bsize = MAX_SOCK_BUF; bsize >= 16 * 1024; bsize /= 2) {
+ if (setsockopt(pipe[i], SOL_SOCKET, SO_SNDBUF,
+    &bsize, sizeof(bsize)) == -1) {
+ if (errno != ENOBUFS)
+ fatal("setsockopt(SO_SNDBUF, %d)",
+    bsize);
+ log_warn("setsockopt(SO_SNDBUF, %d)", bsize);
+ continue;
+ }
+ break;
+ }
+ }
+}
+
 int
 imsg_send_sockets(struct imsgbuf *se, struct imsgbuf *rde)
 {
  int pipe_s2r[2];
  int pipe_s2r_ctl[2];
 
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-     PF_UNSPEC, pipe_s2r) == -1)
- return (-1);
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK,
-     PF_UNSPEC, pipe_s2r_ctl) == -1)
- return (-1);
+ getsockpair(pipe_s2r);
+ getsockpair(pipe_s2r_ctl);
 
  if (imsg_compose(se, IMSG_SOCKET_CONN, 0, 0, pipe_s2r[0],
     NULL, 0) == -1)
Index: bgpd.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/bgpd.h,v
retrieving revision 1.388
diff -u -p -r1.388 bgpd.h
--- bgpd.h 22 Jun 2019 05:36:40 -0000 1.388
+++ bgpd.h 10 Jul 2019 19:36:02 -0000
@@ -48,6 +48,7 @@
 #define MAX_PKTSIZE 4096
 #define MIN_HOLDTIME 3
 #define READ_BUF_SIZE 65535
+#define MAX_SOCK_BUF (4 * READ_BUF_SIZE)
 #define RT_BUF_SIZE 16384
 #define MAX_RTSOCK_BUF (2 * 1024 * 1024)
 #define MAX_COMM_MATCH 3
@@ -110,8 +111,8 @@
  * IMSG_XON message will be sent and the RDE will produce more messages again.
  */
 #define RDE_RUNNER_ROUNDS 100
-#define SESS_MSG_HIGH_MARK 300
-#define SESS_MSG_LOW_MARK 50
+#define SESS_MSG_HIGH_MARK 2000
+#define SESS_MSG_LOW_MARK 500
 #define CTL_MSG_HIGH_MARK 500
 #define CTL_MSG_LOW_MARK 100