diff --git a/src/framed.rs b/src/framed.rs
index 5d9d301..f940a0d 100644
--- a/src/framed.rs
+++ b/src/framed.rs
@@ -16,7 +16,13 @@ use crate::{
     codecs::NetlinkMessageCodec,
     sys::{AsyncSocket, SocketAddr},
 };
-use netlink_packet_core::{NetlinkDeserializable, NetlinkMessage, NetlinkSerializable};
+use netlink_packet_core::{
+    NetlinkDeserializable, NetlinkMessage, NetlinkSerializable, NetlinkHeader,
+    NLMSG_OVERRUN, NetlinkPayload,
+};
+
+/// Receive buffer overrun condition (`ENOBUFS`, 105 on Linux)
+const ENOBUFS: i32 = 105;
 
 pub struct NetlinkFramed<T, S, C> {
     socket: S,
@@ -63,6 +69,34 @@ where
 
             *in_addr = match ready!(socket.poll_recv_from(cx, reader)) {
                 Ok(addr) => addr,
+                // When receiving messages in multicast mode (i.e. we subscribed to
+                // notifications), the kernel will not wait for us to read datagrams before
+                // sending more. The receive buffer has a finite size, so once it is full
+                // (no more messages can fit in it), new messages are dropped and recv
+                // calls return `ENOBUFS`.
+                // This needs to be reported so that applications can resynchronize with
+                // the kernel state if necessary.
+                // We don't need to do anything special here:
+                // - the contents of the reader are still valid, because partial messages
+                //   never end up in it (the buffer is large enough)
+                // - the contents of the socket's internal buffer are still valid, because
+                //   the kernel never puts partial data in it
+                Err(e) if e.raw_os_error() == Some(ENOBUFS) => {
+                    warn!("netlink socket buffer full");
+                    return Poll::Ready(Some((
+                        NetlinkMessage {
+                            header: NetlinkHeader {
+                                length: std::mem::size_of::<NetlinkHeader>() as u32,
+                                message_type: NLMSG_OVERRUN,
+                                flags: 0,
+                                sequence_number: 0,
+                                port_number: 0,
+                            },
+                            payload: NetlinkPayload::Overrun(Vec::new()),
+                        },
+                        SocketAddr::new(0, 0),
+                    )));
+                }
                 Err(e) => {
                     error!("failed to read from netlink socket: {:?}", e);
                     return Poll::Ready(None);
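
Note: with this change, a full socket buffer no longer terminates the stream; `NetlinkFramed` instead yields a synthetic `NetlinkPayload::Overrun` message and leaves recovery to the caller. A minimal sketch of a consumer loop follows, assuming `framed` is a `NetlinkFramed` stream of `(NetlinkMessage<T>, SocketAddr)` items as produced by this code; `resync_with_kernel` and `handle_notification` are hypothetical application hooks, standing in for whatever dump/re-subscribe logic the application needs:

    use futures::StreamExt;
    use netlink_packet_core::NetlinkPayload;

    while let Some((message, _addr)) = framed.next().await {
        match message.payload {
            // The kernel dropped notifications while our buffer was full, so
            // any cached state may be stale: rebuild it from a fresh dump.
            NetlinkPayload::Overrun(_) => resync_with_kernel().await,
            // Everything else is an ordinary notification.
            payload => handle_notification(payload),
        }
    }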