author     David S. Miller <davem@davemloft.net>  2020-02-16 19:21:56 -0800
committer  David S. Miller <davem@davemloft.net>  2020-02-16 19:21:56 -0800
commit     105ba8ae51e0178462b4cdf2f82c6e98fac19c04 (patch)
tree       45c25559bb0f029fdf9124ce469801077719a187
parent     95439795d896dc0b07099a27429c14711eb57abb (diff)
parent     5c80736eb84941b393338b8af1e032a829d78654 (diff)
Merge branch 'wireguard-fixes'
Jason A. Donenfeld says:

====================
wireguard fixes for 5.6-rc2

Here are four fixes for wireguard collected since rc1:

1) Some small cleanups to the test suite to help massively parallel builds.

2) A change in how we reset our load calculation to avoid a more expensive
   comparison, suggested by Matt Dunwoodie.

3) I've been loading more and more of wireguard's surface into syzkaller,
   trying to get our coverage as complete as possible, leading in this case
   to a fix for mtu=0 devices.

4) A removal of superfluous code, pointed out by Eric Dumazet.

v2 fixes a logical problem in the patch for (3) pointed out by Eric Dumazet.
v3 replaces some non-obvious bitmath in (3) with a more obvious expression,
and adds patch (4).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/wireguard/device.c   |  7
-rw-r--r--  drivers/net/wireguard/receive.c  |  7
-rw-r--r--  drivers/net/wireguard/send.c     | 16
-rw-r--r--  drivers/net/wireguard/socket.c   |  1
4 files changed, 20 insertions, 11 deletions
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 43db442..cdc9696 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -258,6 +258,8 @@ static void wg_setup(struct net_device *dev)
         enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                     NETIF_F_SG | NETIF_F_GSO |
                                     NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
+        const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
+                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
 
         dev->netdev_ops = &netdev_ops;
         dev->hard_header_len = 0;
@@ -271,9 +273,8 @@ static void wg_setup(struct net_device *dev)
         dev->features |= WG_NETDEV_FEATURES;
         dev->hw_features |= WG_NETDEV_FEATURES;
         dev->hw_enc_features |= WG_NETDEV_FEATURES;
-        dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
-                   sizeof(struct udphdr) -
-                   max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
+        dev->mtu = ETH_DATA_LEN - overhead;
+        dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
 
         SET_NETDEV_DEVTYPE(dev, &device_type);
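A side note on the arithmetic now named "overhead": with the usual WireGuard
constants (assumed below rather than taken from this diff: MESSAGE_MINIMUM_LENGTH
of 32 bytes, i.e. a 16-byte data-message header plus a 16-byte Poly1305 tag, and
MESSAGE_PADDING_MULTIPLE of 16), the overhead is 32 + 8 + 40 = 80 bytes over IPv6,
which is where the familiar 1420-byte default MTU comes from. A minimal userspace
sketch of the same computation:

    /* Standalone sketch of the overhead/MTU arithmetic above. The constant
     * values are assumptions mirroring wireguard's messages.h and the usual
     * kernel header sizes; they are not part of this patch.
     */
    #include <limits.h>
    #include <stdio.h>

    #define ETH_DATA_LEN             1500 /* classic Ethernet payload size */
    #define MESSAGE_MINIMUM_LENGTH     32 /* 16-byte data header + 16-byte tag */
    #define MESSAGE_PADDING_MULTIPLE   16
    #define UDP_HDR_LEN                 8
    #define IPV6_HDR_LEN               40 /* bigger than the 20-byte IPv4 header */

    int main(void)
    {
            const int overhead = MESSAGE_MINIMUM_LENGTH + UDP_HDR_LEN + IPV6_HDR_LEN;
            /* round_down(INT_MAX, 16) is INT_MAX with the low 4 bits cleared */
            const int max_mtu = (INT_MAX & ~(MESSAGE_PADDING_MULTIPLE - 1)) - overhead;

            printf("overhead    = %d\n", overhead);                /* 80 */
            printf("default mtu = %d\n", ETH_DATA_LEN - overhead); /* 1420 */
            printf("max mtu     = %d\n", max_mtu);                 /* 2147483552 */
            return 0;
    }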
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 9c6bab9..4a15389 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -118,10 +118,13 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
 
         under_load = skb_queue_len(&wg->incoming_handshakes) >=
                      MAX_QUEUED_INCOMING_HANDSHAKES / 8;
-        if (under_load)
+        if (under_load) {
                 last_under_load = ktime_get_coarse_boottime_ns();
-        else if (last_under_load)
+        } else if (last_under_load) {
                 under_load = !wg_birthdate_has_expired(last_under_load, 1);
+                if (!under_load)
+                        last_under_load = 0;
+        }
         mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
                                               under_load);
         if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
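What the new braces buy: while the handshake queue is busy, the timestamp keeps
getting refreshed; once things have been quiet for one second, under-load mode is
left and, with this change, last_under_load is zeroed so the cheap
!last_under_load test short-circuits the clock comparison on later packets. A
rough standalone sketch of that pattern, where now_ns() and queue_is_busy() are
hypothetical stand-ins for the kernel helpers:

    /* Illustrative sketch only; now_ns() and queue_is_busy() stand in for
     * ktime_get_coarse_boottime_ns() and the handshake-queue length check.
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    extern uint64_t now_ns(void);    /* coarse monotonic clock, in nanoseconds */
    extern bool queue_is_busy(void); /* e.g. handshake queue at least 1/8 full */

    static bool check_under_load(void)
    {
            static uint64_t last_under_load; /* 0 means "not recently under load" */
            bool under_load = queue_is_busy();

            if (under_load) {
                    last_under_load = now_ns();
            } else if (last_under_load) {
                    /* Stay in under-load mode for a one-second grace period. */
                    under_load = now_ns() - last_under_load < NSEC_PER_SEC;
                    if (!under_load)
                            last_under_load = 0; /* skip the clock math next time */
            }
            return under_load;
    }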
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index c132605..7348c10 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_peer *peer)
 
 static unsigned int calculate_skb_padding(struct sk_buff *skb)
 {
+        unsigned int padded_size, last_unit = skb->len;
+
+        if (unlikely(!PACKET_CB(skb)->mtu))
+                return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;
+
         /* We do this modulo business with the MTU, just in case the networking
          * layer gives us a packet that's bigger than the MTU. In that case, we
          * wouldn't want the final subtraction to overflow in the case of the
-         * padded_size being clamped.
+         * padded_size being clamped. Fortunately, that's very rarely the case,
+         * so we optimize for that not happening.
          */
-        unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
-        unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+        if (unlikely(last_unit > PACKET_CB(skb)->mtu))
+                last_unit %= PACKET_CB(skb)->mtu;
 
-        if (padded_size > PACKET_CB(skb)->mtu)
-                padded_size = PACKET_CB(skb)->mtu;
+        padded_size = min(PACKET_CB(skb)->mtu,
+                          ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
         return padded_size - last_unit;
 }
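A userspace sketch of the resulting padding rule, with MESSAGE_PADDING_MULTIPLE
assumed to be 16 and PACKET_CB(skb)->mtu modeled as a plain parameter: plaintext
is rounded up to a 16-byte boundary and clamped to the MTU, while an mtu of 0 now
simply rounds up without clamping instead of feeding the old expression a modulo
by zero:

    /* Sketch of the padding rule above; constants and helpers are assumptions
     * (MESSAGE_PADDING_MULTIPLE = 16, ALIGN modeled for a power-of-two multiple).
     */
    #include <assert.h>

    #define MESSAGE_PADDING_MULTIPLE 16
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static unsigned int calculate_padding(unsigned int len, unsigned int mtu)
    {
            unsigned int padded_size, last_unit = len;

            if (!mtu) /* mtu==0 devices: round up, never clamp */
                    return ALIGN_UP(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;

            if (last_unit > mtu) /* oversized packet handed down by the stack */
                    last_unit %= mtu;

            padded_size = ALIGN_UP(last_unit, MESSAGE_PADDING_MULTIPLE);
            if (padded_size > mtu) /* never pad beyond the tunnel MTU */
                    padded_size = mtu;
            return padded_size - last_unit;
    }

    int main(void)
    {
            assert(calculate_padding(1000, 1420) == 8);  /* 1000 rounds up to 1008 */
            assert(calculate_padding(1419, 1420) == 1);  /* clamped to the MTU */
            assert(calculate_padding(100, 0) == 12);     /* previously a div by zero */
            return 0;
    }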
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 262f3b5..b0d6541 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -432,7 +432,6 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
                 wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
         mutex_unlock(&wg->socket_update_lock);
         synchronize_rcu();
-        synchronize_net();
         sock_free(old4);
         sock_free(old6);
 }
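For context on why the removed call was superfluous: at the time of this merge,
synchronize_net() is only a thin wrapper that ends up in synchronize_rcu()
(taking the expedited variant when the RTNL lock is held), so calling it right
after synchronize_rcu() waits out essentially the same grace period twice. A
condensed paraphrase, from memory of net/core/dev.c in this era and not part of
this diff:

    /* Paraphrase of synchronize_net() circa v5.6, shown only to illustrate
     * why the back-to-back call removed above was redundant.
     */
    void synchronize_net(void)
    {
            might_sleep();
            if (rtnl_is_locked())
                    synchronize_rcu_expedited();
            else
                    synchronize_rcu();
    }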