author	Jason A. Donenfeld <Jason@zx2c4.com>	2021-11-29 10:39:26 -0500
committer	Jakub Kicinski <kuba@kernel.org>	2021-11-29 19:50:50 -0800
commit	0188f76c8d48dd77fad54172b9459c2e21020c7b (patch)
tree	e37e0ece841e7b3b68e4a4b6a9a7acd7cbeb2139 /drivers/net/wireguard/queueing.c
parent	7ba34d3e96f64a6ad7f164e9b930abf6e45874d4 (diff)
wireguard: receive: use ring buffer for incoming handshakes
Apparently the spinlock on incoming_handshake's skb_queue is highly contended, and a torrent of handshake or cookie packets can bring the data plane to its knees simply by virtue of enqueueing the handshake packets to be processed asynchronously. So, switch this queue to a ring buffer in order to reduce lock contention. This alleviates the problem somewhat, though it still isn't perfect, so future patches will have to improve it further. It does, however, keep the data plane from being crippled entirely.

Reported-by: Streun Fabio <fstreun@student.ethz.ch>
Reported-by: Joel Wanner <joel.wanner@inf.ethz.ch>
Fixes: a8f1bc7bdea3 ("net: WireGuard secure network tunnel")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
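To illustrate the pattern the message describes, here is a minimal sketch of ring-buffer enqueue/dequeue using the kernel's <linux/ptr_ring.h> API. The struct and function names below are hypothetical, not the actual WireGuard receive path; the point is that a ptr_ring has a fixed capacity and separate producer/consumer spinlocks, so a flood of handshake packets gets dropped at a full ring instead of serializing everything behind a single skb_queue lock.

/* Minimal sketch only: hypothetical names, not the actual driver code. */
#include <linux/gfp.h>
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>

struct handshake_ring {
	struct ptr_ring ring;
};

static int handshake_ring_init(struct handshake_ring *q, unsigned int len)
{
	/* Fixed-capacity ring with separate producer and consumer spinlocks. */
	return ptr_ring_init(&q->ring, len, GFP_KERNEL);
}

static void handshake_ring_enqueue(struct handshake_ring *q, struct sk_buff *skb)
{
	/* If the ring is full, drop the handshake rather than stall the data plane. */
	if (ptr_ring_produce_bh(&q->ring, skb))
		kfree_skb(skb);
}

static struct sk_buff *handshake_ring_dequeue(struct handshake_ring *q)
{
	/* Returns NULL when the ring is empty. */
	return ptr_ring_consume_bh(&q->ring);
}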
Diffstat (limited to 'drivers/net/wireguard/queueing.c')
-rw-r--r--	drivers/net/wireguard/queueing.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 48e7b98..1de413b 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
 	free_percpu(queue->worker);
-	WARN_ON(!__ptr_ring_empty(&queue->ring));
-	ptr_ring_cleanup(&queue->ring, NULL);
+	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+	ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
 }
 
 #define NEXT(skb) ((skb)->prev)
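The new bool purge argument changes teardown behavior: a caller that may free the queue while handshake skbs are still sitting in the ring passes true, and ptr_ring_cleanup() then runs kfree_skb() on each leftover entry; a caller that drains the queue beforehand keeps passing false and retains the WARN_ON() check. A hypothetical caller might look like the following sketch (the struct and field names are illustrative, not necessarily the real driver's teardown path):

/* Hypothetical teardown sketch, assuming a device that owns a handshake
 * queue (which may still hold packets) and an already-drained crypt queue.
 * The names below are illustrative, not guaranteed to match the driver. */
struct example_device {
	struct crypt_queue handshake_queue;
	struct crypt_queue decrypt_queue;
};

static void example_device_teardown(struct example_device *dev)
{
	/* Handshake packets may still be queued; purge frees them via kfree_skb(). */
	wg_packet_queue_free(&dev->handshake_queue, true);

	/* This queue was drained earlier, so keep the stricter WARN_ON() check. */
	wg_packet_queue_free(&dev->decrypt_queue, false);
}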