path: root/drivers/net/wireguard/queueing.h
author    Jakub Kicinski <kuba@kernel.org>  2021-02-23 15:54:08 -0800
committer Jakub Kicinski <kuba@kernel.org>  2021-02-23 15:59:35 -0800
commit    ea085e5da33496b0aa965481787dc56410e5eda7 (patch)
tree      9c09875378fc2a23dd0b8d4f13bad8b98ab9836d /drivers/net/wireguard/queueing.h
parent    1bb13d02e6fff32818ccde759efe88c70afd008e (diff)
parent    b955554b47aff90e56ba402dfce4122cb9b28a4e (diff)
Merge branch 'wireguard-fixes-for-5-12-rc1'
Jason Donenfeld says:

====================
wireguard fixes for 5.12-rc1

This series has a collection of fixes that have piled up for a little
while now, which I unfortunately didn't get a chance to send out earlier.

1) Remove unlikely() from IS_ERR(), since it's already implied.
2) Remove a bogus sparse annotation that hasn't been needed for years.
3) Addition of a test in the test suite for stressing parallel
   ndo_start_xmit.
4) Slight struct reordering in preparation for a subsequent fix.
5) If skb->protocol is bogus, we no longer attempt to send ICMP messages.
6) Massive memory usage fix, hit by larger deployments.
7) Fix a typo in the kconfig dependency logic.

(1) and (2) are tiny cleanups, and (3) is just a test, so if you're
trying to reduce churn, you could leave those out of backports. But (4),
(5), (6), and (7) fix real problems and should be applied to stable.
IMO, it's probably easiest to just apply them all to stable.
====================

Link: https://lore.kernel.org/r/20210222162549.3252778-1-Jason@zx2c4.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
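Fix (6) is the one visible in this file: the preallocated per-peer ptr_ring
buffers give way to an intrusive MPSC queue, struct prev_queue, so idle peers
no longer pin ring memory. The struct definition itself lands elsewhere in
queueing.h and is not shown in this hunk; as a rough sketch of the idea only
(the name and layout below are assumptions, not the series' actual
definition):

/* Hedged sketch: an intrusive MPSC queue threads its links through the
 * queued skbs themselves, so an empty queue costs a few pointers per peer
 * instead of a preallocated ring.
 */
struct prev_queue_sketch {
        struct sk_buff *head, *tail;    /* producer and consumer ends */
        struct sk_buff *peeked;         /* head held back by peek() */
        atomic_t count;                 /* bounds occupancy, replacing ring size */
};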
Diffstat (limited to 'drivers/net/wireguard/queueing.h')
-rw-r--r--  drivers/net/wireguard/queueing.h  |  45
1 file changed, 33 insertions(+), 12 deletions(-)
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index dfb674e..4ef2944 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -17,12 +17,13 @@ struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
+struct prev_queue;
struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+ unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
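The hunk above drops the old multicore flag from the queue init/free API,
since the multicore worker allocation is now unconditional. A hedged caller
sketch, assuming the encrypt-queue names used elsewhere in the driver
(wg->encrypt_queue, wg_packet_encrypt_worker, MAX_QUEUED_PACKETS):

/* Illustrative only: device bring-up against the new signature. */
static int device_queues_init_sketch(struct wg_device *wg)
{
        int ret = wg_packet_queue_init(&wg->encrypt_queue,
                                       wg_packet_encrypt_worker,
                                       MAX_QUEUED_PACKETS);
        if (ret < 0)
                return ret;
        /* ... initialize decrypt_queue the same way; unwind with
         * wg_packet_queue_free(&wg->encrypt_queue) on failure ...
         */
        return 0;
}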
@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next)
return cpu;
}
+void wg_prev_queue_init(struct prev_queue *queue);
+
+/* Multi producer */
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
+
+/* Single consumer */
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
+
+/* Single consumer */
+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
+{
+ if (queue->peeked)
+ return queue->peeked;
+ queue->peeked = wg_prev_queue_dequeue(queue);
+ return queue->peeked;
+}
+
+/* Single consumer */
+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+{
+ queue->peeked = NULL;
+}
+
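The peek/drop split lets the single consumer inspect the head packet without
committing to remove it: a packet stays peeked until crypto has moved its
state past UNCRYPTED. A minimal drain-loop sketch, assuming the enum
packet_state values and PACKET_CB() accessor defined elsewhere in this
header:

/* Hedged sketch of the single-consumer pattern this API enables. */
static void drain_sketch(struct prev_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = wg_prev_queue_peek(queue)) != NULL) {
                enum packet_state state =
                        atomic_read_acquire(&PACKET_CB(skb)->state);

                if (state == PACKET_STATE_UNCRYPTED)
                        break; /* crypto not done; leave it peeked for later */
                wg_prev_queue_drop_peeked(queue);
                /* ... transmit or free skb depending on CRYPTED vs. DEAD ... */
        }
}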
static inline int wg_queue_enqueue_per_device_and_peer(
- struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+ struct crypt_queue *device_queue, struct prev_queue *peer_queue,
struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
int cpu;
@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer(
/* We first queue this up for the peer ingestion, but the consumer
* will wait for the state to change to CRYPTED or DEAD before.
*/
- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
return -ENOSPC;
+
/* Then we queue it up in the device queue, which consumes the
* packet as soon as it can.
*/
@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
return 0;
}
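A hedged producer-side sketch of calling this helper, assuming the per-peer
tx_queue field and last_cpu round-robin hint that the rest of the series
wires up (neither appears in this hunk):

/* Illustrative caller; the field names are assumptions. */
static void send_sketch(struct wg_device *wg, struct wg_peer *peer,
                        struct sk_buff *skb)
{
        if (wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
                                                 &peer->tx_queue, skb,
                                                 wg->packet_crypt_wq,
                                                 &wg->encrypt_queue.last_cpu) < 0)
                kfree_skb(skb); /* -ENOSPC: one of the two queues was full */
}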
-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
- struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
atomic_set_release(&PACKET_CB(skb)->state, state);
- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
- peer->internal_id),
- peer->device->packet_crypt_wq, &queue->work);
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
+ peer->device->packet_crypt_wq, &peer->transmit_packet_work);
wg_peer_put(peer);
}
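Once a worker finishes the crypto, it marks the packet and kicks the peer's
serialized transmit work via this helper; a hedged sketch of that hand-off
(the worker name is illustrative):

/* Illustrative worker tail; PACKET_STATE_* values come from this header. */
static void encrypt_done_sketch(struct sk_buff *skb, bool success)
{
        wg_queue_enqueue_per_peer_tx(skb, success ? PACKET_STATE_CRYPTED
                                                  : PACKET_STATE_DEAD);
}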
-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.