@@ -201,7 +201,7 @@ void ovpn_decrypt_post(void *data, int ret)
skb = NULL;
drop:
if (unlikely(skb))
- dev_dstats_rx_dropped(peer->ovpn->dev);
+ ovpn_dev_dstats_rx_dropped(peer->ovpn->dev);
kfree_skb(skb);
drop_nocount:
if (likely(peer))
@@ -225,7 +225,7 @@ void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
netdev_name(peer->ovpn->dev), peer->id,
key_id);
- dev_dstats_rx_dropped(peer->ovpn->dev);
+ ovpn_dev_dstats_rx_dropped(peer->ovpn->dev);
kfree_skb(skb);
ovpn_peer_put(peer);
return;
@@ -301,7 +301,7 @@ void ovpn_encrypt_post(void *data, int ret)
rcu_read_unlock();
err:
if (unlikely(skb))
- dev_dstats_tx_dropped(peer->ovpn->dev);
+ ovpn_dev_dstats_tx_dropped(peer->ovpn->dev);
if (likely(peer))
ovpn_peer_put(peer);
if (likely(ks))
@@ -343,7 +343,7 @@ static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
*/
skb_list_walk_safe(skb, curr, next) {
if (unlikely(!ovpn_encrypt_one(peer, curr))) {
- dev_dstats_tx_dropped(ovpn->dev);
+ ovpn_dev_dstats_tx_dropped(ovpn->dev);
kfree_skb(curr);
}
}
@@ -414,7 +414,7 @@ netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!curr)) {
net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
netdev_name(dev));
- dev_dstats_tx_dropped(ovpn->dev);
+ ovpn_dev_dstats_tx_dropped(ovpn->dev);
continue;
}
@@ -440,7 +440,7 @@ netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
ovpn_peer_put(peer);
drop_no_peer:
- dev_dstats_tx_dropped(ovpn->dev);
+ ovpn_dev_dstats_tx_dropped(ovpn->dev);
skb_tx_error(skb);
kfree_skb_list(skb);
return NETDEV_TX_OK;
@@ -11,6 +11,8 @@
#ifndef _NET_OVPN_OVPNSTATS_H_
#define _NET_OVPN_OVPNSTATS_H_
+#include <linux/netdevice.h>
+
/* one stat */
struct ovpn_peer_stat {
atomic64_t bytes;
@@ -44,4 +46,18 @@ static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats,
ovpn_peer_stats_increment(&stats->tx, n);
}
+static inline void ovpn_dev_dstats_tx_dropped(struct net_device *dev)
+{
+ local_bh_disable();
+ dev_dstats_tx_dropped(dev);
+ local_bh_enable();
+}
+
+static inline void ovpn_dev_dstats_rx_dropped(struct net_device *dev)
+{
+ local_bh_disable();
+ dev_dstats_rx_dropped(dev);
+ local_bh_enable();
+}
+
#endif /* _NET_OVPN_OVPNSTATS_H_ */
@@ -152,7 +152,7 @@ static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
if (WARN_ON(!ovpn_peer_hold(peer)))
goto err_nopeer;
schedule_work(&peer->tcp.defer_del_work);
- dev_dstats_rx_dropped(peer->ovpn->dev);
+ ovpn_dev_dstats_rx_dropped(peer->ovpn->dev);
err_nopeer:
kfree_skb(skb);
}
@@ -298,9 +298,9 @@ static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk)
} while (peer->tcp.out_msg.len > 0);
if (!peer->tcp.out_msg.len) {
- preempt_disable();
+ local_bh_disable();
dev_dstats_tx_add(peer->ovpn->dev, skb->len);
- preempt_enable();
+ local_bh_enable();
}
kfree_skb(peer->tcp.out_msg.skb);
@@ -331,7 +331,7 @@ static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
ovpn_tcp_send_sock(peer, sk);
if (peer->tcp.out_msg.skb) {
- dev_dstats_tx_dropped(peer->ovpn->dev);
+ ovpn_dev_dstats_tx_dropped(peer->ovpn->dev);
kfree_skb(skb);
return;
}
@@ -353,7 +353,7 @@ void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
if (sock_owned_by_user(sk)) {
if (skb_queue_len(&peer->tcp.out_queue) >=
READ_ONCE(net_hotdata.max_backlog)) {
- dev_dstats_tx_dropped(peer->ovpn->dev);
+ ovpn_dev_dstats_tx_dropped(peer->ovpn->dev);
kfree_skb(skb);
goto unlock;
}
@@ -125,7 +125,7 @@ static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
return 0;
drop:
- dev_dstats_rx_dropped(ovpn->dev);
+ ovpn_dev_dstats_rx_dropped(ovpn->dev);
drop_noovpn:
kfree_skb(skb);
return 0;
ovpn updates dev->dstats from both process and softirq contexts. In
particular, TCP paths may run from socket callbacks, workqueues or
strparser work, while UDP receive and ovpn's ndo_start_xmit path may
update the same per-device dstats from BH context.

Add ovpn device drop-stat helpers that disable BHs around
dev_dstats_rx_dropped() and dev_dstats_tx_dropped(), and use them for
drop accounting.

The successful RX dev_dstats_rx_add() update is already covered by the
BH-disabled section around gro_cells_receive(). For the successful TCP
TX dev_dstats_tx_add() update, replace the existing preempt-disabled
section with a BH-disabled one.

Signed-off-by: Ralf Lici <ralf@mandelbit.com>
---
 drivers/net/ovpn/io.c    | 12 ++++++------
 drivers/net/ovpn/stats.h | 16 ++++++++++++++++
 drivers/net/ovpn/tcp.c   | 10 +++++-----
 drivers/net/ovpn/udp.c   |  2 +-
 4 files changed, 28 insertions(+), 12 deletions(-)