diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
index 22c555dd962e..3e6c06b9a5ae 100644
--- a/drivers/net/ovpn/io.c
+++ b/drivers/net/ovpn/io.c
@@ -59,9 +59,6 @@ static bool ovpn_is_keepalive(struct sk_buff *skb)
  */
 static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
 {
-	unsigned int pkt_len;
-	int ret;
-
 	/*
 	 * GSO state from the transport layer is not valid for the tunnel/data
 	 * path. Reset all GSO fields to prevent any further GSO processing
@@ -89,20 +86,20 @@ static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
 	skb_reset_transport_header(skb);
 	skb_reset_inner_headers(skb);
 
-	/* cause packet to be "received" by the interface */
-	pkt_len = skb->len;
+	/* update RX stats with the size of decrypted packet */
+	ovpn_peer_stats_increment_rx(&peer->vpn_stats, skb->len);
 	/* we may get here in process context in case of TCP connections,
-	 * therefore we have to disable BHs to ensure gro_cells_receive()
-	 * and dev_dstats_rx_add() do not get corrupted or enter deadlock
+	 * therefore we have to disable BHs: both dev_dstats_rx_add()
+	 * and napi_gro_receive() require softirq-safe (BH disabled)
+	 * context
 	 */
 	local_bh_disable();
-	ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
-	if (likely(ret == NET_RX_SUCCESS)) {
-		/* update RX stats with the size of decrypted packet */
-		ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
-		dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
-	}
+	dev_dstats_rx_add(peer->ovpn->dev, skb->len);
+	/* NOTE(review): when invoked outside ovpn_peer_encap_poll()
+	 * (e.g. async crypto completion) confirm napi_gro_receive()
+	 * on this NAPI instance is safe
+	 */
+	napi_gro_receive(&peer->napi, skb);
 	local_bh_enable();
 }
 
 void ovpn_decrypt_post(void *data, int ret)
diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c
index 2e0420febda0..57b743cebc79 100644
--- a/drivers/net/ovpn/main.c
+++ b/drivers/net/ovpn/main.c
@@ -74,30 +74,15 @@ static int ovpn_mp_alloc(struct ovpn_priv *ovpn)
 static int ovpn_net_init(struct net_device *dev)
 {
 	struct ovpn_priv *ovpn = netdev_priv(dev);
-	int err = gro_cells_init(&ovpn->gro_cells, dev);
 
-	if (err < 0)
-		return err;
-
-	err = ovpn_mp_alloc(ovpn);
-	if (err < 0) {
-		gro_cells_destroy(&ovpn->gro_cells);
-		return err;
-	}
-
-	return 0;
-}
-
-static void ovpn_net_uninit(struct net_device *dev)
-{
-	struct ovpn_priv *ovpn = netdev_priv(dev);
-
-	gro_cells_destroy(&ovpn->gro_cells);
+	/* GRO is now handled by the per-peer NAPI context, so the only
+	 * remaining per-device init step is the multi-peer storage
+	 */
+	return ovpn_mp_alloc(ovpn);
 }
 
 static const struct net_device_ops ovpn_netdev_ops = {
 	.ndo_init		= ovpn_net_init,
-	.ndo_uninit		= ovpn_net_uninit,
 	.ndo_start_xmit		= ovpn_net_xmit,
 };
 
diff --git a/drivers/net/ovpn/ovpnpriv.h b/drivers/net/ovpn/ovpnpriv.h
index 5898f6adada7..703e90d1dafc 100644
--- a/drivers/net/ovpn/ovpnpriv.h
+++ b/drivers/net/ovpn/ovpnpriv.h
@@ -39,7 +39,6 @@ struct ovpn_peer_collection {
  * @lock: protect this object
  * @peers: data structures holding multi-peer references
  * @peer: in P2P mode, this is the only remote peer
- * @gro_cells: pointer to the Generic Receive Offload cell
  * @keepalive_work: struct used to schedule keepalive periodic job
  */
 struct ovpn_priv {
@@ -48,7 +47,6 @@ struct ovpn_priv {
 	spinlock_t lock; /* protect writing to the ovpn_priv object */
 	struct ovpn_peer_collection *peers;
 	struct ovpn_peer __rcu *peer;
-	struct gro_cells gro_cells;
 	struct delayed_work keepalive_work;
 };
 
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
index c02dfab51a6e..bb23dc4e4721 100644
--- a/drivers/net/ovpn/peer.c
+++ b/drivers/net/ovpn/peer.c
@@ -10,6 +10,7 @@
 #include <linux/skbuff.h>
 #include <linux/list.h>
 #include <linux/hashtable.h>
+#include <net/hotdata.h>
 #include <net/ip6_route.h>
 
 #include "ovpnpriv.h"
@@ -82,6 +83,68 @@ static void ovpn_peer_keepalive_send(struct work_struct *work)
 	local_bh_enable();
 }
 
+/**
+ * ovpn_enqueue_encap - queue an encapsulated packet for NAPI processing
+ * @peer: the peer the packet was received from
+ * @skb: the packet to enqueue
+ *
+ * Return: true if the packet was queued, false if the backlog limit was
+ *	   hit and the caller must free the skb
+ */
+bool ovpn_enqueue_encap(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+	/* mirror the global softnet backlog limit to bound memory usage */
+	if (skb_queue_len(&peer->encap_q) >=
+	    READ_ONCE(net_hotdata.max_backlog))
+		return false;
+
+	skb_queue_tail(&peer->encap_q, skb);
+	napi_schedule(&peer->napi);
+	return true;
+}
+
+/* NAPI poll function: decapsulate up to @budget queued packets */
+static int ovpn_peer_encap_poll(struct napi_struct *napi, int budget)
+{
+	struct ovpn_peer *peer = container_of(napi, struct ovpn_peer, napi);
+	struct sk_buff *skb;
+	int work = 0;
+
+	while (work < budget && (skb = skb_dequeue(&peer->encap_q))) {
+		ovpn_recv(peer, skb);
+		++work;
+	}
+
+	if (work < budget)
+		napi_complete_done(napi, work);
+
+	return work;
+}
+
+/* set up and enable the per-peer NAPI context; cannot fail */
+static void ovpn_peer_napi_init(struct ovpn_peer *peer, struct net_device *dev)
+{
+	skb_queue_head_init(&peer->encap_q);
+
+	/* this NAPI is fed from the transport layer, not a device IRQ,
+	 * therefore busy polling cannot make progress on it
+	 */
+	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
+	netif_napi_add(dev, &peer->napi, ovpn_peer_encap_poll);
+	napi_enable(&peer->napi);
+}
+
+static void ovpn_peer_napi_uninit(struct ovpn_peer *peer)
+{
+	napi_disable(&peer->napi);
+	netif_napi_del(&peer->napi);
+
+	/* NOTE(review): skbs still queued here may carry a peer reference
+	 * taken by the transport layer — confirm no refcount leak on purge
+	 */
+	__skb_queue_purge(&peer->encap_q);
+}
+
 /**
  * ovpn_peer_new - allocate and initialize a new peer object
  * @ovpn: the openvpn instance inside which the peer should be created
@@ -126,6 +189,8 @@ struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
 		return ERR_PTR(ret);
 	}
 
+	ovpn_peer_napi_init(peer, ovpn->dev);
+
 	netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);
 
 	return peer;
@@ -356,6 +421,7 @@ static void ovpn_peer_release_rcu(struct rcu_head *head)
  */
 void ovpn_peer_release(struct ovpn_peer *peer)
 {
+	ovpn_peer_napi_uninit(peer);
 	ovpn_crypto_state_release(&peer->crypto);
 	spin_lock_bh(&peer->lock);
 	ovpn_bind_reset(peer, NULL);
diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h
index 328401570cba..eb0ec9605ced 100644
--- a/drivers/net/ovpn/peer.h
+++ b/drivers/net/ovpn/peer.h
@@ -46,6 +46,8 @@
  * @crypto: the crypto configuration (ciphers, keys, etc..)
  * @dst_cache: cache for dst_entry used to send to peer
  * @bind: remote peer binding
+ * @encap_q: queue of encapsulated packets awaiting processing/decapsulation
+ * @napi: NAPI context for handling packet reception
  * @keepalive_interval: seconds after which a new keepalive should be sent
  * @keepalive_xmit_exp: future timestamp when next keepalive should be sent
  * @last_sent: timestamp of the last successfully sent packet
@@ -100,6 +102,8 @@ struct ovpn_peer {
 	struct ovpn_crypto_state crypto;
 	struct dst_cache dst_cache;
 	struct ovpn_bind __rcu *bind;
+	struct sk_buff_head encap_q;
+	struct napi_struct napi;
 	unsigned long keepalive_interval;
 	unsigned long keepalive_xmit_exp;
 	time64_t last_sent;
@@ -154,6 +158,8 @@ void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer);
 bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
 			    struct ovpn_peer *peer);
 
+bool ovpn_enqueue_encap(struct ovpn_peer *peer, struct sk_buff *skb);
+
 void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout);
 void ovpn_peer_keepalive_work(struct work_struct *work);
 
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 65054cc84be5..87f9d785f44c 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -145,13 +145,21 @@ static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
 	if (WARN_ON(!ovpn_peer_hold(peer)))
 		goto err_nopeer;
 
-	ovpn_recv(peer, skb);
+	if (unlikely(!ovpn_enqueue_encap(peer, skb))) {
+		/* the skb never reached the NAPI queue, therefore
+		 * ovpn_recv() will not release the reference taken above
+		 */
+		ovpn_peer_put(peer);
+		goto drop;
+	}
+
 	return;
 err:
 	/* take reference for deferred peer deletion. should never fail */
 	if (WARN_ON(!ovpn_peer_hold(peer)))
 		goto err_nopeer;
 	schedule_work(&peer->tcp.defer_del_work);
+drop:
 	dev_dstats_rx_dropped(peer->ovpn->dev);
 err_nopeer:
 	kfree_skb(skb);
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
index b5d1ca014732..8adff0b90430 100644
--- a/drivers/net/ovpn/udp.c
+++ b/drivers/net/ovpn/udp.c
@@ -121,7 +121,13 @@ static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 	/* pop off outer UDP header */
 	__skb_pull(skb, sizeof(struct udphdr));
-	ovpn_recv(peer, skb);
+
+	/* NOTE(review): on failure the drop path below must also release
+	 * the peer reference that ovpn_recv() would have consumed — confirm
+	 */
+	if (unlikely(!ovpn_enqueue_encap(peer, skb)))
+		goto drop;
+
 	return 0;
 
 drop: