@@ -351,15 +351,39 @@ static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
ovpn_peer_put(peer);
}
+static struct sk_buff *ovpn_skb_list_clone(struct sk_buff *skb)
+{
+ struct sk_buff *copy, *curr, *next, *head = NULL, *prev = NULL;
+
+ skb_list_walk_safe(skb, curr, next) {
+ copy = skb_clone(curr, GFP_ATOMIC);
+ if (unlikely(!copy)) {
+ kfree_skb_list(head);
+ return NULL;
+ }
+
+ if (unlikely(!head))
+ head = copy;
+ else
+ prev->next = copy;
+
+ prev = copy;
+ }
+
+ return head;
+}
+
/* Send user data to the network
*/
netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ovpn_priv *ovpn = netdev_priv(dev);
- struct sk_buff *segments, *curr, *next;
+ struct sk_buff *segments, *curr, *next, *to_send;
struct sk_buff_head skb_list;
- unsigned int tx_bytes = 0;
+ struct llist_head mcast_list;
+ struct llist_node *node, *n;
struct ovpn_peer *peer;
+ unsigned int tx_bytes = 0;
__be16 proto;
int ret;
@@ -372,8 +396,9 @@ netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop_no_peer;
/* retrieve peer serving the destination IP of this packet */
- peer = ovpn_peer_get_by_dst(ovpn, skb);
- if (unlikely(!peer)) {
+ init_llist_head(&mcast_list);
+ ovpn_peer_list_get_by_dst(ovpn, skb, &mcast_list);
+ if (unlikely(llist_empty(&mcast_list))) {
switch (skb->protocol) {
case htons(ETH_P_IP):
net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
@@ -427,18 +452,34 @@ netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
* incremented the counter for each failure in the loop
*/
if (unlikely(skb_queue_empty(&skb_list))) {
- ovpn_peer_put(peer);
+ llist_for_each_safe(node, n, mcast_list.first) {
+ peer = llist_entry(node, struct ovpn_peer, mcast_entry);
+ ovpn_peer_put(peer);
+ }
return NETDEV_TX_OK;
}
skb_list.prev->next = NULL;
- ovpn_peer_stats_increment_tx(&peer->vpn_stats, tx_bytes);
- ovpn_send(ovpn, skb_list.next, peer);
+ llist_for_each_safe(node, n, mcast_list.first) {
+ peer = llist_entry(node, struct ovpn_peer, mcast_entry);
+
+ to_send = n ? ovpn_skb_list_clone(skb_list.next) : skb_list.next;
+ if (likely(to_send)) {
+ ovpn_peer_stats_increment_tx(&peer->vpn_stats, tx_bytes);
+ ovpn_send(ovpn, to_send, peer);
+ } else {
+ dev_dstats_tx_dropped(ovpn->dev);
+ ovpn_peer_put(peer);
+ }
+ }
return NETDEV_TX_OK;
drop:
- ovpn_peer_put(peer);
+ llist_for_each_safe(node, n, mcast_list.first) {
+ peer = llist_entry(node, struct ovpn_peer, mcast_entry);
+ ovpn_peer_put(peer);
+ }
drop_no_peer:
dev_dstats_tx_dropped(ovpn->dev);
skb_tx_error(skb);
@@ -155,7 +155,7 @@ static void ovpn_setup(struct net_device *dev)
dev->max_mtu = IP_MAX_MTU - OVPN_HEAD_ROOM;
dev->type = ARPHRD_NONE;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST | IFF_BROADCAST;
dev->priv_flags |= IFF_NO_QUEUE;
/* when routing packets to a LAN behind a client, we rely on the
* route entry that originally brought the packet into ovpn, so
@@ -718,23 +718,49 @@ static void ovpn_peer_remove(struct ovpn_peer *peer,
llist_add(&peer->release_entry, release_list);
}
+static void ovpn_peer_list_get_all(struct ovpn_priv *ovpn,
+ struct llist_head *list)
+{
+ struct ovpn_peer *peer;
+ int bkt;
+
+ rcu_read_lock();
+ hash_for_each_rcu(ovpn->peers->by_id, bkt, peer, hash_entry_id) {
+ if (ovpn_peer_hold(peer))
+ llist_add(&peer->mcast_entry, list);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * TODO: at the moment the list contains all the peers; once
+ * IGMP snooping is implemented we want to select only the peers
+ * subscribed to the specific multicast group.
+ */
+static void ovpn_peer_list_get_by_mcast_group(struct ovpn_priv *ovpn,
+ struct llist_head *list)
+{
+ ovpn_peer_list_get_all(ovpn, list);
+}
+
/**
- * ovpn_peer_get_by_dst - Lookup peer to send skb to
+ * ovpn_peer_list_get_by_dst - Lookup peers to send skb to
* @ovpn: the private data representing the current VPN session
* @skb: the skb to extract the destination address from
+ * @list: the head of the list to fill with the target peers
*
- * This function takes a tunnel packet and looks up the peer to send it to
- * after encapsulation. The skb is expected to be the in-tunnel packet, without
- * any OpenVPN related header.
+ * This function takes a tunnel packet and looks up the peers to send it to
+ * after encapsulation and adds them to `list'. The skb is expected to be the
+ * in-tunnel packet, without any OpenVPN related header.
*
* Assume that the IP header is accessible in the skb data.
*
- * Return: the peer if found or NULL otherwise.
*/
-struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
- struct sk_buff *skb)
+void ovpn_peer_list_get_by_dst(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct llist_head *list)
{
struct ovpn_peer *peer = NULL;
+ unsigned int addr_type;
struct in6_addr addr6;
__be32 addr4;
@@ -744,29 +770,45 @@ struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
if (ovpn->mode == OVPN_MODE_P2P) {
rcu_read_lock();
peer = rcu_dereference(ovpn->peer);
- if (unlikely(peer && !ovpn_peer_hold(peer)))
- peer = NULL;
+ if (likely(peer && ovpn_peer_hold(peer)))
+ llist_add(&peer->mcast_entry, list);
rcu_read_unlock();
- return peer;
+ return;
}
- rcu_read_lock();
switch (skb->protocol) {
case htons(ETH_P_IP):
addr4 = ovpn_nexthop_from_skb4(skb);
+ rcu_read_lock();
peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
- break;
+
+ if (peer)
+ break;
+
+ rcu_read_unlock();
+ addr_type = inet_dev_addr_type(dev_net(ovpn->dev), ovpn->dev, addr4);
+ if (addr_type == RTN_MULTICAST)
+ ovpn_peer_list_get_by_mcast_group(ovpn, list);
+ else if (addr_type == RTN_BROADCAST)
+ ovpn_peer_list_get_all(ovpn, list);
+ return;
case htons(ETH_P_IPV6):
addr6 = ovpn_nexthop_from_skb6(skb);
+ rcu_read_lock();
peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
- break;
+
+ if (peer)
+ break;
+
+ rcu_read_unlock();
+ if (ipv6_addr_is_multicast(&addr6))
+ ovpn_peer_list_get_by_mcast_group(ovpn, list);
+ return;
}
- if (unlikely(peer && !ovpn_peer_hold(peer)))
- peer = NULL;
+ if (likely(peer && ovpn_peer_hold(peer)))
+ llist_add(&peer->mcast_entry, list);
rcu_read_unlock();
-
- return peer;
}
/**
@@ -113,6 +113,7 @@ struct ovpn_peer {
struct kref refcount;
struct rcu_head rcu;
struct llist_node release_entry;
+ struct llist_node mcast_entry;
struct work_struct keepalive_work;
};
@@ -148,8 +149,8 @@ void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sock,
struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
struct sk_buff *skb);
struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id);
-struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
- struct sk_buff *skb);
+void ovpn_peer_list_get_by_dst(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct llist_head *list);
void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer);
bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
struct ovpn_peer *peer);
The ovpn DCO driver currently drops all multicast/broadcast packets because it does not set IFF_MULTICAST and IFF_BROADCAST on the netdevice and always performs a unicast peer lookup in ovpn_net_xmit(). This prevents multicast routing daemons such as smcroute from using an ovpn interface as a multicast VIF and makes it impossible to forward multicast and broadcast traffic to VPN clients. Add the minimal infrastructure needed to get multicast/broadcast working: - Set IFF_MULTICAST and IFF_BROADCAST in ovpn_setup(). - Detect multicast and broadcast destinations in ovpn_peer_list_get_by_dst() and create a list with the target peers. - Introduce ovpn_skb_list_clone() to clone GSO segment lists and replicate the packet to every connected peer in ovpn_net_xmit(). For now multicast traffic is flooded to all peers. A future enhancement will replace the flood with a subscription table driven by IGMP snooping. Signed-off-by: Marco Baffo <marco@mandelbit.com> --- drivers/net/ovpn/io.c | 57 ++++++++++++++++++++++++++----- drivers/net/ovpn/main.c | 2 +- drivers/net/ovpn/peer.c | 76 ++++++++++++++++++++++++++++++++--------- drivers/net/ovpn/peer.h | 5 +-- 4 files changed, 112 insertions(+), 28 deletions(-)