// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPV4 GSO/GRO offload support
* Linux INET implementation
*
* UDPv4 GSO support
*/
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
#include <net/udp_tunnel.h>
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
/*
 * Dummy GRO tunnel callback; exists mainly to avoid a dangling/NULL
 * target for the UDP tunnel static call.
 */
static struct sk_buff *dummy_gro_rcv(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
/* Signature of a tunnel-specific GRO receive hook (udp_sock->gro_receive). */
typedef struct sk_buff *(*udp_tunnel_gro_rcv_t)(struct sock *sk,
						struct list_head *head,
						struct sk_buff *skb);

/* One distinct registered tunnel GRO callback, refcounted per user socket. */
struct udp_tunnel_type_entry {
	udp_tunnel_gro_rcv_t gro_receive;
	refcount_t count;
};

/*
 * Upper bound on the number of distinct tunnel GRO callbacks that can be
 * registered at once; each enabled tunnel config contributes its budget.
 * NOTE(review): presumably the "* 2" entries account for two possible
 * callbacks per subsystem (e.g. IPv4/IPv6 or plain/GPE variants) — confirm
 * against the registering modules before relying on this.
 */
#define UDP_MAX_TUNNEL_TYPES (IS_ENABLED(CONFIG_GENEVE) + \
			      IS_ENABLED(CONFIG_VXLAN) * 2 + \
			      IS_ENABLED(CONFIG_NET_FOU) * 2 + \
			      IS_ENABLED(CONFIG_XFRM) * 2)

/* Fast-path hook: points at the single registered callback, else the dummy. */
DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
/* Enabled only while exactly one distinct callback is registered. */
static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
/* Serializes updates to udp_tunnel_gro_types[] and the static call/key. */
static DEFINE_MUTEX(udp_tunnel_gro_type_lock);
static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
static unsigned int udp_tunnel_gro_type_nr;
/* Protects the per-netns tunnel socket lists and cached lookup pointers. */
static DEFINE_SPINLOCK(udp_tunnel_gro_lock);
/*
 * Add or remove @sk from the per-netns, per-family list of UDP tunnel
 * sockets and refresh the cached single-socket pointer used by the GRO
 * lookup fast path: when exactly one tunnel socket of the family remains,
 * it is published via RCU in net->ipv4.udp_tunnel_gro[is_ipv6].sk;
 * otherwise the cached pointer is cleared.
 */
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
{
	bool is_ipv6 = sk->sk_family == AF_INET6;
	struct udp_sock *tup, *up = udp_sk(sk);
	struct udp_tunnel_gro *udp_tunnel_gro;

	spin_lock(&udp_tunnel_gro_lock);
	udp_tunnel_gro = &net->ipv4.udp_tunnel_gro[is_ipv6];
	if (add)
		hlist_add_head(&up->tunnel_list, &udp_tunnel_gro->list);
	else if (up->tunnel_list.pprev)
		/* Unlink only sockets that were actually hashed in. */
		hlist_del_init(&up->tunnel_list);

	/* first && !first->next <=> the list holds exactly one entry. */
	if (udp_tunnel_gro->list.first &&
	    !udp_tunnel_gro->list.first->next) {
		tup = hlist_entry(udp_tunnel_gro->list.first, struct udp_sock,
				  tunnel_list);
		/* NOTE(review): the udp_sock -> sock cast presumably relies on
		 * the struct sock being the first member — confirm.
		 */
		rcu_assign_pointer(udp_tunnel_gro->sk, (struct sock *)tup);
	} else {
		RCU_INIT_POINTER(udp_tunnel_gro->sk, NULL);
	}
	spin_unlock(&udp_tunnel_gro_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_lookup);
/*
 * Register (@add == true) or unregister @sk's tunnel GRO receive callback
 * in the global table, refcounting identical callbacks.  While exactly one
 * distinct callback is registered, it is wired into the
 * udp_tunnel_gro_rcv static call and the companion static branch is
 * enabled, letting the GRO path avoid an indirect call.
 */
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add)
{
	struct udp_tunnel_type_entry *cur = NULL;
	struct udp_sock *up = udp_sk(sk);
	int i, old_gro_type_nr;

	if (!UDP_MAX_TUNNEL_TYPES || !up->gro_receive)
		return;

	mutex_lock(&udp_tunnel_gro_type_lock);

	/* Check if the static call is permanently disabled. */
	if (udp_tunnel_gro_type_nr > UDP_MAX_TUNNEL_TYPES)
		goto out;

	/* Linear scan is fine: the table holds at most a handful of entries. */
	for (i = 0; i < udp_tunnel_gro_type_nr; i++)
		if (udp_tunnel_gro_types[i].gro_receive == up->gro_receive)
			cur = &udp_tunnel_gro_types[i];

	old_gro_type_nr = udp_tunnel_gro_type_nr;
	if (add) {
		/*
		 * Update the matching entry, if found, or add a new one
		 * if needed
		 */
		if (cur) {
			refcount_inc(&cur->count);
			goto out;
		}

		if (unlikely(udp_tunnel_gro_type_nr == UDP_MAX_TUNNEL_TYPES)) {
			pr_err_once("Too many UDP tunnel types, please increase UDP_MAX_TUNNEL_TYPES\n");
			/* Ensure static call will never be enabled */
			udp_tunnel_gro_type_nr = UDP_MAX_TUNNEL_TYPES + 1;
		} else {
			cur = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
			refcount_set(&cur->count, 1);
			cur->gro_receive = up->gro_receive;
		}
	} else {
		/*
		 * Removal is only attempted for tunnels that were
		 * successfully added, so the lookup should never fail.
		 */
		if (WARN_ON_ONCE(!cur))
			goto out;

		if (!refcount_dec_and_test(&cur->count))
			goto out;

		/*
		 * Keep the array gap-free by moving the last entry into the
		 * freed slot, so a lone remaining callback always sits at
		 * index 0.
		 */
		*cur = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
	}

	/*
	 * Enable: point the static call at the single callback before
	 * enabling the branch.  Disable: turn the branch off first, so the
	 * GRO path stops using the static call before it is repointed at
	 * the dummy.
	 */
	if (udp_tunnel_gro_type_nr == 1) {
		static_call_update(udp_tunnel_gro_rcv,
				   udp_tunnel_gro_types[0].gro_receive);
		static_branch_enable(&udp_tunnel_static_call);
	} else if (old_gro_type_nr == 1) {
		static_branch_disable(&udp_tunnel_static_call);
		static_call_update(udp_tunnel_gro_rcv, dummy_gro_rcv);
	}

out:
	mutex_unlock(&udp_tunnel_gro_type_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);
static struct sk_buff *udp_tunnel_gro_rcv<