// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane:
 * bpf syscalls, bpf programs, and driver side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used to
 * modify the netdev_map array. Then because the datapath does a lookup into
 * the netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However, the
 * datapath does a "flush" operation that pushes any pending packets in the
 * driver outside the RCU critical section. Each bpf_dtab_netdev tracks these
 * pending operations using a per-cpu flush list. The bpf_dtab_netdev object
 * will not be destroyed until this list is empty, indicating that all
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and drop them. This is a two-step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check whether its ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it
 * is possible that the initially referenced dev is no longer in the map. As
 * the notifier hook walks the map, we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion are different.
 */
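/* Illustrative sketch of the BPF side (not part of this file): an XDP program
 * that redirects every packet out through the device stored at key 0 of a
 * devmap; the map name tx_port is hypothetical:
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 */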
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
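/* Map an arbitrary index into one of the hash buckets. n_buckets is always a
 * power of two (see dev_map_init_map()), so the mask below is equivalent to a
 * modulo.
 */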
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}
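/* Illustrative userspace sketch (not part of this file) of creating a devmap
 * whose 8-byte value layout can carry a per-entry program fd; assumes
 * libbpf's bpf_map_create():
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_port",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_devmap_val),
 *				    64, NULL);
 */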
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. The following synchronize_rcu()
	 * guarantees both that the rcu read critical sections complete and
	 * that preempt-disable regions (NAPI being the relevant context here)
	 * have finished, so we are certain there will be no further reads
	 * against the netdev_map and all flush operations are complete.
	 * Flush operations can only be done from NAPI context for this
	 * reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() callbacks have completed. */
	rcu_barrier();
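	/* No RCU reader can still see the map at this point, so the entries
	 * can be torn down directly: drop each entry's program and device
	 * references and free it without further deferral.
	 */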
	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
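/* Callers either hold rcu_read_lock() (datapath and syscall lookups) or
 * dtab->index_lock (update/delete paths); the lockdep_is_held() annotation
 * below tells RCU that walking the list with the lock held is also valid.
 */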
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}
bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
	bq->dev_rx = NULL;
	bq->count = 0;
	__list_del_clearprev(&bq->flush_node);
}
/* __dev_flush is called from xdp_do_flush() which _must_ be signaled from the
 * driver before returning from its napi->poll() routine. The poll() routine
 * is called either from busy_poll context or net_rx_action signaled from
 * NET_RX_SOFTIRQ. Either way the poll routine must complete before the net
 * device can be torn down. On devmap tear down we ensure the flush list is
 * empty before completing, to ensure all flush operations have finished.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}
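/* Frames are bulked in per-CPU, per-device queues of up to DEV_MAP_BULK_SIZE
 * entries so that a single ndo_xdp_xmit() call can push a whole batch,
 * amortizing the driver's per-call overhead.
 */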
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because bq is stored per CPU and must be flushed
	 * before the net_device driver's NAPI func returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx);
	return 0;
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}
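/* Both lookup_elem callbacks hand out a pointer to the entry's
 * bpf_devmap_val (ifindex plus optional program id) rather than the
 * kernel-internal bpf_dtab_netdev; BPF_F_RDONLY_PROG, set at map creation,
 * keeps BPF-side access read-only.
 */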
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu
	 * will wait for the preempt-disable region to complete, NAPI in
	 * this context. Additionally, the driver tear down ensures all
	 * soft irqs are complete before removing the net device in the
	 * case where dev_put drops the refcount to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;

err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * keeping in mind that the driver-side flush operation will have
	 * happened before the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
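/* Illustrative userspace sketch (not part of this file) of populating an
 * entry, assuming libbpf; devmap_prog_fd stands for an XDP program loaded
 * with expected_attach_type BPF_XDP_DEVMAP, and all names are hypothetical:
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = target_ifindex,
 *		.bpf_prog.fd = devmap_prog_fd,	// or 0 for no per-entry prog
 *	};
 *	__u32 key = 0;
 *	int err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */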
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}
static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};
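/* Called from the NETDEV_UNREGISTER notifier for DEVMAP_HASH maps: walk every
 * bucket and unlink all entries that reference the disappearing netdev,
 * deferring the actual free to an RCU grace period.
 */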
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);