// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/rhashtable.h>
#include <net/flow_offload.h>
#include "en/tc_priv.h"
#include "act_stats.h"
#include "en/fs.h"

struct mlx5e_tc_act_stats_handle {
	struct rhashtable ht;
	spinlock_t ht_lock; /* protects hashtable */
};

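/* Per-action stats entry, keyed by the TC action cookie. It holds the
 * HW flow counter attached to the action and a snapshot of the last
 * packet/byte values reported to the stack, so that only deltas are
 * pushed on the next stats request.
 */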
struct mlx5e_tc_act_stats {
	unsigned long		tc_act_cookie;

	struct mlx5_fc		*counter;
	u64			lastpackets;
	u64			lastbytes;

	struct rhash_head	hash;
	struct rcu_head		rcu_head;
};

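/* The hash key is the leading tc_act_cookie field: everything from the
 * start of the entry up to the counter pointer is treated as key material.
 */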
static const struct rhashtable_params act_counters_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_act_stats, hash),
	.key_offset = 0,
	.key_len = offsetof(struct mlx5e_tc_act_stats, counter),
	.automatic_shrinking = true,
};

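/* Allocate a stats handle and initialize its hashtable and lock.
 * Returns a valid pointer or an ERR_PTR() on failure.
 */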
struct mlx5e_tc_act_stats_handle *
mlx5e_tc_act_stats_create(void)
{
	struct mlx5e_tc_act_stats_handle *handle;
	int err;

	handle = kvzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&handle->ht, &act_counters_ht_params);
	if (err)
		goto err;

	spin_lock_init(&handle->ht_lock);
	return handle;
err:
	kvfree(handle);
	return ERR_PTR(err);
}

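/* Tear down the hashtable and release the handle. The caller is expected
 * to have removed all tracked flows beforehand, as remaining entries are
 * not freed here.
 */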
void mlx5e_tc_act_stats_free(struct mlx5e_tc_act_stats_handle *handle)
{
	rhashtable_destroy(&handle->ht);
	kvfree(handle);
}

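/* Add one cookie -> counter mapping. The current cached counter values
 * are recorded as the baseline so that later stats reads report deltas.
 * Returns -EEXIST if the cookie is already tracked.
 */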
static int
mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
		       unsigned long act_cookie,
		       struct mlx5_fc *counter)
{
	struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
	struct rhashtable *ht = &handle->ht;
	u64 lastused;
	int err = 0;

	act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
	if (!act_stats)
		return -ENOMEM;

	act_stats->tc_act_cookie = act_cookie;
	act_stats->counter = counter;

	mlx5_fc_query_cached_raw(counter,
				 &act_stats->lastbytes,
				 &act_stats->lastpackets, &lastused);

	rcu_read_lock();
	old_act_stats = rhashtable_lookup_get_insert_fast(ht,
							  &act_stats->hash,
							  act_counters_ht_params);
	if (IS_ERR(old_act_stats)) {
		err = PTR_ERR(old_act_stats);
		goto err_hash_insert;
	} else if (old_act_stats) {
		err = -EEXIST;
		goto err_hash_insert;
	}
	rcu_read_unlock();

	return 0;

err_hash_insert:
	rcu_read_unlock();
	kvfree(act_stats);
	return err;
}

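/* Remove and free the stats entries of all action cookies attached to
 * the given flow. Entries are freed after an RCU grace period since
 * readers look them up under rcu_read_lock().
 */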
void
mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
			    struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;
	struct mlx5e_tc_act_stats *act_stats;
	int i;

	if (!flow_flag_test(flow, USE_ACT_STATS))
		return;

	list_for_each_entry(attr, &flow->attrs, list) {
		for (i = 0; i < attr->tc_act_cookies_count; i++) {
			struct rhashtable *ht = &handle->ht;

			spin_lock(&handle->ht_lock);
			act_stats = rhashtable_lookup_fast(ht,
							   &attr->tc_act_cookies[i],
							   act_counters_ht_params);
			if (act_stats &&
			    rhashtable_remove_fast(ht, &act_stats->hash,
						   act_counters_ht_params) == 0)
				kvfree_rcu(act_stats, rcu_head);

			spin_unlock(&handle->ht_lock);
		}
	}
}

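/* Walk all attrs of a flow and map each action cookie to the most recent
 * flow counter seen while iterating. Consecutive identical cookies
 * (e.g. from pedit) are added only once. On error, any entries added so
 * far are removed again.
 */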
int
mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
			    struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *curr_counter = NULL;
	unsigned long last_cookie = 0;
	struct mlx5_flow_attr *attr;
	int err;
	int i;

	if (!flow_flag_test(flow, USE_ACT_STATS))
		return 0;

	list_for_each_entry(attr, &flow->attrs, list) {
		if (attr->counter)
			curr_counter = attr->counter;

		for (i = 0; i < attr->tc_act_cookies_count; i++) {
			/* jump over identical ids (e.g. pedit) */
			if (last_cookie == attr->tc_act_cookies[i])
				continue;

			err = mlx5e_tc_act_stats_add(handle, attr->tc_act_cookies[i], curr_counter);
			if (err)
				goto out_err;
			last_cookie = attr->tc_act_cookies[i];
		}
	}

	return 0;
out_err:
	mlx5e_tc_act_stats_del_flow(handle, flow);
	return err;
}

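/* Look up the entry for fl_act->cookie, report the packet/byte deltas
 * since the previous read via flow_stats_update(), and refresh the
 * stored snapshot. Returns -ENOENT if the cookie is not tracked.
 */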
int
mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
			      struct flow_offload_action *fl_act)
{
	struct rhashtable *ht = &handle->ht;
	struct mlx5e_tc_act_stats *item;
	struct mlx5e_tc_act_stats key;
	u64 pkts, bytes, lastused;
	int err = 0;

	key.tc_act_cookie = fl_act->cookie;

	rcu_read_lock();
	item = rhashtable_lookup(ht, &key, act_counters_ht_params);
	if (!item) {
		rcu_read_unlock();
		err = -ENOENT;
		goto err_out;
	}

	mlx5_fc_query_cached_raw(item->counter,
				 &bytes, &pkts, &lastused);

	flow_stats_update(&fl_act->stats,
			  bytes - item->lastbytes,
			  pkts - item->lastpackets,
			  0, lastused, FLOW_ACTION_HW_STATS_DELAYED);

	item->lastpackets = pkts;
	item->lastbytes = bytes;
	rcu_read_unlock();

	return 0;

err_out:
	return err;
}