xref: /linux/drivers/net/ethernet/netronome/nfp/ccm.c (revision 44f57d78)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2019 Netronome Systems, Inc. */

#include <linux/bitops.h>

#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net.h"

#define NFP_CCM_TYPE_REPLY_BIT		7
#define __NFP_CCM_REPLY(req)		(BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))

#define ccm_warn(app, msg...)	nn_dp_warn(&(app)->ctrl->dp, msg)

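/* Tags are handed out from a sliding u16 window [tag_alloc_last,
 * tag_alloc_next).  Cap the number of outstanding tags at a quarter of
 * the u16 space so a tag cannot be reused soon after its request timed
 * out.
 */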
#define NFP_CCM_TAG_ALLOC_SPAN	(U16_MAX / 4)

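/* The u16 subtraction relies on wrap-around, so the count of used tags
 * stays correct even after tag_alloc_next wraps past tag_alloc_last.
 */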
static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
{
	u16 used_tags;

	used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;

	return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
}

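/* Must be called with the nfp_ctrl_lock() held - the tag allocator bitmap
 * is modified with the non-atomic __test_and_set_bit().
 */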
static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
{
	/* CCM is used for request-reply FW communication.  To make sure
	 * a message ID is not reused too early after a timeout - limit
	 * the number of requests in flight.
	 */
	if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
		ccm_warn(ccm->app, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
	return ccm->tag_alloc_next++;
}

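/* Clear the tag and advance tag_alloc_last past any already-freed tags,
 * opening the window back up for nfp_ccm_alloc_tag().  Called with the
 * nfp_ctrl_lock() held.
 */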
static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));

	while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
	       ccm->tag_alloc_last != ccm->tag_alloc_next)
		ccm->tag_alloc_last++;
}

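/* Find, unlink and return the queued reply matching @tag, freeing the tag
 * on a hit.  Caller must hold the nfp_ctrl_lock().
 */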
static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&ccm->replies, skb) {
		msg_tag = nfp_ccm_get_tag(skb);
		if (msg_tag == tag) {
			nfp_ccm_free_tag(ccm, tag);
			__skb_unlink(skb, &ccm->replies);
			return skb;
		}
	}

	return NULL;
}

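/* Locked wrapper around __nfp_ccm_reply(). */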
static struct sk_buff *
nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(app->ctrl);
	skb = __nfp_ccm_reply(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return skb;
}

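/* Last-chance variant used once the wait has timed out - if no reply is
 * found the tag is freed anyway, atomically with the final look-up, so a
 * late reply cannot be matched against a reused tag.
 */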
static struct sk_buff *
nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(app->ctrl);
	skb = __nfp_ccm_reply(ccm, tag);
	if (!skb)
		nfp_ccm_free_tag(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return skb;
}

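/* Busy-poll for up to 200 usec in case the FW replies quickly, then sleep
 * on the wait queue with a 5 second timeout.
 */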
static struct sk_buff *
nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
		   enum nfp_ccm_type type, int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_ccm_reply(ccm, app, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(ccm->wq,
					       skb = nfp_ccm_reply(ccm, app,
								   tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
	if (err < 0) {
		ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
			 err == -ERESTARTSYS ? "interrupted" : "error",
			 type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

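/* Send the request @skb (which must start with a struct nfp_ccm_hdr) to
 * the FW and sleep until the matching reply arrives.  Consumes @skb and
 * returns the reply skb or an ERR_PTR().  A @reply_size of 0 skips the
 * length check and leaves reply validation to the caller.
 *
 * Minimal caller sketch - the message type and request/reply structs are
 * hypothetical, and nfp_app_ctrl_msg_alloc() is assumed to be the usual
 * control message allocation helper:
 *
 *	skb = nfp_app_ctrl_msg_alloc(app, sizeof(struct my_req), GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	req = skb_put(skb, sizeof(struct my_req));
 *	... fill in @req past its leading struct nfp_ccm_hdr ...
 *	skb = nfp_ccm_communicate(ccm, skb, NFP_CCM_TYPE_MY_REQ,
 *				  sizeof(struct my_reply));
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 */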
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means the caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %u != %u!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

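/* RX handler for control messages - match the reply to a waiting tag,
 * queue it and wake up all waiters.  Drops (and frees) messages which are
 * too short or which nobody is waiting for.
 */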
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %u!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	tag = nfp_ccm_get_tag(skb);
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			 tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}

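/* Set up the reply queue and wait queue; must be called before any
 * messages are exchanged.
 */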
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
{
	ccm->app = app;
	skb_queue_head_init(&ccm->replies);
	init_waitqueue_head(&ccm->wq);
	return 0;
}

void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}
221