1 /*
2 * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
3 *
4 * LICENSE_BEGIN
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 *
38 * LICENSE_END
39 *
40 *
41 */
42
43 #include <errno.h>
44
45
46 #include "usd.h"
47 #include "usd_util.h"
48 #include "cq_enet_desc.h"
49
50 static inline void
find_rx_lengths(struct usd_rq * rq,uint16_t q_index,size_t * posted_len_o,size_t * len_in_pkt_o)51 find_rx_lengths(
52 struct usd_rq *rq,
53 uint16_t q_index,
54 size_t *posted_len_o,
55 size_t *len_in_pkt_o)
56 {
57 dma_addr_t bus_addr;
58 u16 len;
59 u8 type;
60 size_t rcvbuf_len;
61 uint16_t i;
62
63 i = q_index;
64 rcvbuf_len = 0;
65 do {
66 rq_enet_desc_dec( (struct rq_enet_desc *)
67 ((uintptr_t)rq->urq_desc_ring + (i<<4)),
68 &bus_addr, &type, &len);
69 rcvbuf_len += len;
70 i = (i - 1) & rq->urq_post_index_mask;
71 } while (type == RQ_ENET_TYPE_NOT_SOP);
72
73 *posted_len_o = rcvbuf_len;
74 *len_in_pkt_o = ntohs(((struct usd_udp_hdr *)bus_addr)->uh_ip.tot_len) +
75 sizeof(struct ether_header);
76 }
77
/*
 * Translate an enet RQ completion descriptor into a usd_completion.
 *
 * Returns 0 when a full packet completion was produced in *comp, or -1 when
 * this descriptor is a non-EOP fragment of a multi-buffer receive; in that
 * case the bytes are accumulated on the RQ and the caller should fetch the
 * next descriptor instead of reporting a completion.
 */
static inline int
usd_desc_to_rq_comp(
    struct usd_cq_impl *cq,
    struct cq_desc *desc,
    uint16_t qid,
    uint16_t q_index,
    struct usd_completion *comp)
{
    struct usd_rq *rq;
    struct usd_qp_impl *qp;
    struct cq_enet_rq_desc *edesc;
    uint16_t bytes_written_flags;
    uint32_t bytes_written;
    uint32_t ci_flags;
    uint32_t ipudpok;
    unsigned credits;
    size_t len_in_pkt;
    size_t rcvbuf_len;

    edesc = (struct cq_enet_rq_desc *)desc;
    rq = cq->ucq_rq_map[qid];
    qp = usd_container_of(rq, struct usd_qp_impl, uq_rq);

    /* Descriptor fields are little-endian; length and flags share a word */
    bytes_written_flags = le16_to_cpu(edesc->bytes_written_flags);
    bytes_written = bytes_written_flags & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
    ci_flags = le16_to_cpu(edesc->completed_index_flags);

    if (ci_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) {
        /* end of packet: fold in bytes accumulated from earlier fragments */
        comp->uc_bytes = bytes_written + rq->urq_accum_bytes;
        rq->urq_accum_bytes = 0;
    } else {
        /* mid-packet fragment: remember the bytes, no completion yet */
        rq->urq_accum_bytes += bytes_written;
        return -1;
    }

    comp->uc_context = rq->urq_context[q_index];
    comp->uc_qp = &qp->uq_qp;

    /* both IP and TCP/UDP checksums must be flagged OK for a clean receive */
    ipudpok = CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK |
        CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;

    if (bytes_written_flags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED ||
            (edesc->flags & ipudpok) != ipudpok) {
        /* FCS not OK with zero bytes written is the VIC's signature for
         * truncation; distinguish real truncation from mere padding loss.
         */
        if (((edesc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) == 0) &&
                bytes_written == 0) {
            find_rx_lengths(rq, q_index, &rcvbuf_len, &len_in_pkt);

            /*
             * If only the paddings to meet 64-byte minimum eth frame
             * requirement are truncated, do not mark packet as
             * error due to truncation.
             * The usnic hdr should not be split into multiple receive buffer
             *
             * If we could afford the extra cycles, we would also compute the
             * UDP checksum here and compare it to the UDP header.
             */
            if (rcvbuf_len >= 60 || len_in_pkt > rcvbuf_len) {
                /* real payload was cut off, report truncation */
                comp->uc_status = USD_COMPSTAT_ERROR_TRUNC;
            }
            else {
                comp->uc_status = USD_COMPSTAT_SUCCESS;
                /* TRUNC means bytes_written==0, so fix this too */
                comp->uc_bytes = len_in_pkt;
            }
        } else {
            comp->uc_status = USD_COMPSTAT_ERROR_CRC;
        }
    } else {
        if (comp->uc_bytes <= 60) {
            /*
             * The sender may have attempted to send a small frame (<64-bytes)
             * that was padded out to 64-bytes by the sending VIC.
             * If we posted a recv buffer >= 60 bytes then we wouldn't see
             * truncation, but the bytes_written by the VIC will be larger than
             * the bytes the sender actually requested to send. Fix that up
             * here.
             */
            find_rx_lengths(rq, q_index, &rcvbuf_len, &len_in_pkt);
            comp->uc_bytes = len_in_pkt;
        }
        comp->uc_status = USD_COMPSTAT_SUCCESS;
    }

    /* needs a little work in multi-SGE case, all credits currently not
     * reported as released until next RX
     */
    credits = (q_index - rq->urq_last_comp) & rq->urq_post_index_mask;
    rq->urq_recv_credits += credits;
    rq->urq_last_comp = q_index;

    return 0;
}
170
171 static inline void
usd_desc_to_wq_comp(struct usd_cq_impl * cq,uint16_t qid,uint16_t q_index,struct usd_completion * comp)172 usd_desc_to_wq_comp(
173 struct usd_cq_impl *cq,
174 uint16_t qid,
175 uint16_t q_index,
176 struct usd_completion *comp)
177 {
178 struct usd_wq *wq;
179 struct usd_qp_impl *qp;
180 struct usd_wq_post_info *info;
181 unsigned credits;
182
183 wq = cq->ucq_wq_map[qid];
184 qp = usd_container_of(wq, struct usd_qp_impl, uq_wq);
185 comp->uc_qp = &qp->uq_qp;
186
187 info = &wq->uwq_post_info[(q_index+1)&wq->uwq_post_index_mask];
188 comp->uc_context = info->wp_context;
189 comp->uc_bytes = info->wp_len;
190 comp->uc_status = USD_COMPSTAT_SUCCESS;
191
192 credits = (q_index - wq->uwq_last_comp) & wq->uwq_post_index_mask;
193 wq->uwq_send_credits += credits;
194 wq->uwq_last_comp = q_index;
195 }
196
197 int
usd_poll_cq_multi(struct usd_cq * ucq,int max_comps,struct usd_completion * comps)198 usd_poll_cq_multi(
199 struct usd_cq *ucq,
200 int max_comps,
201 struct usd_completion *comps)
202 {
203 int ret;
204 int n;
205
206 for (n = 0; n < max_comps; ++n) {
207 ret = usd_poll_cq(ucq, comps + n);
208 if (ret == -EAGAIN) {
209 return n;
210 }
211 }
212 return max_comps;
213 }
214
/*
 * Poll the completion queue for a single completion.
 *
 * Returns 0 with *comp filled in when a completion is available, or
 * -EAGAIN when the CQ is empty.  A descriptor is valid when its color bit
 * differs from the color we last consumed; the expected color flips each
 * time the ring index wraps.
 */
int
usd_poll_cq(
    struct usd_cq *ucq,
    struct usd_completion *comp)
{
    struct usd_cq_impl *cq;
    struct cq_desc *cq_desc;
    uint8_t color;
    uint8_t last_color;
    uint8_t type_color;
    uint8_t type;
    uint16_t qid;
    uint16_t q_index;

    cq = to_cqi(ucq);

retry:
    /* check for a completion */
    cq_desc = (struct cq_desc *)((uint8_t *)cq->ucq_desc_ring +
                                 (cq->ucq_next_desc << 4));
    last_color = cq->ucq_last_color;

    /* type and color share one byte; color is the top bit */
    type_color = cq_desc->type_color;
    type = type_color & 0x7f;
    color = type_color >> CQ_DESC_COLOR_SHIFT;
    qid = le16_to_cpu(cq_desc->q_number) & CQ_DESC_Q_NUM_MASK;
    q_index = le16_to_cpu(cq_desc->completed_index) & CQ_DESC_COMP_NDX_MASK;

    if (color == last_color) {
        /* same color as last time: hardware has not written a new entry */
        return -EAGAIN;
    } else {

        /* bookkeeping */
        cq->ucq_next_desc++;
        /* (next_desc >> color_shift) is 1 exactly when the ring wraps,
         * which toggles the color we expect on the next pass
         */
        cq->ucq_last_color ^= (cq->ucq_next_desc >> cq->ucq_color_shift);
        cq->ucq_next_desc &= cq->ucq_cqe_mask;

        /* NOTE(review): read barrier between seeing the color flip and
         * consuming descriptor contents — presumably orders the reads
         * above against later descriptor-derived loads; confirm intent.
         */
        rmb();

        comp->uc_type = (enum usd_completion_type) type;

        if (type == USD_COMPTYPE_RECV) {
            /* -1 means non-EOP fragment: no completion yet, keep polling */
            if (usd_desc_to_rq_comp(cq, cq_desc, qid, q_index, comp) == -1) {
                goto retry;
            }
        } else if (type == USD_COMPTYPE_SEND) {
            usd_desc_to_wq_comp(cq, qid, q_index, comp);
        } else {
            comp->uc_status = USD_COMPSTAT_ERROR_INTERNAL;
        }
        return 0;
    }
}
268
269 /*
270 * Allow application to unmask interrupt explicitly
271 */
usd_poll_req_notify(struct usd_cq * ucq)272 int usd_poll_req_notify(struct usd_cq *ucq)
273 {
274 struct usd_cq_impl *cq;
275
276 cq = to_cqi(ucq);
277
278 /*
279 * application uses a signal thread waiting for one completion FD,
280 * then calling this function to unmask the interrupt source. If multiple
281 * cqs are associated with the FD/interrupt, this may be unneccesarilly
282 * called for subsequent cqs at each poll/wait, but it's OK. A lock isn't
283 * used here to prevent simultaneous unmasking among multiple threads as
284 * it's not a valid use case.
285 * Also this call happens at data path, it's assumed that removing a
286 * interrupt source from cq happens at control path tear down stage, when
287 * data path is already finished.
288 */
289 if (cq->comp_fd != -1 && cq->ucq_intr != NULL)
290 vnic_intr_unmask(&cq->ucq_intr->uci_vintr);
291
292 return 0;
293 }
294