// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) Siemens AG, 2024
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "../k3-cppi-desc-pool.h"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

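/* Tear down an RX channel: destroy its descriptor pool and release the
 * underlying UDMA glue channel. Safe on a partially initialized channel
 * since each resource is checked before being freed.
 */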
void prueth_cleanup_rx_chns(struct prueth_emac *emac,
			    struct prueth_rx_chn *rx_chn,
			    int max_rflows)
{
	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}

void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}

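/* Unmap and free a (possibly chained) TX descriptor. The first descriptor
 * maps the skb linear buffer (dma_unmap_single()); any linked host buffer
 * descriptors map page fragments and are unmapped with dma_unmap_page()
 * before being returned to the pool.
 */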
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
		      struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

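/* Reap completed TX descriptors for one channel: free skbs, account
 * packets/bytes toward BQL, and wake the queue if it was stopped and
 * enough descriptors (>= MAX_SKB_FRAGS) are available again. Returns
 * the number of completed packets and reports channel teardown via
 * @tdown.
 */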
int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
			     int budget, bool *tdown)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;
	void **swdata;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			*tdown = true;
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		/* was this command's TX complete? */
		if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
			prueth_xmit_free(tx_chn, desc_tx);
			continue;
		}

		skb = *(swdata);
		prueth_xmit_free(tx_chn, desc_tx);

		ndev = skb->dev;
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}

static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
{
	struct prueth_tx_chn *tx_chns =
		container_of(timer, struct prueth_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

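/* TX NAPI poll. If the budget was exhausted, more work remains and NAPI
 * keeps polling. Otherwise the completion interrupt is re-armed, either
 * immediately or, when TX interrupt pacing (tx_pace_timeout_ns) is
 * configured, once the pacing hrtimer above fires.
 */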
static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	bool tdown = false;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
						  &tdown);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets)) {
		if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}

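/* Allocate per-port TX DMA resources: for each TX channel, request a
 * UDMA glue TX channel, create a host-descriptor pool and look up the
 * completion IRQ. On any failure everything set up so far is unwound
 * via prueth_cleanup_tx_chns().
 */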
int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}
		tx_chn->irq = ret;

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}

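/* Allocate RX DMA resources: request a UDMA glue RX channel with
 * @max_rflows flows, create the descriptor pool, then configure each
 * flow's RX and free-descriptor rings (flow 0 allocates the
 * free-descriptor ring, the remaining flows share it) and record the
 * per-flow IRQs.
 */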
int prueth_init_rx_chns(struct prueth_emac *emac,
			struct prueth_rx_chn *rx_chn,
			char *name, u32 max_rflows,
			u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;
	int flow_id_base;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
		emac->rx_mgm_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
	} else {
		emac->rx_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
	}

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (ret <= 0) {
			if (!ret)
				ret = -ENXIO;
			netdev_err(ndev, "Failed to get rx dma irq");
			goto fail;
		}
		rx_chn->irq[i] = ret;
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}

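/* Map one skb's data buffer and queue it on the RX free-descriptor ring
 * so the hardware can fill it. The skb pointer is stashed in the
 * descriptor's software data so it can be recovered on completion.
 */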
int prueth_dma_rx_push(struct prueth_emac *emac,
		       struct sk_buff *skb,
		       struct prueth_rx_chn *rx_chn)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*swdata = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
					desc_rx, desc_dma);
}

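/* Reconstruct a 64-bit nanosecond timestamp from the split IEP fields:
 * lo[19:0] is the IEP count (nanoseconds into the current cycle),
 * hi[11:0]:lo[31:20] is the cycle count, and the upper bits of @hi carry
 * the rollover count. Computes
 * (rollover << 23 | (cycle_count + hi_sw)) * cycle_time_ns + lo[19:0].
 */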
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
	u64 ns;

	iepcount_lo = lo & GENMASK(19, 0);
	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
	hi_rollover_count = hi >> 11;

	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
	ns = ns * cycle_time_ns + iepcount_lo;

	return ns;
}

void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	if (emac->is_sr1) {
		ns = (u64)psdata[1] << 32 | psdata[0];
	} else {
		u32 hi_sw = readl(emac->prueth->shram.va +
				  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
				    IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

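/* Receive one packet on @flow_id: pop the completed descriptor, record
 * the RX hardware timestamp if enabled, strip the trailing CRC and pass
 * the skb up via napi_gro_receive(), then push a replacement buffer to
 * keep the free-descriptor ring filled.
 */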
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	void **swdata;
	u32 *psdata;
	int ret;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	skb->dev = ndev;
	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with the old skb to prevent a stall
	 */
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	} else {
		/* send the filled skb up the n/w stack */
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&emac->napi_rx, skb);
		ndev->stats.rx_bytes += pkt_len;
		ndev->stats.rx_packets++;
	}

	/* queue another RX DMA */
	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

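/* Reserve a free TX timestamp slot and return its index, which is sent
 * to the firmware as a cookie in the descriptor's EPIB so the timestamp
 * response can be matched back to the right skb. Returns -EBUSY when all
 * PRUETH_MAX_TX_TS_REQUESTS slots are in flight.
 */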
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
	int i;

	/* search and get the next free slot */
	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (!emac->tx_ts_skb[i]) {
			emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
			return i;
		}
	}

	return -EBUSY;
}

/**
 * emac_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet - we queue the packet in
 * the EMAC hardware transmit queue. We don't wait for completion here;
 * TX completions are reaped later by emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	int i, ret = 0, q_idx;
	bool in_tx_ts = false;
	int tx_ts_cookie;
	void **swdata;
	u32 pkt_len;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    emac->tx_ts_enabled) {
		tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
		if (tx_ts_cookie >= 0) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* Request TX timestamp */
			epib[0] = (u32)tx_ts_cookie;
			epib[1] = 0x80000000; /* TX TS request */
			emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
			in_tx_ts = true;
		}
	}

	/* Set the dst tag to indicate the internal queue id at the
	 * firmware, which sits in bit8..bit15; bit0..bit7 carries the
	 * port number for directed packets in switch mode operation.
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*swdata = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy_cleanup_tx_ts;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto cleanup_tx_ts;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);  /* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	if (in_tx_ts)
		atomic_inc(&emac->tx_ts_pending);

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy_cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	prueth_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}

void prueth_emac_stop(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice;

	switch (emac->port_id) {
	case PRUETH_PORT_MII0:
		slice = ICSS_SLICE0;
		break;
	case PRUETH_PORT_MII1:
		slice = ICSS_SLICE1;
		break;
	default:
		netdev_err(emac->ndev, "invalid port\n");
		return;
	}

	emac->fw_running = 0;
	if (!emac->is_sr1)
		rproc_shutdown(prueth->txpru[slice]);
	rproc_shutdown(prueth->rtu[slice]);
	rproc_shutdown(prueth->pru[slice]);
}

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (emac->tx_ts_skb[i]) {
			dev_kfree_skb_any(emac->tx_ts_skb[i]);
			emac->tx_ts_skb[i] = NULL;
		}
	}
}

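/* RX NAPI poll: drain the RX flows, highest flow number first, within
 * the given budget. When less than the full budget is consumed, NAPI is
 * completed and the RX interrupt is re-armed, either directly or, if RX
 * pacing (rx_pace_timeout_ns) is enabled, once the pacing hrtimer fires.
 */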
int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = emac->is_sr1 ?
		PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
	int flow = emac->is_sr1 ?
		PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
	int num_rx = 0;
	int cur_budget;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (unlikely(emac->rx_pace_timeout_ns)) {
			hrtimer_start(&emac->rx_hrtimer,
				      ns_to_ktime(emac->rx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(emac->rx_chns.irq[rx_flow]);
		}
	}

	return num_rx;
}

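/* Pre-fill an RX channel's free-descriptor ring with freshly allocated
 * skbs of @buf_size bytes, one per descriptor, so the ring is fully
 * populated before the channel is enabled.
 */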
int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
{
	struct sk_buff *skb;
	int i, ret;

	for (i = 0; i < chn->descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		ret = prueth_dma_rx_push(emac, skb, chn);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit skb for rx chan %s ret %d\n",
				   chn->name, ret);
			kfree_skb(skb);
			return ret;
		}
	}

	return 0;
}

void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
			  bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}

void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
			  int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup, !!i);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}

void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}

static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		emac->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		emac->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		emac->rx_ts_enabled = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return emac_get_ts_config(ndev, ifr);
	case SIOCSHWTSTAMP:
		return emac_set_ts_config(ndev, ifr);
	default:
		break;
	}

	return phy_do_ioctl(ndev, ifr, cmd);
}

void emac_ndo_get_stats64(struct net_device *ndev,
			  struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors = ndev->stats.rx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_errors = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped;
}

int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				size_t len)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret;

	ret = snprintf(name, len, "p%d", emac->port_id);
	if (ret >= len)
		return -EINVAL;

	return 0;
}

/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}

/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}

void prueth_netdev_exit(struct prueth *prueth,
			struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}

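/* Acquire the remoteproc handles for one ICSSG slice. Cores are listed
 * per slice in the device tree phandle list consumed by pru_rproc_get():
 * on SR1.0 each slice has two cores (PRU, RTU), on later silicon three
 * (PRU, RTU, TX_PRU), which is why SLICE1 starts at index 2 or 3.
 */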
int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = is_sr1 ? 2 : 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	if (is_sr1)
		return 0;

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}

void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}

#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			ret = ndev->netdev_ops->ndo_stop(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to stop: %d", ret);
				return ret;
			}
		}
	}

	return 0;
}

static int prueth_resume(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			ret = ndev->netdev_ops->ndo_open(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to start: %d", ret);
				return ret;
			}
			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

const struct dev_pm_ops prueth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};