// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "Netif message level, default = -1");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int ehea_probe_adapter(struct platform_device *dev);

static void ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{
		.type = "network",
		.compatible = "IBM,lhea-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove_new = ehea_remove,
};

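/* Dump a memory region to the kernel log, 16 bytes (two u64 words) per line,
 * prefixed with the caller's message. Used throughout the driver to dump
 * CQEs, SWQEs and control blocks when the matching netif_msg level is set.
 */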
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

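/* Rebuild the global array of all firmware handles (QPs, CQs, EQs and memory
 * regions) owned by registered adapters and their active ports. The array is
 * presumably consumed elsewhere in the driver, e.g. on crash/reboot paths
 * (note the <asm/kexec.h> and <linux/reboot.h> includes), to free firmware
 * resources; on allocation failure the previous array is kept.
 */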
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

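/* Rebuild the global array of broadcast/multicast (BCMC) registrations for
 * all active ports: two broadcast entries per port (untagged and all-VLAN)
 * plus two entries per multicast address on the port's mc_list.
 */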
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static void ehea_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

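/* Replenish receive queue 1 with freshly allocated skbs, walking the skb
 * array backwards from @index. Allocation failures are remembered in
 * os_skbs ("outstanding skbs") and retried on the next refill; the doorbell
 * is only rung for WQEs that were actually made available.
 */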
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_consume_skb_any(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

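/* NAPI receive path: poll up to @budget completions from the receive CQ.
 * RQ1 packets are low-latency and copied out of the CQE itself, while RQ2
 * and RQ3 packets arrive in pre-posted skbs. Consumed WQEs are refilled
 * at the end.
 */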
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
							cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

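/* Magic wr_id cookie posted by check_sqs() to verify that the hardware send
 * queue and the software view of it are still in sync: the completion for
 * this marker WQE must come back within the timeout, or the port is reset.
 */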
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}


static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						       pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_consume_skb_any(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

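/* NAPI poll handler: reap send completions, then receive up to @budget
 * packets. When the budget is not exhausted, napi_complete() is called and
 * the CQ event/notification registers are re-armed; the re-check loop
 * closes the race with CQEs that arrive between the last poll and
 * napi_complete().
 */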
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_schedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		netdev_err(NULL, "unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(struct tasklet_struct *t)
{
	struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}


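/* Initially fill all three receive queues of a port resource: RQ1 gets its
 * full skb array, RQ2 and RQ3 are filled up to the actual number of RWQEs
 * granted by firmware, minus one.
 */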
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;


	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  0, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);


	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  0, pr->int_send_name, pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;


out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

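/* Fill the immediate-data area of a type-2 send WQE. For TSO only the
 * protocol headers are copied inline and the remainder of the linear data
 * is referenced through the first scatter-gather entry; small non-GSO
 * frames fit entirely into the immediate area.
 */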
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = skb_tcp_all_headers(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (tagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

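/* ndo_set_mac_address handler: program the new MAC via H_PORT_CB0, then
 * swap the broadcast registrations in the hypervisor (deregister the old
 * address, register the new one) if the port is up.
 */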
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	eth_hw_addr_set(dev, mac_addr->sa_data);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else {
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}

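/* Set up the checksum-offload fields shared by both transmit paths. For
 * IPv4 TCP/UDP frames the IP header window and the offset of the transport
 * checksum field are written into the SWQE so the HEA can insert checksums.
 */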
xmit_common(struct sk_buff * skb,struct ehea_swqe * swqe)1957 static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1958 {
1959 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1960
1961 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
1962 return;
1963
1964 if (skb->ip_summed == CHECKSUM_PARTIAL)
1965 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
1966
1967 swqe->ip_start = skb_network_offset(skb);
1968 swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
1969
1970 switch (ip_hdr(skb)->protocol) {
1971 case IPPROTO_UDP:
1972 if (skb->ip_summed == CHECKSUM_PARTIAL)
1973 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
1974
1975 swqe->tcp_offset = swqe->ip_end + 1 +
1976 offsetof(struct udphdr, check);
1977 break;
1978
1979 case IPPROTO_TCP:
1980 if (skb->ip_summed == CHECKSUM_PARTIAL)
1981 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
1982
1983 swqe->tcp_offset = swqe->ip_end + 1 +
1984 offsetof(struct tcphdr, check);
1985 break;
1986 }
1987 }
1988
ehea_xmit2(struct sk_buff * skb,struct net_device * dev,struct ehea_swqe * swqe,u32 lkey)1989 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1990 struct ehea_swqe *swqe, u32 lkey)
1991 {
1992 swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
1993
1994 xmit_common(skb, swqe);
1995
1996 write_swqe2_data(skb, dev, swqe, lkey);
1997 }
1998
1999 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2000 struct ehea_swqe *swqe)
2001 {
2002 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2003
2004 xmit_common(skb, swqe);
2005
2006 if (!skb->data_len)
2007 skb_copy_from_linear_data(skb, imm_data, skb->len);
2008 else
2009 skb_copy_bits(skb, 0, imm_data, skb->len);
2010
2011 swqe->immediate_data_length = skb->len;
2012 dev_consume_skb_any(skb);
2013 }
2014
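/*
 * Transmit path, roughly: frames that fit into the WQE's immediate-data
 * area (<= SWQE3_MAX_IMM bytes) are copied into a type-3 SWQE and the skb
 * is consumed immediately; larger frames go out as a type-2 SWQE that
 * references the skb data via descriptors, with the skb parked in sq_skba
 * until its completion is reaped.  To throttle completion interrupts, only
 * every sig_comp_iv-th immediate-data WQE requests a signalled completion,
 * whereas descriptor WQEs always do.
 */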
2015 static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2016 {
2017 struct ehea_port *port = netdev_priv(dev);
2018 struct ehea_swqe *swqe;
2019 u32 lkey;
2020 int swqe_index;
2021 struct ehea_port_res *pr;
2022 struct netdev_queue *txq;
2023
2024 pr = &port->port_res[skb_get_queue_mapping(skb)];
2025 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2026
2027 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2028 memset(swqe, 0, SWQE_HEADER_SIZE);
2029 atomic_dec(&pr->swqe_avail);
2030
2031 if (skb_vlan_tag_present(skb)) {
2032 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2033 swqe->vlan_tag = skb_vlan_tag_get(skb);
2034 }
2035
2036 pr->tx_packets++;
2037 pr->tx_bytes += skb->len;
2038
2039 if (skb->len <= SWQE3_MAX_IMM) {
2040 u32 sig_iv = port->sig_comp_iv;
2041 u32 swqe_num = pr->swqe_id_counter;
2042 ehea_xmit3(skb, dev, swqe);
2043 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2044 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2045 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2046 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2047 sig_iv);
2048 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2049 pr->swqe_ll_count = 0;
2050 } else
2051 pr->swqe_ll_count += 1;
2052 } else {
2053 swqe->wr_id =
2054 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2055 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2056 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2057 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2058 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2059
2060 pr->sq_skba.index++;
2061 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2062
2063 lkey = pr->send_mr.lkey;
2064 ehea_xmit2(skb, dev, swqe, lkey);
2065 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2066 }
2067 pr->swqe_id_counter += 1;
2068
2069 netif_info(port, tx_queued, dev,
2070 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2071 if (netif_msg_tx_queued(port))
2072 ehea_dump(swqe, 512, "swqe");
2073
2074 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2075 netif_tx_stop_queue(txq);
2076 swqe->tx_control |= EHEA_SWQE_PURGE;
2077 }
2078
2079 ehea_post_swqe(pr->qp, swqe);
2080
2081 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2082 pr->p_stats.queue_stopped++;
2083 netif_tx_stop_queue(txq);
2084 }
2085
2086 return NETDEV_TX_OK;
2087 }
2088
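/*
 * The port's VLAN filter in CB1 is a bitmap with one bit per possible
 * VLAN id, kept as an array of u64 words with the most significant bit of
 * word 0 standing for vid 0.  A given vid therefore lives in word
 * (vid / 64) at bit (0x8000000000000000 >> (vid & 0x3F)); e.g. vid 100
 * sets vlan_filter[1] |= 0x8000000000000000 >> 36.  The whole CB1 block is
 * read-modify-written through a query/modify hcall pair.
 */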
2089 static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2090 {
2091 struct ehea_port *port = netdev_priv(dev);
2092 struct ehea_adapter *adapter = port->adapter;
2093 struct hcp_ehea_port_cb1 *cb1;
2094 int index;
2095 u64 hret;
2096 int err = 0;
2097
2098 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2099 if (!cb1) {
2100 pr_err("no mem for cb1\n");
2101 err = -ENOMEM;
2102 goto out;
2103 }
2104
2105 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2106 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2107 if (hret != H_SUCCESS) {
2108 pr_err("query_ehea_port failed\n");
2109 err = -EINVAL;
2110 goto out;
2111 }
2112
2113 index = (vid / 64);
2114 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2115
2116 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2117 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2118 if (hret != H_SUCCESS) {
2119 pr_err("modify_ehea_port failed\n");
2120 err = -EINVAL;
2121 }
2122 out:
2123 free_page((unsigned long)cb1);
2124 return err;
2125 }
2126
2127 static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2128 {
2129 struct ehea_port *port = netdev_priv(dev);
2130 struct ehea_adapter *adapter = port->adapter;
2131 struct hcp_ehea_port_cb1 *cb1;
2132 int index;
2133 u64 hret;
2134 int err = 0;
2135
2136 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2137 if (!cb1) {
2138 pr_err("no mem for cb1\n");
2139 err = -ENOMEM;
2140 goto out;
2141 }
2142
2143 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2144 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2145 if (hret != H_SUCCESS) {
2146 pr_err("query_ehea_port failed\n");
2147 err = -EINVAL;
2148 goto out;
2149 }
2150
2151 index = (vid / 64);
2152 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2153
2154 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2155 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2156 if (hret != H_SUCCESS) {
2157 pr_err("modify_ehea_port failed\n");
2158 err = -EINVAL;
2159 }
2160 out:
2161 free_page((unsigned long)cb1);
2162 return err;
2163 }
2164
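/*
 * Walk a QP through its activation states using query/modify hcall pairs:
 * INITIALIZED, then INITIALIZED+ENABLED, then ENABLED+RDY2SND.  Each
 * modify touches only the QP_CTL_REG field, and the QP is re-queried after
 * every step so a failure is reported at the exact stage it occurred.
 */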
2165 static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2166 {
2167 int ret = -EIO;
2168 u64 hret;
2169 u16 dummy16 = 0;
2170 u64 dummy64 = 0;
2171 struct hcp_modify_qp_cb0 *cb0;
2172
2173 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2174 if (!cb0) {
2175 ret = -ENOMEM;
2176 goto out;
2177 }
2178
2179 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2180 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2181 if (hret != H_SUCCESS) {
2182 pr_err("query_ehea_qp failed (1)\n");
2183 goto out;
2184 }
2185
2186 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2187 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2188 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2189 &dummy64, &dummy64, &dummy16, &dummy16);
2190 if (hret != H_SUCCESS) {
2191 pr_err("modify_ehea_qp failed (1)\n");
2192 goto out;
2193 }
2194
2195 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2196 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2197 if (hret != H_SUCCESS) {
2198 pr_err("query_ehea_qp failed (2)\n");
2199 goto out;
2200 }
2201
2202 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2203 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2204 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2205 &dummy64, &dummy64, &dummy16, &dummy16);
2206 if (hret != H_SUCCESS) {
2207 pr_err("modify_ehea_qp failed (2)\n");
2208 goto out;
2209 }
2210
2211 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2212 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2213 if (hret != H_SUCCESS) {
2214 pr_err("query_ehea_qp failed (3)\n");
2215 goto out;
2216 }
2217
2218 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2219 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2220 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2221 &dummy64, &dummy64, &dummy16, &dummy16);
2222 if (hret != H_SUCCESS) {
2223 pr_err("modify_ehea_qp failed (3)\n");
2224 goto out;
2225 }
2226
2227 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2228 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2229 if (hret != H_SUCCESS) {
2230 pr_err("query_ehea_qp failed (4)\n");
2231 goto out;
2232 }
2233
2234 ret = 0;
2235 out:
2236 free_page((unsigned long)cb0);
2237 return ret;
2238 }
2239
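/*
 * Create the per-port event queue and size the queue pairs from the
 * module parameters: the receive completion queue must cover all three
 * receive queues (rq1 + rq2 + rq3 entries), while the send completion
 * queue is sized at twice the send queue.
 */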
2240 static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2241 {
2242 int ret, i;
2243 struct port_res_cfg pr_cfg;
2244 enum ehea_eq_type eq_type = EHEA_EQ;
2245
2246 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2247 EHEA_MAX_ENTRIES_EQ, 1);
2248 if (!port->qp_eq) {
2249 ret = -EINVAL;
2250 pr_err("ehea_create_eq failed (qp_eq)\n");
2251 goto out_kill_eq;
2252 }
2253
2254 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2255 pr_cfg.max_entries_scq = sq_entries * 2;
2256 pr_cfg.max_entries_sq = sq_entries;
2257 pr_cfg.max_entries_rq1 = rq1_entries;
2258 pr_cfg.max_entries_rq2 = rq2_entries;
2259 pr_cfg.max_entries_rq3 = rq3_entries;
2267
2268 for (i = 0; i < def_qps; i++) {
2269 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2270 if (ret)
2271 goto out_clean_pr;
2272 }
2279
2280 return 0;
2281
2282 out_clean_pr:
2283 while (--i >= 0)
2284 ehea_clean_portres(port, &port->port_res[i]);
2285
2286 out_kill_eq:
2287 ehea_destroy_eq(port->qp_eq);
2288 return ret;
2289 }
2290
2291 static int ehea_clean_all_portres(struct ehea_port *port)
2292 {
2293 int ret = 0;
2294 int i;
2295
2296 for (i = 0; i < port->num_def_qps; i++)
2297 ret |= ehea_clean_portres(port, &port->port_res[i]);
2298
2299 ret |= ehea_destroy_eq(port->qp_eq);
2300
2301 return ret;
2302 }
2303
2304 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2305 {
2306 if (adapter->active_ports)
2307 return;
2308
2309 ehea_rem_mr(&adapter->mr);
2310 }
2311
2312 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2313 {
2314 if (adapter->active_ports)
2315 return 0;
2316
2317 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2318 }
2319
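/*
 * Bring the port up: create the queue resources, set the port's default
 * QP, register the interrupts, walk every QP to RDY2SND, pre-fill the
 * receive queues and finally register the broadcast MAC address.
 */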
2320 static int ehea_up(struct net_device *dev)
2321 {
2322 int ret, i;
2323 struct ehea_port *port = netdev_priv(dev);
2324
2325 if (port->state == EHEA_PORT_UP)
2326 return 0;
2327
2328 ret = ehea_port_res_setup(port, port->num_def_qps);
2329 if (ret) {
2330 netdev_err(dev, "port_res_failed\n");
2331 goto out;
2332 }
2333
2334 /* Set default QP for this port */
2335 ret = ehea_configure_port(port);
2336 if (ret) {
2337 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2338 goto out_clean_pr;
2339 }
2340
2341 ret = ehea_reg_interrupts(dev);
2342 if (ret) {
2343 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2344 goto out_clean_pr;
2345 }
2346
2347 for (i = 0; i < port->num_def_qps; i++) {
2348 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2349 if (ret) {
2350 netdev_err(dev, "activate_qp failed\n");
2351 goto out_free_irqs;
2352 }
2353 }
2354
2355 for (i = 0; i < port->num_def_qps; i++) {
2356 ret = ehea_fill_port_res(&port->port_res[i]);
2357 if (ret) {
2358 netdev_err(dev, "out_free_irqs\n");
2359 goto out_free_irqs;
2360 }
2361 }
2362
2363 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2364 if (ret) {
2365 ret = -EIO;
2366 goto out_free_irqs;
2367 }
2368
2369 port->state = EHEA_PORT_UP;
2370
2371 ret = 0;
2372 goto out;
2373
2374 out_free_irqs:
2375 ehea_free_interrupts(dev);
2376
2377 out_clean_pr:
2378 ehea_clean_all_portres(port);
2379 out:
2380 if (ret)
2381 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2382
2383 ehea_update_bcmc_registrations();
2384 ehea_update_firmware_handles();
2385
2386 return ret;
2387 }
2388
2389 static void port_napi_disable(struct ehea_port *port)
2390 {
2391 int i;
2392
2393 for (i = 0; i < port->num_def_qps; i++)
2394 napi_disable(&port->port_res[i].napi);
2395 }
2396
2397 static void port_napi_enable(struct ehea_port *port)
2398 {
2399 int i;
2400
2401 for (i = 0; i < port->num_def_qps; i++)
2402 napi_enable(&port->port_res[i].napi);
2403 }
2404
2405 static int ehea_open(struct net_device *dev)
2406 {
2407 int ret;
2408 struct ehea_port *port = netdev_priv(dev);
2409
2410 mutex_lock(&port->port_lock);
2411
2412 netif_info(port, ifup, dev, "enabling port\n");
2413
2414 netif_carrier_off(dev);
2415
2416 ret = ehea_up(dev);
2417 if (!ret) {
2418 port_napi_enable(port);
2419 netif_tx_start_all_queues(dev);
2420 }
2421
2422 mutex_unlock(&port->port_lock);
2423 schedule_delayed_work(&port->stats_work,
2424 round_jiffies_relative(msecs_to_jiffies(1000)));
2425
2426 return ret;
2427 }
2428
2429 static int ehea_down(struct net_device *dev)
2430 {
2431 int ret;
2432 struct ehea_port *port = netdev_priv(dev);
2433
2434 if (port->state == EHEA_PORT_DOWN)
2435 return 0;
2436
2437 ehea_drop_multicast_list(dev);
2438 ehea_allmulti(dev, 0);
2439 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2440
2441 ehea_free_interrupts(dev);
2442
2443 port->state = EHEA_PORT_DOWN;
2444
2445 ehea_update_bcmc_registrations();
2446
2447 ret = ehea_clean_all_portres(port);
2448 if (ret)
2449 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2450
2451 ehea_update_firmware_handles();
2452
2453 return ret;
2454 }
2455
2456 static int ehea_stop(struct net_device *dev)
2457 {
2458 int ret;
2459 struct ehea_port *port = netdev_priv(dev);
2460
2461 netif_info(port, ifdown, dev, "disabling port\n");
2462
2463 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2464 cancel_work_sync(&port->reset_task);
2465 cancel_delayed_work_sync(&port->stats_work);
2466 mutex_lock(&port->port_lock);
2467 netif_tx_stop_all_queues(dev);
2468 port_napi_disable(port);
2469 ret = ehea_down(dev);
2470 mutex_unlock(&port->port_lock);
2471 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2472 return ret;
2473 }
2474
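/*
 * Mark every send WQE as to-be-purged.  The walk deliberately operates on
 * a stack copy of the QP so that advancing the queue pointers in
 * ehea_get_swqe() leaves the live QP state untouched.
 */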
2475 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2476 {
2477 struct ehea_qp qp = *orig_qp;
2478 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2479 struct ehea_swqe *swqe;
2480 int wqe_index;
2481 int i;
2482
2483 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2484 swqe = ehea_get_swqe(&qp, &wqe_index);
2485 swqe->tx_control |= EHEA_SWQE_PURGE;
2486 }
2487 }
2488
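/*
 * Wait (up to 100ms per queue) on port->swqe_avail_wq until each send
 * queue has drained, i.e. until swqe_avail climbs back to (nearly) the
 * full send queue size.
 */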
2489 static void ehea_flush_sq(struct ehea_port *port)
2490 {
2491 int i;
2492
2493 for (i = 0; i < port->num_def_qps; i++) {
2494 struct ehea_port_res *pr = &port->port_res[i];
2495 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2496 int ret;
2497
2498 ret = wait_event_timeout(port->swqe_avail_wq,
2499 atomic_read(&pr->swqe_avail) >= swqe_max,
2500 msecs_to_jiffies(100));
2501
2502 if (!ret) {
2503 pr_err("WARNING: sq not flushed completely\n");
2504 break;
2505 }
2506 }
2507 }
2508
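/*
 * Quiesce all queue pairs ahead of a memory region change: purge the
 * outstanding send WQEs, clear the ENABLED bit in each QP's control
 * register and deregister the per-queue shared memory regions so the
 * adapter-wide MR can be torn down (see ehea_rereg_mrs()).
 */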
2509 static int ehea_stop_qps(struct net_device *dev)
2510 {
2511 struct ehea_port *port = netdev_priv(dev);
2512 struct ehea_adapter *adapter = port->adapter;
2513 struct hcp_modify_qp_cb0 *cb0;
2514 int ret = -EIO;
2515 int dret;
2516 int i;
2517 u64 hret;
2518 u64 dummy64 = 0;
2519 u16 dummy16 = 0;
2520
2521 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2522 if (!cb0) {
2523 ret = -ENOMEM;
2524 goto out;
2525 }
2526
2527 for (i = 0; i < (port->num_def_qps); i++) {
2528 struct ehea_port_res *pr = &port->port_res[i];
2529 struct ehea_qp *qp = pr->qp;
2530
2531 /* Purge send queue */
2532 ehea_purge_sq(qp);
2533
2534 /* Disable queue pair */
2535 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2536 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2537 cb0);
2538 if (hret != H_SUCCESS) {
2539 pr_err("query_ehea_qp failed (1)\n");
2540 goto out;
2541 }
2542
2543 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2544 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2545
2546 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2547 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2548 1), cb0, &dummy64,
2549 &dummy64, &dummy16, &dummy16);
2550 if (hret != H_SUCCESS) {
2551 pr_err("modify_ehea_qp failed (1)\n");
2552 goto out;
2553 }
2554
2555 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2556 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2557 cb0);
2558 if (hret != H_SUCCESS) {
2559 pr_err("query_ehea_qp failed (2)\n");
2560 goto out;
2561 }
2562
2563 /* deregister shared memory regions */
2564 dret = ehea_rem_smrs(pr);
2565 if (dret) {
2566 pr_err("unreg shared memory region failed\n");
2567 goto out;
2568 }
2569 }
2570
2571 ret = 0;
2572 out:
2573 free_page((unsigned long)cb0);
2574
2575 return ret;
2576 }
2577
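/*
 * After the adapter MR has been re-registered the receive lkey changes,
 * so every posted RQ2/RQ3 WQE is rewritten with the new lkey and, where an
 * skb is still attached, a freshly translated buffer address.  As in
 * ehea_purge_sq(), the walk uses a stack copy of the QP.
 */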
2578 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2579 {
2580 struct ehea_qp qp = *orig_qp;
2581 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2582 struct ehea_rwqe *rwqe;
2583 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2584 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2585 struct sk_buff *skb;
2586 u32 lkey = pr->recv_mr.lkey;
2589 int i;
2590 int index;
2591
2592 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2593 rwqe = ehea_get_next_rwqe(&qp, 2);
2594 rwqe->sg_list[0].l_key = lkey;
2595 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2596 skb = skba_rq2[index];
2597 if (skb)
2598 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2599 }
2600
2601 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2602 rwqe = ehea_get_next_rwqe(&qp, 3);
2603 rwqe->sg_list[0].l_key = lkey;
2604 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2605 skb = skba_rq3[index];
2606 if (skb)
2607 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2608 }
2609 }
2610
2611 static int ehea_restart_qps(struct net_device *dev)
2612 {
2613 struct ehea_port *port = netdev_priv(dev);
2614 struct ehea_adapter *adapter = port->adapter;
2615 int ret = 0;
2616 int i;
2617
2618 struct hcp_modify_qp_cb0 *cb0;
2619 u64 hret;
2620 u64 dummy64 = 0;
2621 u16 dummy16 = 0;
2622
2623 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2624 if (!cb0)
2625 return -ENOMEM;
2626
2627 for (i = 0; i < (port->num_def_qps); i++) {
2628 struct ehea_port_res *pr = &port->port_res[i];
2629 struct ehea_qp *qp = pr->qp;
2630
2631 ret = ehea_gen_smrs(pr);
2632 if (ret) {
2633 netdev_err(dev, "creation of shared memory regions failed\n");
2634 goto out;
2635 }
2636
2637 ehea_update_rqs(qp, pr);
2638
2639 /* Enable queue pair */
2640 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2641 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2642 cb0);
2643 if (hret != H_SUCCESS) {
2644 netdev_err(dev, "query_ehea_qp failed (1)\n");
2645 ret = -EFAULT;
2646 goto out;
2647 }
2648
2649 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2650 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2651
2652 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2653 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2654 1), cb0, &dummy64,
2655 &dummy64, &dummy16, &dummy16);
2656 if (hret != H_SUCCESS) {
2657 netdev_err(dev, "modify_ehea_qp failed (1)\n");
2658 ret = -EFAULT;
2659 goto out;
2660 }
2661
2662 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2663 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2664 cb0);
2665 if (hret != H_SUCCESS) {
2666 netdev_err(dev, "query_ehea_qp failed (2)\n");
2667 ret = -EFAULT;
2668 goto out;
2669 }
2670
2671 /* refill entire queue */
2672 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2673 ehea_refill_rq2(pr, 0);
2674 ehea_refill_rq3(pr, 0);
2675 }
2676 out:
2677 free_page((unsigned long)cb0);
2678
2679 return ret;
2680 }
2681
2682 static void ehea_reset_port(struct work_struct *work)
2683 {
2684 int ret;
2685 struct ehea_port *port =
2686 container_of(work, struct ehea_port, reset_task);
2687 struct net_device *dev = port->netdev;
2688
2689 mutex_lock(&dlpar_mem_lock);
2690 port->resets++;
2691 mutex_lock(&port->port_lock);
2692 netif_tx_disable(dev);
2693
2694 port_napi_disable(port);
2695
2696 ehea_down(dev);
2697
2698 ret = ehea_up(dev);
2699 if (ret)
2700 goto out;
2701
2702 ehea_set_multicast_list(dev);
2703
2704 netif_info(port, timer, dev, "reset successful\n");
2705
2706 port_napi_enable(port);
2707
2708 netif_tx_wake_all_queues(dev);
2709 out:
2710 mutex_unlock(&port->port_lock);
2711 mutex_unlock(&dlpar_mem_lock);
2712 }
2713
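/*
 * LPAR memory layout changed (DLPAR).  For every adapter with active
 * ports: stop traffic and quiesce the QPs of all ports that are up, drop
 * the old kernel memory region, then register a fresh MR (the busmap has
 * already been updated by the memory notifier) and restart the QPs.
 * __EHEA_STOP_XFER is cleared in between so the xmit path resumes.
 */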
2714 static void ehea_rereg_mrs(void)
2715 {
2716 int ret, i;
2717 struct ehea_adapter *adapter;
2718
2719 pr_info("LPAR memory changed - re-initializing driver\n");
2720
2721 list_for_each_entry(adapter, &adapter_list, list)
2722 if (adapter->active_ports) {
2723 /* Shutdown all ports */
2724 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2725 struct ehea_port *port = adapter->port[i];
2726 struct net_device *dev;
2727
2728 if (!port)
2729 continue;
2730
2731 dev = port->netdev;
2732
2733 if (dev->flags & IFF_UP) {
2734 mutex_lock(&port->port_lock);
2735 netif_tx_disable(dev);
2736 ehea_flush_sq(port);
2737 ret = ehea_stop_qps(dev);
2738 if (ret) {
2739 mutex_unlock(&port->port_lock);
2740 goto out;
2741 }
2742 port_napi_disable(port);
2743 mutex_unlock(&port->port_lock);
2744 }
2745 reset_sq_restart_flag(port);
2746 }
2747
2748 /* Unregister old memory region */
2749 ret = ehea_rem_mr(&adapter->mr);
2750 if (ret) {
2751 pr_err("unregister MR failed - driver inoperable!\n");
2752 goto out;
2753 }
2754 }
2755
2756 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2757
2758 list_for_each_entry(adapter, &adapter_list, list)
2759 if (adapter->active_ports) {
2760 /* Register new memory region */
2761 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2762 if (ret) {
2763 pr_err("register MR failed - driver inoperable!\n");
2764 goto out;
2765 }
2766
2767 /* Restart all ports */
2768 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2769 struct ehea_port *port = adapter->port[i];
2770
2771 if (port) {
2772 struct net_device *dev = port->netdev;
2773
2774 if (dev->flags & IFF_UP) {
2775 mutex_lock(&port->port_lock);
2776 ret = ehea_restart_qps(dev);
2777 if (!ret) {
2778 check_sqs(port);
2779 port_napi_enable(port);
2780 netif_tx_wake_all_queues(dev);
2781 } else {
2782 netdev_err(dev, "Unable to restart QPS\n");
2783 }
2784 mutex_unlock(&port->port_lock);
2785 }
2786 }
2787 }
2788 }
2789 pr_info("re-initializing driver complete\n");
2790 out:
2791 return;
2792 }
2793
2794 static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
2795 {
2796 struct ehea_port *port = netdev_priv(dev);
2797
2798 if (netif_carrier_ok(dev) &&
2799 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2800 ehea_schedule_port_reset(port);
2801 }
2802
2803 static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2804 {
2805 struct hcp_query_ehea *cb;
2806 u64 hret;
2807 int ret;
2808
2809 cb = (void *)get_zeroed_page(GFP_KERNEL);
2810 if (!cb) {
2811 ret = -ENOMEM;
2812 goto out;
2813 }
2814
2815 hret = ehea_h_query_ehea(adapter->handle, cb);
2816
2817 if (hret != H_SUCCESS) {
2818 ret = -EIO;
2819 goto out_herr;
2820 }
2821
2822 adapter->max_mc_mac = cb->max_mc_mac - 1;
2823 ret = 0;
2824
2825 out_herr:
2826 free_page((unsigned long)cb);
2827 out:
2828 return ret;
2829 }
2830
2831 static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2832 {
2833 struct hcp_ehea_port_cb4 *cb4;
2834 u64 hret;
2835 int ret = 0;
2836
2837 *jumbo = 0;
2838
2839 /* (Try to) enable *jumbo frames */
2840 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2841 if (!cb4) {
2842 pr_err("no mem for cb4\n");
2843 ret = -ENOMEM;
2844 goto out;
2845 }
2846 
2847 hret = ehea_h_query_ehea_port(port->adapter->handle,
2848 port->logical_port_id,
2849 H_PORT_CB4,
2850 H_PORT_CB4_JUMBO, cb4);
2851 if (hret == H_SUCCESS) {
2852 if (cb4->jumbo_frame)
2853 *jumbo = 1;
2854 else {
2855 cb4->jumbo_frame = 1;
2856 hret = ehea_h_modify_ehea_port(port->adapter->handle,
2857 port->logical_port_id,
2858 H_PORT_CB4,
2859 H_PORT_CB4_JUMBO, cb4);
2860 if (hret == H_SUCCESS)
2861 *jumbo = 1;
2862 }
2863 } else
2864 ret = -EINVAL;
2865 
2866 free_page((unsigned long)cb4);
2870 out:
2871 return ret;
2872 }
2873
2874 static ssize_t log_port_id_show(struct device *dev,
2875 struct device_attribute *attr, char *buf)
2876 {
2877 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2878 return sprintf(buf, "%d", port->logical_port_id);
2879 }
2880
2881 static DEVICE_ATTR_RO(log_port_id);
2882
2883 static void logical_port_release(struct device *dev)
2884 {
2885 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2886 of_node_put(port->ofdev.dev.of_node);
2887 }
2888
2889 static struct device *ehea_register_port(struct ehea_port *port,
2890 struct device_node *dn)
2891 {
2892 int ret;
2893
2894 port->ofdev.dev.of_node = of_node_get(dn);
2895 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2896 port->ofdev.dev.bus = &ibmebus_bus_type;
2897
2898 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2899 port->ofdev.dev.release = logical_port_release;
2900
2901 ret = of_device_register(&port->ofdev);
2902 if (ret) {
2903 pr_err("failed to register device. ret=%d\n", ret);
2904 put_device(&port->ofdev.dev);
2905 goto out;
2906 }
2907
2908 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2909 if (ret) {
2910 pr_err("failed to register attributes, ret=%d\n", ret);
2911 goto out_unreg_of_dev;
2912 }
2913
2914 return &port->ofdev.dev;
2915
2916 out_unreg_of_dev:
2917 of_device_unregister(&port->ofdev);
2918 out:
2919 return NULL;
2920 }
2921
2922 static void ehea_unregister_port(struct ehea_port *port)
2923 {
2924 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2925 of_device_unregister(&port->ofdev);
2926 }
2927
2928 static const struct net_device_ops ehea_netdev_ops = {
2929 .ndo_open = ehea_open,
2930 .ndo_stop = ehea_stop,
2931 .ndo_start_xmit = ehea_start_xmit,
2932 .ndo_get_stats64 = ehea_get_stats64,
2933 .ndo_set_mac_address = ehea_set_mac_addr,
2934 .ndo_validate_addr = eth_validate_addr,
2935 .ndo_set_rx_mode = ehea_set_multicast_list,
2936 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
2937 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
2938 .ndo_tx_timeout = ehea_tx_watchdog,
2939 };
2940
2941 static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2942 u32 logical_port_id,
2943 struct device_node *dn)
2944 {
2945 int ret;
2946 struct net_device *dev;
2947 struct ehea_port *port;
2948 struct device *port_dev;
2949 int jumbo;
2950
2951 /* allocate memory for the port structures */
2952 dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2953
2954 if (!dev) {
2955 ret = -ENOMEM;
2956 goto out_err;
2957 }
2958
2959 port = netdev_priv(dev);
2960
2961 mutex_init(&port->port_lock);
2962 port->state = EHEA_PORT_DOWN;
2963 port->sig_comp_iv = sq_entries / 10;
2964
2965 port->adapter = adapter;
2966 port->netdev = dev;
2967 port->logical_port_id = logical_port_id;
2968
2969 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2970
2971 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2972 if (!port->mc_list) {
2973 ret = -ENOMEM;
2974 goto out_free_ethdev;
2975 }
2976
2977 INIT_LIST_HEAD(&port->mc_list->list);
2978
2979 ret = ehea_sense_port_attr(port);
2980 if (ret)
2981 goto out_free_mc_list;
2982
2983 netif_set_real_num_rx_queues(dev, port->num_def_qps);
2984 netif_set_real_num_tx_queues(dev, port->num_def_qps);
2985
2986 port_dev = ehea_register_port(port, dn);
2987 if (!port_dev)
2988 goto out_free_mc_list;
2989
2990 SET_NETDEV_DEV(dev, port_dev);
2991
2992 /* initialize net_device structure */
2993 eth_hw_addr_set(dev, (u8 *)&port->mac_addr);
2994
2995 dev->netdev_ops = &ehea_netdev_ops;
2996 ehea_set_ethtool_ops(dev);
2997
2998 dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
2999 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
3000 dev->features = NETIF_F_SG | NETIF_F_TSO |
3001 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3002 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3003 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
3004 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3005 NETIF_F_IP_CSUM;
3006 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3007
3008 /* MTU range: 68 - 9022 */
3009 dev->min_mtu = ETH_MIN_MTU;
3010 dev->max_mtu = EHEA_MAX_PACKET_SIZE;
3011
3012 INIT_WORK(&port->reset_task, ehea_reset_port);
3013 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3014
3015 init_waitqueue_head(&port->swqe_avail_wq);
3016 init_waitqueue_head(&port->restart_wq);
3017
3018 ret = register_netdev(dev);
3019 if (ret) {
3020 pr_err("register_netdev failed. ret=%d\n", ret);
3021 goto out_unreg_port;
3022 }
3023
3024 ret = ehea_get_jumboframe_status(port, &jumbo);
3025 if (ret)
3026 netdev_err(dev, "failed determining jumbo frame status\n");
3027
3028 netdev_info(dev, "Jumbo frames are %sabled\n",
3029 jumbo == 1 ? "en" : "dis");
3030
3031 adapter->active_ports++;
3032
3033 return port;
3034
3035 out_unreg_port:
3036 ehea_unregister_port(port);
3037
3038 out_free_mc_list:
3039 kfree(port->mc_list);
3040
3041 out_free_ethdev:
3042 free_netdev(dev);
3043
3044 out_err:
3045 pr_err("setting up logical port with id=%d failed, ret=%d\n",
3046 logical_port_id, ret);
3047 return NULL;
3048 }
3049
3050 static void ehea_shutdown_single_port(struct ehea_port *port)
3051 {
3052 struct ehea_adapter *adapter = port->adapter;
3053
3054 cancel_work_sync(&port->reset_task);
3055 cancel_delayed_work_sync(&port->stats_work);
3056 unregister_netdev(port->netdev);
3057 ehea_unregister_port(port);
3058 kfree(port->mc_list);
3059 free_netdev(port->netdev);
3060 adapter->active_ports--;
3061 }
3062
3063 static int ehea_setup_ports(struct ehea_adapter *adapter)
3064 {
3065 struct device_node *lhea_dn;
3066 struct device_node *eth_dn;
3067
3068 const u32 *dn_log_port_id;
3069 int i = 0;
3070
3071 lhea_dn = adapter->ofdev->dev.of_node;
3072 for_each_child_of_node(lhea_dn, eth_dn) {
3073 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3074 NULL);
3075 if (!dn_log_port_id) {
3076 pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
3077 continue;
3078 }
3079
3080 if (ehea_add_adapter_mr(adapter)) {
3081 pr_err("creating MR failed\n");
3082 of_node_put(eth_dn);
3083 return -EIO;
3084 }
3085
3086 adapter->port[i] = ehea_setup_single_port(adapter,
3087 *dn_log_port_id,
3088 eth_dn);
3089 if (adapter->port[i])
3090 netdev_info(adapter->port[i]->netdev,
3091 "logical port id #%d\n", *dn_log_port_id);
3092 else
3093 ehea_remove_adapter_mr(adapter);
3094
3095 i++;
3096 }
3097 return 0;
3098 }
3099
3100 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3101 u32 logical_port_id)
3102 {
3103 struct device_node *lhea_dn;
3104 struct device_node *eth_dn;
3105 const u32 *dn_log_port_id;
3106
3107 lhea_dn = adapter->ofdev->dev.of_node;
3108 for_each_child_of_node(lhea_dn, eth_dn) {
3109 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3110 NULL);
3111 if (dn_log_port_id)
3112 if (*dn_log_port_id == logical_port_id)
3113 return eth_dn;
3114 }
3115
3116 return NULL;
3117 }
3118
3119 static ssize_t probe_port_store(struct device *dev,
3120 struct device_attribute *attr,
3121 const char *buf, size_t count)
3122 {
3123 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3124 struct ehea_port *port;
3125 struct device_node *eth_dn = NULL;
3126 int i;
3127
3128 u32 logical_port_id;
3129
3130 sscanf(buf, "%u", &logical_port_id);
3131
3132 port = ehea_get_port(adapter, logical_port_id);
3133
3134 if (port) {
3135 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3136 logical_port_id);
3137 return -EINVAL;
3138 }
3139
3140 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3141
3142 if (!eth_dn) {
3143 pr_info("no logical port with id %d found\n", logical_port_id);
3144 return -EINVAL;
3145 }
3146
3147 if (ehea_add_adapter_mr(adapter)) {
3148 pr_err("creating MR failed\n");
3149 of_node_put(eth_dn);
3150 return -EIO;
3151 }
3152
3153 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3154
3155 of_node_put(eth_dn);
3156
3157 if (port) {
3158 for (i = 0; i < EHEA_MAX_PORTS; i++)
3159 if (!adapter->port[i]) {
3160 adapter->port[i] = port;
3161 break;
3162 }
3163
3164 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3165 logical_port_id);
3166 } else {
3167 ehea_remove_adapter_mr(adapter);
3168 return -EIO;
3169 }
3170
3171 return (ssize_t) count;
3172 }
3173
3174 static ssize_t remove_port_store(struct device *dev,
3175 struct device_attribute *attr,
3176 const char *buf, size_t count)
3177 {
3178 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3179 struct ehea_port *port;
3180 int i;
3181 u32 logical_port_id;
3182
3183 sscanf(buf, "%u", &logical_port_id);
3184
3185 port = ehea_get_port(adapter, logical_port_id);
3186
3187 if (port) {
3188 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3189 logical_port_id);
3190
3191 ehea_shutdown_single_port(port);
3192
3193 for (i = 0; i < EHEA_MAX_PORTS; i++)
3194 if (adapter->port[i] == port) {
3195 adapter->port[i] = NULL;
3196 break;
3197 }
3198 } else {
3199 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3200 logical_port_id);
3201 return -EINVAL;
3202 }
3203
3204 ehea_remove_adapter_mr(adapter);
3205
3206 return (ssize_t) count;
3207 }
3208
3209 static DEVICE_ATTR_WO(probe_port);
3210 static DEVICE_ATTR_WO(remove_port);
3211
3212 static int ehea_create_device_sysfs(struct platform_device *dev)
3213 {
3214 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3215 if (ret)
3216 goto out;
3217
3218 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3219 out:
3220 return ret;
3221 }
3222
3223 static void ehea_remove_device_sysfs(struct platform_device *dev)
3224 {
3225 device_remove_file(&dev->dev, &dev_attr_probe_port);
3226 device_remove_file(&dev->dev, &dev_attr_remove_port);
3227 }
3228
3229 static int ehea_reboot_notifier(struct notifier_block *nb,
3230 unsigned long action, void *unused)
3231 {
3232 if (action == SYS_RESTART) {
3233 pr_info("Reboot: freeing all eHEA resources\n");
3234 ibmebus_unregister_driver(&ehea_driver);
3235 }
3236 return NOTIFY_DONE;
3237 }
3238
3239 static struct notifier_block ehea_reboot_nb = {
3240 .notifier_call = ehea_reboot_notifier,
3241 };
3242
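/*
 * Memory hotplug notifier: for blocks going online or offline (and for a
 * canceled offline, which re-adds the block) the section bitmap is updated
 * and all memory regions are re-registered via ehea_rereg_mrs(), all under
 * dlpar_mem_lock so this cannot race with a port reset.
 */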
3243 static int ehea_mem_notifier(struct notifier_block *nb,
3244 unsigned long action, void *data)
3245 {
3246 int ret = NOTIFY_BAD;
3247 struct memory_notify *arg = data;
3248
3249 mutex_lock(&dlpar_mem_lock);
3250
3251 switch (action) {
3252 case MEM_CANCEL_OFFLINE:
3253 pr_info("memory offlining canceled");
3254 fallthrough; /* re-add canceled memory block */
3255
3256 case MEM_ONLINE:
3257 pr_info("memory is going online");
3258 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3259 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3260 goto out_unlock;
3261 ehea_rereg_mrs();
3262 break;
3263
3264 case MEM_GOING_OFFLINE:
3265 pr_info("memory is going offline");
3266 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3267 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3268 goto out_unlock;
3269 ehea_rereg_mrs();
3270 break;
3271
3272 default:
3273 break;
3274 }
3275
3276 ehea_update_firmware_handles();
3277 ret = NOTIFY_OK;
3278
3279 out_unlock:
3280 mutex_unlock(&dlpar_mem_lock);
3281 return ret;
3282 }
3283
3284 static struct notifier_block ehea_mem_nb = {
3285 .notifier_call = ehea_mem_notifier,
3286 };
3287
3288 static void ehea_crash_handler(void)
3289 {
3290 int i;
3291
3292 if (ehea_fw_handles.arr)
3293 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3294 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3295 ehea_fw_handles.arr[i].fwh,
3296 FORCE_FREE);
3297
3298 if (ehea_bcmc_regs.arr)
3299 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3300 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3301 ehea_bcmc_regs.arr[i].port_id,
3302 ehea_bcmc_regs.arr[i].reg_type,
3303 ehea_bcmc_regs.arr[i].macaddr,
3304 0, H_DEREG_BCMC);
3305 }
3306
3307 static atomic_t ehea_memory_hooks_registered;
3308
3309 /* Register memory hooks on probe of first adapter */
3310 static int ehea_register_memory_hooks(void)
3311 {
3312 int ret = 0;
3313
3314 if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
3315 return 0;
3316
3317 ret = ehea_create_busmap();
3318 if (ret) {
3319 pr_info("ehea_create_busmap failed\n");
3320 goto out;
3321 }
3322
3323 ret = register_reboot_notifier(&ehea_reboot_nb);
3324 if (ret) {
3325 pr_info("register_reboot_notifier failed\n");
3326 goto out;
3327 }
3328
3329 ret = register_memory_notifier(&ehea_mem_nb);
3330 if (ret) {
3331 pr_info("register_memory_notifier failed\n");
3332 goto out2;
3333 }
3334
3335 ret = crash_shutdown_register(ehea_crash_handler);
3336 if (ret) {
3337 pr_info("crash_shutdown_register failed\n");
3338 goto out3;
3339 }
3340
3341 return 0;
3342
3343 out3:
3344 unregister_memory_notifier(&ehea_mem_nb);
3345 out2:
3346 unregister_reboot_notifier(&ehea_reboot_nb);
3347 out:
3348 atomic_dec(&ehea_memory_hooks_registered);
3349 return ret;
3350 }
3351
3352 static void ehea_unregister_memory_hooks(void)
3353 {
3354 /* Only remove the hooks if we've registered them */
3355 if (atomic_read(&ehea_memory_hooks_registered) == 0)
3356 return;
3357
3358 unregister_reboot_notifier(&ehea_reboot_nb);
3359 if (crash_shutdown_unregister(ehea_crash_handler))
3360 pr_info("failed unregistering crash handler\n");
3361 unregister_memory_notifier(&ehea_mem_nb);
3362 }
3363
3364 static int ehea_probe_adapter(struct platform_device *dev)
3365 {
3366 struct ehea_adapter *adapter;
3367 const u64 *adapter_handle;
3368 int ret;
3369 int i;
3370
3371 ret = ehea_register_memory_hooks();
3372 if (ret)
3373 return ret;
3374
3375 if (!dev || !dev->dev.of_node) {
3376 pr_err("Invalid ibmebus device probed\n");
3377 return -EINVAL;
3378 }
3379
3380 adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
3381 if (!adapter) {
3382 ret = -ENOMEM;
3383 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3384 goto out;
3385 }
3386
3387 list_add(&adapter->list, &adapter_list);
3388
3389 adapter->ofdev = dev;
3390
3391 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3392 NULL);
3393 if (adapter_handle)
3394 adapter->handle = *adapter_handle;
3395
3396 if (!adapter->handle) {
3397 dev_err(&dev->dev, "failed getting handle for adapter"
3398 " '%pOF'\n", dev->dev.of_node);
3399 ret = -ENODEV;
3400 goto out_free_ad;
3401 }
3402
3403 adapter->pd = EHEA_PD_ID;
3404
3405 platform_set_drvdata(dev, adapter);
3406
3408 /* initialize adapter and ports */
3409 /* get adapter properties */
3410 ret = ehea_sense_adapter_attr(adapter);
3411 if (ret) {
3412 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3413 goto out_free_ad;
3414 }
3415
3416 adapter->neq = ehea_create_eq(adapter,
3417 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3418 if (!adapter->neq) {
3419 ret = -EIO;
3420 dev_err(&dev->dev, "NEQ creation failed\n");
3421 goto out_free_ad;
3422 }
3423
3424 tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);
3425
3426 ret = ehea_create_device_sysfs(dev);
3427 if (ret)
3428 goto out_kill_eq;
3429
3430 ret = ehea_setup_ports(adapter);
3431 if (ret) {
3432 dev_err(&dev->dev, "setup_ports failed\n");
3433 goto out_rem_dev_sysfs;
3434 }
3435
3436 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3437 ehea_interrupt_neq, 0,
3438 "ehea_neq", adapter);
3439 if (ret) {
3440 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3441 goto out_shutdown_ports;
3442 }
3443
3444 /* Handle any events that might be pending. */
3445 tasklet_hi_schedule(&adapter->neq_tasklet);
3446
3447 ret = 0;
3448 goto out;
3449
3450 out_shutdown_ports:
3451 for (i = 0; i < EHEA_MAX_PORTS; i++)
3452 if (adapter->port[i]) {
3453 ehea_shutdown_single_port(adapter->port[i]);
3454 adapter->port[i] = NULL;
3455 }
3456
3457 out_rem_dev_sysfs:
3458 ehea_remove_device_sysfs(dev);
3459
3460 out_kill_eq:
3461 ehea_destroy_eq(adapter->neq);
3462
3463 out_free_ad:
3464 list_del(&adapter->list);
3465
3466 out:
3467 ehea_update_firmware_handles();
3468
3469 return ret;
3470 }
3471
3472 static void ehea_remove(struct platform_device *dev)
3473 {
3474 struct ehea_adapter *adapter = platform_get_drvdata(dev);
3475 int i;
3476
3477 for (i = 0; i < EHEA_MAX_PORTS; i++)
3478 if (adapter->port[i]) {
3479 ehea_shutdown_single_port(adapter->port[i]);
3480 adapter->port[i] = NULL;
3481 }
3482
3483 ehea_remove_device_sysfs(dev);
3484
3485 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3486 tasklet_kill(&adapter->neq_tasklet);
3487
3488 ehea_destroy_eq(adapter->neq);
3489 ehea_remove_adapter_mr(adapter);
3490 list_del(&adapter->list);
3491
3492 ehea_update_firmware_handles();
3493 }
3494
3495 static int check_module_parm(void)
3496 {
3497 int ret = 0;
3498
3499 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3500 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3501 pr_info("Bad parameter: rq1_entries\n");
3502 ret = -EINVAL;
3503 }
3504 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3505 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3506 pr_info("Bad parameter: rq2_entries\n");
3507 ret = -EINVAL;
3508 }
3509 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3510 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3511 pr_info("Bad parameter: rq3_entries\n");
3512 ret = -EINVAL;
3513 }
3514 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3515 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3516 pr_info("Bad parameter: sq_entries\n");
3517 ret = -EINVAL;
3518 }
3519
3520 return ret;
3521 }
3522
3523 static ssize_t capabilities_show(struct device_driver *drv, char *buf)
3524 {
3525 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3526 }
3527
3528 static DRIVER_ATTR_RO(capabilities);
3529
3530 static int __init ehea_module_init(void)
3531 {
3532 int ret;
3533
3534 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3535
3536 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3537 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3538
3539 mutex_init(&ehea_fw_handles.lock);
3540 spin_lock_init(&ehea_bcmc_regs.lock);
3541
3542 ret = check_module_parm();
3543 if (ret)
3544 goto out;
3545
3546 ret = ibmebus_register_driver(&ehea_driver);
3547 if (ret) {
3548 pr_err("failed registering eHEA device driver on ebus\n");
3549 goto out;
3550 }
3551
3552 ret = driver_create_file(&ehea_driver.driver,
3553 &driver_attr_capabilities);
3554 if (ret) {
3555 pr_err("failed to register capabilities attribute, ret=%d\n",
3556 ret);
3557 goto out2;
3558 }
3559
3560 return ret;
3561
3562 out2:
3563 ibmebus_unregister_driver(&ehea_driver);
3564 out:
3565 return ret;
3566 }
3567
3568 static void __exit ehea_module_exit(void)
3569 {
3570 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3571 ibmebus_unregister_driver(&ehea_driver);
3572 ehea_unregister_memory_hooks();
3573 kfree(ehea_fw_handles.arr);
3574 kfree(ehea_bcmc_regs.arr);
3575 ehea_destroy_busmap();
3576 }
3577
3578 module_init(ehea_module_init);
3579 module_exit(ehea_module_exit);
3580