/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct platform_device *dev,
                                        const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
        .driver = {
                .name = "ehea",
                .owner = THIS_MODULE,
                .of_match_table = ehea_device_table,
        },
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

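/* Log @len bytes starting at @adr as hex words, 16 bytes per line, each
 * line prefixed with @msg. Used for debug dumps of CQEs, control blocks etc.
 */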
void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
                        msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

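/* Queue the port reset task unless resets are temporarily disabled for
 * this port (e.g. while the interface is being torn down).
 */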
static void ehea_schedule_port_reset(struct ehea_port *port)
{
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}

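/* Rebuild the global flat array of all firmware handles (QPs, CQs, EQs and
 * MRs) owned by registered adapters and their active ports, so that
 * shutdown paths (e.g. the reboot/kexec notifiers, per the includes above)
 * can release them in one pass.
 */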
static void ehea_update_firmware_handles(void)
{
        struct ehea_fw_handle_entry *arr = NULL;
        struct ehea_adapter *adapter;
        int num_adapters = 0;
        int num_ports = 0;
        int num_portres = 0;
        int i = 0;
        int num_fw_handles, k, l;

        /* Determine number of handles */
        mutex_lock(&ehea_fw_handles.lock);

        list_for_each_entry(adapter, &adapter_list, list) {
                num_adapters++;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_ports++;
                        num_portres += port->num_def_qps;
                }
        }

        num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

        if (num_fw_handles) {
                arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                if (num_adapters == 0)
                        break;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP) ||
                            (num_ports == 0))
                                continue;

                        for (l = 0; l < port->num_def_qps; l++) {
                                struct ehea_port_res *pr = &port->port_res[l];

                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->qp->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->eq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_mr.handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_mr.handle;
                        }
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = port->qp_eq->fw_handle;
                        num_ports--;
                }

                arr[i].adh = adapter->handle;
                arr[i++].fwh = adapter->neq->fw_handle;

                if (adapter->mr.handle) {
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = adapter->mr.handle;
                }
                num_adapters--;
        }

out_update:
        kfree(ehea_fw_handles.arr);
        ehea_fw_handles.arr = arr;
        ehea_fw_handles.num_entries = i;
out:
        mutex_unlock(&ehea_fw_handles.lock);
}

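/* Rebuild the global array of broadcast/multicast registrations for all
 * active ports; two entries (untagged and VLAN-all) are kept per address.
 */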
static void ehea_update_bcmc_registrations(void)
{
        unsigned long flags;
        struct ehea_bcmc_reg_entry *arr = NULL;
        struct ehea_adapter *adapter;
        struct ehea_mc_list *mc_entry;
        int num_registrations = 0;
        int i = 0;
        int k;

        spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

        /* Determine number of registrations */
        list_for_each_entry(adapter, &adapter_list, list)
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_registrations += 2; /* Broadcast registrations */

                        list_for_each_entry(mc_entry, &port->mc_list->list, list)
                                num_registrations += 2;
                }

        if (num_registrations) {
                arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        if (num_registrations == 0)
                                goto out_update;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_UNTAGGED;
                        arr[i++].macaddr = port->mac_addr;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_VLANID_ALL;
                        arr[i++].macaddr = port->mac_addr;
                        num_registrations -= 2;

                        list_for_each_entry(mc_entry,
                                            &port->mc_list->list, list) {
                                if (num_registrations == 0)
                                        goto out_update;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
                                arr[i++].macaddr = mc_entry->macaddr;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
                }
        }

out_update:
        kfree(ehea_bcmc_regs.arr);
        ehea_bcmc_regs.arr = arr;
        ehea_bcmc_regs.num_entries = i;
out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

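/* Fill @stats from the per-queue software counters; the multicast and
 * rx_errors values come from the cached copy maintained by
 * ehea_update_stats().
 */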
static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
                                        struct rtnl_link_stats64 *stats)
{
        struct ehea_port *port = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
                rx_bytes   += port->port_res[i].rx_bytes;
        }

        for (i = 0; i < port->num_def_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes   += port->port_res[i].tx_bytes;
        }

        stats->tx_packets = tx_packets;
        stats->rx_bytes = rx_bytes;
        stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;

        stats->multicast = port->stats.multicast;
        stats->rx_errors = port->stats.rx_errors;

        return stats;
}

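/* Delayed work: query the port's hardware statistics (H_PORT_CB2) and cache
 * the multicast and rx_error counts in port->stats; reschedules itself to
 * run once per second.
 */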
static void ehea_update_stats(struct work_struct *work)
{
        struct ehea_port *port =
                container_of(work, struct ehea_port, stats_work.work);
        struct net_device *dev = port->netdev;
        struct rtnl_link_stats64 *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret;

        cb2 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb2) {
                netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
                goto resched;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                netdev_err(dev, "query_ehea_port failed\n");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;

out_herr:
        free_page((unsigned long)cb2);
resched:
        schedule_delayed_work(&port->stats_work,
                              round_jiffies_relative(msecs_to_jiffies(1000)));
}

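/* Re-post up to @nr_of_wqes receive WQEs on low-latency RQ1, starting at
 * @index and stepping backwards through the skb ring; skbs that could not
 * be allocated are remembered in os_skbs and retried on the next refill.
 */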
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                if (nr_of_wqes > 0)
                        pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                netdev_info(dev, "Unable to allocate enough skb in the array\n");
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        if (nr_rq1a > pr->rq1_skba.len) {
                netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
                return;
        }

        for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i]) {
                        netdev_info(dev, "Not enough memory to allocate skb array\n");
                        break;
                }
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, i - 1);
}

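/* Common refill path for RQ2/RQ3: allocate skbs, map them and post receive
 * WQEs referencing their data buffers; returns -ENOMEM only once a queue
 * has effectively run dry.
 */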
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                                netdev_info(pr->port->netdev,
                                            "rq%i ran dry - no mem for skb\n",
                                            rq_nr);
                                ret = -ENOMEM;
                        }
                        break;
                }

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_kfree_skb(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE);
}

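/* Extract the receive queue number from a CQE and report whether the
 * completion is usable; a TCP checksum error with a zero header length is
 * tolerated (the checksum presumably could not be verified for such frames).
 */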
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe,
                                 struct ehea_port_res *pr)
{
        int length = cqe->num_bytes_transfered - 4;     /*remove CRC */

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, dev);

        /* The packet was not an IPV4 packet so a complemented checksum was
           calculated. The value is found in the Internet Checksum field. */
        if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~cqe->inet_checksum_value);
        } else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetch(pref);
                prefetch(pref + EHEA_CACHE_LINE);
                prefetch(pref + EHEA_CACHE_LINE * 2);
                prefetch(pref + EHEA_CACHE_LINE * 3);
        }

        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);
        }

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

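/* Account a receive error CQE, drop the affected RQ2/RQ3 skb, and trigger
 * a port reset on fatal errors; returns nonzero if a reset was scheduled.
 */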
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        pr_err("Critical receive error for QP %d. Resetting port.\n",
                               pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                ehea_schedule_port_reset(pr->port);
                return 1;
        }

        return 0;
}

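/* NAPI receive path: drain up to @budget receive completions, handing good
 * packets to the stack via GRO and refilling all three RQs with the number
 * of WQEs that were consumed.
 */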
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {
                                /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        netif_info(port, rx_err, dev,
                                                  "LL rq1: skb=NULL\n");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb) {
                                                netdev_err(dev, "Not enough memory to allocate skb\n");
                                                break;
                                        }
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe, pr);
                        } else if (rq == 2) {
                                /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq2: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq2++;
                        } else {
                                /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq3: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq3++;
                        }

                        processed_bytes += skb->len;

                        if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                                __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

                        napi_gro_receive(&pr->napi, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }

        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}

#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
        wake_up(&port->restart_wq);
}

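/* Post a marker SWQE (SWQE_RESTART_CHECK) on every send queue and wait up
 * to 100ms for the restart handshake to complete via restart_wq; if it
 * does not, the HW and SW queues are out of sync and the port is reset.
 */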
static void check_sqs(struct ehea_port *port)
{
        struct ehea_swqe *swqe;
        int swqe_index;
        int i, k;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
                atomic_dec(&pr->swqe_avail);

                swqe->tx_control |= EHEA_SWQE_PURGE;
                swqe->wr_id = SWQE_RESTART_CHECK;
                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
                swqe->immediate_data_length = 80;

                ehea_post_swqe(pr->qp, swqe);

                ret = wait_event_timeout(port->restart_wq,
                                         pr->sq_restart_flag == 0,
                                         msecs_to_jiffies(100));

                if (!ret) {
                        pr_err("HW/SW queues out of sync\n");
                        ehea_schedule_port_reset(pr->port);
                        return;
                }
        }
}


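/* Drain up to @my_quota send completions: free transmitted skbs, replenish
 * the available-SWQE counter and wake the TX queue once enough entries are
 * free again. Returns the last polled CQE (NULL when the CQ is empty).
 */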
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
                                                pr - &pr->port->port_res[0]);

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();

                if (cqe->wr_id == SWQE_RESTART_CHECK) {
                        pr->sq_restart_flag = 1;
                        swqe_av++;
                        break;
                }

                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        pr_err("Bad send completion status=0x%04X\n",
                               cqe->status);

                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

                        if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                                pr_err("Resetting port\n");
                                ehea_schedule_port_reset(pr->port);
                                break;
                        }
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {

                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_kfree_skb(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        wake_up(&pr->port->swqe_avail_wq);

        return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

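/* NAPI poll handler: process send completions and receive WQEs, then
 * re-arm the CQs' event notification and re-check both queues to close the
 * race between napi_complete() and new completions arriving.
 */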
static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                                napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int wqe_index;
        int rx = 0;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while (rx != budget) {
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                rmb();
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!napi_reschedule(napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        int i;

        for (i = 0; i < port->num_def_qps; i++)
                napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        napi_schedule(&pr->napi);

        return IRQ_HANDLED;
}

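/* QP affiliated-error interrupt: query the error data for each reported QP
 * and schedule a port reset when the hardware requests one, or on any CQ
 * or EQ error.
 */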
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;
        u64 resource_type, aer, aerr;
        int reset_port = 0;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
                       eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;

                resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                                &aer, &aerr);

                if (resource_type == EHEA_AER_RESTYPE_QP) {
                        if ((aer & EHEA_AER_RESET_MASK) ||
                            (aerr & EHEA_AERR_RESET_MASK))
                                 reset_port = 1;
                } else
                        reset_port = 1;   /* Reset in case of CQ or EQ error */

                eqe = ehea_poll_eq(port->qp_eq);
        }

        if (reset_port) {
                pr_err("Resetting port\n");
                ehea_schedule_port_reset(port);
        }

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

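/* Query H_PORT_CB0 and cache the port's MAC address, link speed/duplex and
 * number of default QPs; returns -EADDRNOTAVAIL if the reported MAC is
 * invalid.
 */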
int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        /* may be called via ehea_neq_tasklet() */
        cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!cb0) {
                pr_err("no mem for cb0\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb4) {
                pr_err("no mem for cb4\n");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        pr_err("Failed sensing port speed\n");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        pr_info("Hypervisor denied setting port speed\n");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        pr_err("Failed setting port speed\n");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        free_page((unsigned long)cb4);
out:
        return ret;
}

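/* Decode one event-queue entry from the NEQ and react to port state
 * changes and adapter/port malfunction events.
 */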
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;
        struct net_device *dev;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);
        if (!port) {
                pr_err("unknown portnum %x\n", portnum);
                return;
        }
        dev = port->netdev;

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */

                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(dev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        netdev_err(dev, "failed resensing port attributes\n");
                                        break;
                                }

                                netif_info(port, link, dev,
                                           "Logical port up: %dMbps %s Duplex\n",
                                           port->port_speed,
                                           port->full_duplex == 1 ?
                                           "Full" : "Half");

                                netif_carrier_on(dev);
                                netif_wake_queue(dev);
                        }
                } else
                        if (netif_carrier_ok(dev)) {
                                netif_info(port, link, dev,
                                           "Logical port down\n");
                                netif_carrier_off(dev);
                                netif_tx_disable(dev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        netif_info(port, link, dev,
                                   "Physical port up\n");
                        if (prop_carrier_state)
                                netif_carrier_on(dev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        netif_info(port, link, dev,
                                   "Physical port down\n");
                        if (prop_carrier_state)
                                netif_carrier_off(dev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        netdev_info(dev,
                                    "External switch port is primary port\n");
                else
                        netdev_info(dev,
                                    "External switch port is backup port\n");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                netdev_err(dev, "Adapter malfunction\n");
                break;
        case EHEA_EC_PORT_MALFUNC:
                netdev_info(dev, "Port malfunction\n");
                netif_carrier_off(dev);
                netif_tx_disable(dev);
                break;
        default:
                netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
                break;
        }
}

static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        pr_debug("eqe=%p\n", eqe);

        while (eqe) {
                pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                pr_debug("next eqe=%p\n", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;
        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ehea_init_fill_rq1(pr, pr->rq1_skba.len);

        ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

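/* Register the affiliated-error interrupt for the port and one receive
 * interrupt per queue; on failure, already-registered IRQs are freed in
 * reverse order.
 */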
static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;


        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  IRQF_DISABLED, port->int_aff_name, port);
        if (ret) {
                netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
                           port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        netif_info(port, ifup, dev,
                   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
                   port->qp_eq->attr.ist1);


        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          IRQF_DISABLED, pr->int_send_name,
                                          pr);
                if (ret) {
                        netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
                                   i, pr->eq->attr.ist1);
                        goto out_free_req;
                }
                netif_info(port, ifup, dev,
                           "irq_handle 0x%X for function ehea_queue_int %d registered\n",
                           pr->eq->attr.ist1, i);
        }
out:
        return ret;


out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;
                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */

        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                netif_info(port, intr, dev,
                           "free send irq for res %d with handle 0x%X\n",
                           i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        netif_info(port, intr, dev,
                   "associated event interrupt for handle 0x%X freed\n",
                   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        free_page((unsigned long)cb0);
out:
        return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        pr_err("Generating SMRS failed\n");
        return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

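/* Set up one port resource set: its EQ, send/receive CQs and QP; the
 * software byte/packet counters are saved and restored across the memset
 * so they survive a port reset.
 */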
1453 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1454                               struct port_res_cfg *pr_cfg, int queue_token)
1455 {
1456         struct ehea_adapter *adapter = port->adapter;
1457         enum ehea_eq_type eq_type = EHEA_EQ;
1458         struct ehea_qp_init_attr *init_attr = NULL;
1459         int ret = -EIO;
1460         u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1461
1462         tx_bytes = pr->tx_bytes;
1463         tx_packets = pr->tx_packets;
1464         rx_bytes = pr->rx_bytes;
1465         rx_packets = pr->rx_packets;
1466
1467         memset(pr, 0, sizeof(struct ehea_port_res));
1468
1469         pr->tx_bytes = tx_bytes;
1470         pr->tx_packets = tx_packets;
1471         pr->rx_bytes = rx_bytes;
1472         pr->rx_packets = rx_packets;
1473
1474         pr->port = port;
1475
1476         pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1477         if (!pr->eq) {
1478                 pr_err("create_eq failed (eq)\n");
1479                 goto out_free;
1480         }
1481
1482         pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1483                                      pr->eq->fw_handle,
1484                                      port->logical_port_id);
1485         if (!pr->recv_cq) {
1486                 pr_err("create_cq failed (cq_recv)\n");
1487                 goto out_free;
1488         }
1489
1490         pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1491                                      pr->eq->fw_handle,
1492                                      port->logical_port_id);
1493         if (!pr->send_cq) {
1494                 pr_err("create_cq failed (cq_send)\n");
1495                 goto out_free;
1496         }
1497
1498         if (netif_msg_ifup(port))
1499                 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1500                         pr->send_cq->attr.act_nr_of_cqes,
1501                         pr->recv_cq->attr.act_nr_of_cqes);
1502
1503         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1504         if (!init_attr) {
1505                 ret = -ENOMEM;
1506                 pr_err("no mem for ehea_qp_init_attr\n");
1507                 goto out_free;
1508         }
1509
1510         init_attr->low_lat_rq1 = 1;
1511         init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
1512         init_attr->rq_count = 3;
1513         init_attr->qp_token = queue_token;
1514         init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1515         init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1516         init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1517         init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1518         init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1519         init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1520         init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1521         init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1522         init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1523         init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1524         init_attr->port_nr = port->logical_port_id;
1525         init_attr->send_cq_handle = pr->send_cq->fw_handle;
1526         init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1527         init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1528
1529         pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1530         if (!pr->qp) {
1531                 pr_err("create_qp failed\n");
1532                 ret = -EIO;
1533                 goto out_free;
1534         }
1535
1536         if (netif_msg_ifup(port))
1537                 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1538                         init_attr->qp_nr,
1539                         init_attr->act_nr_send_wqes,
1540                         init_attr->act_nr_rwqes_rq1,
1541                         init_attr->act_nr_rwqes_rq2,
1542                         init_attr->act_nr_rwqes_rq3);
1543
1544         pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1545
1546         ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1547         ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1548         ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1549         ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1550         if (ret)
1551                 goto out_free;
1552
1553         pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1554         if (ehea_gen_smrs(pr) != 0) {
1555                 ret = -EIO;
1556                 goto out_free;
1557         }
1558
1559         atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1560
1561         kfree(init_attr);
1562
1563         netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1564
1565         ret = 0;
1566         goto out;
1567
1568 out_free:
1569         kfree(init_attr);
1570         vfree(pr->sq_skba.arr);
1571         vfree(pr->rq1_skba.arr);
1572         vfree(pr->rq2_skba.arr);
1573         vfree(pr->rq3_skba.arr);
1574         ehea_destroy_qp(pr->qp);
1575         ehea_destroy_cq(pr->send_cq);
1576         ehea_destroy_cq(pr->recv_cq);
1577         ehea_destroy_eq(pr->eq);
1578 out:
1579         return ret;
1580 }
1581
1582 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1583 {
1584         int ret, i;
1585
1586         if (pr->qp)
1587                 netif_napi_del(&pr->napi);
1588
1589         ret = ehea_destroy_qp(pr->qp);
1590
1591         if (!ret) {
1592                 ehea_destroy_cq(pr->send_cq);
1593                 ehea_destroy_cq(pr->recv_cq);
1594                 ehea_destroy_eq(pr->eq);
1595
1596                 for (i = 0; i < pr->rq1_skba.len; i++)
1597                         if (pr->rq1_skba.arr[i])
1598                                 dev_kfree_skb(pr->rq1_skba.arr[i]);
1599
1600                 for (i = 0; i < pr->rq2_skba.len; i++)
1601                         if (pr->rq2_skba.arr[i])
1602                                 dev_kfree_skb(pr->rq2_skba.arr[i]);
1603
1604                 for (i = 0; i < pr->rq3_skba.len; i++)
1605                         if (pr->rq3_skba.arr[i])
1606                                 dev_kfree_skb(pr->rq3_skba.arr[i]);
1607
1608                 for (i = 0; i < pr->sq_skba.len; i++)
1609                         if (pr->sq_skba.arr[i])
1610                                 dev_kfree_skb(pr->sq_skba.arr[i]);
1611
1612                 vfree(pr->rq1_skba.arr);
1613                 vfree(pr->rq2_skba.arr);
1614                 vfree(pr->rq3_skba.arr);
1615                 vfree(pr->sq_skba.arr);
1616                 ret = ehea_rem_smrs(pr);
1617         }
1618         return ret;
1619 }
1620
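/*
 * Fill the immediate-data area of a type 2 send WQE.  Up to
 * SWQE2_MAX_IMM bytes of linear data are copied inline; for TSO
 * frames only the headers are.  Remaining linear data is referenced
 * through the first scatter-gather entry.
 */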
1621 static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
1622                                   u32 lkey)
1623 {
1624         int skb_data_size = skb_headlen(skb);
1625         u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1626         struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1627         unsigned int immediate_len = SWQE2_MAX_IMM;
1628
1629         swqe->descriptors = 0;
1630
1631         if (skb_is_gso(skb)) {
1632                 swqe->tx_control |= EHEA_SWQE_TSO;
1633                 swqe->mss = skb_shinfo(skb)->gso_size;
1634                 /*
1635                  * For TSO packets we only copy the headers into the
1636                  * immediate area.
1637                  */
1638                 immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1639         }
1640
1641         if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
1642                 skb_copy_from_linear_data(skb, imm_data, immediate_len);
1643                 swqe->immediate_data_length = immediate_len;
1644
1645                 if (skb_data_size > immediate_len) {
1646                         sg1entry->l_key = lkey;
1647                         sg1entry->len = skb_data_size - immediate_len;
1648                         sg1entry->vaddr =
1649                                 ehea_map_vaddr(skb->data + immediate_len);
1650                         swqe->descriptors++;
1651                 }
1652         } else {
1653                 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1654                 swqe->immediate_data_length = skb_data_size;
1655         }
1656 }
1657
1658 static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1659                                     struct ehea_swqe *swqe, u32 lkey)
1660 {
1661         struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1662         skb_frag_t *frag;
1663         int nfrags, sg1entry_contains_frag_data, i;
1664
1665         nfrags = skb_shinfo(skb)->nr_frags;
1666         sg1entry = &swqe->u.immdata_desc.sg_entry;
1667         sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1668         sg1entry_contains_frag_data = 0;
1669
1670         write_swqe2_immediate(skb, swqe, lkey);
1671
1672         /* write descriptors */
1673         if (nfrags > 0) {
1674                 if (swqe->descriptors == 0) {
1675                         /* sg1entry not yet used */
1676                         frag = &skb_shinfo(skb)->frags[0];
1677
1678                         /* copy sg1entry data */
1679                         sg1entry->l_key = lkey;
1680                         sg1entry->len = skb_frag_size(frag);
1681                         sg1entry->vaddr =
1682                                 ehea_map_vaddr(skb_frag_address(frag));
1683                         swqe->descriptors++;
1684                         sg1entry_contains_frag_data = 1;
1685                 }
1686
1687                 for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1688
1689                         frag = &skb_shinfo(skb)->frags[i];
1690                         sgentry = &sg_list[i - sg1entry_contains_frag_data];
1691
1692                         sgentry->l_key = lkey;
1693                         sgentry->len = skb_frag_size(frag);
1694                         sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
1695                         swqe->descriptors++;
1696                 }
1697         }
1698 }
1699
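/*
 * Register or deregister (depending on hcallid) the port's broadcast
 * address with the hypervisor, for untagged as well as all VLAN
 * tagged traffic.
 */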
1700 static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1701 {
1702         int ret = 0;
1703         u64 hret;
1704         u8 reg_type;
1705
1706         /* De/Register untagged packets */
1707         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1708         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1709                                      port->logical_port_id,
1710                                      reg_type, port->mac_addr, 0, hcallid);
1711         if (hret != H_SUCCESS) {
1712                 pr_err("%sregistering bc address failed (untagged)\n",
1713                        hcallid == H_REG_BCMC ? "" : "de");
1714                 ret = -EIO;
1715                 goto out_herr;
1716         }
1717
1718         /* De/Register VLAN packets */
1719         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1720         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1721                                      port->logical_port_id,
1722                                      reg_type, port->mac_addr, 0, hcallid);
1723         if (hret != H_SUCCESS) {
1724                 pr_err("%sregistering bc address failed (vlan)\n",
1725                        hcallid == H_REG_BCMC ? "" : "de");
1726                 ret = -EIO;
1727         }
1728 out_herr:
1729         return ret;
1730 }
1731
1732 static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1733 {
1734         struct ehea_port *port = netdev_priv(dev);
1735         struct sockaddr *mac_addr = sa;
1736         struct hcp_ehea_port_cb0 *cb0;
1737         int ret;
1738         u64 hret;
1739
1740         if (!is_valid_ether_addr(mac_addr->sa_data)) {
1741                 ret = -EADDRNOTAVAIL;
1742                 goto out;
1743         }
1744
1745         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1746         if (!cb0) {
1747                 pr_err("no mem for cb0\n");
1748                 ret = -ENOMEM;
1749                 goto out;
1750         }
1751
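        /*
         * The six MAC bytes land in the most significant bytes of the
         * u64; shift them down so the hypervisor sees the address
         * right-aligned.
         */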
1752         memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1753
1754         cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1755
1756         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1757                                        port->logical_port_id, H_PORT_CB0,
1758                                        EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1759         if (hret != H_SUCCESS) {
1760                 ret = -EIO;
1761                 goto out_free;
1762         }
1763
1764         memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1765
1766         /* Deregister old MAC in pHYP */
1767         if (port->state == EHEA_PORT_UP) {
1768                 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1769                 if (ret)
1770                         goto out_upregs;
1771         }
1772
1773         port->mac_addr = cb0->port_mac_addr << 16;
1774
1775         /* Register new MAC in pHYP */
1776         if (port->state == EHEA_PORT_UP) {
1777                 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1778                 if (ret)
1779                         goto out_upregs;
1780         }
1781
1782         ret = 0;
1783
1784 out_upregs:
1785         ehea_update_bcmc_registrations();
1786 out_free:
1787         free_page((unsigned long)cb0);
1788 out:
1789         return ret;
1790 }
1791
1792 static void ehea_promiscuous_error(u64 hret, int enable)
1793 {
1794         if (hret == H_AUTHORITY)
1795                 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1796                         enable == 1 ? "en" : "dis");
1797         else
1798                 pr_err("failed %sabling promiscuous mode\n",
1799                        enable == 1 ? "en" : "dis");
1800 }
1801
1802 static void ehea_promiscuous(struct net_device *dev, int enable)
1803 {
1804         struct ehea_port *port = netdev_priv(dev);
1805         struct hcp_ehea_port_cb7 *cb7;
1806         u64 hret;
1807
1808         if (enable == port->promisc)
1809                 return;
1810
1811         cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1812         if (!cb7) {
1813                 pr_err("no mem for cb7\n");
1814                 goto out;
1815         }
1816
1817         /* Modify Pxs_DUCQPN in CB7 */
1818         cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1819
1820         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1821                                        port->logical_port_id,
1822                                        H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1823         if (hret) {
1824                 ehea_promiscuous_error(hret, enable);
1825                 goto out;
1826         }
1827
1828         port->promisc = enable;
1829 out:
1830         free_page((unsigned long)cb7);
1831 }
1832
1833 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1834                                      u32 hcallid)
1835 {
1836         u64 hret;
1837         u8 reg_type;
1838
1839         reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1840                  | EHEA_BCMC_UNTAGGED;
1841
1842         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1843                                      port->logical_port_id,
1844                                      reg_type, mc_mac_addr, 0, hcallid);
1845         if (hret)
1846                 goto out;
1847
1848         reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1849                  | EHEA_BCMC_VLANID_ALL;
1850
1851         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1852                                      port->logical_port_id,
1853                                      reg_type, mc_mac_addr, 0, hcallid);
1854 out:
1855         return hret;
1856 }
1857
1858 static int ehea_drop_multicast_list(struct net_device *dev)
1859 {
1860         struct ehea_port *port = netdev_priv(dev);
1861         struct ehea_mc_list *mc_entry = port->mc_list;
1862         struct list_head *pos;
1863         struct list_head *temp;
1864         int ret = 0;
1865         u64 hret;
1866
1867         list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1868                 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1869
1870                 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1871                                                  H_DEREG_BCMC);
1872                 if (hret) {
1873                         pr_err("failed deregistering mcast MAC\n");
1874                         ret = -EIO;
1875                 }
1876
1877                 list_del(pos);
1878                 kfree(mc_entry);
1879         }
1880         return ret;
1881 }
1882
1883 static void ehea_allmulti(struct net_device *dev, int enable)
1884 {
1885         struct ehea_port *port = netdev_priv(dev);
1886         u64 hret;
1887
1888         if (!port->allmulti) {
1889                 if (enable) {
1890                         /* Enable ALLMULTI */
1891                         ehea_drop_multicast_list(dev);
1892                         hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1893                         if (!hret)
1894                                 port->allmulti = 1;
1895                         else
1896                                 netdev_err(dev,
1897                                            "failed enabling IFF_ALLMULTI\n");
1898                 }
1899         } else if (!enable) {
1900                 /* Disable ALLMULTI */
1901                 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1902                 if (!hret)
1903                         port->allmulti = 0;
1904                 else
1905                         netdev_err(dev, "failed disabling IFF_ALLMULTI\n");
1906         }
1909 }
1910
1911 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1912 {
1913         struct ehea_mc_list *ehea_mcl_entry;
1914         u64 hret;
1915
1916         ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1917         if (!ehea_mcl_entry) {
1918                 pr_err("no mem for mcl_entry\n");
1919                 return;
1920         }
1921
1922         INIT_LIST_HEAD(&ehea_mcl_entry->list);
1923
1924         memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1925
1926         hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1927                                          H_REG_BCMC);
1928         if (!hret)
1929                 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1930         else {
1931                 pr_err("failed registering mcast MAC\n");
1932                 kfree(ehea_mcl_entry);
1933         }
1934 }
1935
1936 static void ehea_set_multicast_list(struct net_device *dev)
1937 {
1938         struct ehea_port *port = netdev_priv(dev);
1939         struct netdev_hw_addr *ha;
1940         int ret;
1941
1942         if (dev->flags & IFF_PROMISC) {
1943                 ehea_promiscuous(dev, 1);
1944                 return;
1945         }
1946         ehea_promiscuous(dev, 0);
1947
1948         if (dev->flags & IFF_ALLMULTI) {
1949                 ehea_allmulti(dev, 1);
1950                 goto out;
1951         }
1952         ehea_allmulti(dev, 0);
1953
1954         if (!netdev_mc_empty(dev)) {
1955                 ret = ehea_drop_multicast_list(dev);
1956                 if (ret) {
1957                         /* Dropping the current multicast list failed.
1958                          * Enabling ALL_MULTI is the best we can do.
1959                          */
1960                         ehea_allmulti(dev, 1);
1961                 }
1962
1963                 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1964                         pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
1965                                 port->adapter->max_mc_mac);
1966                         goto out;
1967                 }
1968
1969                 netdev_for_each_mc_addr(ha, dev)
1970                         ehea_add_multicast_entry(port, ha->addr);
1971
1972         }
1973 out:
1974         ehea_update_bcmc_registrations();
1975 }
1976
1977 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1978 {
1979         if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1980                 return -EINVAL;
1981         dev->mtu = new_mtu;
1982         return 0;
1983 }
1984
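/*
 * Fill the WQE fields common to both transmit paths: CRC generation,
 * the IPv4 header offsets and, for CHECKSUM_PARTIAL skbs, the IP and
 * transport checksum offload flags.
 */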
1985 static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1986 {
1987         swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1988
1989         if (skb->protocol != htons(ETH_P_IP))
1990                 return;
1991
1992         if (skb->ip_summed == CHECKSUM_PARTIAL)
1993                 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
1994
1995         swqe->ip_start = skb_network_offset(skb);
1996         swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
1997
1998         switch (ip_hdr(skb)->protocol) {
1999         case IPPROTO_UDP:
2000                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2001                         swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2002
2003                 swqe->tcp_offset = swqe->ip_end + 1 +
2004                                    offsetof(struct udphdr, check);
2005                 break;
2006
2007         case IPPROTO_TCP:
2008                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2009                         swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2010
2011                 swqe->tcp_offset = swqe->ip_end + 1 +
2012                                    offsetof(struct tcphdr, check);
2013                 break;
2014         }
2015 }
2016
2017 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2018                        struct ehea_swqe *swqe, u32 lkey)
2019 {
2020         swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
2021
2022         xmit_common(skb, swqe);
2023
2024         write_swqe2_data(skb, dev, swqe, lkey);
2025 }
2026
2027 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2028                        struct ehea_swqe *swqe)
2029 {
2030         u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2031
2032         xmit_common(skb, swqe);
2033
2034         if (!skb->data_len)
2035                 skb_copy_from_linear_data(skb, imm_data, skb->len);
2036         else
2037                 skb_copy_bits(skb, 0, imm_data, skb->len);
2038
2039         swqe->immediate_data_length = skb->len;
2040         dev_kfree_skb(skb);
2041 }
2042
2043 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2044 {
2045         struct ehea_port *port = netdev_priv(dev);
2046         struct ehea_swqe *swqe;
2047         u32 lkey;
2048         int swqe_index;
2049         struct ehea_port_res *pr;
2050         struct netdev_queue *txq;
2051
2052         pr = &port->port_res[skb_get_queue_mapping(skb)];
2053         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2054
2055         swqe = ehea_get_swqe(pr->qp, &swqe_index);
2056         memset(swqe, 0, SWQE_HEADER_SIZE);
2057         atomic_dec(&pr->swqe_avail);
2058
2059         if (vlan_tx_tag_present(skb)) {
2060                 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2061                 swqe->vlan_tag = vlan_tx_tag_get(skb);
2062         }
2063
2064         pr->tx_packets++;
2065         pr->tx_bytes += skb->len;
2066
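        /*
         * Frames that fit into the immediate area go out as type 3
         * WQEs and the skb is freed right away; larger frames use
         * type 2 WQEs with descriptors, so the skb is kept in sq_skba
         * until its signalled completion arrives.
         */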
2067         if (skb->len <= SWQE3_MAX_IMM) {
2068                 u32 sig_iv = port->sig_comp_iv;
2069                 u32 swqe_num = pr->swqe_id_counter;
2070                 ehea_xmit3(skb, dev, swqe);
2071                 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2072                         | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2073                 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2074                         swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2075                                                       sig_iv);
2076                         swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2077                         pr->swqe_ll_count = 0;
2078                 } else
2079                         pr->swqe_ll_count += 1;
2080         } else {
2081                 swqe->wr_id =
2082                         EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2083                       | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2084                       | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2085                       | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2086                 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2087
2088                 pr->sq_skba.index++;
2089                 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2090
2091                 lkey = pr->send_mr.lkey;
2092                 ehea_xmit2(skb, dev, swqe, lkey);
2093                 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2094         }
2095         pr->swqe_id_counter += 1;
2096
2097         netif_info(port, tx_queued, dev,
2098                    "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2099         if (netif_msg_tx_queued(port))
2100                 ehea_dump(swqe, 512, "swqe");
2101
2102         if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2103                 netif_tx_stop_queue(txq);
2104                 swqe->tx_control |= EHEA_SWQE_PURGE;
2105         }
2106
2107         ehea_post_swqe(pr->qp, swqe);
2108
2109         if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2110                 pr->p_stats.queue_stopped++;
2111                 netif_tx_stop_queue(txq);
2112         }
2113
2114         return NETDEV_TX_OK;
2115 }
2116
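/*
 * Set the bit for this VID in the port's VLAN filter (control block
 * 1).  The filter is an array of 64-bit words with bits numbered
 * from the most significant bit downwards.
 */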
2117 static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2118 {
2119         struct ehea_port *port = netdev_priv(dev);
2120         struct ehea_adapter *adapter = port->adapter;
2121         struct hcp_ehea_port_cb1 *cb1;
2122         int index;
2123         u64 hret;
2124         int err = 0;
2125
2126         cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2127         if (!cb1) {
2128                 pr_err("no mem for cb1\n");
2129                 err = -ENOMEM;
2130                 goto out;
2131         }
2132
2133         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2134                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2135         if (hret != H_SUCCESS) {
2136                 pr_err("query_ehea_port failed\n");
2137                 err = -EINVAL;
2138                 goto out;
2139         }
2140
2141         index = (vid / 64);
2142         cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2143
2144         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2145                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2146         if (hret != H_SUCCESS) {
2147                 pr_err("modify_ehea_port failed\n");
2148                 err = -EINVAL;
2149         }
2150 out:
2151         free_page((unsigned long)cb1);
2152         return err;
2153 }
2154
2155 static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2156 {
2157         struct ehea_port *port = netdev_priv(dev);
2158         struct ehea_adapter *adapter = port->adapter;
2159         struct hcp_ehea_port_cb1 *cb1;
2160         int index;
2161         u64 hret;
2162         int err = 0;
2163
2164         cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2165         if (!cb1) {
2166                 pr_err("no mem for cb1\n");
2167                 err = -ENOMEM;
2168                 goto out;
2169         }
2170
2171         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2172                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2173         if (hret != H_SUCCESS) {
2174                 pr_err("query_ehea_port failed\n");
2175                 err = -EINVAL;
2176                 goto out;
2177         }
2178
2179         index = (vid / 64);
2180         cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2181
2182         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2183                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2184         if (hret != H_SUCCESS) {
2185                 pr_err("modify_ehea_port failed\n");
2186                 err = -EINVAL;
2187         }
2188 out:
2189         free_page((unsigned long)cb1);
2190         return err;
2191 }
2192
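/*
 * Walk a queue pair through its activation sequence: INITIALIZED,
 * then ENABLED, then ready-to-send, re-reading the QP context before
 * each H_MODIFY_HEA_QP transition.
 */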
2193 static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2194 {
2195         int ret = -EIO;
2196         u64 hret;
2197         u16 dummy16 = 0;
2198         u64 dummy64 = 0;
2199         struct hcp_modify_qp_cb0 *cb0;
2200
2201         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2202         if (!cb0) {
2203                 ret = -ENOMEM;
2204                 goto out;
2205         }
2206
2207         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2208                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2209         if (hret != H_SUCCESS) {
2210                 pr_err("query_ehea_qp failed (1)\n");
2211                 goto out;
2212         }
2213
2214         cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2215         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2216                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2217                                      &dummy64, &dummy64, &dummy16, &dummy16);
2218         if (hret != H_SUCCESS) {
2219                 pr_err("modify_ehea_qp failed (1)\n");
2220                 goto out;
2221         }
2222
2223         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2224                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2225         if (hret != H_SUCCESS) {
2226                 pr_err("query_ehea_qp failed (2)\n");
2227                 goto out;
2228         }
2229
2230         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2231         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2232                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2233                                      &dummy64, &dummy64, &dummy16, &dummy16);
2234         if (hret != H_SUCCESS) {
2235                 pr_err("modify_ehea_qp failed (2)\n");
2236                 goto out;
2237         }
2238
2239         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2240                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2241         if (hret != H_SUCCESS) {
2242                 pr_err("query_ehea_qp failed (3)\n");
2243                 goto out;
2244         }
2245
2246         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2247         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2248                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2249                                      &dummy64, &dummy64, &dummy16, &dummy16);
2250         if (hret != H_SUCCESS) {
2251                 pr_err("modify_ehea_qp failed (3)\n");
2252                 goto out;
2253         }
2254
2255         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2256                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2257         if (hret != H_SUCCESS) {
2258                 pr_err("query_ehea_qp failed (4)\n");
2259                 goto out;
2260         }
2261
2262         ret = 0;
2263 out:
2264         free_page((unsigned long)cb0);
2265         return ret;
2266 }
2267
2268 static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2269 {
2270         int ret, i;
2271         struct port_res_cfg pr_cfg;
2272         enum ehea_eq_type eq_type = EHEA_EQ;
2273
2274         port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2275                                    EHEA_MAX_ENTRIES_EQ, 1);
2276         if (!port->qp_eq) {
2277                 ret = -EINVAL;
2278                 pr_err("ehea_create_eq failed (qp_eq)\n");
2279                 goto out_kill_eq;
2280         }
2281
2282         pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2283         pr_cfg.max_entries_scq = sq_entries * 2;
2284         pr_cfg.max_entries_sq = sq_entries;
2285         pr_cfg.max_entries_rq1 = rq1_entries;
2286         pr_cfg.max_entries_rq2 = rq2_entries;
2287         pr_cfg.max_entries_rq3 = rq3_entries;
2288
2296         for (i = 0; i < def_qps; i++) {
2297                 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2298                 if (ret)
2299                         goto out_clean_pr;
2300         }
2307
2308         return 0;
2309
2310 out_clean_pr:
2311         while (--i >= 0)
2312                 ehea_clean_portres(port, &port->port_res[i]);
2313
2314 out_kill_eq:
2315         ehea_destroy_eq(port->qp_eq);
2316         return ret;
2317 }
2318
2319 static int ehea_clean_all_portres(struct ehea_port *port)
2320 {
2321         int ret = 0;
2322         int i;
2323
2324         for (i = 0; i < port->num_def_qps; i++)
2325                 ret |= ehea_clean_portres(port, &port->port_res[i]);
2326
2327         ret |= ehea_destroy_eq(port->qp_eq);
2328
2329         return ret;
2330 }
2331
2332 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2333 {
2334         if (adapter->active_ports)
2335                 return;
2336
2337         ehea_rem_mr(&adapter->mr);
2338 }
2339
2340 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2341 {
2342         if (adapter->active_ports)
2343                 return 0;
2344
2345         return ehea_reg_kernel_mr(adapter, &adapter->mr);
2346 }
2347
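/*
 * Bring a port up: allocate its queue resources, configure the port,
 * register interrupts, activate and fill the queue pairs and register
 * the broadcast address.
 */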
2348 static int ehea_up(struct net_device *dev)
2349 {
2350         int ret, i;
2351         struct ehea_port *port = netdev_priv(dev);
2352
2353         if (port->state == EHEA_PORT_UP)
2354                 return 0;
2355
2356         ret = ehea_port_res_setup(port, port->num_def_qps);
2357         if (ret) {
2358                 netdev_err(dev, "port_res_failed\n");
2359                 goto out;
2360         }
2361
2362         /* Set default QP for this port */
2363         ret = ehea_configure_port(port);
2364         if (ret) {
2365                 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2366                 goto out_clean_pr;
2367         }
2368
2369         ret = ehea_reg_interrupts(dev);
2370         if (ret) {
2371                 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2372                 goto out_clean_pr;
2373         }
2374
2375         for (i = 0; i < port->num_def_qps; i++) {
2376                 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2377                 if (ret) {
2378                         netdev_err(dev, "activate_qp failed\n");
2379                         goto out_free_irqs;
2380                 }
2381         }
2382
2383         for (i = 0; i < port->num_def_qps; i++) {
2384                 ret = ehea_fill_port_res(&port->port_res[i]);
2385                 if (ret) {
2386                         netdev_err(dev, "ehea_fill_port_res failed\n");
2387                         goto out_free_irqs;
2388                 }
2389         }
2390
2391         ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2392         if (ret) {
2393                 ret = -EIO;
2394                 goto out_free_irqs;
2395         }
2396
2397         port->state = EHEA_PORT_UP;
2398
2399         ret = 0;
2400         goto out;
2401
2402 out_free_irqs:
2403         ehea_free_interrupts(dev);
2404
2405 out_clean_pr:
2406         ehea_clean_all_portres(port);
2407 out:
2408         if (ret)
2409                 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2410
2411         ehea_update_bcmc_registrations();
2412         ehea_update_firmware_handles();
2413
2414         return ret;
2415 }
2416
2417 static void port_napi_disable(struct ehea_port *port)
2418 {
2419         int i;
2420
2421         for (i = 0; i < port->num_def_qps; i++)
2422                 napi_disable(&port->port_res[i].napi);
2423 }
2424
2425 static void port_napi_enable(struct ehea_port *port)
2426 {
2427         int i;
2428
2429         for (i = 0; i < port->num_def_qps; i++)
2430                 napi_enable(&port->port_res[i].napi);
2431 }
2432
2433 static int ehea_open(struct net_device *dev)
2434 {
2435         int ret;
2436         struct ehea_port *port = netdev_priv(dev);
2437
2438         mutex_lock(&port->port_lock);
2439
2440         netif_info(port, ifup, dev, "enabling port\n");
2441
2442         ret = ehea_up(dev);
2443         if (!ret) {
2444                 port_napi_enable(port);
2445                 netif_tx_start_all_queues(dev);
2446         }
2447
2448         mutex_unlock(&port->port_lock);
2449         schedule_delayed_work(&port->stats_work,
2450                               round_jiffies_relative(msecs_to_jiffies(1000)));
2451
2452         return ret;
2453 }
2454
2455 static int ehea_down(struct net_device *dev)
2456 {
2457         int ret;
2458         struct ehea_port *port = netdev_priv(dev);
2459
2460         if (port->state == EHEA_PORT_DOWN)
2461                 return 0;
2462
2463         ehea_drop_multicast_list(dev);
2464         ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2465
2466         ehea_free_interrupts(dev);
2467
2468         port->state = EHEA_PORT_DOWN;
2469
2470         ehea_update_bcmc_registrations();
2471
2472         ret = ehea_clean_all_portres(port);
2473         if (ret)
2474                 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2475
2476         ehea_update_firmware_handles();
2477
2478         return ret;
2479 }
2480
2481 static int ehea_stop(struct net_device *dev)
2482 {
2483         int ret;
2484         struct ehea_port *port = netdev_priv(dev);
2485
2486         netif_info(port, ifdown, dev, "disabling port\n");
2487
2488         set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2489         cancel_work_sync(&port->reset_task);
2490         cancel_delayed_work_sync(&port->stats_work);
2491         mutex_lock(&port->port_lock);
2492         netif_tx_stop_all_queues(dev);
2493         port_napi_disable(port);
2494         ret = ehea_down(dev);
2495         mutex_unlock(&port->port_lock);
2496         clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2497         return ret;
2498 }
2499
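/*
 * Flag every send WQE with EHEA_SWQE_PURGE so that pending work is
 * discarded instead of being transmitted while the queues are
 * stopped.
 */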
2500 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2501 {
2502         struct ehea_qp qp = *orig_qp;
2503         struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2504         struct ehea_swqe *swqe;
2505         int wqe_index;
2506         int i;
2507
2508         for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2509                 swqe = ehea_get_swqe(&qp, &wqe_index);
2510                 swqe->tx_control |= EHEA_SWQE_PURGE;
2511         }
2512 }
2513
2514 static void ehea_flush_sq(struct ehea_port *port)
2515 {
2516         int i;
2517
2518         for (i = 0; i < port->num_def_qps; i++) {
2519                 struct ehea_port_res *pr = &port->port_res[i];
2520                 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2521                 int ret;
2522
2523                 ret = wait_event_timeout(port->swqe_avail_wq,
2524                          atomic_read(&pr->swqe_avail) >= swqe_max,
2525                          msecs_to_jiffies(100));
2526
2527                 if (!ret) {
2528                         pr_err("WARNING: sq not flushed completely\n");
2529                         break;
2530                 }
2531         }
2532 }
2533
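/*
 * Quiesce all queue pairs of a port: purge their send queues, clear
 * the enabled bit in each QP control register and deregister the
 * shared memory regions, in preparation for re-registering the
 * adapter's memory region.
 */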
2534 static int ehea_stop_qps(struct net_device *dev)
2535 {
2536         struct ehea_port *port = netdev_priv(dev);
2537         struct ehea_adapter *adapter = port->adapter;
2538         struct hcp_modify_qp_cb0 *cb0;
2539         int ret = -EIO;
2540         int dret;
2541         int i;
2542         u64 hret;
2543         u64 dummy64 = 0;
2544         u16 dummy16 = 0;
2545
2546         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2547         if (!cb0) {
2548                 ret = -ENOMEM;
2549                 goto out;
2550         }
2551
2552         for (i = 0; i < (port->num_def_qps); i++) {
2553                 struct ehea_port_res *pr =  &port->port_res[i];
2554                 struct ehea_qp *qp = pr->qp;
2555
2556                 /* Purge send queue */
2557                 ehea_purge_sq(qp);
2558
2559                 /* Disable queue pair */
2560                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2561                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2562                                             cb0);
2563                 if (hret != H_SUCCESS) {
2564                         pr_err("query_ehea_qp failed (1)\n");
2565                         goto out;
2566                 }
2567
2568                 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2569                 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2570
2571                 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2572                                              EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2573                                                             1), cb0, &dummy64,
2574                                              &dummy64, &dummy16, &dummy16);
2575                 if (hret != H_SUCCESS) {
2576                         pr_err("modify_ehea_qp failed (1)\n");
2577                         goto out;
2578                 }
2579
2580                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2581                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2582                                             cb0);
2583                 if (hret != H_SUCCESS) {
2584                         pr_err("query_ehea_qp failed (2)\n");
2585                         goto out;
2586                 }
2587
2588                 /* deregister shared memory regions */
2589                 dret = ehea_rem_smrs(pr);
2590                 if (dret) {
2591                         pr_err("unreg shared memory region failed\n");
2592                         goto out;
2593                 }
2594         }
2595
2596         ret = 0;
2597 out:
2598         free_page((unsigned long)cb0);
2599
2600         return ret;
2601 }
2602
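/*
 * After the memory region has been re-registered, rewrite the buffer
 * references of all RQ2 and RQ3 receive WQEs with the new lkey and
 * the remapped buffer addresses.
 */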
2603 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2604 {
2605         struct ehea_qp qp = *orig_qp;
2606         struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2607         struct ehea_rwqe *rwqe;
2608         struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2609         struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2610         struct sk_buff *skb;
2611         u32 lkey = pr->recv_mr.lkey;
2612
2614         int i;
2615         int index;
2616
2617         for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2618                 rwqe = ehea_get_next_rwqe(&qp, 2);
2619                 rwqe->sg_list[0].l_key = lkey;
2620                 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2621                 skb = skba_rq2[index];
2622                 if (skb)
2623                         rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2624         }
2625
2626         for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2627                 rwqe = ehea_get_next_rwqe(&qp, 3);
2628                 rwqe->sg_list[0].l_key = lkey;
2629                 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2630                 skb = skba_rq3[index];
2631                 if (skb)
2632                         rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2633         }
2634 }
2635
2636 static int ehea_restart_qps(struct net_device *dev)
2637 {
2638         struct ehea_port *port = netdev_priv(dev);
2639         struct ehea_adapter *adapter = port->adapter;
2640         int ret = 0;
2641         int i;
2642
2643         struct hcp_modify_qp_cb0 *cb0;
2644         u64 hret;
2645         u64 dummy64 = 0;
2646         u16 dummy16 = 0;
2647
2648         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2649         if (!cb0) {
2650                 ret = -ENOMEM;
2651                 goto out;
2652         }
2653
2654         for (i = 0; i < (port->num_def_qps); i++) {
2655                 struct ehea_port_res *pr =  &port->port_res[i];
2656                 struct ehea_qp *qp = pr->qp;
2657
2658                 ret = ehea_gen_smrs(pr);
2659                 if (ret) {
2660                         netdev_err(dev, "creation of shared memory regions failed\n");
2661                         goto out;
2662                 }
2663
2664                 ehea_update_rqs(qp, pr);
2665
2666                 /* Enable queue pair */
2667                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2668                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2669                                             cb0);
2670                 if (hret != H_SUCCESS) {
2671                         netdev_err(dev, "query_ehea_qp failed (1)\n");
2672                         goto out;
2673                 }
2674
2675                 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2676                 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2677
2678                 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2679                                              EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2680                                                             1), cb0, &dummy64,
2681                                              &dummy64, &dummy16, &dummy16);
2682                 if (hret != H_SUCCESS) {
2683                         netdev_err(dev, "modify_ehea_qp failed (1)\n");
2684                         goto out;
2685                 }
2686
2687                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2688                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2689                                             cb0);
2690                 if (hret != H_SUCCESS) {
2691                         netdev_err(dev, "query_ehea_qp failed (2)\n");
2692                         goto out;
2693                 }
2694
2695                 /* refill entire queue */
2696                 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2697                 ehea_refill_rq2(pr, 0);
2698                 ehea_refill_rq3(pr, 0);
2699         }
2700 out:
2701         free_page((unsigned long)cb0);
2702
2703         return ret;
2704 }
2705
2706 static void ehea_reset_port(struct work_struct *work)
2707 {
2708         int ret;
2709         struct ehea_port *port =
2710                 container_of(work, struct ehea_port, reset_task);
2711         struct net_device *dev = port->netdev;
2712
2713         mutex_lock(&dlpar_mem_lock);
2714         port->resets++;
2715         mutex_lock(&port->port_lock);
2716         netif_tx_disable(dev);
2717
2718         port_napi_disable(port);
2719
2720         ehea_down(dev);
2721
2722         ret = ehea_up(dev);
2723         if (ret)
2724                 goto out;
2725
2726         ehea_set_multicast_list(dev);
2727
2728         netif_info(port, timer, dev, "reset successful\n");
2729
2730         port_napi_enable(port);
2731
2732         netif_tx_wake_all_queues(dev);
2733 out:
2734         mutex_unlock(&port->port_lock);
2735         mutex_unlock(&dlpar_mem_lock);
2736 }
2737
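/*
 * Called when LPAR memory is added or removed (DLPAR): stop traffic
 * on all active ports, replace the adapter-wide memory region with
 * one that covers the new memory layout, then restart the queue
 * pairs and refill the receive queues.
 */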
2738 static void ehea_rereg_mrs(void)
2739 {
2740         int ret, i;
2741         struct ehea_adapter *adapter;
2742
2743         pr_info("LPAR memory changed - re-initializing driver\n");
2744
2745         list_for_each_entry(adapter, &adapter_list, list)
2746                 if (adapter->active_ports) {
2747                         /* Shutdown all ports */
2748                         for (i = 0; i < EHEA_MAX_PORTS; i++) {
2749                                 struct ehea_port *port = adapter->port[i];
2750                                 struct net_device *dev;
2751
2752                                 if (!port)
2753                                         continue;
2754
2755                                 dev = port->netdev;
2756
2757                                 if (dev->flags & IFF_UP) {
2758                                         mutex_lock(&port->port_lock);
2759                                         netif_tx_disable(dev);
2760                                         ehea_flush_sq(port);
2761                                         ret = ehea_stop_qps(dev);
2762                                         if (ret) {
2763                                                 mutex_unlock(&port->port_lock);
2764                                                 goto out;
2765                                         }
2766                                         port_napi_disable(port);
2767                                         mutex_unlock(&port->port_lock);
2768                                 }
2769                                 reset_sq_restart_flag(port);
2770                         }
2771
2772                         /* Unregister old memory region */
2773                         ret = ehea_rem_mr(&adapter->mr);
2774                         if (ret) {
2775                                 pr_err("unregister MR failed - driver inoperable!\n");
2776                                 goto out;
2777                         }
2778                 }
2779
2780         clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2781
2782         list_for_each_entry(adapter, &adapter_list, list)
2783                 if (adapter->active_ports) {
2784                         /* Register new memory region */
2785                         ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2786                         if (ret) {
2787                                 pr_err("register MR failed - driver inoperable!\n");
2788                                 goto out;
2789                         }
2790
2791                         /* Restart all ports */
2792                         for (i = 0; i < EHEA_MAX_PORTS; i++) {
2793                                 struct ehea_port *port = adapter->port[i];
2794
2795                                 if (port) {
2796                                         struct net_device *dev = port->netdev;
2797
2798                                         if (dev->flags & IFF_UP) {
2799                                                 mutex_lock(&port->port_lock);
2800                                                 ret = ehea_restart_qps(dev);
2801                                                 if (!ret) {
2802                                                         check_sqs(port);
2803                                                         port_napi_enable(port);
2804                                                         netif_tx_wake_all_queues(dev);
2805                                                 } else {
2806                                                         netdev_err(dev, "Unable to restart QPS\n");
2807                                                 }
2808                                                 mutex_unlock(&port->port_lock);
2809                                         }
2810                                 }
2811                         }
2812                 }
2813         pr_info("re-initializing driver complete\n");
2814 out:
2815         return;
2816 }
2817
2818 static void ehea_tx_watchdog(struct net_device *dev)
2819 {
2820         struct ehea_port *port = netdev_priv(dev);
2821
2822         if (netif_carrier_ok(dev) &&
2823             !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2824                 ehea_schedule_port_reset(port);
2825 }
2826
2827 static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2828 {
2829         struct hcp_query_ehea *cb;
2830         u64 hret;
2831         int ret;
2832
2833         cb = (void *)get_zeroed_page(GFP_KERNEL);
2834         if (!cb) {
2835                 ret = -ENOMEM;
2836                 goto out;
2837         }
2838
2839         hret = ehea_h_query_ehea(adapter->handle, cb);
2840
2841         if (hret != H_SUCCESS) {
2842                 ret = -EIO;
2843                 goto out_herr;
2844         }
2845
2846         adapter->max_mc_mac = cb->max_mc_mac - 1;
2847         ret = 0;
2848
2849 out_herr:
2850         free_page((unsigned long)cb);
2851 out:
2852         return ret;
2853 }
2854
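/*
 * Query whether jumbo frames are enabled on the port (control block
 * 4) and, if they are not, try to switch them on; *jumbo reports the
 * resulting state.
 */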
2855 static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2856 {
2857         struct hcp_ehea_port_cb4 *cb4;
2858         u64 hret;
2859         int ret = 0;
2860
2861         *jumbo = 0;
2862
2863         /* (Try to) enable jumbo frames; on success *jumbo is set to 1 */
2864         cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2865         if (!cb4) {
2866                 pr_err("no mem for cb4\n");
2867                 ret = -ENOMEM;
2868                 goto out;
2869         } else {
2870                 hret = ehea_h_query_ehea_port(port->adapter->handle,
2871                                               port->logical_port_id,
2872                                               H_PORT_CB4,
2873                                               H_PORT_CB4_JUMBO, cb4);
2874                 if (hret == H_SUCCESS) {
2875                         if (cb4->jumbo_frame)
2876                                 *jumbo = 1;
2877                         else {
2878                                 cb4->jumbo_frame = 1;
2879                                 hret = ehea_h_modify_ehea_port(port->adapter->
2880                                                                handle,
2881                                                                port->
2882                                                                logical_port_id,
2883                                                                H_PORT_CB4,
2884                                                                H_PORT_CB4_JUMBO,
2885                                                                cb4);
2886                                 if (hret == H_SUCCESS)
2887                                         *jumbo = 1;
2888                         }
2889                 } else
2890                         ret = -EINVAL;
2891
2892                 free_page((unsigned long)cb4);
2893         }
2894 out:
2895         return ret;
2896 }
2897
2898 static ssize_t ehea_show_port_id(struct device *dev,
2899                                  struct device_attribute *attr, char *buf)
2900 {
2901         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2902         return sprintf(buf, "%d", port->logical_port_id);
2903 }
2904
2905 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2906                    NULL);
2907
2908 static void __devinit logical_port_release(struct device *dev)
2909 {
2910         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2911         of_node_put(port->ofdev.dev.of_node);
2912 }
2913
2914 static struct device *ehea_register_port(struct ehea_port *port,
2915                                          struct device_node *dn)
2916 {
2917         int ret;
2918
2919         port->ofdev.dev.of_node = of_node_get(dn);
2920         port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2921         port->ofdev.dev.bus = &ibmebus_bus_type;
2922
2923         dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2924         port->ofdev.dev.release = logical_port_release;
2925
2926         ret = of_device_register(&port->ofdev);
2927         if (ret) {
2928                 pr_err("failed to register device. ret=%d\n", ret);
2929                 goto out;
2930         }
2931
2932         ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2933         if (ret) {
2934                 pr_err("failed to register attributes, ret=%d\n", ret);
2935                 goto out_unreg_of_dev;
2936         }
2937
2938         return &port->ofdev.dev;
2939
2940 out_unreg_of_dev:
2941         of_device_unregister(&port->ofdev);
2942 out:
2943         return NULL;
2944 }
2945
2946 static void ehea_unregister_port(struct ehea_port *port)
2947 {
2948         device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2949         of_device_unregister(&port->ofdev);
2950 }
2951
2952 static const struct net_device_ops ehea_netdev_ops = {
2953         .ndo_open               = ehea_open,
2954         .ndo_stop               = ehea_stop,
2955         .ndo_start_xmit         = ehea_start_xmit,
2956 #ifdef CONFIG_NET_POLL_CONTROLLER
2957         .ndo_poll_controller    = ehea_netpoll,
2958 #endif
2959         .ndo_get_stats64        = ehea_get_stats64,
2960         .ndo_set_mac_address    = ehea_set_mac_addr,
2961         .ndo_validate_addr      = eth_validate_addr,
2962         .ndo_set_rx_mode        = ehea_set_multicast_list,
2963         .ndo_change_mtu         = ehea_change_mtu,
2964         .ndo_vlan_rx_add_vid    = ehea_vlan_rx_add_vid,
2965         .ndo_vlan_rx_kill_vid   = ehea_vlan_rx_kill_vid,
2966         .ndo_tx_timeout         = ehea_tx_watchdog,
2967 };
2968
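/*
 * Allocate and initialize one logical port: create the multiqueue
 * net_device, sense the port attributes from firmware, register the port
 * on ibmebus and the netdev with the stack, then query (and if necessary
 * enable) jumbo frames.  Returns the new port, or NULL on any failure.
 */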
2969 static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2970                                          u32 logical_port_id,
2971                                          struct device_node *dn)
2972 {
2973         int ret;
2974         struct net_device *dev;
2975         struct ehea_port *port;
2976         struct device *port_dev;
2977         int jumbo;
2978
2979         /* allocate memory for the port structures */
2980         dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2981
2982         if (!dev) {
2983                 pr_err("no mem for net_device\n");
2984                 ret = -ENOMEM;
2985                 goto out_err;
2986         }
2987
2988         port = netdev_priv(dev);
2989
2990         mutex_init(&port->port_lock);
2991         port->state = EHEA_PORT_DOWN;
2992         port->sig_comp_iv = sq_entries / 10;
2993
2994         port->adapter = adapter;
2995         port->netdev = dev;
2996         port->logical_port_id = logical_port_id;
2997
2998         port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2999
3000         port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3001         if (!port->mc_list) {
3002                 ret = -ENOMEM;
3003                 goto out_free_ethdev;
3004         }
3005
3006         INIT_LIST_HEAD(&port->mc_list->list);
3007
3008         ret = ehea_sense_port_attr(port);
3009         if (ret)
3010                 goto out_free_mc_list;
3011
3012         netif_set_real_num_rx_queues(dev, port->num_def_qps);
3013         netif_set_real_num_tx_queues(dev, port->num_def_qps);
3014
3015         port_dev = ehea_register_port(port, dn);
3016         if (!port_dev)
3017                 goto out_free_mc_list;
3018
3019         SET_NETDEV_DEV(dev, port_dev);
3020
3021         /* initialize net_device structure */
3022         memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3023
3024         dev->netdev_ops = &ehea_netdev_ops;
3025         ehea_set_ethtool_ops(dev);
3026
3027         dev->hw_features = NETIF_F_SG | NETIF_F_TSO
3028                       | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
3029         dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3030                       | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3031                       | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3032                       | NETIF_F_RXCSUM;
3033         dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3034                         NETIF_F_IP_CSUM;
3035         dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3036
3037         INIT_WORK(&port->reset_task, ehea_reset_port);
3038         INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3039
3040         init_waitqueue_head(&port->swqe_avail_wq);
3041         init_waitqueue_head(&port->restart_wq);
3042
3043         memset(&port->stats, 0, sizeof(struct net_device_stats));
3044         ret = register_netdev(dev);
3045         if (ret) {
3046                 pr_err("register_netdev failed. ret=%d\n", ret);
3047                 goto out_unreg_port;
3048         }
3049
3050         ret = ehea_get_jumboframe_status(port, &jumbo);
3051         if (ret)
3052                 netdev_err(dev, "failed determining jumbo frame status\n");
3053
3054         netdev_info(dev, "Jumbo frames are %sabled\n",
3055                     jumbo == 1 ? "en" : "dis");
3056
3057         adapter->active_ports++;
3058
3059         return port;
3060
3061 out_unreg_port:
3062         ehea_unregister_port(port);
3063
3064 out_free_mc_list:
3065         kfree(port->mc_list);
3066
3067 out_free_ethdev:
3068         free_netdev(dev);
3069
3070 out_err:
3071         pr_err("setting up logical port with id=%d failed, ret=%d\n",
3072                logical_port_id, ret);
3073         return NULL;
3074 }
3075
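/* Undo ehea_setup_single_port(): stop deferred work and free the port */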
3076 static void ehea_shutdown_single_port(struct ehea_port *port)
3077 {
3078         struct ehea_adapter *adapter = port->adapter;
3079
3080         cancel_work_sync(&port->reset_task);
3081         cancel_delayed_work_sync(&port->stats_work);
3082         unregister_netdev(port->netdev);
3083         ehea_unregister_port(port);
3084         kfree(port->mc_list);
3085         free_netdev(port->netdev);
3086         adapter->active_ports--;
3087 }
3088
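/*
 * Scan the children of the adapter's device-tree node; each child with an
 * "ibm,hea-port-no" property describes one logical port.  Before every
 * port is set up, ehea_add_adapter_mr() ensures the adapter's memory
 * region exists.  Note: the index into adapter->port[] is not checked
 * against EHEA_MAX_PORTS; the device tree is trusted to stay within that
 * limit.
 */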
3089 static int ehea_setup_ports(struct ehea_adapter *adapter)
3090 {
3091         struct device_node *lhea_dn;
3092         struct device_node *eth_dn = NULL;
3093
3094         const u32 *dn_log_port_id;
3095         int i = 0;
3096
3097         lhea_dn = adapter->ofdev->dev.of_node;
3098         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3099
3100                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3101                                                  NULL);
3102                 if (!dn_log_port_id) {
3103                         pr_err("bad device node: eth_dn name=%s\n",
3104                                eth_dn->full_name);
3105                         continue;
3106                 }
3107
3108                 if (ehea_add_adapter_mr(adapter)) {
3109                         pr_err("creating MR failed\n");
3110                         of_node_put(eth_dn);
3111                         return -EIO;
3112                 }
3113
3114                 adapter->port[i] = ehea_setup_single_port(adapter,
3115                                                           *dn_log_port_id,
3116                                                           eth_dn);
3117                 if (adapter->port[i])
3118                         netdev_info(adapter->port[i]->netdev,
3119                                     "logical port id #%d\n", *dn_log_port_id);
3120                 else
3121                         ehea_remove_adapter_mr(adapter);
3122
3123                 i++;
3124         }
3125         return 0;
3126 }
3127
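/* Find the device-tree child node matching a given logical port id */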
3128 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3129                                            u32 logical_port_id)
3130 {
3131         struct device_node *lhea_dn;
3132         struct device_node *eth_dn = NULL;
3133         const u32 *dn_log_port_id;
3134
3135         lhea_dn = adapter->ofdev->dev.of_node;
3136         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3137
3138                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3139                                                  NULL);
3140                 if (dn_log_port_id && *dn_log_port_id == logical_port_id)
3141                         return eth_dn;
3143         }
3144
3145         return NULL;
3146 }
3147
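/*
 * sysfs store handler for the adapter's probe_port attribute: parse a
 * logical port id from the buffer and hot-add the corresponding port.
 */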
3148 static ssize_t ehea_probe_port(struct device *dev,
3149                                struct device_attribute *attr,
3150                                const char *buf, size_t count)
3151 {
3152         struct ehea_adapter *adapter = dev_get_drvdata(dev);
3153         struct ehea_port *port;
3154         struct device_node *eth_dn = NULL;
3155         int i;
3156
3157         u32 logical_port_id;
3158
3159         if (sscanf(buf, "%u", &logical_port_id) != 1)
                     return -EINVAL;
3160
3161         port = ehea_get_port(adapter, logical_port_id);
3162
3163         if (port) {
3164                 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3165                             logical_port_id);
3166                 return -EINVAL;
3167         }
3168
3169         eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3170
3171         if (!eth_dn) {
3172                 pr_info("no logical port with id %d found\n", logical_port_id);
3173                 return -EINVAL;
3174         }
3175
3176         if (ehea_add_adapter_mr(adapter)) {
3177                 pr_err("creating MR failed\n");
3178                 return -EIO;
3179         }
3180
3181         port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3182
3183         of_node_put(eth_dn);
3184
3185         if (port) {
3186                 for (i = 0; i < EHEA_MAX_PORTS; i++)
3187                         if (!adapter->port[i]) {
3188                                 adapter->port[i] = port;
3189                                 break;
3190                         }
3191
3192                 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3193                             logical_port_id);
3194         } else {
3195                 ehea_remove_adapter_mr(adapter);
3196                 return -EIO;
3197         }
3198
3199         return (ssize_t) count;
3200 }
3201
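/*
 * sysfs store handler for the adapter's remove_port attribute: shut down
 * the named logical port and release its slot in the adapter's port array.
 */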
3202 static ssize_t ehea_remove_port(struct device *dev,
3203                                 struct device_attribute *attr,
3204                                 const char *buf, size_t count)
3205 {
3206         struct ehea_adapter *adapter = dev_get_drvdata(dev);
3207         struct ehea_port *port;
3208         int i;
3209         u32 logical_port_id;
3210
3211         if (sscanf(buf, "%u", &logical_port_id) != 1)
                     return -EINVAL;
3212
3213         port = ehea_get_port(adapter, logical_port_id);
3214
3215         if (port) {
3216                 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3217                             logical_port_id);
3218
3219                 ehea_shutdown_single_port(port);
3220
3221                 for (i = 0; i < EHEA_MAX_PORTS; i++)
3222                         if (adapter->port[i] == port) {
3223                                 adapter->port[i] = NULL;
3224                                 break;
3225                         }
3226         } else {
3227                 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3228                        logical_port_id);
3229                 return -EINVAL;
3230         }
3231
3232         ehea_remove_adapter_mr(adapter);
3233
3234         return (ssize_t) count;
3235 }
3236
3237 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3238 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3239
3240 static int ehea_create_device_sysfs(struct platform_device *dev)
3241 {
3242         int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3243         if (ret)
3244                 goto out;
3245
3246         ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3247 out:
3248         return ret;
3249 }
3250
3251 static void ehea_remove_device_sysfs(struct platform_device *dev)
3252 {
3253         device_remove_file(&dev->dev, &dev_attr_probe_port);
3254         device_remove_file(&dev->dev, &dev_attr_remove_port);
3255 }
3256
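/*
 * Adapter probe: look up the firmware handle ("ibm,hea-handle") in the
 * device tree, sense the adapter attributes, create the notification
 * event queue (NEQ) with its tasklet and interrupt, expose the
 * probe_port/remove_port sysfs controls and set up all ports found in
 * the device tree.
 */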
3257 static int __devinit ehea_probe_adapter(struct platform_device *dev,
3258                                         const struct of_device_id *id)
3259 {
3260         struct ehea_adapter *adapter;
3261         const u64 *adapter_handle;
3262         int ret;
3263
3264         if (!dev || !dev->dev.of_node) {
3265                 pr_err("Invalid ibmebus device probed\n");
3266                 return -EINVAL;
3267         }
3268
3269         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3270         if (!adapter) {
3271                 ret = -ENOMEM;
3272                 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3273                 goto out;
3274         }
3275
3276         list_add(&adapter->list, &adapter_list);
3277
3278         adapter->ofdev = dev;
3279
3280         adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3281                                          NULL);
3282         if (adapter_handle)
3283                 adapter->handle = *adapter_handle;
3284
3285         if (!adapter->handle) {
3286                 dev_err(&dev->dev, "failed getting handle for adapter '%s'\n",
3287                         dev->dev.of_node->full_name);
3288                 ret = -ENODEV;
3289                 goto out_free_ad;
3290         }
3291
3292         adapter->pd = EHEA_PD_ID;
3293
3294         dev_set_drvdata(&dev->dev, adapter);
3295
3297         /* initialize adapter and ports */
3298         /* get adapter properties */
3299         ret = ehea_sense_adapter_attr(adapter);
3300         if (ret) {
3301                 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3302                 goto out_free_ad;
3303         }
3304
3305         adapter->neq = ehea_create_eq(adapter,
3306                                       EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3307         if (!adapter->neq) {
3308                 ret = -EIO;
3309                 dev_err(&dev->dev, "NEQ creation failed\n");
3310                 goto out_free_ad;
3311         }
3312
3313         tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3314                      (unsigned long)adapter);
3315
3316         ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3317                                   ehea_interrupt_neq, IRQF_DISABLED,
3318                                   "ehea_neq", adapter);
3319         if (ret) {
3320                 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3321                 goto out_kill_eq;
3322         }
3323
3324         ret = ehea_create_device_sysfs(dev);
3325         if (ret)
3326                 goto out_free_irq;
3327
3328         ret = ehea_setup_ports(adapter);
3329         if (ret) {
3330                 dev_err(&dev->dev, "setup_ports failed\n");
3331                 goto out_rem_dev_sysfs;
3332         }
3333
3334         ret = 0;
3335         goto out;
3336
3337 out_rem_dev_sysfs:
3338         ehea_remove_device_sysfs(dev);
3339
3340 out_free_irq:
3341         ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3342
3343 out_kill_eq:
3344         ehea_destroy_eq(adapter->neq);
3345
3346 out_free_ad:
3347         list_del(&adapter->list);
3348         kfree(adapter);
3349
3350 out:
3351         ehea_update_firmware_handles();
3352
3353         return ret;
3354 }
3355
3356 static int __devexit ehea_remove(struct platform_device *dev)
3357 {
3358         struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3359         int i;
3360
3361         for (i = 0; i < EHEA_MAX_PORTS; i++)
3362                 if (adapter->port[i]) {
3363                         ehea_shutdown_single_port(adapter->port[i]);
3364                         adapter->port[i] = NULL;
3365                 }
3366
3367         ehea_remove_device_sysfs(dev);
3368
3369         ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3370         tasklet_kill(&adapter->neq_tasklet);
3371
3372         ehea_destroy_eq(adapter->neq);
3373         ehea_remove_adapter_mr(adapter);
3374         list_del(&adapter->list);
3375         kfree(adapter);
3376
3377         ehea_update_firmware_handles();
3378
3379         return 0;
3380 }
3381
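/*
 * Crash/kexec shutdown hook: forcibly free all cached firmware resource
 * handles and drop all broadcast/multicast registrations so the crash
 * kernel can claim the hardware cleanly.
 */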
3382 static void ehea_crash_handler(void)
3383 {
3384         int i;
3385
3386         if (ehea_fw_handles.arr)
3387                 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3388                         ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3389                                              ehea_fw_handles.arr[i].fwh,
3390                                              FORCE_FREE);
3391
3392         if (ehea_bcmc_regs.arr)
3393                 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3394                         ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3395                                               ehea_bcmc_regs.arr[i].port_id,
3396                                               ehea_bcmc_regs.arr[i].reg_type,
3397                                               ehea_bcmc_regs.arr[i].macaddr,
3398                                               0, H_DEREG_BCMC);
3399 }
3400
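/*
 * Memory hotplug (DLPAR) notifier: when memory goes on- or offline, stop
 * all transfers, update the driver's section bitmap and re-register the
 * memory regions with the hardware.
 */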
3401 static int ehea_mem_notifier(struct notifier_block *nb,
3402                              unsigned long action, void *data)
3403 {
3404         int ret = NOTIFY_BAD;
3405         struct memory_notify *arg = data;
3406
3407         mutex_lock(&dlpar_mem_lock);
3408
3409         switch (action) {
3410         case MEM_CANCEL_OFFLINE:
3411                 pr_info("memory offlining canceled\n");
3412                 /* Re-add the canceled memory block: fall through */
3413         case MEM_ONLINE:
3414                 pr_info("memory is going online\n");
3415                 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3416                 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3417                         goto out_unlock;
3418                 ehea_rereg_mrs();
3419                 break;
3420         case MEM_GOING_OFFLINE:
3421                 pr_info("memory is going offline\n");
3422                 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3423                 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3424                         goto out_unlock;
3425                 ehea_rereg_mrs();
3426                 break;
3427         default:
3428                 break;
3429         }
3430
3431         ehea_update_firmware_handles();
3432         ret = NOTIFY_OK;
3433
3434 out_unlock:
3435         mutex_unlock(&dlpar_mem_lock);
3436         return ret;
3437 }
3438
3439 static struct notifier_block ehea_mem_nb = {
3440         .notifier_call = ehea_mem_notifier,
3441 };
3442
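/*
 * Reboot notifier: on SYS_RESTART, unregister the ibmebus driver so every
 * adapter and port is torn down and its firmware resources are freed
 * before the machine restarts.
 */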
3443 static int ehea_reboot_notifier(struct notifier_block *nb,
3444                                 unsigned long action, void *unused)
3445 {
3446         if (action == SYS_RESTART) {
3447                 pr_info("Reboot: freeing all eHEA resources\n");
3448                 ibmebus_unregister_driver(&ehea_driver);
3449         }
3450         return NOTIFY_DONE;
3451 }
3452
3453 static struct notifier_block ehea_reboot_nb = {
3454         .notifier_call = ehea_reboot_notifier,
3455 };
3456
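/* Validate the queue-size module parameters against the hardware limits */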
3457 static int check_module_parm(void)
3458 {
3459         int ret = 0;
3460
3461         if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3462             (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3463                 pr_info("Bad parameter: rq1_entries\n");
3464                 ret = -EINVAL;
3465         }
3466         if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3467             (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3468                 pr_info("Bad parameter: rq2_entries\n");
3469                 ret = -EINVAL;
3470         }
3471         if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3472             (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3473                 pr_info("Bad parameter: rq3_entries\n");
3474                 ret = -EINVAL;
3475         }
3476         if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3477             (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3478                 pr_info("Bad parameter: sq_entries\n");
3479                 ret = -EINVAL;
3480         }
3481
3482         return ret;
3483 }
3484
3485 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3486                                       char *buf)
3487 {
3488         return sprintf(buf, "%d\n", EHEA_CAPABILITIES);
3489 }
3490
3491 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3492                    ehea_show_capabilities, NULL);
3493
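/*
 * Module init: validate parameters, build the physical-memory busmap,
 * register the reboot, memory and crash notifiers (failures there are
 * logged but non-fatal), then register the driver on ibmebus and expose
 * the capabilities attribute.
 */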
3494 static int __init ehea_module_init(void)
3495 {
3496         int ret;
3497
3498         pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3499
3500         memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3501         memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3502
3503         mutex_init(&ehea_fw_handles.lock);
3504         spin_lock_init(&ehea_bcmc_regs.lock);
3505
3506         ret = check_module_parm();
3507         if (ret)
3508                 goto out;
3509
3510         ret = ehea_create_busmap();
3511         if (ret)
3512                 goto out;
3513
3514         ret = register_reboot_notifier(&ehea_reboot_nb);
3515         if (ret)
3516                 pr_info("failed registering reboot notifier\n");
3517
3518         ret = register_memory_notifier(&ehea_mem_nb);
3519         if (ret)
3520                 pr_info("failed registering memory remove notifier\n");
3521
3522         ret = crash_shutdown_register(ehea_crash_handler);
3523         if (ret)
3524                 pr_info("failed registering crash handler\n");
3525
3526         ret = ibmebus_register_driver(&ehea_driver);
3527         if (ret) {
3528                 pr_err("failed registering eHEA device driver on ebus\n");
3529                 goto out2;
3530         }
3531
3532         ret = driver_create_file(&ehea_driver.driver,
3533                                  &driver_attr_capabilities);
3534         if (ret) {
3535                 pr_err("failed to register capabilities attribute, ret=%d\n",
3536                        ret);
3537                 goto out3;
3538         }
3539
3540         return ret;
3541
3542 out3:
3543         ibmebus_unregister_driver(&ehea_driver);
3544 out2:
3545         unregister_memory_notifier(&ehea_mem_nb);
3546         unregister_reboot_notifier(&ehea_reboot_nb);
3547         crash_shutdown_unregister(ehea_crash_handler);
3548 out:
3549         return ret;
3550 }
3551
3552 static void __exit ehea_module_exit(void)
3553 {
3554         int ret;
3555
3556         driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3557         ibmebus_unregister_driver(&ehea_driver);
3558         unregister_reboot_notifier(&ehea_reboot_nb);
3559         ret = crash_shutdown_unregister(ehea_crash_handler);
3560         if (ret)
3561                 pr_info("failed unregistering crash handler\n");
3562         unregister_memory_notifier(&ehea_mem_nb);
3563         kfree(ehea_fw_handles.arr);
3564         kfree(ehea_bcmc_regs.arr);
3565         ehea_destroy_busmap();
3566 }
3567
3568 module_init(ehea_module_init);
3569 module_exit(ehea_module_exit);