be2net: Updating Module Author string and log message string to "Emulex Corporation"
[~shefty/rdma-dev.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each bit of the low
 * Unrecoverable Error status register; array index == bit position.
 */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR: human-readable name for each bit of the high
 * Unrecoverable Error status register; array index == bit position.
 * Trailing "Unknown" entries are reserved/undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124         return (adapter->function_mode & FLEX10_MODE ||
125                 adapter->function_mode & VNIC_MODE ||
126                 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL);
150         if (!mem->va)
151                 return -ENOMEM;
152         memset(mem->va, 0, mem->size);
153         return 0;
154 }
155
156 static void be_intr_set(struct be_adapter *adapter, bool enable)
157 {
158         u32 reg, enabled;
159
160         if (adapter->eeh_error)
161                 return;
162
163         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164                                 &reg);
165         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
167         if (!enabled && enable)
168                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169         else if (enabled && !enable)
170                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
171         else
172                 return;
173
174         pci_write_config_dword(adapter->pdev,
175                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176 }
177
178 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_RQ_RING_ID_MASK;
182         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_RQ_OFFSET);
186 }
187
188 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
189 {
190         u32 val = 0;
191         val |= qid & DB_TXULP_RING_ID_MASK;
192         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193
194         wmb();
195         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
196 }
197
198 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
199                 bool arm, bool clear_int, u16 num_popped)
200 {
201         u32 val = 0;
202         val |= qid & DB_EQ_RING_ID_MASK;
203         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
205
206         if (adapter->eeh_error)
207                 return;
208
209         if (arm)
210                 val |= 1 << DB_EQ_REARM_SHIFT;
211         if (clear_int)
212                 val |= 1 << DB_EQ_CLR_SHIFT;
213         val |= 1 << DB_EQ_EVNT_SHIFT;
214         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
215         iowrite32(val, adapter->db + DB_EQ_OFFSET);
216 }
217
218 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
219 {
220         u32 val = 0;
221         val |= qid & DB_CQ_RING_ID_MASK;
222         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
224
225         if (adapter->eeh_error)
226                 return;
227
228         if (arm)
229                 val |= 1 << DB_CQ_REARM_SHIFT;
230         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
231         iowrite32(val, adapter->db + DB_CQ_OFFSET);
232 }
233
234 static int be_mac_addr_set(struct net_device *netdev, void *p)
235 {
236         struct be_adapter *adapter = netdev_priv(netdev);
237         struct sockaddr *addr = p;
238         int status = 0;
239         u8 current_mac[ETH_ALEN];
240         u32 pmac_id = adapter->pmac_id[0];
241         bool active_mac = true;
242
243         if (!is_valid_ether_addr(addr->sa_data))
244                 return -EADDRNOTAVAIL;
245
246         /* For BE VF, MAC address is already activated by PF.
247          * Hence only operation left is updating netdev->devaddr.
248          * Update it if user is passing the same MAC which was used
249          * during configuring VF MAC from PF(Hypervisor).
250          */
251         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252                 status = be_cmd_mac_addr_query(adapter, current_mac,
253                                                false, adapter->if_handle, 0);
254                 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255                         goto done;
256                 else
257                         goto err;
258         }
259
260         if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261                 goto done;
262
263         /* For Lancer check if any MAC is active.
264          * If active, get its mac id.
265          */
266         if (lancer_chip(adapter) && !be_physfn(adapter))
267                 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268                                          &pmac_id, 0);
269
270         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271                                  adapter->if_handle,
272                                  &adapter->pmac_id[0], 0);
273
274         if (status)
275                 goto err;
276
277         if (active_mac)
278                 be_cmd_pmac_del(adapter, adapter->if_handle,
279                                 pmac_id, 0);
280 done:
281         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282         return 0;
283 err:
284         dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
285         return status;
286 }
287
288 /* BE2 supports only v0 cmd */
289 static void *hw_stats_from_cmd(struct be_adapter *adapter)
290 {
291         if (BE2_chip(adapter)) {
292                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294                 return &cmd->hw_stats;
295         } else  {
296                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298                 return &cmd->hw_stats;
299         }
300 }
301
302 /* BE2 supports only v0 cmd */
303 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304 {
305         if (BE2_chip(adapter)) {
306                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308                 return &hw_stats->erx;
309         } else {
310                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312                 return &hw_stats->erx;
313         }
314 }
315
/* Parse a v0-format (BE2) GET_STATS response into adapter->drv_stats.
 * Byte-swaps the whole response in place and copies this function's
 * port counters plus the shared RXF/PMEM counters.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        /* response is little-endian; convert in place before reading */
        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        /* v0 reports address and vlan mismatch drops separately; fold them */
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        /* v0 reports jabber events per physical port, not per function */
        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
364
/* Parse a v1-format (BE3/Skyhawk) GET_STATS response into
 * adapter->drv_stats. Byte-swaps the whole response in place and copies
 * this function's port counters plus the shared RXF/PMEM counters.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        /* response is little-endian; convert in place before reading */
        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        /* unlike v0, v1 reports mismatch drops as a single counter */
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        /* v1 reports jabber events per function, not per physical port */
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
409
/* Parse a Lancer per-port (pport) stats response into adapter->drv_stats.
 * Byte-swaps the response in place; *_lo fields are the low 32 bits of
 * 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        /* response is little-endian; convert in place before reading */
        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        /* Lancer reports address and vlan mismatch drops separately */
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}
448
449 static void accumulate_16bit_val(u32 *acc, u16 val)
450 {
451 #define lo(x)                   (x & 0xFFFF)
452 #define hi(x)                   (x & 0xFFFF0000)
453         bool wrapped = val < lo(*acc);
454         u32 newacc = hi(*acc) + val;
455
456         if (wrapped)
457                 newacc += 65536;
458         ACCESS_ONCE(*acc) = newacc;
459 }
460
/* Parse the raw FW stats response into drv_stats and per-RXQ drop
 * counters. Dispatches on chip type: Lancer pport stats, BE2 v0 stats,
 * or v1 stats for BE3 and Skyhawk.
 */
void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments \
                                             [rxo->q.id]);
                }
        }
}
487
/* ndo_get_stats64: aggregate per-queue SW packet/byte counters and
 * FW-derived error counters into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                /* seqcount retry loop: take a consistent snapshot of the
                 * 64-bit counters (needed on 32-bit hosts)
                 */
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                /* same seqcount snapshot pattern for the TX counters */
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is no per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}
553
554 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
555 {
556         struct net_device *netdev = adapter->netdev;
557
558         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
559                 netif_carrier_off(netdev);
560                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
561         }
562
563         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564                 netif_carrier_on(netdev);
565         else
566                 netif_carrier_off(netdev);
567 }
568
569 static void be_tx_stats_update(struct be_tx_obj *txo,
570                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
571 {
572         struct be_tx_stats *stats = tx_stats(txo);
573
574         u64_stats_update_begin(&stats->sync);
575         stats->tx_reqs++;
576         stats->tx_wrbs += wrb_cnt;
577         stats->tx_bytes += copied;
578         stats->tx_pkts += (gso_segs ? gso_segs : 1);
579         if (stopped)
580                 stats->tx_stops++;
581         u64_stats_update_end(&stats->sync);
582 }
583
584 /* Determine number of WRB entries needed to xmit data in an skb */
585 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586                                                                 bool *dummy)
587 {
588         int cnt = (skb->len > skb->data_len);
589
590         cnt += skb_shinfo(skb)->nr_frags;
591
592         /* to account for hdr wrb */
593         cnt++;
594         if (lancer_chip(adapter) || !(cnt & 1)) {
595                 *dummy = false;
596         } else {
597                 /* add a dummy to make it an even num */
598                 cnt++;
599                 *dummy = true;
600         }
601         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602         return cnt;
603 }
604
605 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606 {
607         wrb->frag_pa_hi = upper_32_bits(addr);
608         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
610         wrb->rsvd0 = 0;
611 }
612
613 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614                                         struct sk_buff *skb)
615 {
616         u8 vlan_prio;
617         u16 vlan_tag;
618
619         vlan_tag = vlan_tx_tag_get(skb);
620         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621         /* If vlan priority provided by OS is NOT in available bmap */
622         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624                                 adapter->recommended_prio;
625
626         return vlan_tag;
627 }
628
629 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630 {
631         return vlan_tx_tag_present(skb) || adapter->pvid;
632 }
633
/* Fill the per-packet header WRB for @skb: CRC, LSO/checksum-offload
 * and VLAN flags, plus the WRB count and total payload length.
 * @wrb_cnt: number of WRBs this packet occupies (incl. hdr/dummy)
 * @len:     total payload bytes mapped for this packet
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                /* lso6 flag for TSO over IPv6; not used on Lancer */
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* request HW checksum offload for TCP/UDP */
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
667
668 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
669                 bool unmap_single)
670 {
671         dma_addr_t dma;
672
673         be_dws_le_to_cpu(wrb, sizeof(*wrb));
674
675         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
676         if (wrb->frag_len) {
677                 if (unmap_single)
678                         dma_unmap_single(dev, dma, wrb->frag_len,
679                                          DMA_TO_DEVICE);
680                 else
681                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
682         }
683 }
684
/* Map @skb for DMA and post its WRBs (hdr + linear + frags + optional
 * dummy) onto @txq. Returns the number of payload bytes mapped, or 0 on
 * a DMA mapping error - in which case all mappings made so far are
 * undone and the queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        /* reserve the first slot for the header WRB; it is filled last,
         * once the total mapped length is known
         */
        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;   /* rollback point for the error path */

        /* map the skb's linear (non-paged) data, if any */
        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        /* map each page fragment into its own WRB */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        /* zero-length pad WRB to keep the WRB count even (non-Lancer HW) */
        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        /* unwind: unmap everything posted so far, starting at map_head.
         * Only the first WRB can be a single mapping; the rest are pages.
         */
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
750
751 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
752                                              struct sk_buff *skb)
753 {
754         u16 vlan_tag = 0;
755
756         skb = skb_share_check(skb, GFP_ATOMIC);
757         if (unlikely(!skb))
758                 return skb;
759
760         if (vlan_tx_tag_present(skb)) {
761                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762                 __vlan_put_tag(skb, vlan_tag);
763                 skb->vlan_tci = 0;
764         }
765
766         return skb;
767 }
768
/* ndo_start_xmit handler: apply HW-bug workarounds, build the WRBs for
 * @skb, ring the TX doorbell and update SW stats. Always returns
 * NETDEV_TX_OK; on failure the skb is dropped (freed) here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                /* trim the frame back to the IP datagram length */
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialze the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                /* DMA mapping failed: restore queue head and drop the skb */
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}
834
835 static int be_change_mtu(struct net_device *netdev, int new_mtu)
836 {
837         struct be_adapter *adapter = netdev_priv(netdev);
838         if (new_mtu < BE_MIN_MTU ||
839                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
840                                         (ETH_HLEN + ETH_FCS_LEN))) {
841                 dev_info(&adapter->pdev->dev,
842                         "MTU must be between %d and %d bytes\n",
843                         BE_MIN_MTU,
844                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
845                 return -EINVAL;
846         }
847         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
848                         netdev->mtu, new_mtu);
849         netdev->mtu = new_mtu;
850         return 0;
851 }
852
853 /*
854  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
855  * If the user configures more, place BE in vlan promiscuous mode.
856  */
857 static int be_vid_config(struct be_adapter *adapter)
858 {
859         u16 vids[BE_NUM_VLANS_SUPPORTED];
860         u16 num = 0, i;
861         int status = 0;
862
863         /* No need to further configure vids if in promiscuous mode */
864         if (adapter->promiscuous)
865                 return 0;
866
867         if (adapter->vlans_added > adapter->max_vlans)
868                 goto set_vlan_promisc;
869
870         /* Construct VLAN Table to give to HW */
871         for (i = 0; i < VLAN_N_VID; i++)
872                 if (adapter->vlan_tag[i])
873                         vids[num++] = cpu_to_le16(i);
874
875         status = be_cmd_vlan_config(adapter, adapter->if_handle,
876                                     vids, num, 1, 0);
877
878         /* Set to VLAN promisc mode as setting VLAN filter failed */
879         if (status) {
880                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
881                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
882                 goto set_vlan_promisc;
883         }
884
885         return status;
886
887 set_vlan_promisc:
888         status = be_cmd_vlan_config(adapter, adapter->if_handle,
889                                     NULL, 0, 1, 1);
890         return status;
891 }
892
893 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
894 {
895         struct be_adapter *adapter = netdev_priv(netdev);
896         int status = 0;
897
898         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
899                 status = -EINVAL;
900                 goto ret;
901         }
902
903         /* Packets with VID 0 are always received by Lancer by default */
904         if (lancer_chip(adapter) && vid == 0)
905                 goto ret;
906
907         adapter->vlan_tag[vid] = 1;
908         if (adapter->vlans_added <= (adapter->max_vlans + 1))
909                 status = be_vid_config(adapter);
910
911         if (!status)
912                 adapter->vlans_added++;
913         else
914                 adapter->vlan_tag[vid] = 0;
915 ret:
916         return status;
917 }
918
919 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
920 {
921         struct be_adapter *adapter = netdev_priv(netdev);
922         int status = 0;
923
924         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
925                 status = -EINVAL;
926                 goto ret;
927         }
928
929         /* Packets with VID 0 are always received by Lancer by default */
930         if (lancer_chip(adapter) && vid == 0)
931                 goto ret;
932
933         adapter->vlan_tag[vid] = 0;
934         if (adapter->vlans_added <= adapter->max_vlans)
935                 status = be_vid_config(adapter);
936
937         if (!status)
938                 adapter->vlans_added--;
939         else
940                 adapter->vlan_tag[vid] = 1;
941 ret:
942         return status;
943 }
944
/* ndo_set_rx_mode handler: programs the RX filter (promisc, multicast,
 * secondary unicast MACs) to match the netdev flags and address lists.
 * Falls back to (multicast) promiscuous mode when HW filter slots are
 * exhausted.  Order of operations below matters.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush all previously programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addresses than PMAC slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1006
/* ndo_set_vf_mac handler: programs @mac as the MAC address of VF @vf.
 * Returns 0 on success, -EPERM without SR-IOV, -EINVAL for a bad MAC
 * or VF index, or the FW command status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* On Lancer: delete the currently active MAC (if any),
		 * then push the new one via the MAC-list command.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of pmac_del is overwritten by
		 * pmac_add below, so a failed delete (e.g. no MAC programmed
		 * yet) is silently ignored — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Cache the new MAC only after HW accepted it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1046
1047 static int be_get_vf_config(struct net_device *netdev, int vf,
1048                         struct ifla_vf_info *vi)
1049 {
1050         struct be_adapter *adapter = netdev_priv(netdev);
1051         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1052
1053         if (!sriov_enabled(adapter))
1054                 return -EPERM;
1055
1056         if (vf >= adapter->num_vfs)
1057                 return -EINVAL;
1058
1059         vi->vf = vf;
1060         vi->tx_rate = vf_cfg->tx_rate;
1061         vi->vlan = vf_cfg->vlan_tag;
1062         vi->qos = 0;
1063         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1064
1065         return 0;
1066 }
1067
1068 static int be_set_vf_vlan(struct net_device *netdev,
1069                         int vf, u16 vlan, u8 qos)
1070 {
1071         struct be_adapter *adapter = netdev_priv(netdev);
1072         int status = 0;
1073
1074         if (!sriov_enabled(adapter))
1075                 return -EPERM;
1076
1077         if (vf >= adapter->num_vfs || vlan > 4095)
1078                 return -EINVAL;
1079
1080         if (vlan) {
1081                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1082                         /* If this is new value, program it. Else skip. */
1083                         adapter->vf_cfg[vf].vlan_tag = vlan;
1084
1085                         status = be_cmd_set_hsw_config(adapter, vlan,
1086                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1087                 }
1088         } else {
1089                 /* Reset Transparent Vlan Tagging. */
1090                 adapter->vf_cfg[vf].vlan_tag = 0;
1091                 vlan = adapter->vf_cfg[vf].def_vid;
1092                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1093                         adapter->vf_cfg[vf].if_handle);
1094         }
1095
1096
1097         if (status)
1098                 dev_info(&adapter->pdev->dev,
1099                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1100         return status;
1101 }
1102
1103 static int be_set_vf_tx_rate(struct net_device *netdev,
1104                         int vf, int rate)
1105 {
1106         struct be_adapter *adapter = netdev_priv(netdev);
1107         int status = 0;
1108
1109         if (!sriov_enabled(adapter))
1110                 return -EPERM;
1111
1112         if (vf >= adapter->num_vfs)
1113                 return -EINVAL;
1114
1115         if (rate < 100 || rate > 10000) {
1116                 dev_err(&adapter->pdev->dev,
1117                         "tx rate must be between 100 and 10000 Mbps\n");
1118                 return -EINVAL;
1119         }
1120
1121         if (lancer_chip(adapter))
1122                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123         else
1124                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1125
1126         if (status)
1127                 dev_err(&adapter->pdev->dev,
1128                                 "tx rate %d on VF %d failed\n", rate, vf);
1129         else
1130                 adapter->vf_cfg[vf].tx_rate = rate;
1131         return status;
1132 }
1133
1134 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1135 {
1136         struct pci_dev *dev, *pdev = adapter->pdev;
1137         int vfs = 0, assigned_vfs = 0, pos;
1138         u16 offset, stride;
1139
1140         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1141         if (!pos)
1142                 return 0;
1143         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1144         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1145
1146         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1147         while (dev) {
1148                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1149                         vfs++;
1150                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1151                                 assigned_vfs++;
1152                 }
1153                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1154         }
1155         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1156 }
1157
1158 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1159 {
1160         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1161         ulong now = jiffies;
1162         ulong delta = now - stats->rx_jiffies;
1163         u64 pkts;
1164         unsigned int start, eqd;
1165
1166         if (!eqo->enable_aic) {
1167                 eqd = eqo->eqd;
1168                 goto modify_eqd;
1169         }
1170
1171         if (eqo->idx >= adapter->num_rx_qs)
1172                 return;
1173
1174         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1175
1176         /* Wrapped around */
1177         if (time_before(now, stats->rx_jiffies)) {
1178                 stats->rx_jiffies = now;
1179                 return;
1180         }
1181
1182         /* Update once a second */
1183         if (delta < HZ)
1184                 return;
1185
1186         do {
1187                 start = u64_stats_fetch_begin_bh(&stats->sync);
1188                 pkts = stats->rx_pkts;
1189         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1190
1191         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1192         stats->rx_pkts_prev = pkts;
1193         stats->rx_jiffies = now;
1194         eqd = (stats->rx_pps / 110000) << 3;
1195         eqd = min(eqd, eqo->max_eqd);
1196         eqd = max(eqd, eqo->min_eqd);
1197         if (eqd < 10)
1198                 eqd = 0;
1199
1200 modify_eqd:
1201         if (eqd != eqo->cur_eqd) {
1202                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1203                 eqo->cur_eqd = eqd;
1204         }
1205 }
1206
1207 static void be_rx_stats_update(struct be_rx_obj *rxo,
1208                 struct be_rx_compl_info *rxcp)
1209 {
1210         struct be_rx_stats *stats = rx_stats(rxo);
1211
1212         u64_stats_update_begin(&stats->sync);
1213         stats->rx_compl++;
1214         stats->rx_bytes += rxcp->pkt_size;
1215         stats->rx_pkts++;
1216         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1217                 stats->rx_mcast_pkts++;
1218         if (rxcp->err)
1219                 stats->rx_compl_err++;
1220         u64_stats_update_end(&stats->sync);
1221 }
1222
1223 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1224 {
1225         /* L4 checksum is not reliable for non TCP/UDP packets.
1226          * Also ignore ipcksm for ipv6 pkts */
1227         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1228                                 (rxcp->ip_csum || rxcp->ipv6);
1229 }
1230
1231 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1232                                                 u16 frag_idx)
1233 {
1234         struct be_adapter *adapter = rxo->adapter;
1235         struct be_rx_page_info *rx_page_info;
1236         struct be_queue_info *rxq = &rxo->q;
1237
1238         rx_page_info = &rxo->page_info_tbl[frag_idx];
1239         BUG_ON(!rx_page_info->page);
1240
1241         if (rx_page_info->last_page_user) {
1242                 dma_unmap_page(&adapter->pdev->dev,
1243                                dma_unmap_addr(rx_page_info, bus),
1244                                adapter->big_page_size, DMA_FROM_DEVICE);
1245                 rx_page_info->last_page_user = false;
1246         }
1247
1248         atomic_dec(&rxq->used);
1249         return rx_page_info;
1250 }
1251
1252 /* Throwaway the data in the Rx completion */
1253 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1254                                 struct be_rx_compl_info *rxcp)
1255 {
1256         struct be_queue_info *rxq = &rxo->q;
1257         struct be_rx_page_info *page_info;
1258         u16 i, num_rcvd = rxcp->num_rcvd;
1259
1260         for (i = 0; i < num_rcvd; i++) {
1261                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262                 put_page(page_info->page);
1263                 memset(page_info, 0, sizeof(*page_info));
1264                 index_inc(&rxcp->rxq_idx, rxq->len);
1265         }
1266 }
1267
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area;
		 * the rest of the first fragment stays as a page frag.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page continues: drop the extra page ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1344
1345 /* Process the RX completion indicated by rxcp when GRO is disabled */
1346 static void be_rx_compl_process(struct be_rx_obj *rxo,
1347                                 struct be_rx_compl_info *rxcp)
1348 {
1349         struct be_adapter *adapter = rxo->adapter;
1350         struct net_device *netdev = adapter->netdev;
1351         struct sk_buff *skb;
1352
1353         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1354         if (unlikely(!skb)) {
1355                 rx_stats(rxo)->rx_drops_no_skbs++;
1356                 be_rx_compl_discard(rxo, rxcp);
1357                 return;
1358         }
1359
1360         skb_fill_rx_data(rxo, skb, rxcp);
1361
1362         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1363                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1364         else
1365                 skb_checksum_none_assert(skb);
1366
1367         skb->protocol = eth_type_trans(skb, netdev);
1368         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369         if (netdev->features & NETIF_F_RXHASH)
1370                 skb->rxhash = rxcp->rss_hash;
1371
1372
1373         if (rxcp->vlanf)
1374                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1375
1376         netif_receive_skb(skb);
1377 }
1378
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No GRO skb available: drop the frags of this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16, so -1 wraps to 0xffff; the first iteration's j++
	 * brings it to 0 (slot of the first frag) -- intentional, if subtle.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Continuation on the same page: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1434
1435 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1436                                  struct be_rx_compl_info *rxcp)
1437 {
1438         rxcp->pkt_size =
1439                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1440         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1441         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1442         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1443         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1444         rxcp->ip_csum =
1445                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1446         rxcp->l4_csum =
1447                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1448         rxcp->ipv6 =
1449                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1450         rxcp->rxq_idx =
1451                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1452         rxcp->num_rcvd =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1454         rxcp->pkt_type =
1455                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1456         rxcp->rss_hash =
1457                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1458         if (rxcp->vlanf) {
1459                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1460                                           compl);
1461                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1462                                                compl);
1463         }
1464         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1465 }
1466
1467 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1468                                  struct be_rx_compl_info *rxcp)
1469 {
1470         rxcp->pkt_size =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1472         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1473         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1474         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1475         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1476         rxcp->ip_csum =
1477                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1478         rxcp->l4_csum =
1479                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1480         rxcp->ipv6 =
1481                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1482         rxcp->rxq_idx =
1483                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1484         rxcp->num_rcvd =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1486         rxcp->pkt_type =
1487                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1488         rxcp->rss_hash =
1489                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1490         if (rxcp->vlanf) {
1491                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1492                                           compl);
1493                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1494                                                compl);
1495         }
1496         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1497 }
1498
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and return it; returns NULL when the CQ has no new entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't report a tag that matches the port vid unless the
		 * user explicitly configured that vlan */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1538
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541         u32 order = get_order(size);
1542
1543         if (order > 0)
1544                 gfp |= __GFP_COMP;
1545         return  alloc_pages(gfp, order);
1546 }
1547
1548 /*
1549  * Allocate a page, split it to fragments of size rx_frag_size and post as
1550  * receive buffers to BE
1551  */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until MAX_RX_POST frags or until a slot whose page has not
	 * yet been reaped is hit (page != NULL means the ring is full here)
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big" page and DMA-map it once; it is
			 * then carved into rx_frag_size fragments below
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Same big page: take a ref for this fragment and
			 * advance to the next fragment offset
			 */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the RX descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* marks the fragment on whose reaping the big page's
			 * DMA mapping should be torn down
			 */
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* A partially-carved big page is owned by its last posted fragment */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1609
/* Fetch the next valid TX completion entry from the CQ, or NULL if none
 * is pending. The returned entry is converted to CPU endianness and its
 * valid bit is cleared so it will not be processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after its valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1625
/* Unmap and free the skb whose wrbs (starting at txq->tail) end at
 * last_index. Returns the number of wrbs consumed, including the hdr wrb.
 * Caller is responsible for adjusting txq->used by the return value.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may carry the skb's linear header;
		 * unmap it just once, and only if a linear part exists
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1657
1658 /* Return the number of events in the event queue */
1659 static inline int events_get(struct be_eq_obj *eqo)
1660 {
1661         struct be_eq_entry *eqe;
1662         int num = 0;
1663
1664         do {
1665                 eqe = queue_tail_node(&eqo->q);
1666                 if (eqe->evt == 0)
1667                         break;
1668
1669                 rmb();
1670                 eqe->evt = 0;
1671                 num++;
1672                 queue_tail_inc(&eqo->q);
1673         } while (true);
1674
1675         return num;
1676 }
1677
1678 /* Leaves the EQ is disarmed state */
1679 static void be_eq_clean(struct be_eq_obj *eqo)
1680 {
1681         int num = events_get(eqo);
1682
1683         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1684 }
1685
/* Drain the RX CQ of all pending completions, then free rx buffers that
 * were posted but never consumed by HW. Leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the card is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1736
/* Wait (up to ~200ms) for all pending TX completions to arrive and reap
 * them; any wrbs still posted after the timeout are forcibly reclaimed so
 * that no skbs are leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the compls and release the wrb slots */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the index of this skb's last wrb so that
			 * be_tx_compl_process() can reclaim the whole range
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1795
1796 static void be_evt_queues_destroy(struct be_adapter *adapter)
1797 {
1798         struct be_eq_obj *eqo;
1799         int i;
1800
1801         for_all_evt_queues(adapter, eqo, i) {
1802                 if (eqo->q.created) {
1803                         be_eq_clean(eqo);
1804                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1805                 }
1806                 be_queue_free(adapter, &eqo->q);
1807         }
1808 }
1809
1810 static int be_evt_queues_create(struct be_adapter *adapter)
1811 {
1812         struct be_queue_info *eq;
1813         struct be_eq_obj *eqo;
1814         int i, rc;
1815
1816         adapter->num_evt_qs = num_irqs(adapter);
1817
1818         for_all_evt_queues(adapter, eqo, i) {
1819                 eqo->adapter = adapter;
1820                 eqo->tx_budget = BE_TX_BUDGET;
1821                 eqo->idx = i;
1822                 eqo->max_eqd = BE_MAX_EQD;
1823                 eqo->enable_aic = true;
1824
1825                 eq = &eqo->q;
1826                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1827                                         sizeof(struct be_eq_entry));
1828                 if (rc)
1829                         return rc;
1830
1831                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1832                 if (rc)
1833                         return rc;
1834         }
1835         return 0;
1836 }
1837
1838 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1839 {
1840         struct be_queue_info *q;
1841
1842         q = &adapter->mcc_obj.q;
1843         if (q->created)
1844                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1845         be_queue_free(adapter, q);
1846
1847         q = &adapter->mcc_obj.cq;
1848         if (q->created)
1849                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1850         be_queue_free(adapter, q);
1851 }
1852
1853 /* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: undo the steps above in reverse order */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1885
1886 static void be_tx_queues_destroy(struct be_adapter *adapter)
1887 {
1888         struct be_queue_info *q;
1889         struct be_tx_obj *txo;
1890         u8 i;
1891
1892         for_all_tx_queues(adapter, txo, i) {
1893                 q = &txo->q;
1894                 if (q->created)
1895                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1896                 be_queue_free(adapter, q);
1897
1898                 q = &txo->cq;
1899                 if (q->created)
1900                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1901                 be_queue_free(adapter, q);
1902         }
1903 }
1904
1905 static int be_num_txqs_want(struct be_adapter *adapter)
1906 {
1907         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1908             be_is_mc(adapter) ||
1909             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1910             BE2_chip(adapter))
1911                 return 1;
1912         else
1913                 return adapter->max_tx_queues;
1914 }
1915
/* Create a completion queue for each TX queue; TX CQs are distributed
 * round-robin across the available event queues.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock is required to update real_num_tx_queues */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1948
1949 static int be_tx_qs_create(struct be_adapter *adapter)
1950 {
1951         struct be_tx_obj *txo;
1952         int i, status;
1953
1954         for_all_tx_queues(adapter, txo, i) {
1955                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1956                                         sizeof(struct be_eth_wrb));
1957                 if (status)
1958                         return status;
1959
1960                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1961                 if (status)
1962                         return status;
1963         }
1964
1965         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1966                  adapter->num_tx_qs);
1967         return 0;
1968 }
1969
1970 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1971 {
1972         struct be_queue_info *q;
1973         struct be_rx_obj *rxo;
1974         int i;
1975
1976         for_all_rx_queues(adapter, rxo, i) {
1977                 q = &rxo->cq;
1978                 if (q->created)
1979                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980                 be_queue_free(adapter, q);
1981         }
1982 }
1983
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock is required to update real_num_rx_queues */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: smallest power-of-two page allocation that can
	 * hold a whole rx_frag_size fragment
	 */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are spread round-robin across the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2022
/* INTx interrupt handler: counts pending events and hands processing off
 * to NAPI; see the comments below for the spurious-interrupt handling.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2054
/* MSI-x interrupt handler: notify the EQ of 0 events (actual events are
 * counted later in be_poll()) and schedule NAPI processing.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2063
2064 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2065 {
2066         return (rxcp->tcpf && !rxcp->err) ? true : false;
2067 }
2068
/* Consume up to 'budget' RX completions on this ring (NAPI context) and
 * return the number consumed; replenishes the rxq when it runs low.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the rxq when posted buffers fall below watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2118
/* Reap up to 'budget' TX completions on ring 'idx' and wake the netdev
 * subqueue if it had been stopped for lack of wrbs. Returns true when
 * fewer than 'budget' completions were pending (i.e. TX work is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.  */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
2151
/* NAPI poll handler: services all TX and RX queues mapped to this EQ.
 * The EQ is re-armed only when all pending work fit within 'budget'.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* force another poll round */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2190
2191 void be_detect_error(struct be_adapter *adapter)
2192 {
2193         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2194         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2195         u32 i;
2196
2197         if (be_hw_error(adapter))
2198                 return;
2199
2200         if (lancer_chip(adapter)) {
2201                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2202                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2203                         sliport_err1 = ioread32(adapter->db +
2204                                         SLIPORT_ERROR1_OFFSET);
2205                         sliport_err2 = ioread32(adapter->db +
2206                                         SLIPORT_ERROR2_OFFSET);
2207                 }
2208         } else {
2209                 pci_read_config_dword(adapter->pdev,
2210                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2211                 pci_read_config_dword(adapter->pdev,
2212                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2213                 pci_read_config_dword(adapter->pdev,
2214                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2215                 pci_read_config_dword(adapter->pdev,
2216                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2217
2218                 ue_lo = (ue_lo & ~ue_lo_mask);
2219                 ue_hi = (ue_hi & ~ue_hi_mask);
2220         }
2221
2222         /* On certain platforms BE hardware can indicate spurious UEs.
2223          * Allow the h/w to stop working completely in case of a real UE.
2224          * Hence not setting the hw_error for UE detection.
2225          */
2226         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2227                 adapter->hw_error = true;
2228                 dev_err(&adapter->pdev->dev,
2229                         "Error detected in the card\n");
2230         }
2231
2232         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2233                 dev_err(&adapter->pdev->dev,
2234                         "ERR: sliport status 0x%x\n", sliport_status);
2235                 dev_err(&adapter->pdev->dev,
2236                         "ERR: sliport error1 0x%x\n", sliport_err1);
2237                 dev_err(&adapter->pdev->dev,
2238                         "ERR: sliport error2 0x%x\n", sliport_err2);
2239         }
2240
2241         if (ue_lo) {
2242                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2243                         if (ue_lo & 1)
2244                                 dev_err(&adapter->pdev->dev,
2245                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2246                 }
2247         }
2248
2249         if (ue_hi) {
2250                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2251                         if (ue_hi & 1)
2252                                 dev_err(&adapter->pdev->dev,
2253                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2254                 }
2255         }
2256
2257 }
2258
2259 static void be_msix_disable(struct be_adapter *adapter)
2260 {
2261         if (msix_enabled(adapter)) {
2262                 pci_disable_msix(adapter->pdev);
2263                 adapter->num_msix_vec = 0;
2264         }
2265 }
2266
2267 static uint be_num_rss_want(struct be_adapter *adapter)
2268 {
2269         u32 num = 0;
2270
2271         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2272             (lancer_chip(adapter) ||
2273              (!sriov_want(adapter) && be_physfn(adapter)))) {
2274                 num = adapter->max_rss_queues;
2275                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2276         }
2277         return num;
2278 }
2279
/* Try to enable MSI-x with enough vectors for RSS (and RoCE if supported).
 * Falls back to the smaller vector count the PCI layer can grant; on
 * complete failure MSI-x stays disabled and the caller uses INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could
		 * have been allocated; retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2327
/* Return the MSI-x vector number assigned to the given event queue */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2333
/* Request an irq for each event queue. On failure, free any irqs already
 * requested and disable MSI-x (caller may then fall back to INTx).
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the irqs registered so far, in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2357
/* Register irqs: MSI-x when enabled, else a shared INTx irq on the first
 * EQ. A VF cannot fall back to INTx, so an MSI-x failure is fatal for it.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2385
/* Free the registered IRQ(s): the single shared INTx line, or one IRQ
 * per event queue when MSI-x is in use.  No-op if none are registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *eqo;
        int i;

        if (!adapter->isr_registered)
                return;

        /* INTx */
        if (!msix_enabled(adapter)) {
                free_irq(netdev->irq, &adapter->eq_obj[0]);
                goto done;
        }

        /* MSIx */
        for_all_evt_queues(adapter, eqo, i)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
        adapter->isr_registered = false;
}
2408
/* Destroy every RX queue and drain its completion queue.
 * Called with traffic quiesced (from be_close()).
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_rxq_destroy(adapter, q);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_cq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
}
2429
/* ndo_stop: quiesce the interface.  Ordering matters: stop RoCE, mask
 * interrupts, disable NAPI and async MCC, drain TX completions, destroy
 * the RX queues, then sync IRQs and clean the EQs before freeing them.
 */
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        be_roce_dev_close(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        for_all_evt_queues(adapter, eqo, i)
                napi_disable(&eqo->napi);

        be_async_mcc_disable(adapter);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        be_tx_compl_clean(adapter);

        be_rx_qs_destroy(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                /* Ensure no in-flight ISR is still touching the EQ
                 * before cleaning it
                 */
                if (msix_enabled(adapter))
                        synchronize_irq(be_msix_vec_get(adapter, eqo));
                else
                        synchronize_irq(netdev->irq);
                be_eq_clean(eqo);
        }

        be_irq_unregister(adapter);

        return 0;
}
2465
2466 static int be_rx_qs_create(struct be_adapter *adapter)
2467 {
2468         struct be_rx_obj *rxo;
2469         int rc, i, j;
2470         u8 rsstable[128];
2471
2472         for_all_rx_queues(adapter, rxo, i) {
2473                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2474                                     sizeof(struct be_eth_rx_d));
2475                 if (rc)
2476                         return rc;
2477         }
2478
2479         /* The FW would like the default RXQ to be created first */
2480         rxo = default_rxo(adapter);
2481         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2482                                adapter->if_handle, false, &rxo->rss_id);
2483         if (rc)
2484                 return rc;
2485
2486         for_all_rss_queues(adapter, rxo, i) {
2487                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2488                                        rx_frag_size, adapter->if_handle,
2489                                        true, &rxo->rss_id);
2490                 if (rc)
2491                         return rc;
2492         }
2493
2494         if (be_multi_rxq(adapter)) {
2495                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2496                         for_all_rss_queues(adapter, rxo, i) {
2497                                 if ((j + i) >= 128)
2498                                         break;
2499                                 rsstable[j + i] = rxo->rss_id;
2500                         }
2501                 }
2502                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2503                 if (rc)
2504                         return rc;
2505         }
2506
2507         /* First time posting */
2508         for_all_rx_queues(adapter, rxo, i)
2509                 be_post_rx_frags(rxo, GFP_KERNEL);
2510         return 0;
2511 }
2512
/* ndo_open: create RX queues, register IRQs, unmask interrupts, arm all
 * completion/event queues, enable NAPI and report the initial link
 * state.  On failure, be_close() undoes the partial bring-up.
 */
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u8 link_status;
        int status, i;

        status = be_rx_qs_create(adapter);
        if (status)
                goto err;

        be_irq_register(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, true);

        /* Arm all RX/TX completion queues so they raise events */
        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);

        for_all_tx_queues(adapter, txo, i)
                be_cq_notify(adapter, txo->cq.id, true, 0);

        be_async_mcc_enable(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }

        status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);

        be_roce_dev_open(adapter);
        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
2554
2555 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2556 {
2557         struct be_dma_mem cmd;
2558         int status = 0;
2559         u8 mac[ETH_ALEN];
2560
2561         memset(mac, 0, ETH_ALEN);
2562
2563         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2564         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2565                                     GFP_KERNEL);
2566         if (cmd.va == NULL)
2567                 return -1;
2568         memset(cmd.va, 0, cmd.size);
2569
2570         if (enable) {
2571                 status = pci_write_config_dword(adapter->pdev,
2572                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2573                 if (status) {
2574                         dev_err(&adapter->pdev->dev,
2575                                 "Could not enable Wake-on-lan\n");
2576                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2577                                           cmd.dma);
2578                         return status;
2579                 }
2580                 status = be_cmd_enable_magic_wol(adapter,
2581                                 adapter->netdev->dev_addr, &cmd);
2582                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2583                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2584         } else {
2585                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2586                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2587                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2588         }
2589
2590         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2591         return status;
2592 }
2593
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed.  These addresses are programmed into the ASIC by the PF,
 * and the VF driver queries for its MAC address during probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
        u32 vf;
        int status = 0;
        u8 mac[ETH_ALEN];
        struct be_vf_cfg *vf_cfg;

        be_vf_eth_addr_generate(adapter, mac);

        for_all_vfs(adapter, vf_cfg, vf) {
                if (lancer_chip(adapter)) {
                        /* Lancer: program via the FW MAC-list interface */
                        status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
                } else {
                        status = be_cmd_pmac_add(adapter, mac,
                                                 vf_cfg->if_handle,
                                                 &vf_cfg->pmac_id, vf + 1);
                }

                if (status)
                        dev_err(&adapter->pdev->dev,
                        "Mac address assignment failed for VF %d\n", vf);
                else
                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

                /* Next VF gets seed+1.  NOTE(review): no carry into mac[4]
                 * if mac[5] wraps past 0xff - confirm this is acceptable
                 */
                mac[5] += 1;
        }
        return status;
}
2628
/* Tear down VF state.  If any VF is still assigned to a VM, SR-IOV is
 * left enabled (disabling it would yank the device from the guest);
 * only the host-side bookkeeping is freed in that case.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        u32 vf;

        if (be_find_vfs(adapter, ASSIGNED)) {
                dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
                goto done;
        }

        for_all_vfs(adapter, vf_cfg, vf) {
                /* Undo be_vf_eth_addr_config(): remove the programmed MAC */
                if (lancer_chip(adapter))
                        be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
                else
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        vf_cfg->pmac_id, vf + 1);

                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
        }
        pci_disable_sriov(adapter->pdev);
done:
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
}
2653
/* Undo be_setup(): stop the worker, tear down VFs, delete programmed
 * MACs and the interface, destroy all queues and release MSI-x.
 */
static int be_clear(struct be_adapter *adapter)
{
        int i = 1;

        if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
                cancel_delayed_work_sync(&adapter->work);
                adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
        }

        if (sriov_enabled(adapter))
                be_vf_clear(adapter);

        /* Delete the extra UC MACs; pmac_id[0] holds the primary MAC,
         * so deletion starts at index 1
         */
        for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                        adapter->pmac_id[i], 0);

        be_cmd_if_destroy(adapter, adapter->if_handle,  0);

        be_mcc_queues_destroy(adapter);
        be_rx_cqs_destroy(adapter);
        be_tx_queues_destroy(adapter);
        be_evt_queues_destroy(adapter);

        kfree(adapter->pmac_id);
        adapter->pmac_id = NULL;

        be_msix_disable(adapter);
        return 0;
}
2683
2684 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2685                                    u32 *cap_flags, u8 domain)
2686 {
2687         bool profile_present = false;
2688         int status;
2689
2690         if (lancer_chip(adapter)) {
2691                 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2692                 if (!status)
2693                         profile_present = true;
2694         }
2695
2696         if (!profile_present)
2697                 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2698                              BE_IF_FLAGS_MULTICAST;
2699 }
2700
2701 static int be_vf_setup_init(struct be_adapter *adapter)
2702 {
2703         struct be_vf_cfg *vf_cfg;
2704         int vf;
2705
2706         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2707                                   GFP_KERNEL);
2708         if (!adapter->vf_cfg)
2709                 return -ENOMEM;
2710
2711         for_all_vfs(adapter, vf_cfg, vf) {
2712                 vf_cfg->if_handle = -1;
2713                 vf_cfg->pmac_id = -1;
2714         }
2715         return 0;
2716 }
2717
/* Enable SR-IOV and set up the VFs: create an interface per VF, assign
 * MAC addresses (seeded from the PF MAC), set a QoS link speed, read
 * back the default vlan, and finally enable each VF.
 * Returns 0 when SR-IOV cannot be enabled - the PF still works without
 * VFs in that case.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        struct device *dev = &adapter->pdev->dev;
        u32 cap_flags, en_flags, vf;
        u16 def_vlan, lnk_speed;
        int status, enabled_vfs;

        /* If VFs were left enabled (e.g. by a previous driver load),
         * don't re-enable SR-IOV
         */
        enabled_vfs = be_find_vfs(adapter, ENABLED);
        if (enabled_vfs) {
                dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
                dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
                return 0;
        }

        if (num_vfs > adapter->dev_num_vfs) {
                dev_warn(dev, "Device supports %d VFs and not %d\n",
                         adapter->dev_num_vfs, num_vfs);
                num_vfs = adapter->dev_num_vfs;
        }

        status = pci_enable_sriov(adapter->pdev, num_vfs);
        if (!status) {
                adapter->num_vfs = num_vfs;
        } else {
                /* Platform doesn't support SRIOV though device supports it */
                dev_warn(dev, "SRIOV enable failed\n");
                return 0;
        }

        status = be_vf_setup_init(adapter);
        if (status)
                goto err;

        /* domain (vf + 1): domain 0 is the PF itself */
        for_all_vfs(adapter, vf_cfg, vf) {
                be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
                                        BE_IF_FLAGS_BROADCAST |
                                        BE_IF_FLAGS_MULTICAST);

                status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                          &vf_cfg->if_handle, vf + 1);
                if (status)
                        goto err;
        }

        if (!enabled_vfs) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
        }

        for_all_vfs(adapter, vf_cfg, vf) {
                /* NOTE(review): tx_rate = lnk_speed * 10 - units look
                 * like Mbps with QoS in 10 Mbps steps; confirm
                 */
                lnk_speed = 1000;
                status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
                if (status)
                        goto err;
                vf_cfg->tx_rate = lnk_speed * 10;

                status = be_cmd_get_hsw_config(adapter, &def_vlan,
                                vf + 1, vf_cfg->if_handle);
                if (status)
                        goto err;
                vf_cfg->def_vid = def_vlan;

                be_cmd_enable_vf(adapter, vf + 1);
        }
        return 0;
err:
        return status;
}
2790
2791 static void be_setup_init(struct be_adapter *adapter)
2792 {
2793         adapter->vlan_prio_bmap = 0xff;
2794         adapter->phy.link_speed = -1;
2795         adapter->if_handle = -1;
2796         adapter->be3_native = false;
2797         adapter->promiscuous = false;
2798         if (be_physfn(adapter))
2799                 adapter->cmd_privileges = MAX_PRIVILEGES;
2800         else
2801                 adapter->cmd_privileges = MIN_PRIVILEGES;
2802 }
2803
/* Determine the MAC address to use and whether it is already active
 * (programmed) in HW.  Uses the netdev's address when a permanent one
 * is set; otherwise queries FW in a chip/function-specific way.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
                           bool *active_mac, u32 *pmac_id)
{
        int status = 0;

        if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
                memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
                if (!lancer_chip(adapter) && !be_physfn(adapter))
                        *active_mac = true;
                else
                        *active_mac = false;

                return status;
        }

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, mac,
                                                  active_mac, pmac_id, 0);
                if (*active_mac) {
                        status = be_cmd_mac_addr_query(adapter, mac, false,
                                                       if_handle, *pmac_id);
                }
        } else if (be_physfn(adapter)) {
                /* For BE3, for PF get permanent MAC */
                status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
                *active_mac = false;
        } else {
                /* For BE3, for VF get soft MAC assigned by PF*/
                status = be_cmd_mac_addr_query(adapter, mac, false,
                                               if_handle, 0);
                *active_mac = true;
        }
        return status;
}
2838
/* Populate the adapter's max resource counts (MACs, vlans, queues, ...).
 * Lancer chips may provide them via a FW profile; otherwise fall back
 * to chip-generation defaults.
 */
static void be_get_resources(struct be_adapter *adapter)
{
        int status;
        bool profile_present = false;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_func_config(adapter);

                if (!status)
                        profile_present = true;
        }

        if (profile_present) {
                /* Sanity fixes for Lancer */
                adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
                                              BE_UC_PMAC_COUNT);
                adapter->max_vlans = min_t(u16, adapter->max_vlans,
                                           BE_NUM_VLANS_SUPPORTED);
                adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
                                               BE_MAX_MC);
                adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
                                               MAX_TX_QS);
                adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
                                                BE3_MAX_RSS_QS);
                adapter->max_event_queues = min_t(u16,
                                                  adapter->max_event_queues,
                                                  BE3_MAX_RSS_QS);

                /* Leave one RX queue for the default (non-RSS) queue */
                if (adapter->max_rss_queues &&
                    adapter->max_rss_queues == adapter->max_rx_queues)
                        adapter->max_rss_queues -= 1;

                if (adapter->max_event_queues < adapter->max_rss_queues)
                        adapter->max_rss_queues = adapter->max_event_queues;

        } else {
                if (be_physfn(adapter))
                        adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
                else
                        adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

                /* NOTE(review): /8 presumably because the vlan table is
                 * shared across FLEX10 channels - confirm
                 */
                if (adapter->function_mode & FLEX10_MODE)
                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
                else
                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

                adapter->max_mcast_mac = BE_MAX_MC;
                adapter->max_tx_queues = MAX_TX_QS;
                adapter->max_rss_queues = (adapter->be3_native) ?
                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
                adapter->max_event_queues = BE3_MAX_RSS_QS;

                adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
                                        BE_IF_FLAGS_BROADCAST |
                                        BE_IF_FLAGS_MULTICAST |
                                        BE_IF_FLAGS_PASS_L3L4_ERRORS |
                                        BE_IF_FLAGS_MCAST_PROMISCUOUS |
                                        BE_IF_FLAGS_VLAN_PROMISCUOUS |
                                        BE_IF_FLAGS_PROMISCUOUS;

                if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
                        adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
        }
}
2903
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
        int pos, status;
        u16 dev_num_vfs;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                                     &adapter->function_mode,
                                     &adapter->function_caps);
        if (status)
                goto err;

        be_get_resources(adapter);

        /* primary mac needs 1 pmac entry */
        adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
                                   sizeof(u32), GFP_KERNEL);
        if (!adapter->pmac_id) {
                status = -ENOMEM;
                goto err;
        }

        /* Read the max VF count from the SR-IOV PCIe extended
         * capability, if the device has one
         */
        pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
        if (pos) {
                pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
                                     &dev_num_vfs);
                if (!lancer_chip(adapter))
                        dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
                adapter->dev_num_vfs = dev_num_vfs;
        }
err:
        return status;
}
2937
/* Bring the adapter to an operational state: query FW config/resources,
 * create all queues, create the interface, program the MAC, configure
 * vlans/rx-mode/flow-control, optionally set up SR-IOV VFs, and start
 * the periodic worker.  Any failure tears everything down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        u32 en_flags;
        u32 tx_fc, rx_fc;
        int status;
        u8 mac[ETH_ALEN];
        bool active_mac;

        be_setup_init(adapter);

        if (!lancer_chip(adapter))
                be_cmd_req_native_mode(adapter);

        status = be_get_config(adapter);
        if (status)
                goto err;

        be_msix_enable(adapter);

        /* Queue creation order: EQs, then TX/RX CQs, then the MCC queues */
        status = be_evt_queues_create(adapter);
        if (status)
                goto err;

        status = be_tx_cqs_create(adapter);
        if (status)
                goto err;

        status = be_rx_cqs_create(adapter);
        if (status)
                goto err;

        status = be_mcc_queues_create(adapter);
        if (status)
                goto err;

        be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
        /* In UMC mode FW does not return right privileges.
         * Override with correct privilege equivalent to PF.
         */
        if (be_is_mc(adapter))
                adapter->cmd_privileges = MAX_PRIVILEGES;

        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                        BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
                en_flags |= BE_IF_FLAGS_RSS;

        /* Enable only the flags this function is actually capable of */
        en_flags = en_flags & adapter->if_cap_flags;

        status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
                                  &adapter->if_handle, 0);
        if (status != 0)
                goto err;

        memset(mac, 0, ETH_ALEN);
        active_mac = false;
        status = be_get_mac_addr(adapter, mac, adapter->if_handle,
                                 &active_mac, &adapter->pmac_id[0]);
        if (status != 0)
                goto err;

        /* If the queried MAC is not already active in HW, program it */
        if (!active_mac) {
                status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
                                         &adapter->pmac_id[0], 0);
                if (status != 0)
                        goto err;
        }

        if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

        status = be_tx_qs_create(adapter);
        if (status)
                goto err;

        be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

        if (adapter->vlans_added)
                be_vid_config(adapter);

        be_set_rx_mode(adapter->netdev);

        be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

        /* Re-apply the desired flow-control settings if FW state differs */
        if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);

        if (be_physfn(adapter) && num_vfs) {
                if (adapter->dev_num_vfs)
                        be_vf_setup(adapter);
                else
                        dev_warn(dev, "device doesn't support SRIOV\n");
        }

        status = be_cmd_get_phy_info(adapter);
        if (!status && be_pause_supported(adapter))
                adapter->phy.fc_autoneg = 1;

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
        return 0;
err:
        be_clear(adapter);
        return status;
}
3048
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode "interrupt" used by netpoll/netconsole when normal
 * interrupts may not be serviced: kick every EQ and schedule its NAPI
 * context so pending completions get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        for_all_evt_queues(adapter, eqo, i) {
                be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
                napi_schedule(&eqo->napi);
        }
        /* redundant trailing "return;" removed */
}
#endif
3064
/* Signature string expected at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
/* Two-part flash-directory cookie.  The second half is exactly 16 chars,
 * so it is deliberately NOT NUL-terminated (legal for an exact-fit array
 * initializer).  NOTE(review): appears to be used only within this file -
 * consider making it static const; confirm no extern declaration exists.
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3067
/* Decide whether the redboot (boot code) section needs flashing by
 * comparing the CRC at the end of the new image with the CRC currently
 * stored in flash.  Returns true only when they differ.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
{
        u32 crc_offset;
        u8 flashed_crc[4];
        int status;

        /* The CRC occupies the last 4 bytes of the image section */
        crc_offset = hdr_size + img_start + image_size - 4;

        p += crc_offset;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                        (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                "could not get crc from flash, not flashing redboot\n");
                return false;
        }

        /*update redboot only if crc does not match*/
        if (!memcmp(flashed_crc, p, 4))
                return false;
        else
                return true;
}
3094
3095 static bool phy_flashing_required(struct be_adapter *adapter)
3096 {
3097         return (adapter->phy.phy_type == TN_8022 &&
3098                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3099 }
3100
3101 static bool is_comp_in_ufi(struct be_adapter *adapter,
3102                            struct flash_section_info *fsec, int type)
3103 {
3104         int i = 0, img_type = 0;
3105         struct flash_section_info_g2 *fsec_g2 = NULL;
3106
3107    &n