pch_gbe: Fix the issue that the receiving data is not normal.
[~shefty/rdma-dev.git] / drivers / net / pch_gbe / pch_gbe_main.c
1 /*
2  * Copyright (C) 1999 - 2010 Intel Corporation.
3  * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
4  *
5  * This code was derived from the Intel e1000e Linux driver.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; version 2 of the License.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
19  */
20
21 #include "pch_gbe.h"
22 #include "pch_gbe_api.h"
23
24 #define DRV_VERSION     "1.00"
25 const char pch_driver_version[] = DRV_VERSION;
26
27 #define PCI_DEVICE_ID_INTEL_IOH1_GBE    0x8802          /* Pci device ID */
28 #define PCH_GBE_MAR_ENTRIES             16
29 #define PCH_GBE_SHORT_PKT               64
30 #define DSC_INIT16                      0xC000
31 #define PCH_GBE_DMA_ALIGN               0
32 #define PCH_GBE_DMA_PADDING             2
33 #define PCH_GBE_WATCHDOG_PERIOD         (1 * HZ)        /* watchdog time */
34 #define PCH_GBE_COPYBREAK_DEFAULT       256
35 #define PCH_GBE_PCI_BAR                 1
36
37 #define PCH_GBE_TX_WEIGHT         64
38 #define PCH_GBE_RX_WEIGHT         64
39 #define PCH_GBE_RX_BUFFER_WRITE   16
40
41 /* Initialize the wake-on-LAN settings */
42 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
43
44 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
45         PCH_GBE_CHIP_TYPE_INTERNAL | \
46         PCH_GBE_RGMII_MODE_RGMII   | \
47         PCH_GBE_CRS_SEL              \
48         )
49
50 /* Ethertype field values */
51 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
52 #define PCH_GBE_FRAME_SIZE_2048         2048
53 #define PCH_GBE_FRAME_SIZE_4096         4096
54 #define PCH_GBE_FRAME_SIZE_8192         8192
55
56 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
57 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
58 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
59 #define PCH_GBE_DESC_UNUSED(R) \
60         ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
61         (R)->next_to_clean - (R)->next_to_use - 1)
62
63 /* Pause packet value */
64 #define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
65 #define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
66 #define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
67 #define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
68
69 #define PCH_GBE_ETH_ALEN            6
70
71 /* This defines the bits that are set in the Interrupt Mask
72  * Set/Read Register.  Each bit is documented below:
73  *   o RXT0   = Receiver Timer Interrupt (ring 0)
74  *   o TXDW   = Transmit Descriptor Written Back
75  *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
76  *   o RXSEQ  = Receive Sequence Error
77  *   o LSC    = Link Status Change
78  */
79 #define PCH_GBE_INT_ENABLE_MASK ( \
80         PCH_GBE_INT_RX_DMA_CMPLT |    \
81         PCH_GBE_INT_RX_DSC_EMP   |    \
82         PCH_GBE_INT_WOL_DET      |    \
83         PCH_GBE_INT_TX_CMPLT          \
84         )
85
86
87 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
88
89 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
90 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
91                                int data);
92 /**
93  * pch_gbe_mac_read_mac_addr - Read MAC address
94  * @hw:             Pointer to the HW structure
95  * Returns
96  *      0:                      Successful.
97  */
98 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
99 {
100         u32  adr1a, adr1b;
101
102         adr1a = ioread32(&hw->reg->mac_adr[0].high);
103         adr1b = ioread32(&hw->reg->mac_adr[0].low);
104
105         hw->mac.addr[0] = (u8)(adr1a & 0xFF);
106         hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
107         hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
108         hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
109         hw->mac.addr[4] = (u8)(adr1b & 0xFF);
110         hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
111
112         pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
113         return 0;
114 }
115
/**
 * pch_gbe_wait_clr_bit - Busy-wait until a bit clears in a register
 * @reg:        Pointer of register
 * @bit:        Busy bit to wait on (kernel-doc fixed: was documented as @busy)
 *
 * Polls the register up to 1000 times with cpu_relax() between reads;
 * logs an error if the bit never clears (no error is returned to the
 * caller).
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;
	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}
131 /**
132  * pch_gbe_mac_mar_set - Set MAC address register
133  * @hw:     Pointer to the HW structure
134  * @addr:   Pointer to the MAC address
135  * @index:  MAC address array register
136  */
137 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
138 {
139         u32 mar_low, mar_high, adrmask;
140
141         pr_debug("index : 0x%x\n", index);
142
143         /*
144          * HW expects these in little endian so we reverse the byte order
145          * from network order (big endian) to little endian
146          */
147         mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
148                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
149         mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
150         /* Stop the MAC Address of index. */
151         adrmask = ioread32(&hw->reg->ADDR_MASK);
152         iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
153         /* wait busy */
154         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
155         /* Set the MAC address to the MAC address 1A/1B register */
156         iowrite32(mar_high, &hw->reg->mac_adr[index].high);
157         iowrite32(mar_low, &hw->reg->mac_adr[index].low);
158         /* Start the MAC address of index */
159         iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
160         /* wait busy */
161         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
162 }
163
164 /**
165  * pch_gbe_mac_reset_hw - Reset hardware
166  * @hw: Pointer to the HW structure
167  */
168 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
169 {
170         /* Read the MAC address. and store to the private data */
171         pch_gbe_mac_read_mac_addr(hw);
172         iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
173 #ifdef PCH_GBE_MAC_IFOP_RGMII
174         iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
175 #endif
176         pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
177         /* Setup the receive address */
178         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
179         return;
180 }
181
182 /**
183  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
184  * @hw: Pointer to the HW structure
185  * @mar_count: Receive address registers
186  */
187 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
188 {
189         u32 i;
190
191         /* Setup the receive address */
192         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
193
194         /* Zero out the other receive addresses */
195         for (i = 1; i < mar_count; i++) {
196                 iowrite32(0, &hw->reg->mac_adr[i].high);
197                 iowrite32(0, &hw->reg->mac_adr[i].low);
198         }
199         iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
200         /* wait busy */
201         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
202 }
203
204
205 /**
206  * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
207  * @hw:             Pointer to the HW structure
208  * @mc_addr_list:   Array of multicast addresses to program
209  * @mc_addr_count:  Number of multicast addresses to program
210  * @mar_used_count: The first MAC Address register free to program
211  * @mar_total_num:  Total number of supported MAC Address Registers
212  */
213 static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
214                                             u8 *mc_addr_list, u32 mc_addr_count,
215                                             u32 mar_used_count, u32 mar_total_num)
216 {
217         u32 i, adrmask;
218
219         /* Load the first set of multicast addresses into the exact
220          * filters (RAR).  If there are not enough to fill the RAR
221          * array, clear the filters.
222          */
223         for (i = mar_used_count; i < mar_total_num; i++) {
224                 if (mc_addr_count) {
225                         pch_gbe_mac_mar_set(hw, mc_addr_list, i);
226                         mc_addr_count--;
227                         mc_addr_list += PCH_GBE_ETH_ALEN;
228                 } else {
229                         /* Clear MAC address mask */
230                         adrmask = ioread32(&hw->reg->ADDR_MASK);
231                         iowrite32((adrmask | (0x0001 << i)),
232                                         &hw->reg->ADDR_MASK);
233                         /* wait busy */
234                         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
235                         /* Clear MAC address */
236                         iowrite32(0, &hw->reg->mac_adr[i].high);
237                         iowrite32(0, &hw->reg->mac_adr[i].low);
238                 }
239         }
240 }
241
242 /**
243  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
244  * @hw:             Pointer to the HW structure
245  * Returns
246  *      0:                      Successful.
247  *      Negative value:         Failed.
248  */
249 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
250 {
251         struct pch_gbe_mac_info *mac = &hw->mac;
252         u32 rx_fctrl;
253
254         pr_debug("mac->fc = %u\n", mac->fc);
255
256         rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
257
258         switch (mac->fc) {
259         case PCH_GBE_FC_NONE:
260                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
261                 mac->tx_fc_enable = false;
262                 break;
263         case PCH_GBE_FC_RX_PAUSE:
264                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
265                 mac->tx_fc_enable = false;
266                 break;
267         case PCH_GBE_FC_TX_PAUSE:
268                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
269                 mac->tx_fc_enable = true;
270                 break;
271         case PCH_GBE_FC_FULL:
272                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
273                 mac->tx_fc_enable = true;
274                 break;
275         default:
276                 pr_err("Flow control param set incorrectly\n");
277                 return -EINVAL;
278         }
279         if (mac->link_duplex == DUPLEX_HALF)
280                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
281         iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
282         pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
283                  ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
284         return 0;
285 }
286
287 /**
288  * pch_gbe_mac_set_wol_event - Set wake-on-lan event
289  * @hw:     Pointer to the HW structure
290  * @wu_evt: Wake up event
291  */
292 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
293 {
294         u32 addr_mask;
295
296         pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
297                  wu_evt, ioread32(&hw->reg->ADDR_MASK));
298
299         if (wu_evt) {
300                 /* Set Wake-On-Lan address mask */
301                 addr_mask = ioread32(&hw->reg->ADDR_MASK);
302                 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
303                 /* wait busy */
304                 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
305                 iowrite32(0, &hw->reg->WOL_ST);
306                 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
307                 iowrite32(0x02, &hw->reg->TCPIP_ACC);
308                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
309         } else {
310                 iowrite32(0, &hw->reg->WOL_CTRL);
311                 iowrite32(0, &hw->reg->WOL_ST);
312         }
313         return;
314 }
315
316 /**
317  * pch_gbe_mac_ctrl_miim - Control MIIM interface
318  * @hw:   Pointer to the HW structure
319  * @addr: Address of PHY
320  * @dir:  Operetion. (Write or Read)
321  * @reg:  Access register of PHY
322  * @data: Write data.
323  *
324  * Returns: Read date.
325  */
326 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
327                         u16 data)
328 {
329         u32 data_out = 0;
330         unsigned int i;
331         unsigned long flags;
332
333         spin_lock_irqsave(&hw->miim_lock, flags);
334
335         for (i = 100; i; --i) {
336                 if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
337                         break;
338                 udelay(20);
339         }
340         if (i == 0) {
341                 pr_err("pch-gbe.miim won't go Ready\n");
342                 spin_unlock_irqrestore(&hw->miim_lock, flags);
343                 return 0;       /* No way to indicate timeout error */
344         }
345         iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
346                   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
347                   dir | data), &hw->reg->MIIM);
348         for (i = 0; i < 100; i++) {
349                 udelay(20);
350                 data_out = ioread32(&hw->reg->MIIM);
351                 if ((data_out & PCH_GBE_MIIM_OPER_READY))
352                         break;
353         }
354         spin_unlock_irqrestore(&hw->miim_lock, flags);
355
356         pr_debug("PHY %s: reg=%d, data=0x%04X\n",
357                  dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
358                  dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
359         return (u16) data_out;
360 }
361
362 /**
363  * pch_gbe_mac_set_pause_packet - Set pause packet
364  * @hw:   Pointer to the HW structure
365  */
366 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
367 {
368         unsigned long tmp2, tmp3;
369
370         /* Set Pause packet */
371         tmp2 = hw->mac.addr[1];
372         tmp2 = (tmp2 << 8) | hw->mac.addr[0];
373         tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
374
375         tmp3 = hw->mac.addr[5];
376         tmp3 = (tmp3 << 8) | hw->mac.addr[4];
377         tmp3 = (tmp3 << 8) | hw->mac.addr[3];
378         tmp3 = (tmp3 << 8) | hw->mac.addr[2];
379
380         iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
381         iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
382         iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
383         iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
384         iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
385
386         /* Transmit Pause Packet */
387         iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
388
389         pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
390                  ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
391                  ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
392                  ioread32(&hw->reg->PAUSE_PKT5));
393
394         return;
395 }
396
397
398 /**
399  * pch_gbe_alloc_queues - Allocate memory for all rings
400  * @adapter:  Board private structure to initialize
401  * Returns
402  *      0:      Successfully
403  *      Negative value: Failed
404  */
405 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
406 {
407         int size;
408
409         size = (int)sizeof(struct pch_gbe_tx_ring);
410         adapter->tx_ring = kzalloc(size, GFP_KERNEL);
411         if (!adapter->tx_ring)
412                 return -ENOMEM;
413         size = (int)sizeof(struct pch_gbe_rx_ring);
414         adapter->rx_ring = kzalloc(size, GFP_KERNEL);
415         if (!adapter->rx_ring) {
416                 kfree(adapter->tx_ring);
417                 return -ENOMEM;
418         }
419         return 0;
420 }
421
422 /**
423  * pch_gbe_init_stats - Initialize status
424  * @adapter:  Board private structure to initialize
425  */
426 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
427 {
428         memset(&adapter->stats, 0, sizeof(adapter->stats));
429         return;
430 }
431
432 /**
433  * pch_gbe_init_phy - Initialize PHY
434  * @adapter:  Board private structure to initialize
435  * Returns
436  *      0:      Successfully
437  *      Negative value: Failed
438  */
439 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
440 {
441         struct net_device *netdev = adapter->netdev;
442         u32 addr;
443         u16 bmcr, stat;
444
445         /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
446         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
447                 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
448                 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
449                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
450                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
451                 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
452                         break;
453         }
454         adapter->hw.phy.addr = adapter->mii.phy_id;
455         pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
456         if (addr == 32)
457                 return -EAGAIN;
458         /* Selected the phy and isolate the rest */
459         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
460                 if (addr != adapter->mii.phy_id) {
461                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
462                                            BMCR_ISOLATE);
463                 } else {
464                         bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
465                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
466                                            bmcr & ~BMCR_ISOLATE);
467                 }
468         }
469
470         /* MII setup */
471         adapter->mii.phy_id_mask = 0x1F;
472         adapter->mii.reg_num_mask = 0x1F;
473         adapter->mii.dev = adapter->netdev;
474         adapter->mii.mdio_read = pch_gbe_mdio_read;
475         adapter->mii.mdio_write = pch_gbe_mdio_write;
476         adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
477         return 0;
478 }
479
480 /**
481  * pch_gbe_mdio_read - The read function for mii
482  * @netdev: Network interface device structure
483  * @addr:   Phy ID
484  * @reg:    Access location
485  * Returns
486  *      0:      Successfully
487  *      Negative value: Failed
488  */
489 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
490 {
491         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
492         struct pch_gbe_hw *hw = &adapter->hw;
493
494         return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
495                                      (u16) 0);
496 }
497
498 /**
499  * pch_gbe_mdio_write - The write function for mii
500  * @netdev: Network interface device structure
501  * @addr:   Phy ID (not used)
502  * @reg:    Access location
503  * @data:   Write data
504  */
505 static void pch_gbe_mdio_write(struct net_device *netdev,
506                                int addr, int reg, int data)
507 {
508         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
509         struct pch_gbe_hw *hw = &adapter->hw;
510
511         pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
512 }
513
514 /**
515  * pch_gbe_reset_task - Reset processing at the time of transmission timeout
516  * @work:  Pointer of board private structure
517  */
518 static void pch_gbe_reset_task(struct work_struct *work)
519 {
520         struct pch_gbe_adapter *adapter;
521         adapter = container_of(work, struct pch_gbe_adapter, reset_task);
522
523         rtnl_lock();
524         pch_gbe_reinit_locked(adapter);
525         rtnl_unlock();
526 }
527
/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter:  Board private structure
 *
 * Brings the interface fully down and back up.  Callers hold the rtnl
 * lock around this (see pch_gbe_reset_task()).
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}
537
538 /**
539  * pch_gbe_reset - Reset GbE
540  * @adapter:  Board private structure
541  */
542 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
543 {
544         pch_gbe_mac_reset_hw(&adapter->hw);
545         /* Setup the receive address. */
546         pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
547         if (pch_gbe_hal_init_hw(&adapter->hw))
548                 pr_err("Hardware Error\n");
549 }
550
551 /**
552  * pch_gbe_free_irq - Free an interrupt
553  * @adapter:  Board private structure
554  */
555 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
556 {
557         struct net_device *netdev = adapter->netdev;
558
559         free_irq(adapter->pdev->irq, netdev);
560         if (adapter->have_msi) {
561                 pci_disable_msi(adapter->pdev);
562                 pr_debug("call pci_disable_msi\n");
563         }
564 }
565
/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 *
 * The statement order matters: bump the nesting counter, mask all
 * interrupt sources, flush, then wait for in-flight handlers.
 * pch_gbe_irq_enable() only re-enables when the counter returns to 0.
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	/* Read INT_ST to flush the masking write to the device.
	 * NOTE(review): may also be read-to-clear on this HW - confirm
	 * against the datasheet. */
	ioread32(&hw->reg->INT_ST);
	/* Ensure no handler is still running when we return. */
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
581
/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 *
 * Counterpart of pch_gbe_irq_disable(): interrupts are only unmasked
 * when the irq_sem nesting counter drops back to zero.
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	/* Read INT_ST to flush the unmask write to the device. */
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
595
596
597
598 /**
599  * pch_gbe_setup_tctl - configure the Transmit control registers
600  * @adapter:  Board private structure
601  */
602 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
603 {
604         struct pch_gbe_hw *hw = &adapter->hw;
605         u32 tx_mode, tcpip;
606
607         tx_mode = PCH_GBE_TM_LONG_PKT |
608                 PCH_GBE_TM_ST_AND_FD |
609                 PCH_GBE_TM_SHORT_PKT |
610                 PCH_GBE_TM_TH_TX_STRT_8 |
611                 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
612
613         iowrite32(tx_mode, &hw->reg->TX_MODE);
614
615         tcpip = ioread32(&hw->reg->TCPIP_ACC);
616         tcpip |= PCH_GBE_TX_TCPIPACC_EN;
617         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
618         return;
619 }
620
621 /**
622  * pch_gbe_configure_tx - Configure Transmit Unit after Reset
623  * @adapter:  Board private structure
624  */
625 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
626 {
627         struct pch_gbe_hw *hw = &adapter->hw;
628         u32 tdba, tdlen, dctrl;
629
630         pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
631                  (unsigned long long)adapter->tx_ring->dma,
632                  adapter->tx_ring->size);
633
634         /* Setup the HW Tx Head and Tail descriptor pointers */
635         tdba = adapter->tx_ring->dma;
636         tdlen = adapter->tx_ring->size - 0x10;
637         iowrite32(tdba, &hw->reg->TX_DSC_BASE);
638         iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
639         iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
640
641         /* Enables Transmission DMA */
642         dctrl = ioread32(&hw->reg->DMA_CTRL);
643         dctrl |= PCH_GBE_TX_DMA_EN;
644         iowrite32(dctrl, &hw->reg->DMA_CTRL);
645 }
646
647 /**
648  * pch_gbe_setup_rctl - Configure the receive control registers
649  * @adapter:  Board private structure
650  */
651 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
652 {
653         struct pch_gbe_hw *hw = &adapter->hw;
654         u32 rx_mode, tcpip;
655
656         rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
657         PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
658
659         iowrite32(rx_mode, &hw->reg->RX_MODE);
660
661         tcpip = ioread32(&hw->reg->TCPIP_ACC);
662
663         if (adapter->rx_csum) {
664                 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
665                 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
666         } else {
667                 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
668                 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
669         }
670         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
671         return;
672 }
673
/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 *
 * Order is significant here: the MAC receiver and RX DMA engine are
 * disabled before the descriptor ring registers are reprogrammed,
 * then both are re-enabled.
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rctl, rxdma;

	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	/* Apply the current flow-control settings first. */
	pch_gbe_mac_force_mac_fc(hw);

	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring.
	 * NOTE(review): the software pointer is set to base + length,
	 * i.e. one past the ring - presumably marking the whole ring
	 * available to the HW; confirm against the datasheet. */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);

	/* Enables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma |= PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
	/* Enables Receive */
	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
}
717
718 /**
719  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
720  * @adapter:     Board private structure
721  * @buffer_info: Buffer information structure
722  */
723 static void pch_gbe_unmap_and_free_tx_resource(
724         struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
725 {
726         if (buffer_info->mapped) {
727                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
728                                  buffer_info->length, DMA_TO_DEVICE);
729                 buffer_info->mapped = false;
730         }
731         if (buffer_info->skb) {
732                 dev_kfree_skb_any(buffer_info->skb);
733                 buffer_info->skb = NULL;
734         }
735 }
736
737 /**
738  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
739  * @adapter:      Board private structure
740  * @buffer_info:  Buffer information structure
741  */
742 static void pch_gbe_unmap_and_free_rx_resource(
743                                         struct pch_gbe_adapter *adapter,
744                                         struct pch_gbe_buffer *buffer_info)
745 {
746         if (buffer_info->mapped) {
747                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
748                                  buffer_info->length, DMA_FROM_DEVICE);
749                 buffer_info->mapped = false;
750         }
751         if (buffer_info->skb) {
752                 dev_kfree_skb_any(buffer_info->skb);
753                 buffer_info->skb = NULL;
754         }
755 }
756
757 /**
758  * pch_gbe_clean_tx_ring - Free Tx Buffers
759  * @adapter:  Board private structure
760  * @tx_ring:  Ring to be cleaned
761  */
762 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
763                                    struct pch_gbe_tx_ring *tx_ring)
764 {
765         struct pch_gbe_hw *hw = &adapter->hw;
766         struct pch_gbe_buffer *buffer_info;
767         unsigned long size;
768         unsigned int i;
769
770         /* Free all the Tx ring sk_buffs */
771         for (i = 0; i < tx_ring->count; i++) {
772                 buffer_info = &tx_ring->buffer_info[i];
773                 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
774         }
775         pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
776
777         size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
778         memset(tx_ring->buffer_info, 0, size);
779
780         /* Zero out the descriptor ring */
781         memset(tx_ring->desc, 0, tx_ring->size);
782         tx_ring->next_to_use = 0;
783         tx_ring->next_to_clean = 0;
784         iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
785         iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
786 }
787
788 /**
789  * pch_gbe_clean_rx_ring - Free Rx Buffers
790  * @adapter:  Board private structure
791  * @rx_ring:  Ring to free buffers from
792  */
793 static void
794 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
795                       struct pch_gbe_rx_ring *rx_ring)
796 {
797         struct pch_gbe_hw *hw = &adapter->hw;
798         struct pch_gbe_buffer *buffer_info;
799         unsigned long size;
800         unsigned int i;
801
802         /* Free all the Rx ring sk_buffs */
803         for (i = 0; i < rx_ring->count; i++) {
804                 buffer_info = &rx_ring->buffer_info[i];
805                 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
806         }
807         pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
808         size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
809         memset(rx_ring->buffer_info, 0, size);
810
811         /* Zero out the descriptor ring */
812         memset(rx_ring->desc, 0, rx_ring->size);
813         rx_ring->next_to_clean = 0;
814         rx_ring->next_to_use = 0;
815         iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
816         iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
817 }
818
/**
 * pch_gbe_set_rgmii_ctrl - Program the RGMII control register for a speed
 * @adapter:  Board private structure
 * @speed:    Link speed (SPEED_10 / SPEED_100 / SPEED_1000)
 * @duplex:   Link duplex (unused here; kept for symmetry with
 *            pch_gbe_set_mode())
 *
 * In RGMII builds, selects the RGMII clock rate matching the link
 * speed; in GMII builds the register is simply cleared.  An
 * unrecognized speed leaves rgmii == 0 and writes that.
 */
static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				    u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	/* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else	/* GMII */
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}
847 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
848                               u16 duplex)
849 {
850         struct net_device *netdev = adapter->netdev;
851         struct pch_gbe_hw *hw = &adapter->hw;
852         unsigned long mode = 0;
853
854         /* Set the communication mode */
855         switch (speed) {
856         case SPEED_10:
857                 mode = PCH_GBE_MODE_MII_ETHER;
858                 netdev->tx_queue_len = 10;
859                 break;
860         case SPEED_100:
861                 mode = PCH_GBE_MODE_MII_ETHER;
862                 netdev->tx_queue_len = 100;
863                 break;
864         case SPEED_1000:
865                 mode = PCH_GBE_MODE_GMII_ETHER;
866                 break;
867         }
868         if (duplex == DUPLEX_FULL)
869                 mode |= PCH_GBE_MODE_FULL_DUPLEX;
870         else
871                 mode |= PCH_GBE_MODE_HALF_DUPLEX;
872         iowrite32(mode, &hw->reg->MODE);
873 }
874
/**
 * pch_gbe_watchdog - Watchdog process
 * @data:  Board private structure (cast from the timer's unsigned long arg)
 *
 * Periodic timer callback: refreshes statistics and tracks PHY link
 * transitions, programming the MAC to match the negotiated link.
 * Always re-arms itself with PCH_GBE_WATCHDOG_PERIOD.
 */
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct ethtool_cmd cmd;

	pr_debug("right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	/* Link-up transition: PHY reports link but carrier is still off */
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		/* Restore the queue length saved before a down-speed link
		 * shrank it (see pch_gbe_set_mode) */
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			/* Could not read PHY settings: re-arm the timer and
			 * retry on the next watchdog period */
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = cmd.speed;
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
						hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   cmd.speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		/* Link-down transition: record a conservative speed/duplex
		 * and stop the transmit queue */
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}
924
925 /**
926  * pch_gbe_tx_queue - Carry out queuing of the transmission data
927  * @adapter:  Board private structure
928  * @tx_ring:  Tx descriptor ring structure
929  * @skb:      Sockt buffer structure
930  */
931 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
932                               struct pch_gbe_tx_ring *tx_ring,
933                               struct sk_buff *skb)
934 {
935         struct pch_gbe_hw *hw = &adapter->hw;
936         struct pch_gbe_tx_desc *tx_desc;
937         struct pch_gbe_buffer *buffer_info;
938         struct sk_buff *tmp_skb;
939         unsigned int frame_ctrl;
940         unsigned int ring_num;
941         unsigned long flags;
942
943         /*-- Set frame control --*/
944         frame_ctrl = 0;
945         if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
946                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
947         if (unlikely(!adapter->tx_csum))
948                 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
949
950         /* Performs checksum processing */
951         /*
952          * It is because the hardware accelerator does not support a checksum,
953          * when the received data size is less than 64 bytes.
954          */
955         if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) {
956                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
957                               PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
958                 if (skb->protocol == htons(ETH_P_IP)) {
959                         struct iphdr *iph = ip_hdr(skb);
960                         unsigned int offset;
961                         iph->check = 0;
962                         iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
963                         offset = skb_transport_offset(skb);
964                         if (iph->protocol == IPPROTO_TCP) {
965                                 skb->csum = 0;
966                                 tcp_hdr(skb)->check = 0;
967                                 skb->csum = skb_checksum(skb, offset,
968                                                          skb->len - offset, 0);
969                                 tcp_hdr(skb)->check =
970                                         csum_tcpudp_magic(iph->saddr,
971                                                           iph->daddr,
972                                                           skb->len - offset,
973                                                           IPPROTO_TCP,
974                                                           skb->csum);
975                         } else if (iph->protocol == IPPROTO_UDP) {
976                                 skb->csum = 0;
977                                 udp_hdr(skb)->check = 0;
978                                 skb->csum =
979                                         skb_checksum(skb, offset,
980                                                      skb->len - offset, 0);
981                                 udp_hdr(skb)->check =
982                                         csum_tcpudp_magic(iph->saddr,
983                                                           iph->daddr,
984                                                           skb->len - offset,
985                                                           IPPROTO_UDP,
986                                                           skb->csum);
987                         }
988                 }
989         }
990         spin_lock_irqsave(&tx_ring->tx_lock, flags);
991         ring_num = tx_ring->next_to_use;
992         if (unlikely((ring_num + 1) == tx_ring->count))
993                 tx_ring->next_to_use = 0;
994         else
995                 tx_ring->next_to_use = ring_num + 1;
996
997         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
998         buffer_info = &tx_ring->buffer_info[ring_num];
999         tmp_skb = buffer_info->skb;
1000
1001         /* [Header:14][payload] ---> [Header:14][paddong:2][payload]    */
1002         memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1003         tmp_skb->data[ETH_HLEN] = 0x00;
1004         tmp_skb->data[ETH_HLEN + 1] = 0x00;
1005         tmp_skb->len = skb->len;
1006         memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1007                (skb->len - ETH_HLEN));
1008         /*-- Set Buffer infomation --*/
1009         buffer_info->length = tmp_skb->len;
1010         buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1011                                           buffer_info->length,
1012                                           DMA_TO_DEVICE);
1013         if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1014                 pr_err("TX DMA map failed\n");
1015                 buffer_info->dma = 0;
1016                 buffer_info->time_stamp = 0;
1017                 tx_ring->next_to_use = ring_num;
1018                 return;
1019         }
1020         buffer_info->mapped = true;
1021         buffer_info->time_stamp = jiffies;
1022
1023         /*-- Set Tx descriptor --*/
1024         tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1025         tx_desc->buffer_addr = (buffer_info->dma);
1026         tx_desc->length = (tmp_skb->len);
1027         tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1028         tx_desc->tx_frame_ctrl = (frame_ctrl);
1029         tx_desc->gbec_status = (DSC_INIT16);
1030
1031         if (unlikely(++ring_num == tx_ring->count))
1032                 ring_num = 0;
1033
1034         /* Update software pointer of TX descriptor */
1035         iowrite32(tx_ring->dma +
1036                   (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1037                   &hw->reg->TX_DSC_SW_P);
1038         dev_kfree_skb_any(skb);
1039 }
1040
1041 /**
1042  * pch_gbe_update_stats - Update the board statistics counters
1043  * @adapter:  Board private structure
1044  */
1045 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1046 {
1047         struct net_device *netdev = adapter->netdev;
1048         struct pci_dev *pdev = adapter->pdev;
1049         struct pch_gbe_hw_stats *stats = &adapter->stats;
1050         unsigned long flags;
1051
1052         /*
1053          * Prevent stats update while adapter is being reset, or if the pci
1054          * connection is down.
1055          */
1056         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1057                 return;
1058
1059         spin_lock_irqsave(&adapter->stats_lock, flags);
1060
1061         /* Update device status "adapter->stats" */
1062         stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1063         stats->tx_errors = stats->tx_length_errors +
1064             stats->tx_aborted_errors +
1065             stats->tx_carrier_errors + stats->tx_timeout_count;
1066
1067         /* Update network device status "adapter->net_stats" */
1068         netdev->stats.rx_packets = stats->rx_packets;
1069         netdev->stats.rx_bytes = stats->rx_bytes;
1070         netdev->stats.rx_dropped = stats->rx_dropped;
1071         netdev->stats.tx_packets = stats->tx_packets;
1072         netdev->stats.tx_bytes = stats->tx_bytes;
1073         netdev->stats.tx_dropped = stats->tx_dropped;
1074         /* Fill out the OS statistics structure */
1075         netdev->stats.multicast = stats->multicast;
1076         netdev->stats.collisions = stats->collisions;
1077         /* Rx Errors */
1078         netdev->stats.rx_errors = stats->rx_errors;
1079         netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1080         netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1081         /* Tx Errors */
1082         netdev->stats.tx_errors = stats->tx_errors;
1083         netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1084         netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1085
1086         spin_unlock_irqrestore(&adapter->stats_lock, flags);
1087 }
1088
/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to a network interface device structure
 * Returns
 *	- IRQ_HANDLED:  Our interrupt
 *	- IRQ_NONE:     Not our interrupt
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status */
	int_st = ioread32(&hw->reg->INT_ST);
	/* Only consider causes that are actually enabled */
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	/* Count each error cause for the driver statistics */
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		adapter->stats.intr_rx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty  */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_err("Rx descriptor is empty\n");
		/* Mask the "descriptor empty" cause so it does not storm
		 * until the ring is refilled */
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
		/* If no completion interrupts are enabled there is nothing
		 * left to schedule NAPI for */
		if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
		    == 0) {
			return IRQ_HANDLED;
		}
	}

	/* When request status is Receive interruption */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty: completion
			 * interrupts stay masked while NAPI polls */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}
1157
/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 *
 * Refills up to @cleaned_count descriptors starting at next_to_use,
 * reusing a buffer's existing skb when the clean path left it in place,
 * and finally advances the hardware's software write pointer.
 */
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		if (skb) {
			/* Reuse the skb left behind by the clean path;
			 * reset its data length to zero */
			skb_trim(skb, 0);
		} else {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (unlikely(!skb)) {
				/* Better luck next round */
				adapter->stats.rx_alloc_buff_failed++;
				break;
			}
			/* 64byte align */
			skb_reserve(skb, PCH_GBE_DMA_ALIGN);

			buffer_info->skb = skb;
			buffer_info->length = adapter->rx_buffer_len;
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			/* Mapping failed: drop the skb and stop refilling */
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		buffer_info->mapped = true;
		/* Hand the descriptor back to hardware */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;

		pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
			 i, (unsigned long long)buffer_info->dma,
			 buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* The write pointer register must point at the LAST refilled
		 * descriptor, i.e. one before next_to_use (with wrap) */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
	return;
}
1231
1232 /**
1233  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1234  * @adapter:   Board private structure
1235  * @tx_ring:   Tx descriptor ring
1236  */
1237 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1238                                         struct pch_gbe_tx_ring *tx_ring)
1239 {
1240         struct pch_gbe_buffer *buffer_info;
1241         struct sk_buff *skb;
1242         unsigned int i;
1243         unsigned int bufsz;
1244         struct pch_gbe_tx_desc *tx_desc;
1245
1246         bufsz =
1247             adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1248
1249         for (i = 0; i < tx_ring->count; i++) {
1250                 buffer_info = &tx_ring->buffer_info[i];
1251                 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1252                 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1253                 buffer_info->skb = skb;
1254                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1255                 tx_desc->gbec_status = (DSC_INIT16);
1256         }
1257         return;
1258 }
1259
/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:   Board private structure
 * @tx_ring:   Tx descriptor ring
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 *
 * Walks descriptors from next_to_clean while the hardware has cleared
 * DSC_INIT16 (i.e. has completed them), accounts status bits into the
 * driver statistics, unmaps DMA and resets each buffer for reuse.
 */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;

	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
		 tx_desc->gbec_status, tx_desc->dma_status);

	/* DSC_INIT16 cleared by hardware means the descriptor completed */
	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
		cleaned = true;
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* Classify the completion status; only the first matching
		 * condition is counted */
		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
			  ) {
			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
			  ) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			/* Collisions that still transmitted successfully */
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			pr_debug("Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
			  ) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			pr_debug("trim buffer_info->skb : %d\n", i);
			/* Keep the staging skb for reuse, just empty it */
			skb_trim(buffer_info->skb, 0);
		}
		/* Mark the descriptor free again for pch_gbe_tx_queue() */
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
			break;
	}
	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		 cleaned_count);
	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;
		pr_debug("Tx wake queue\n");
	}
	spin_lock(&adapter->tx_queue_lock);
	tx_ring->next_to_clean = i;
	spin_unlock(&adapter->tx_queue_lock);
	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
	return cleaned;
}
1348
/**
 * pch_gbe_clean_rx - Send received data up the network stack; legacy
 * @adapter:     Board private structure
 * @rx_ring:     Rx descriptor ring
 * @work_done:   Completed count
 * @work_to_do:  Request count
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 *
 * Walks completed Rx descriptors, strips the hardware's 2-byte padding
 * (inserted between the Ethernet header and payload when rx checksum
 * offload is on) and the FCS, then hands the skb to NAPI GRO.
 */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb, *new_skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		/* gbec_status still DSC_INIT16 means hardware has not
		 * written this descriptor yet: stop */
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		/* Snapshot the status words, then re-arm the descriptor */
		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				   buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
		/* Prefetch the packet */
		prefetch(skb->data);

		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x]  BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check: errored frames are only counted; the buffer
		 * skb stays in place and is reused by the refill path */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* get receive length */
			/* length convert[-3] */
			length = (rx_desc->rx_words_eob) - 3;

			/* Decide the data conversion method */
			if (!adapter->rx_csum) {
				/* [Header:14][payload] */
				if (NET_IP_ALIGN) {
					/* Because alignment differs,
					 * the new_skb is newly allocated,
					 * and data is copied to new_skb.*/
					new_skb = netdev_alloc_skb(netdev,
							 length + NET_IP_ALIGN);
					if (!new_skb) {
						/* dorrop error */
						pr_err("New skb allocation "
							"Error\n");
						goto dorrop;
					}
					skb_reserve(new_skb, NET_IP_ALIGN);
					memcpy(new_skb->data, skb->data,
					       length);
					skb = new_skb;
				} else {
					/* DMA buffer is used as SKB as it is.*/
					buffer_info->skb = NULL;
				}
			} else {
				/* [Header:14][padding:2][payload] */
				/* The length includes padding length */
				length = length - PCH_GBE_DMA_PADDING;
				if ((length < copybreak) ||
				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
					/* Because alignment differs,
					 * the new_skb is newly allocated,
					 * and data is copied to new_skb.
					 * Padding data is deleted
					 * at the time of a copy.*/
					new_skb = netdev_alloc_skb(netdev,
							 length + NET_IP_ALIGN);
					if (!new_skb) {
						/* dorrop error */
						pr_err("New skb allocation "
							"Error\n");
						goto dorrop;
					}
					skb_reserve(new_skb, NET_IP_ALIGN);
					memcpy(new_skb->data, skb->data,
					       ETH_HLEN);
					memcpy(&new_skb->data[ETH_HLEN],
					       &skb->data[ETH_HLEN +
					       PCH_GBE_DMA_PADDING],
					       length - ETH_HLEN);
					skb = new_skb;
				} else {
					/* Padding data is deleted
					 * by moving header data.*/
					memmove(&skb->data[PCH_GBE_DMA_PADDING],
						&skb->data[0], ETH_HLEN);
					skb_reserve(skb, NET_IP_ALIGN);
					buffer_info->skb = NULL;
				}
			}
			/* The length includes FCS length */
			length = length - ETH_FCS_LEN;
			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write meta date of skb */
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);
			/* Trust the hardware checksum only when the
			 * accelerator reported a fully valid TCP/IP frame */
			if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
			    PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
		}
dorrop:
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}
1517
1518 /**
1519  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1520  * @adapter:  Board private structure
1521  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1522  * Returns
1523  *      0:              Successfully
1524  *      Negative value: Failed
1525  */
1526 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1527                                 struct pch_gbe_tx_ring *tx_ring)
1528 {
1529         struct pci_dev *pdev = adapter->pdev;
1530         struct pch_gbe_tx_desc *tx_desc;
1531         int size;
1532         int desNo;
1533
1534         size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1535         tx_ring->buffer_info = vzalloc(size);
1536         if (!tx_ring->buffer_info) {
1537                 pr_err("Unable to allocate memory for the buffer infomation\n");
1538                 return -ENOMEM;
1539         }
1540
1541         tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1542
1543         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1544                                            &tx_ring->dma, GFP_KERNEL);
1545         if (!tx_ring->desc) {
1546                 vfree(tx_ring->buffer_info);
1547                 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1548                 return -ENOMEM;
1549         }
1550         memset(tx_ring->desc, 0, tx_ring->size);
1551
1552         tx_ring->next_to_use = 0;
1553         tx_ring->next_to_clean = 0;
1554         spin_lock_init(&tx_ring->tx_lock);
1555
1556         for (desNo = 0; desNo < tx_ring->count; desNo++) {
1557                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1558                 tx_desc->gbec_status = DSC_INIT16;
1559         }
1560         pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
1561                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1562                  tx_ring->desc, (unsigned long long)tx_ring->dma,
1563                  tx_ring->next_to_clean, tx_ring->next_to_use);
1564         return 0;
1565 }
1566
1567 /**
1568  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1569  * @adapter:  Board private structure
1570  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1571  * Returns
1572  *      0:              Successfully
1573  *      Negative value: Failed
1574  */
1575 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1576                                 struct pch_gbe_rx_ring *rx_ring)
1577 {
1578         struct pci_dev *pdev = adapter->pdev;
1579         struct pch_gbe_rx_desc *rx_desc;
1580         int size;
1581         int desNo;
1582
1583         size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1584         rx_ring->buffer_info = vzalloc(size);
1585         if (!rx_ring->buffer_info) {
1586                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1587                 return -ENOMEM;
1588         }
1589         rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1590         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1591                                            &rx_ring->dma, GFP_KERNEL);
1592
1593         if (!rx_ring->desc) {
1594                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1595                 vfree(rx_ring->buffer_info);
1596                 return -ENOMEM;
1597         }
1598         memset(rx_ring->desc, 0, rx_ring->size);
1599         rx_ring->next_to_clean = 0;
1600         rx_ring->next_to_use = 0;
1601         for (desNo = 0; desNo < rx_ring->count; desNo++) {
1602                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1603                 rx_desc->gbec_status = DSC_INIT16;
1604         }
1605         pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
1606                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1607                  rx_ring->desc, (unsigned long long)rx_ring->dma,
1608                  rx_ring->next_to_clean, rx_ring->next_to_use);
1609         return 0;
1610 }
1611
/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring for a specific queue
 *
 * Releases everything pch_gbe_setup_tx_resources() allocated: any
 * buffers still on the ring, the buffer_info array, and the coherent
 * descriptor ring memory.  Order matters: buffers must be unmapped and
 * freed before the bookkeeping/descriptor memory goes away.
 */
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
				struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	/* Drop in-flight skbs/DMA mappings before freeing the ring. */
	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}
1628
/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter:  Board private structure
 * @rx_ring:  Ring to clean the resources from
 *
 * Mirror of pch_gbe_free_tx_resources() for the receive side: unwinds
 * pch_gbe_setup_rx_resources() in reverse order.
 */
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
				struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	/* Drop posted receive buffers/DMA mappings before freeing the ring. */
	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
	rx_ring->desc = NULL;
}
1645
/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter:  Board private structure
 * Returns
 *	0:		Successfully
 *	Negative value: Failed
 *
 * Tries MSI first and falls back to the shared legacy line; the
 * outcome is recorded in adapter->have_msi so pch_gbe_free_irq() can
 * undo the right thing.
 */
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int flags;

	/* Legacy INTx may be shared with other devices; MSI is exclusive,
	 * so IRQF_SHARED is dropped when MSI comes up. */
	flags = IRQF_SHARED;
	adapter->have_msi = false;
	err = pci_enable_msi(adapter->pdev);
	pr_debug("call pci_enable_msi\n");
	if (err) {
		/* MSI unavailable - keep the shared legacy interrupt. */
		pr_debug("call pci_enable_msi - Error: %d\n", err);
	} else {
		flags = 0;
		adapter->have_msi = true;
	}
	err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
			  flags, netdev->name, netdev);
	if (err)
		pr_err("Unable to allocate interrupt Error: %d\n", err);
	pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
		 adapter->have_msi, flags, err);
	return err;
}
1677
1678
1679 static void pch_gbe_set_multi(struct net_device *netdev);
/**
 * pch_gbe_up - Up GbE network device
 * @adapter:  Board private structure
 * Returns
 *	0:		Successfully
 *	Negative value: Failed
 *
 * Reprograms the MAC after a reset, posts ring buffers, then enables
 * NAPI, interrupts and the transmit queue — in that order, so nothing
 * fires before the rings are ready.
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err;

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up\n");
		return err;
	}
	/* Post descriptors before enabling interrupts so the receiver has
	 * buffers to fill from the first frame on. */
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	/* Remember the queue length so pch_gbe_down() can restore it. */
	adapter->tx_queue_len = netdev->tx_queue_len;

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;
}
1719
/**
 * pch_gbe_down - Down GbE network device
 * @adapter:  Board private structure
 *
 * Inverse of pch_gbe_up(): quiesces NAPI and interrupts first, then
 * the watchdog, then resets the MAC and drains both rings.
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* Restore the queue length saved by pch_gbe_up(). */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* Reset the hardware before draining so no DMA races the frees. */
	pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
}
1746
/**
 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 * @adapter:  Board private structure to initialize
 * Returns
 *	0:		Successfully
 *	Negative value: Failed
 *
 * One-time setup at probe: default buffer sizes, HAL function table,
 * ring allocation, locks, and initial statistics.
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Frame limits derived from the default MTU. */
	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->tx_queue_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	/* Start with interrupts masked until pch_gbe_up() enables them. */
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}
1786
/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:     Network interface device structure
 * Returns
 *	0:		Successfully
 *	Negative value: Failed
 *
 * Allocates both descriptor rings, powers up the PHY and brings the
 * device up.  Failures unwind via the goto chain in reverse order of
 * acquisition.
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

err_up:
	/* Keep the PHY powered if wake-on-LAN events are armed. */
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}
1826
1827 /**
1828  * pch_gbe_stop - Disables a network interface
1829  * @netdev:  Network interface device structure
1830  * Returns
1831  *      0: Successfully
1832  */
1833 static int pch_gbe_stop(struct net_device *netdev)
1834 {
1835         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1836         struct pch_gbe_hw *hw = &adapter->hw;
1837
1838         pch_gbe_down(adapter);
1839         if (!adapter->wake_up_evt)
1840                 pch_gbe_hal_power_down_phy(hw);
1841         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
1842         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
1843         return 0;
1844 }
1845
/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 *
 * Length-checks the frame, then uses a trylock on the ring so the
 * xmit path never spins: a lock collision returns NETDEV_TX_LOCKED and
 * the core requeues the skb.
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	/* Oversized frames are dropped (not deferred) and counted. */
	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	/* Ring full: stop the queue; the clean path restarts it. */
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}
1884
1885 /**
1886  * pch_gbe_get_stats - Get System Network Statistics
1887  * @netdev:  Network interface device structure
1888  * Returns:  The current stats
1889  */
1890 static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
1891 {
1892         /* only return the current stats */
1893         return &netdev->stats;
1894 }
1895
/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:   Network interface device structure
 *
 * Programs the RX_MODE filter bits from the netdev flags, then — when
 * the multicast list fits in the MAC address registers — loads the
 * packed list of multicast addresses into the hardware.
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		/* Promiscuous: disable both unicast and multicast filters. */
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* List too long for the MAR registers: filtering was already
	 * opened up above, nothing more to program. */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
1953
1954 /**
1955  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
1956  * @netdev: Network interface device structure
1957  * @addr:   Pointer to an address structure
1958  * Returns
1959  *      0:              Successfully
1960  *      -EADDRNOTAVAIL: Failed
1961  */
1962 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
1963 {
1964         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1965         struct sockaddr *skaddr = addr;
1966         int ret_val;
1967
1968         if (!is_valid_ether_addr(skaddr->sa_data)) {
1969                 ret_val = -EADDRNOTAVAIL;
1970         } else {
1971                 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
1972                 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
1973                 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1974                 ret_val = 0;
1975         }
1976         pr_debug("ret_val : 0x%08x\n", ret_val);
1977         pr_debug("dev_addr : %pM\n", netdev->dev_addr);
1978         pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
1979         pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
1980                  ioread32(&adapter->hw.reg->mac_adr[0].high),
1981                  ioread32(&adapter->hw.reg->mac_adr[0].low));
1982         return ret_val;
1983 }
1984
1985 /**
1986  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
1987  * @netdev:   Network interface device structure
1988  * @new_mtu:  New value for maximum frame size
1989  * Returns
1990  *      0:              Successfully
1991  *      -EINVAL:        Failed
1992  */
1993 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
1994 {
1995         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1996         int max_frame;
1997
1998         max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1999         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2000                 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2001                 pr_err("Invalid MTU setting\n");
2002                 return -EINVAL;
2003         }
2004         if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2005                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2006         else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2007                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2008         else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2009                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2010         else
2011                 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
2012         netdev->mtu = new_mtu;
2013         adapter->hw.mac.max_frame_size = max_frame;
2014
2015         if (netif_running(netdev))
2016                 pch_gbe_reinit_locked(adapter);
2017         else
2018                 pch_gbe_reset(adapter);
2019
2020         pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2021                  max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2022                  adapter->hw.mac.max_frame_size);
2023         return 0;
2024 }
2025
2026 /**
2027  * pch_gbe_ioctl - Controls register through a MII interface
2028  * @netdev:   Network interface device structure
2029  * @ifr:      Pointer to ifr structure
2030  * @cmd:      Control command
2031  * Returns
2032  *      0:      Successfully
2033  *      Negative value: Failed
2034  */
2035 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2036 {
2037         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2038
2039         pr_debug("cmd : 0x%04x\n", cmd);
2040
2041         return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2042 }
2043
2044 /**
2045  * pch_gbe_tx_timeout - Respond to a Tx Hang
2046  * @netdev:   Network interface device structure
2047  */
2048 static void pch_gbe_tx_timeout(struct net_device *netdev)
2049 {
2050         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2051
2052         /* Do the reset outside of interrupt context */
2053         adapter->stats.tx_timeout_count++;
2054         schedule_work(&adapter->reset_task);
2055 }
2056
/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi:    Pointer of polling device struct
 * @budget:  The maximum number of a packet
 * Returns
 *	false:  Exit the polling mode
 *	true:   Continue the polling mode
 *
 * Cleans the Tx ring and receives up to @budget frames.  Interrupts
 * are re-enabled only after napi_complete(), per the NAPI contract.
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev)) {
		poll_end_flag = true;
	} else {
		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);

		/* Tx work was done: report full budget so NAPI polls again. */
		if (cleaned)
			work_done = budget;
		/* If no Tx and not enough Rx work done,
		 * exit the polling mode
		 */
		if ((work_done < budget) || !netif_running(netdev))
			poll_end_flag = true;
	}

	if (poll_end_flag) {
		/* Done polling: hand back to interrupt-driven operation. */
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2102
2103 #ifdef CONFIG_NET_POLL_CONTROLLER
2104 /**
2105  * pch_gbe_netpoll - Used by things like netconsole to send skbs
2106  * @netdev:  Network interface device structure
2107  */
2108 static void pch_gbe_netpoll(struct net_device *netdev)
2109 {
2110         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2111
2112         disable_irq(adapter->pdev->irq);
2113         pch_gbe_intr(adapter->pdev->irq, netdev);
2114         enable_irq(adapter->pdev->irq);
2115 }
2116 #endif
2117
2118 static const struct net_device_ops pch_gbe_netdev_ops = {
2119         .ndo_open = pch_gbe_open,
2120         .ndo_stop = pch_gbe_stop,
2121         .ndo_start_xmit = pch_gbe_xmit_frame,
2122         .ndo_get_stats = pch_gbe_get_stats,
2123         .ndo_set_mac_address = pch_gbe_set_mac,
2124         .ndo_tx_timeout = pch_gbe_tx_timeout,
2125         .ndo_change_mtu = pch_gbe_change_mtu,
2126         .ndo_do_ioctl = pch_gbe_ioctl,
2127         .ndo_set_multicast_list = &pch_gbe_set_multi,
2128 #ifdef CONFIG_NET_POLL_CONTROLLER
2129         .ndo_poll_controller = pch_gbe_netpoll,
2130 #endif
2131 };
2132
2133 static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2134                                                 pci_channel_state_t state)
2135 {
2136         struct net_device *netdev = pci_get_drvdata(pdev);
2137         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2138
2139         netif_device_detach(netdev);
2140         if (netif_running(netdev))
2141                 pch_gbe_down(adapter);
2142         pci_disable_device(pdev);
2143         /* Request a slot slot reset. */
2144         return PCI_ERS_RESULT_NEED_RESET;
2145 }
2146
/* PCI error recovery: the slot has been reset.  Re-enable the device,
 * restore bus mastering, and bring the MAC/PHY back to a known state;
 * pch_gbe_io_resume() will restart traffic afterwards. */
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	/* No wake needed while fully powered. */
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
2166
/* PCI error recovery: traffic may flow again.  Restart a running
 * interface and reattach the netdev. */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Leave the device detached if bring-up fails. */
	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2180
2181 static int __pch_gbe_suspend(struct pci_dev *pdev)
2182 {
2183         struct net_device *netdev = pci_get_drvdata(pdev);
2184         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2185         struct pch_gbe_hw *hw = &adapter->hw;
2186         u32 wufc = adapter->wake_up_evt;
2187         int retval = 0;
2188
2189         netif_device_detach(netdev);
2190         if (netif_running(netdev))
2191                 pch_gbe_down(adapter);
2192         if (wufc) {
2193                 pch_gbe_set_multi(netdev);
2194                 pch_gbe_setup_rctl(adapter);
2195                 pch_gbe_configure_rx(adapter);
2196                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2197                                         hw->mac.link_duplex);
2198                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2199                                         hw->mac.link_duplex);
2200                 pch_gbe_mac_set_wol_event(hw, wufc);
2201                 pci_disable_device(pdev);
2202         } else {
2203                 pch_gbe_hal_power_down_phy(hw);
2204                 pch_gbe_mac_set_wol_event(hw, wufc);
2205                 pci_disable_device(pdev);
2206         }
2207         return retval;
2208 }
2209
2210 #ifdef CONFIG_PM
/* dev_pm_ops suspend hook: translate struct device to the pci_dev the
 * shared worker expects. */
static int pch_gbe_suspend(struct device *device)
{
	return __pch_gbe_suspend(to_pci_dev(device));
}
2217
2218 static int pch_gbe_resume(struct device *device)
2219 {
2220         struct pci_dev *pdev = to_pci_dev(device);
2221         struct net_device *netdev = pci_get_drvdata(pdev);
2222         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2223         struct pch_gbe_hw *hw = &adapter->hw;
2224         u32 err;
2225
2226         err = pci_enable_device(pdev);
2227         if (err) {
2228                 pr_err("Cannot enable PCI device from suspend\n");
2229                 return err;
2230         }
2231         pci_set_master(pdev);
2232         pch_gbe_hal_power_up_phy(hw);
2233         pch_gbe_reset(adapter);
2234         /* Clear wake on lan control and status */
2235         pch_gbe_mac_set_wol_event(hw, 0);
2236
2237         if (netif_running(netdev))
2238                 pch_gbe_up(adapter);
2239         netif_device_attach(netdev);
2240
2241         return 0;
2242 }
2243 #endif /* CONFIG_PM */
2244
2245 static void pch_gbe_shutdown(struct pci_dev *pdev)
2246 {
2247         __pch_gbe_suspend(pdev);
2248         if (system_state == SYSTEM_POWER_OFF) {
2249                 pci_wake_from_d3(pdev, true);
2250                 pci_set_power_state(pdev, PCI_D3hot);
2251         }
2252 }
2253
/* Device removal: unwind pch_gbe_probe() in reverse order.  The reset
 * work is cancelled first so it cannot run against a dying device,
 * and the netdev is unregistered before its backing memory goes away. */
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	/* free_netdev() also frees the adapter embedded in the netdev. */
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2272
2273 static int pch_gbe_probe(struct pci_dev *pdev,
2274                           const struct pci_device_id *pci_id)
2275 {
2276         struct net_device *netdev;
2277         struct pch_gbe_adapter *adapter;
2278         int ret;
2279
2280         ret = pci_enable_device(pdev);
2281         if (ret)
2282                 return ret;
2283
2284         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2285                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2286                 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2287                 if (ret) {
2288                         ret = pci_set_consistent_dma_mask(pdev,
2289                                                           DMA_BIT_MASK(32));
2290                         if (ret) {
2291                                 dev_err(&pdev->dev, "ERR: No usable DMA "
2292                                         "configuration, aborting\n");
2293                                 goto err_disable_device;
2294                         }
2295                 }
2296         }
2297
2298         ret = pci_request_regions(pdev, KBUILD_MODNAME);
2299         if (ret) {
2300                 dev_err(&pdev->dev,
2301                         "ERR: Can't reserve PCI I/O and memory resources\n");
2302                 goto err_disable_device;
2303         }
2304         pci_set_master(pdev);
2305
2306         netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2307         if (!netdev) {
2308                 ret = -ENOMEM;
2309                 dev_err(&pdev->dev,
2310                         "ERR: Can't allocate and set up an Ethernet device\n");
2311                 goto err_release_pci;
2312         }
2313         SET_NETDEV_DEV(netdev, &pdev->dev);
2314
2315         pci_set_drvdata(pdev, netdev);
2316         adapter = netdev_priv(netdev);
2317         adapter->netdev = netdev;
2318         adapter->pdev = pdev;
2319         adapter->hw.back = adapter;
2320         adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2321         if (!adapter->hw.reg) {
2322                 ret = -EIO;
2323                 dev_err(&pdev->dev, "Can't ioremap\n");
2324                 goto err_free_netdev;
2325         }
2326
2327         netdev->netdev_ops = &pch_gbe_netdev_ops;
2328         netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2329         netif_napi_add(netdev, &adapter->napi,
2330                        pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2331         netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2332         pch_gbe_set_ethtool_ops(netdev);
2333
2334         pch_gbe_mac_reset_hw(&adapter->hw);
2335
2336         /* setup the private structure */
2337         ret = pch_gbe_sw_init(adapter);
2338         if (ret)
2339                 goto err_iounmap;
2340
2341         /* Initialize PHY */
2342         ret = pch_gbe_init_phy(adapter);
2343         if (ret) {
2344                 dev_err(&pdev->dev, "PHY initialize error\n");
2345                 goto err_free_adapter;
2346         }
2347         pch_gbe_hal_get_bus_info(&adapter->hw);
2348
2349         /* Read the MAC address. and store to the private data */
2350         ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2351         if (ret) {
2352                 dev_err(&pdev->dev, "MAC address Read Error\n");
2353                 goto err_free_adapter;
2354         }
2355
2356         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2357         if (!is_valid_ether_addr(netdev->dev_addr)) {
2358                 dev_err(&pdev->dev, "Invalid MAC Address\n");
2359                 ret = -EIO;
2360                 goto err_free_adapter;
2361         }
2362         setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2363                     (unsigned long)adapter);
2364
2365         INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2366
2367         pch_gbe_check_options(adapter);
2368
2369         if (adapter->tx_csum)
2370                 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2371         else
2372                 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2373
2374         /* initialize the wol settings based on the eeprom settings */
2375         adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2376         dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2377
2378         /* reset the hardware with the new settings */
2379         pch_gbe_reset(adapter);
2380
2381         ret = register_netdev(netdev);
2382         if (ret)
2383                 goto err_free_adapter;
2384         /* tell the stack to leave us alone until pch_gbe_open() is called */
2385         netif_carrier_off(netdev);
2386         netif_stop_queue(netdev);
2387
2388         dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
2389
2390         device_set_wakeup_enable(&pdev->dev, 1);
2391         return 0;
2392
2393 err_free_adapter:
2394         pch_gbe_hal_phy_hw_reset(&adapter->hw);
2395         kfree(adapter->tx_ring);
2396         kfree(adapter->rx_ring);
2397 err_iounmap:
2398         iounmap(adapter->hw.reg);
2399 err_free_netdev:
2400         free_netdev(netdev);
2401 err_release_pci:
2402         pci_release_regions(pdev);
2403 err_disable_device:
2404         pci_disable_device(pdev);
2405         return ret;
2406 }
2407
/* PCI device ID table: matches the Intel IOH1 (EG20T PCH) GbE function;
 * class/class_mask restrict the match to Ethernet-class functions. */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};
2419
#ifdef CONFIG_PM
/* Power-management callbacks: every suspend-like transition (suspend,
 * hibernate freeze/poweroff) uses pch_gbe_suspend, every resume-like
 * transition (resume, thaw, restore) uses pch_gbe_resume. */
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif
2430
/* PCI AER (advanced error reporting) hooks: detect a bus error, reset the
 * slot, then resume normal operation. */
static struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};
2436
2437 static struct pci_driver pch_gbe_pcidev = {
2438         .name = KBUILD_MODNAME,
2439         .id_table = pch_gbe_pcidev_id,
2440         .probe = pch_gbe_probe,
2441         .remove = pch_gbe_remove,
2442 #ifdef CONFIG_PM_OPS
2443         .driver.pm = &pch_gbe_pm_ops,
2444 #endif
2445         .shutdown = pch_gbe_shutdown,
2446         .err_handler = &pch_gbe_err_handler
2447 };
2448
2449
2450 static int __init pch_gbe_init_module(void)
2451 {
2452         int ret;
2453
2454         ret = pci_register_driver(&pch_gbe_pcidev);
2455         if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2456                 if (copybreak == 0) {
2457                         pr_info("copybreak disabled\n");
2458                 } else {
2459                         pr_info("copybreak enabled for packets <= %u bytes\n",
2460                                 copybreak);
2461                 }
2462         }
2463         return ret;
2464 }
2465
/*
 * pch_gbe_exit_module - Driver exit routine
 *
 * Unregisters the PCI driver; the PCI core then invokes pch_gbe_remove
 * for every bound device.
 */
static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_pcidev);
}
2470
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Exported for module autoloading via PCI ID match. */
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

/* Runtime-tunable via sysfs (mode 0644); see pch_gbe_init_module(). */
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
2483
2484 /* pch_gbe_main.c */