2 * Copyright (c) 2012 Intel Corporation. All rights reserved.
3 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * This file contains all of the code that is specific to the
36 * InfiniPath 7322 chip
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
49 #include "qib_7322_regs.h"
53 #include "qib_verbs.h"
55 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
56 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
57 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
58 static irqreturn_t qib_7322intr(int irq, void *data);
59 static irqreturn_t qib_7322bufavail(int irq, void *data);
60 static irqreturn_t sdma_intr(int irq, void *data);
61 static irqreturn_t sdma_idle_intr(int irq, void *data);
62 static irqreturn_t sdma_progress_intr(int irq, void *data);
63 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
64 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
65 struct qib_ctxtdata *rcd);
66 static u8 qib_7322_phys_portstate(u64);
67 static u32 qib_7322_iblink_state(u64);
68 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
70 static void force_h1(struct qib_pportdata *);
71 static void adj_tx_serdes(struct qib_pportdata *);
72 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
73 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
75 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
76 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
77 static void serdes_7322_los_enable(struct qib_pportdata *, int);
78 static int serdes_7322_init_old(struct qib_pportdata *);
79 static int serdes_7322_init_new(struct qib_pportdata *);
81 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
83 /* LE2 serdes values for different cases */
88 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
89 #define IBSD(hw_pidx) (hw_pidx + 2)
91 /* these are variables for documentation and experimentation purposes */
92 static const unsigned rcv_int_timeout = 375;
93 static const unsigned rcv_int_count = 16;
94 static const unsigned sdma_idle_cnt = 64;
96 /* Time to stop altering Rx Equalization parameters, after link up. */
97 #define RXEQ_DISABLE_MSECS 2500
100 * Number of VLs we are configured to use (to allow for more
101 * credits per vl, etc.)
103 ushort qib_num_cfg_vls = 2;
104 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
105 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
107 static ushort qib_chase = 1;
108 module_param_named(chase, qib_chase, ushort, S_IRUGO);
109 MODULE_PARM_DESC(chase, "Enable state chase handling");
111 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
112 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
113 MODULE_PARM_DESC(long_attenuation, \
114 "attenuation cutoff (dB) for long copper cable setup");
116 static ushort qib_singleport;
117 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
118 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
120 static ushort qib_krcvq01_no_msi;
121 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
122 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
125 * Receive header queue sizes
127 static unsigned qib_rcvhdrcnt;
128 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
129 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
131 static unsigned qib_rcvhdrsize;
132 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
133 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
135 static unsigned qib_rcvhdrentsize;
136 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
137 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
139 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
140 /* for read back, default index is ~5m copper cable */
141 static char txselect_list[MAX_ATTEN_LEN] = "10";
142 static struct kparam_string kp_txselect = {
143 .string = txselect_list,
144 .maxlen = MAX_ATTEN_LEN
146 static int setup_txselect(const char *, struct kernel_param *);
147 module_param_call(txselect, setup_txselect, param_get_string,
148 &kp_txselect, S_IWUSR | S_IRUGO);
149 MODULE_PARM_DESC(txselect, \
150 "Tx serdes indices (for no QSFP or invalid QSFP data)");
152 #define BOARD_QME7342 5
153 #define BOARD_QMH7342 6
154 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
156 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
159 #define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
161 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
163 #define MASK_ACROSS(lsb, msb) \
164 (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
166 #define SYM_RMASK(regname, fldname) ((u64) \
167 QIB_7322_##regname##_##fldname##_RMASK)
169 #define SYM_MASK(regname, fldname) ((u64) \
170 QIB_7322_##regname##_##fldname##_RMASK << \
171 QIB_7322_##regname##_##fldname##_LSB)
173 #define SYM_FIELD(value, regname, fldname) ((u64) \
174 (((value) >> SYM_LSB(regname, fldname)) & \
175 SYM_RMASK(regname, fldname)))
177 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
178 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
179 (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
181 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
182 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
183 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
184 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
185 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
186 /* Below because most, but not all, fields of IntMask have that full suffix */
187 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
190 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
193 * the size bits give us 2^N, in KB units. 0 marks as invalid,
194 * and 7 is reserved. We currently use only 2KB and 4KB
196 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
197 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
198 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
199 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
201 #define SendIBSLIDAssignMask \
202 QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
203 #define SendIBSLMCMask \
204 QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
206 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
207 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
208 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
209 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
210 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
211 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
213 #define _QIB_GPIO_SDA_NUM 1
214 #define _QIB_GPIO_SCL_NUM 0
215 #define QIB_EEPROM_WEN_NUM 14
216 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
218 /* HW counter clock is at 4nsec */
219 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
221 /* full speed IB port 1 only */
222 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
223 #define PORT_SPD_CAP_SHIFT 3
225 /* full speed featuremask, both ports */
226 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
229 * This file contains almost all the chip-specific register information and
230 * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
233 /* Use defines to tie machine-generated names to lower-case names */
234 #define kr_contextcnt KREG_IDX(ContextCnt)
235 #define kr_control KREG_IDX(Control)
236 #define kr_counterregbase KREG_IDX(CntrRegBase)
237 #define kr_errclear KREG_IDX(ErrClear)
238 #define kr_errmask KREG_IDX(ErrMask)
239 #define kr_errstatus KREG_IDX(ErrStatus)
240 #define kr_extctrl KREG_IDX(EXTCtrl)
241 #define kr_extstatus KREG_IDX(EXTStatus)
242 #define kr_gpio_clear KREG_IDX(GPIOClear)
243 #define kr_gpio_mask KREG_IDX(GPIOMask)
244 #define kr_gpio_out KREG_IDX(GPIOOut)
245 #define kr_gpio_status KREG_IDX(GPIOStatus)
246 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
247 #define kr_debugportval KREG_IDX(DebugPortValueReg)
248 #define kr_fmask KREG_IDX(feature_mask)
249 #define kr_act_fmask KREG_IDX(active_feature_mask)
250 #define kr_hwerrclear KREG_IDX(HwErrClear)
251 #define kr_hwerrmask KREG_IDX(HwErrMask)
252 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
253 #define kr_intclear KREG_IDX(IntClear)
254 #define kr_intmask KREG_IDX(IntMask)
255 #define kr_intredirect KREG_IDX(IntRedirect0)
256 #define kr_intstatus KREG_IDX(IntStatus)
257 #define kr_pagealign KREG_IDX(PageAlign)
258 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
259 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
260 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
261 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
262 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
263 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
264 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
265 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
266 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
267 #define kr_revision KREG_IDX(Revision)
268 #define kr_scratch KREG_IDX(Scratch)
269 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
270 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
271 #define kr_sendctrl KREG_IDX(SendCtrl)
272 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
273 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
274 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
275 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
276 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
277 #define kr_sendpiosize KREG_IDX(SendBufSize)
278 #define kr_sendregbase KREG_IDX(SendRegBase)
279 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
280 #define kr_userregbase KREG_IDX(UserRegBase)
281 #define kr_intgranted KREG_IDX(Int_Granted)
282 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
283 #define kr_intblocked KREG_IDX(IntBlocked)
284 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
287 * per-port kernel registers. Access only with qib_read_kreg_port()
288 * or qib_write_kreg_port()
290 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
291 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
292 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
293 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
294 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
295 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
296 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
297 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
298 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
299 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
300 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
301 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
302 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
303 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
304 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
305 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
306 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
307 #define krp_psstart KREG_IBPORT_IDX(PSStart)
308 #define krp_psstat KREG_IBPORT_IDX(PSStat)
309 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
310 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
311 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
312 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
313 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
314 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
315 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
316 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
317 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
318 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
319 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
320 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
321 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
322 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
323 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
324 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
325 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
326 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
327 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
328 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
329 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
330 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
331 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
332 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
333 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
334 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
335 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
336 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
337 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
338 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
339 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
342 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
343 * or qib_write_kreg_ctxt()
345 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
346 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
349 * TID Flow table, per context. Reduces
350 * number of hdrq updates to one per flow (or on errors).
351 * context 0 and 1 share same memory, but have distinct
352 * addresses. Since for now, we never use expected sends
353 * on kernel contexts, we don't worry about that (we initialize
354 * those entries for ctxt 0/1 on driver load twice, for example).
356 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
357 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
359 /* these are the error bits in the tid flows, and are W1C */
360 #define TIDFLOW_ERRBITS ( \
361 (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
362 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
363 (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
364 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
366 /* Most (not all) Counters are per-IBport.
367 * Requires LBIntCnt is at offset 0 in the group
369 #define CREG_IDX(regname) \
370 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
372 #define crp_badformat CREG_IDX(RxVersionErrCnt)
373 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
374 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
375 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
376 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
377 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
378 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
379 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
380 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
381 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
382 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
383 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
384 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
385 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
386 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
387 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
388 #define crp_pktsend CREG_IDX(TxDataPktCnt)
389 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
390 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
391 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
392 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
393 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
394 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
395 #define crp_rcvebp CREG_IDX(RxEBPCnt)
396 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
397 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
398 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
399 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
400 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
401 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
402 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
403 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
404 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
405 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
406 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
407 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
408 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
409 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
410 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
411 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
412 #define crp_wordrcv CREG_IDX(RxDwordCnt)
413 #define crp_wordsend CREG_IDX(TxDwordCnt)
414 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
416 /* these are the (few) counters that are not port-specific */
417 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
418 QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
419 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
420 #define cr_lbint CREG_DEVIDX(LBIntCnt)
421 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
422 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
423 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
424 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
425 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
427 /* no chip register for # of IB ports supported, so define */
428 #define NUM_IB_PORTS 2
430 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
431 #define NUM_VL15_BUFS NUM_IB_PORTS
434 * context 0 and 1 are special, and there is no chip register that
435 * defines this value, so we have to define it here.
436 * These are all allocated to either 0 or 1 for single port
437 * hardware configuration, otherwise each gets half
439 #define KCTXT0_EGRCNT 2048
441 /* values for vl and port fields in PBC, 7322-specific */
442 #define PBC_PORT_SEL_LSB 26
443 #define PBC_PORT_SEL_RMASK 1
444 #define PBC_VL_NUM_LSB 27
445 #define PBC_VL_NUM_RMASK 7
446 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
447 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
449 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
450 [IB_RATE_2_5_GBPS] = 16,
451 [IB_RATE_5_GBPS] = 8,
452 [IB_RATE_10_GBPS] = 4,
453 [IB_RATE_20_GBPS] = 2,
454 [IB_RATE_30_GBPS] = 2,
455 [IB_RATE_40_GBPS] = 1
458 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
459 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
461 /* link training states, from IBC */
462 #define IB_7322_LT_STATE_DISABLED 0x00
463 #define IB_7322_LT_STATE_LINKUP 0x01
464 #define IB_7322_LT_STATE_POLLACTIVE 0x02
465 #define IB_7322_LT_STATE_POLLQUIET 0x03
466 #define IB_7322_LT_STATE_SLEEPDELAY 0x04
467 #define IB_7322_LT_STATE_SLEEPQUIET 0x05
468 #define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
469 #define IB_7322_LT_STATE_CFGRCVFCFG 0x09
470 #define IB_7322_LT_STATE_CFGWAITRMT 0x0a
471 #define IB_7322_LT_STATE_CFGIDLE 0x0b
472 #define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
473 #define IB_7322_LT_STATE_TXREVLANES 0x0d
474 #define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
475 #define IB_7322_LT_STATE_RECOVERIDLE 0x0f
476 #define IB_7322_LT_STATE_CFGENH 0x10
477 #define IB_7322_LT_STATE_CFGTEST 0x11
478 #define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
479 #define IB_7322_LT_STATE_CFGWAITENH 0x13
481 /* link state machine states from IBC */
482 #define IB_7322_L_STATE_DOWN 0x0
483 #define IB_7322_L_STATE_INIT 0x1
484 #define IB_7322_L_STATE_ARM 0x2
485 #define IB_7322_L_STATE_ACTIVE 0x3
486 #define IB_7322_L_STATE_ACT_DEFER 0x4
488 static const u8 qib_7322_physportstate[0x20] = {
489 [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
490 [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
491 [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
492 [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
493 [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
494 [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
495 [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
496 [IB_7322_LT_STATE_CFGRCVFCFG] =
497 IB_PHYSPORTSTATE_CFG_TRAIN,
498 [IB_7322_LT_STATE_CFGWAITRMT] =
499 IB_PHYSPORTSTATE_CFG_TRAIN,
500 [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
501 [IB_7322_LT_STATE_RECOVERRETRAIN] =
502 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
503 [IB_7322_LT_STATE_RECOVERWAITRMT] =
504 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
505 [IB_7322_LT_STATE_RECOVERIDLE] =
506 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
507 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
508 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
509 [IB_7322_LT_STATE_CFGWAITRMTTEST] =
510 IB_PHYSPORTSTATE_CFG_TRAIN,
511 [IB_7322_LT_STATE_CFGWAITENH] =
512 IB_PHYSPORTSTATE_CFG_WAIT_ENH,
513 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
514 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
515 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
516 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
519 struct qib_chip_specific {
520 u64 __iomem *cregbase;
522 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
523 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
524 u64 main_int_mask; /* clear bits which have dedicated handlers */
525 u64 int_enable_mask; /* for per port interrupts in single port mode */
528 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
529 u64 gpio_mask; /* shadow the gpio mask register */
530 u64 extctrl; /* shadow the gpio output enable, etc... */
537 u32 updthresh; /* current AvailUpdThld */
538 u32 updthresh_dflt; /* default AvailUpdThld */
541 u32 num_msix_entries;
545 u32 recovery_ports_initted;
546 struct qib_msix_entry *msix_entries;
547 unsigned long *sendchkenable;
548 unsigned long *sendgrhchk;
549 unsigned long *sendibchk;
550 u32 rcvavail_timeout[18];
551 char emsgbuf[128]; /* for device error interrupt msg buffer */
554 /* Table of entries in "human readable" form Tx Emphasis. */
562 struct vendor_txdds_ent {
563 u8 oui[QSFP_VOUI_LEN];
565 struct txdds_ent sdr;
566 struct txdds_ent ddr;
567 struct txdds_ent qdr;
570 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
572 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
573 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
574 #define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
575 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
577 #define H1_FORCE_VAL 8
578 #define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
579 #define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
581 /* The static and dynamic registers are paired, and the pairs indexed by spd */
582 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
585 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
586 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
587 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
588 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
589 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
591 struct qib_chippport_specific {
592 u64 __iomem *kpregbase;
593 u64 __iomem *cpregbase;
595 struct qib_pportdata *ppd;
596 wait_queue_head_t autoneg_wait;
597 struct delayed_work autoneg_work;
598 struct delayed_work ipg_work;
599 struct timer_list chase_timer;
601 * these 5 fields are used to establish deltas for IB symbol
602 * errors and linkrecovery errors. They can be reported on
603 * some chips during link negotiation prior to INIT, and with
604 * DDR when faking DDR negotiations with non-IBTA switches.
605 * The chip counters are adjusted at driver unload if there is
617 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
618 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
619 unsigned long qdr_dfe_time;
620 unsigned long chase_end;
626 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
627 * entry zero is unused, to simplify indexing
630 u8 no_eep; /* txselect table index to use if no qsfp info */
633 struct qib_qsfp_data qsfp_data;
634 char epmsgbuf[192]; /* for port error interrupt msg buffer */
639 irq_handler_t handler;
641 int port; /* 0 if not port-specific, else port # */
643 { "", qib_7322intr, -1, 0 },
644 { " (buf avail)", qib_7322bufavail,
645 SYM_LSB(IntStatus, SendBufAvail), 0 },
646 { " (sdma 0)", sdma_intr,
647 SYM_LSB(IntStatus, SDmaInt_0), 1 },
648 { " (sdma 1)", sdma_intr,
649 SYM_LSB(IntStatus, SDmaInt_1), 2 },
650 { " (sdmaI 0)", sdma_idle_intr,
651 SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
652 { " (sdmaI 1)", sdma_idle_intr,
653 SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
654 { " (sdmaP 0)", sdma_progress_intr,
655 SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
656 { " (sdmaP 1)", sdma_progress_intr,
657 SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
658 { " (sdmaC 0)", sdma_cleanup_intr,
659 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
660 { " (sdmaC 1)", sdma_cleanup_intr,
661 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
665 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
666 /* cycle through TS1/TS2 till OK */
667 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
668 /* wait for TS1, then go on */
669 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
670 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
672 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
673 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
674 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
676 #define BLOB_7322_IBCHG 0x101
678 static inline void qib_write_kreg(const struct qib_devdata *dd,
679 const u32 regno, u64 value);
680 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
681 static void write_7322_initregs(struct qib_devdata *);
682 static void write_7322_init_portregs(struct qib_pportdata *);
683 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
684 static void check_7322_rxe_status(struct qib_pportdata *);
685 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
688 * qib_read_ureg32 - read 32-bit virtualized per-context register
690 * @regno: register number
691 * @ctxt: context number
693 * Return the contents of a register that is virtualized to be per context.
694 * Returns -1 on errors (not distinguishable from valid contents at
695 * runtime; we may add a separate error variable at some point).
697 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
698 enum qib_ureg regno, int ctxt)
700 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
702 return readl(regno + (u64 __iomem *)(
703 (dd->ureg_align * ctxt) + (dd->userbase ?
704 (char __iomem *)dd->userbase :
705 (char __iomem *)dd->kregbase + dd->uregbase)));
709 * qib_read_ureg - read virtualized per-context register
711 * @regno: register number
712 * @ctxt: context number
714 * Return the contents of a register that is virtualized to be per context.
715 * Returns -1 on errors (not distinguishable from valid contents at
716 * runtime; we may add a separate error variable at some point).
718 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
719 enum qib_ureg regno, int ctxt)
722 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
724 return readq(regno + (u64 __iomem *)(
725 (dd->ureg_align * ctxt) + (dd->userbase ?
726 (char __iomem *)dd->userbase :
727 (char __iomem *)dd->kregbase + dd->uregbase)));
731 * qib_write_ureg - write virtualized per-context register
733 * @regno: register number
737 * Write the contents of a register that is virtualized to be per context.
739 static inline void qib_write_ureg(const struct qib_devdata *dd,
740 enum qib_ureg regno, u64 value, int ctxt)
744 ubase = (u64 __iomem *)
745 ((char __iomem *) dd->userbase +
746 dd->ureg_align * ctxt);
748 ubase = (u64 __iomem *)
750 (char __iomem *) dd->kregbase +
751 dd->ureg_align * ctxt);
753 if (dd->kregbase && (dd->flags & QIB_PRESENT))
754 writeq(value, &ubase[regno]);
757 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
760 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
762 return readl((u32 __iomem *) &dd->kregbase[regno]);
765 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
768 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
770 return readq(&dd->kregbase[regno]);
773 static inline void qib_write_kreg(const struct qib_devdata *dd,
774 const u32 regno, u64 value)
776 if (dd->kregbase && (dd->flags & QIB_PRESENT))
777 writeq(value, &dd->kregbase[regno]);
781 * not many sanity checks for the port-specific kernel register routines,
782 * since they are only used when it's known to be safe.
784 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
787 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
789 return readq(&ppd->cpspec->kpregbase[regno]);
792 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
793 const u16 regno, u64 value)
795 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
796 (ppd->dd->flags & QIB_PRESENT))
797 writeq(value, &ppd->cpspec->kpregbase[regno]);
801 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
802 * @dd: the qlogic_ib device
803 * @regno: the register number to write
804 * @ctxt: the context containing the register
805 * @value: the value to write
807 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
808 const u16 regno, unsigned ctxt,
811 qib_write_kreg(dd, regno + ctxt, value);
814 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
816 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
818 return readq(&dd->cspec->cregbase[regno]);
823 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
825 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
827 return readl(&dd->cspec->cregbase[regno]);
832 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
833 u16 regno, u64 value)
835 if (ppd->cpspec && ppd->cpspec->cpregbase &&
836 (ppd->dd->flags & QIB_PRESENT))
837 writeq(value, &ppd->cpspec->cpregbase[regno]);
840 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
843 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
844 !(ppd->dd->flags & QIB_PRESENT))
846 return readq(&ppd->cpspec->cpregbase[regno]);
/*
 * Read the low 32 bits of a per-port counter register.
 * NOTE(review): the "invalid state" return value (orig. line 854) is not
 * visible in this extract.
 */
849 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
852 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
853 !(ppd->dd->flags & QIB_PRESENT))
855 return readl(&ppd->cpspec->cpregbase[regno]);
858 /* bits in Control register */
859 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
860 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
862 /* bits in general interrupt regs */
/*
 * MASK_ACROSS(0, 17) builds a contiguous 18-bit field: one RcvUrg /
 * RcvAvail interrupt bit per receive context (contexts 0..17, matching
 * the ...0IntMask / ...17IntMask symbols used by INTR_AUTO_C below).
 */
863 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
864 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
865 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
866 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
867 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
868 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
869 #define QIB_I_C_ERROR INT_MASK(Err)
871 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
872 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
873 #define QIB_I_GPIO INT_MASK(AssertGPIO)
/* All SDMA-related interrupt bits for one port (pidx is 0 or 1). */
874 #define QIB_I_P_SDMAINT(pidx) \
875 (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
876 INT_MASK_P(SDmaProgress, pidx) | \
877 INT_MASK_PM(SDmaCleanupDone, pidx))
879 /* Interrupt bits that are "per port" */
880 #define QIB_I_P_BITSEXTANT(pidx) \
881 (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
882 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
883 INT_MASK_P(SDmaProgress, pidx) | \
884 INT_MASK_PM(SDmaCleanupDone, pidx))
886 /* Interrupt bits that are common to a device */
887 /* currently unused: QIB_I_SPIOSENT */
888 #define QIB_I_C_BITSEXTANT \
889 (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
891 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
/* Every interrupt bit the driver knows about: device-common + both ports. */
893 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
894 QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
897 * Error bits that are "per port".
/*
 * QIB_E_P_* names use ERR_MASK_N, which resolves against the per-port
 * ErrMask register layout; QIB_E_* (further below) use ERR_MASK for the
 * device-common ErrMask register.
 */
899 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
900 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
901 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
902 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
903 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
904 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
905 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
906 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
907 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
908 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
909 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
910 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
911 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
912 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
913 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
914 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
915 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
916 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
917 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
918 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
919 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
920 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
921 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
922 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
923 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
924 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
925 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
926 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
/* Per-port SDMA engine error bits. */
928 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
929 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
930 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
931 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
932 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
933 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
934 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
935 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
936 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
937 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
938 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
940 /* Error bits that are common to a device */
941 #define QIB_E_RESET ERR_MASK(ResetNegated)
942 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
943 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
947 * Per chip (rather than per-port) errors. Most either do
948 * nothing but trigger a print (because they self-recover, or
949 * always occur in tandem with other errors that handle the
950 * issue), or because they indicate errors with no recovery,
951 * but we want to know that they happened.
953 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
954 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
955 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
956 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
957 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
958 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
959 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
960 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
962 /* SDMA chip errors (not per port)
963 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
964 * the SDMAHALT error immediately, so we just print the dup error via the
965 * E_AUTO mechanism. This is true of most of the per-port fatal errors
966 * as well, but since this is port-independent, by definition, it's
967 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
968 * packet send errors, and so are handled in the same manner as other
971 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
972 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
973 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
976 * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
977 * it is used to print "common" packet errors.
/* NOTE(review): the trailing term of this OR (orig. line 983) is missing
 * from this extract — confirm against the full source. */
979 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
980 QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
981 QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
982 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
985 /* Error Bits that Packet-related (Receive, per-port) */
986 #define QIB_E_P_RPKTERRS (\
987 QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
988 QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
989 QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
990 QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
991 QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
992 QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
995 * Error bits that are Send-related (per port)
996 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
997 * All of these potentially need to have a buffer disarmed
999 #define QIB_E_P_SPKTERRS (\
1000 QIB_E_P_SUNEXP_PKTNUM |\
1001 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1002 QIB_E_P_SMAXPKTLEN |\
1003 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1004 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1005 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
/* Send-related errors in the device-common (not per-port) error register. */
1007 #define QIB_E_SPKTERRS ( \
1008 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1009 ERR_MASK_N(SendUnsupportedVLErr) | \
1010 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
/* All per-port SDMA engine errors, grouped for sdma_7322_p_errors(). */
1012 #define QIB_E_P_SDMAERRS ( \
1013 QIB_E_P_SDMAHALT | \
1014 QIB_E_P_SDMADESCADDRMISALIGN | \
1015 QIB_E_P_SDMAUNEXPDATA | \
1016 QIB_E_P_SDMAMISSINGDW | \
1017 QIB_E_P_SDMADWEN | \
1018 QIB_E_P_SDMARPYTAG | \
1019 QIB_E_P_SDMA1STDESC | \
1020 QIB_E_P_SDMABASE | \
1021 QIB_E_P_SDMATAILOUTOFBOUND | \
1022 QIB_E_P_SDMAOUTOFBOUND | \
1023 QIB_E_P_SDMAGENMISMATCH)
1026 * This sets some bits more than once, but makes it more obvious which
1027 * bits are not handled under other categories, and the repeat definition
1030 #define QIB_E_P_BITSEXTANT ( \
1031 QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1032 QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1033 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1034 QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1038 * These are errors that can occur when the link
1039 * changes state while a packet is being sent or received. This doesn't
1040 * cover things like EBP or VCRC that can be the result of a sending
1041 * having the link change state, so we receive a "known bad" packet.
1042 * All of these are "per port", so renamed:
/* NOTE(review): the final term of this mask (orig. line 1048) is missing
 * from this extract. */
1044 #define QIB_E_P_LINK_PKTERRS (\
1045 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1046 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1047 QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1051 * This sets some bits more than once, but makes it more obvious which
1052 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1053 * and the repeat definition is not a problem.
1055 #define QIB_E_C_BITSEXTANT (\
1056 QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1057 QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1058 QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1060 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1061 #define E_SPKT_ERRS_IGNORE 0
1063 #define QIB_EXTS_MEMBIST_DISABLED \
1064 SYM_MASK(EXTStatus, MemBISTDisabled)
1065 #define QIB_EXTS_MEMBIST_ENDTEST \
1066 SYM_MASK(EXTStatus, MemBISTEndTest)
1068 #define QIB_E_SPIOARMLAUNCH \
1069 ERR_MASK(SendArmLaunchErr)
1071 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1072 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1075 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1076 * and also if forced QDR (only QDR enabled). It's enabled for the
1077 * forced QDR case so that scrambling will be enabled by the TS3
1078 * exchange, when supported by both sides of the link.
1080 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1081 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1082 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1083 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1084 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1085 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1086 SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1087 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
/* LED blink period fields (receive-packet activity LED). */
1089 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1090 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1092 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1093 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1094 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1096 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1097 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1098 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1099 SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1100 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1101 SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1102 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1104 #define IBA7322_REDIRECT_VEC_PER_REG 12
/* Hardware send-side packet validation enables (per-port SendCheckControl). */
1106 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1107 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1108 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1109 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1110 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1112 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
/*
 * HWE_AUTO/HWE_AUTO_P build { mask, message, size } entries from the
 * HwErrMask field name; .sz includes the terminating NUL (sizeof on a
 * string literal), which err_decode() relies on below.
 */
1114 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1115 .msg = #fldname , .sz = sizeof(#fldname) }
1116 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1117 fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
/*
 * Table of decodable hardware-error bits; terminated by a zero-mask entry.
 * NOTE(review): IBCBusToSPCParityErr appears only for port 1 here —
 * confirm against the full source whether a port-0 entry was dropped by
 * this extract or is intentionally absent.
 */
1118 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1119 HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1120 HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1121 HWE_AUTO(PCIESerdesPClkNotDetect),
1122 HWE_AUTO(PowerOnBISTFailed),
1123 HWE_AUTO(TempsenseTholdReached),
1124 HWE_AUTO(MemoryErr),
1125 HWE_AUTO(PCIeBusParityErr),
1126 HWE_AUTO(PcieCplTimeout),
1127 HWE_AUTO(PciePoisonedTLP),
1128 HWE_AUTO_P(SDmaMemReadErr, 1),
1129 HWE_AUTO_P(SDmaMemReadErr, 0),
1130 HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1131 HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1132 HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1133 HWE_AUTO(statusValidNoEop),
1134 HWE_AUTO(LATriggered),
1135 { .mask = 0, .sz = 0 }
/* Entry builders for the device-common and per-port error tables. */
1138 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1139 .msg = #fldname, .sz = sizeof(#fldname) }
1140 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1141 .msg = #fldname, .sz = sizeof(#fldname) }
/* Device-common (not per-port) error decode table; zero-mask terminated. */
1142 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1143 E_AUTO(RcvEgrFullErr),
1144 E_AUTO(RcvHdrFullErr),
1145 E_AUTO(ResetNegated),
1146 E_AUTO(HardwareErr),
1147 E_AUTO(InvalidAddrErr),
1148 E_AUTO(SDmaVL15Err),
1149 E_AUTO(SBufVL15MisUseErr),
1150 E_AUTO(InvalidEEPCmd),
1151 E_AUTO(RcvContextShareErr),
1152 E_AUTO(SendVLMismatchErr),
1153 E_AUTO(SendArmLaunchErr),
1154 E_AUTO(SendSpecialTriggerErr),
1155 E_AUTO(SDmaWrongPortErr),
1156 E_AUTO(SDmaBufMaskDuplicateErr),
1157 { .mask = 0, .sz = 0 }
/* Per-port error decode table; zero-mask terminated. */
1160 static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1161 E_P_AUTO(IBStatusChanged),
1162 E_P_AUTO(SHeadersErr),
1163 E_P_AUTO(VL15BufMisuseErr),
1165 * SDmaHaltErr is not really an error, make it clearer;
/* Hand-written entry: reports "SDmaHalted" instead of the raw field name.
 * NOTE(review): its .sz initializer (orig. line 1168) is missing from
 * this extract. */
1167 {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1169 E_P_AUTO(SDmaDescAddrMisalignErr),
1170 E_P_AUTO(SDmaUnexpDataErr),
1171 E_P_AUTO(SDmaMissingDwErr),
1172 E_P_AUTO(SDmaDwEnErr),
1173 E_P_AUTO(SDmaRpyTagErr),
1174 E_P_AUTO(SDma1stDescErr),
1175 E_P_AUTO(SDmaBaseErr),
1176 E_P_AUTO(SDmaTailOutOfBoundErr),
1177 E_P_AUTO(SDmaOutOfBoundErr),
1178 E_P_AUTO(SDmaGenMismatchErr),
1179 E_P_AUTO(SendBufMisuseErr),
1180 E_P_AUTO(SendUnsupportedVLErr),
1181 E_P_AUTO(SendUnexpectedPktNumErr),
1182 E_P_AUTO(SendDroppedDataPktErr),
1183 E_P_AUTO(SendDroppedSmpPktErr),
1184 E_P_AUTO(SendPktLenErr),
1185 E_P_AUTO(SendUnderRunErr),
1186 E_P_AUTO(SendMaxPktLenErr),
1187 E_P_AUTO(SendMinPktLenErr),
1188 E_P_AUTO(RcvIBLostLinkErr),
1189 E_P_AUTO(RcvHdrErr),
1190 E_P_AUTO(RcvHdrLenErr),
1191 E_P_AUTO(RcvBadTidErr),
1192 E_P_AUTO(RcvBadVersionErr),
1193 E_P_AUTO(RcvIBFlowErr),
1194 E_P_AUTO(RcvEBPErr),
1195 E_P_AUTO(RcvUnsupportedVLErr),
1196 E_P_AUTO(RcvUnexpectedCharErr),
1197 E_P_AUTO(RcvShortPktLenErr),
1198 E_P_AUTO(RcvLongPktLenErr),
1199 E_P_AUTO(RcvMaxPktLenErr),
1200 E_P_AUTO(RcvMinPktLenErr),
1201 E_P_AUTO(RcvICRCErr),
1202 E_P_AUTO(RcvVCRCErr),
1203 E_P_AUTO(RcvFormatErr),
1204 { .mask = 0, .sz = 0 }
1208 * Below generates "auto-message" for interrupts not specific to any port or
1211 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1212 .msg = #fldname, .sz = sizeof(#fldname) }
1213 /* Below generates "auto-message" for interrupts specific to a port */
1214 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1215 SYM_LSB(IntMask, fldname##Mask##_0), \
1216 SYM_LSB(IntMask, fldname##Mask##_1)), \
1217 .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1218 /* For some reason, the SerDesTrimDone bits are reversed */
/* Same as INTR_AUTO_P but with the _1/_0 bit positions swapped. */
1219 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1220 SYM_LSB(IntMask, fldname##Mask##_1), \
1221 SYM_LSB(IntMask, fldname##Mask##_0)), \
1222 .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1224 * Below generates "auto-message" for interrupts specific to a context,
1225 * with ctxt-number appended
/* Spans contexts 0..17 in one mask entry. */
1227 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1228 SYM_LSB(IntMask, fldname##0IntMask), \
1229 SYM_LSB(IntMask, fldname##17IntMask)), \
1230 .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
/* Interrupt decode table (both per-port and device-common entries). */
1232 static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1233 INTR_AUTO_P(SDmaInt),
1234 INTR_AUTO_P(SDmaProgressInt),
1235 INTR_AUTO_P(SDmaIdleInt),
1236 INTR_AUTO_P(SDmaCleanupDone),
1237 INTR_AUTO_C(RcvUrg),
1238 INTR_AUTO_P(ErrInt),
1239 INTR_AUTO(ErrInt), /* non-port-specific errs */
1240 INTR_AUTO(AssertGPIOInt),
1241 INTR_AUTO_P(SendDoneInt),
1242 INTR_AUTO(SendBufAvailInt),
1243 INTR_AUTO_C(RcvAvail),
1244 { .mask = 0, .sz = 0 }
/* Send-header-check failure symptoms, decoded from SendHdrErrSymptom. */
1247 #define TXSYMPTOM_AUTO_P(fldname) \
1248 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1249 .msg = #fldname, .sz = sizeof(#fldname) }
1250 static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1251 TXSYMPTOM_AUTO_P(NonKeyPacket),
1252 TXSYMPTOM_AUTO_P(GRHFail),
1253 TXSYMPTOM_AUTO_P(PkeyFail),
1254 TXSYMPTOM_AUTO_P(QPFail),
1255 TXSYMPTOM_AUTO_P(SLIDFail),
1256 TXSYMPTOM_AUTO_P(RawIPV6),
1257 TXSYMPTOM_AUTO_P(PacketTooSmall),
1258 { .mask = 0, .sz = 0 }
1261 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1264 * Called when we might have an error that is specific to a particular
1265 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1266 * because we don't need to force the update of pioavail
1268 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1270 struct qib_devdata *dd = ppd->dd;
/* Total PIO buffers: 2KB + 4KB pools plus the reserved VL15 buffers. */
1273 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1274 u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1275 unsigned long sbuf[4];
1278 * It's possible that sendbuffererror could have bits set; might
1279 * have already done this as a result of hardware error handling.
/* Snapshot each sendbuffererror word, then write the same bits back,
 * presumably write-1-to-clear — TODO confirm against chip docs. */
1282 for (i = 0; i < regcnt; ++i) {
1283 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1286 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
/* Hand the collected buffer bitmap to the generic disarm logic. */
1291 qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1294 /* No txe_recover yet, if ever */
1296 /* No decode__errors yet */
/*
 * Build a comma-separated, human-readable list of the error names whose
 * bits are set in 'errs', using the zero-mask-terminated table 'msp'.
 * Bits not covered by any table entry are appended as "MORE:<hex>".
 */
1297 static void err_decode(char *msg, size_t len, u64 errs,
1298 const struct qib_hwerror_msgs *msp)
1301 int took, multi, n = 0;
1303 while (errs && msp && msp->mask) {
/* multi is nonzero iff this entry's mask covers more than one bit. */
1304 multi = (msp->mask & (msp->mask - 1));
1305 while (errs & msp->mask) {
1306 these = (errs & msp->mask);
/* lmask = lowest set bit of 'these' (x & (x-1) clears it; XOR isolates it). */
1307 lmask = (these & (these - 1)) ^ these;
1310 /* separate the strings */
1315 /* msp->sz counts the nul */
1316 took = min_t(size_t, msp->sz - (size_t)1, len);
1317 memcpy(msg, msp->msg, took);
1325 /* More than one bit this mask */
/* Append the bit index within the mask, e.g. "_3". */
1328 while (lmask & msp->mask) {
1332 took = scnprintf(msg, len, "_%d", idx);
1339 /* If some bits are left, show in hex. */
1341 snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1342 (unsigned long long) errs)
1345 /* only called if r1 set */
/*
 * Work around a rev-1 chip issue by pushing a dummy VL15 UD packet into a
 * PIO buffer to flush the launch FIFO; TxeBypassIbc (set by the DRAIN op
 * in qib_7322_sdma_sendctrl) keeps it from going on the wire.
 */
1346 static void flush_fifo(struct qib_pportdata *ppd)
1348 struct qib_devdata *dd = ppd->dd;
1349 u32 __iomem *piobuf;
1353 const unsigned hdrwords = 7;
/* Minimal UD SEND-only header addressed with permissive LIDs. */
1354 static struct qib_ib_header ibhdr = {
1355 .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1356 .lrh[1] = IB_LID_PERMISSIVE,
1357 .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1358 .lrh[3] = IB_LID_PERMISSIVE,
1359 .u.oth.bth[0] = cpu_to_be32(
1360 (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1361 .u.oth.bth[1] = cpu_to_be32(0),
1362 .u.oth.bth[2] = cpu_to_be32(0),
1363 .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1364 .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1368 * Send a dummy VL15 packet to flush the launch FIFO.
1369 * This will not actually be sent since the TxeBypassIbc bit is set.
/* PBC selects VL15 send, the hardware port, and the word count. */
1371 pbc = PBC_7322_VL15_SEND |
1372 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1373 (hdrwords + SIZE_OF_CRC);
1374 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1377 writeq(pbc, piobuf);
1378 hdr = (u32 *) &ibhdr;
/* With write-combining quirks, copy all but the last word, then write the
 * final word separately so the flush ordering is correct. */
1379 if (dd->flags & QIB_PIO_FLUSH_WC) {
1381 qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1383 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1386 qib_pio_copy(piobuf + 2, hdr, hdrwords);
1387 qib_sendbuf_done(dd, bufn);
1391 * This is called with interrupts disabled and sdma_lock held.
/*
 * Apply an SDMA sendctrl operation bitmask: translate the op flags into
 * SendCtrl set/clear masks, then update the shadow copy and the chip under
 * sendctrl_lock. kr_scratch writes after each SendCtrl write appear to
 * flush the update to the chip — confirm against chip documentation.
 */
1393 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1395 struct qib_devdata *dd = ppd->dd;
1396 u64 set_sendctrl = 0;
1397 u64 clr_sendctrl = 0;
1399 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1400 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1402 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1404 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1405 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1407 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1409 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1410 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1412 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
/* DRAIN toggles three Txe bits together: bypass IBC, abort, drain FIFO. */
1414 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1415 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1416 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1417 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1419 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1420 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1421 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1423 spin_lock(&dd->sendctrl_lock);
1425 /* If we are draining everything, block sends first */
1426 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1427 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1428 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1429 qib_write_kreg(dd, kr_scratch, 0);
1432 ppd->p_sendctrl |= set_sendctrl;
1433 ppd->p_sendctrl &= ~clr_sendctrl;
/* CLEANUP writes SDmaCleanup as a one-shot, not into the shadow copy. */
1435 if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1436 qib_write_kreg_port(ppd, krp_sendctrl,
1438 SYM_MASK(SendCtrl_0, SDmaCleanup));
1440 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1441 qib_write_kreg(dd, kr_scratch, 0);
/* Re-enable sends after a drain. */
1443 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1444 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1445 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1446 qib_write_kreg(dd, kr_scratch, 0);
1449 spin_unlock(&dd->sendctrl_lock);
/* Rev-1 chips additionally need the launch FIFO flushed after a drain
 * (call target on orig. line 1452 is not visible in this extract). */
1451 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
/* Hardware cleanup finished: advance the SDMA state machine. */
1455 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1457 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1460 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1463 * Set SendDmaLenGen and clear and set
1464 * the MSB of the generation count to enable generation checking
1465 * and load the internal generation counter.
/* Two writes on purpose: first with generation MSB clear, then set. */
1467 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1468 qib_write_kreg_port(ppd, krp_senddmalengen,
1469 ppd->sdma_descq_cnt |
1470 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1474 * Must be called with sdma_lock held, or before init finished.
/* Publish the new descriptor-queue tail to both the shadow and the chip. */
1476 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1478 /* Commit writes to memory and advance the tail on the chip */
1480 ppd->sdma_descq_tail = tail;
1481 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1485 * This is called with interrupts disabled and sdma_lock held.
/*
 * Re-initialize the SDMA engine for a fresh start: flush stale send
 * state, reload length/generation, zero the tail and head shadow, then
 * run a CLEANUP cycle on top of the current sendctrl op state.
 */
1487 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1491 * The hardware doesn't require this but we do it so that verbs
1492 * and user applications don't wait for link active to send stale
1495 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1497 qib_sdma_7322_setlengen(ppd);
1498 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1499 ppd->sdma_head_dma[0] = 0;
1500 qib_7322_sdma_sendctrl(ppd,
1501 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
/* Per-port SDMA errors that halt/disable the engine (same bits as
 * QIB_E_P_SDMAERRS minus QIB_E_P_SDMAUNEXPDATA). */
1504 #define DISABLES_SDMA ( \
1505 QIB_E_P_SDMAHALT | \
1506 QIB_E_P_SDMADESCADDRMISALIGN | \
1507 QIB_E_P_SDMAMISSINGDW | \
1508 QIB_E_P_SDMADWEN | \
1509 QIB_E_P_SDMARPYTAG | \
1510 QIB_E_P_SDMA1STDESC | \
1511 QIB_E_P_SDMABASE | \
1512 QIB_E_P_SDMATAILOUTOFBOUND | \
1513 QIB_E_P_SDMAOUTOFBOUND | \
1514 QIB_E_P_SDMAGENMISMATCH)
/*
 * Handle per-port SDMA errors by feeding the appropriate event into the
 * SDMA state machine, depending on its current state. SDmaUnexpData is
 * additionally logged, since it indicates a chip-level problem.
 */
1516 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1518 unsigned long flags;
1519 struct qib_devdata *dd = ppd->dd;
1521 errs &= QIB_E_P_SDMAERRS;
1523 if (errs & QIB_E_P_SDMAUNEXPDATA)
1524 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1527 spin_lock_irqsave(&ppd->sdma_lock, flags);
1529 switch (ppd->sdma_state.current_state) {
1530 case qib_sdma_state_s00_hw_down:
/* Engine down: nothing to do. */
1533 case qib_sdma_state_s10_hw_start_up_wait:
/* A halt during start-up means the hardware has come up. */
1534 if (errs & QIB_E_P_SDMAHALT)
1535 __qib_sdma_process_event(ppd,
1536 qib_sdma_event_e20_hw_started)
1539 case qib_sdma_state_s20_idle:
1542 case qib_sdma_state_s30_sw_clean_up_wait:
1545 case qib_sdma_state_s40_hw_clean_up_wait:
1546 if (errs & QIB_E_P_SDMAHALT)
1547 __qib_sdma_process_event(ppd,
1548 qib_sdma_event_e50_hw_cleaned)
1551 case qib_sdma_state_s50_hw_halt_wait:
1552 if (errs & QIB_E_P_SDMAHALT)
1553 __qib_sdma_process_event(ppd,
1554 qib_sdma_event_e60_hw_halted)
1557 case qib_sdma_state_s99_running:
/* Running engine hit an error: report chip-specific halt then generic. */
1558 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1559 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1563 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1567 * handle per-device errors (not per-port errors)
/*
 * Read, clear, and decode the device-common error register, taking the
 * actions the individual bits require (hardware-error handler, PIO buffer
 * disarm, reset handling, wakeup of pollers on receive-queue overflow).
 */
1569 static noinline void handle_7322_errors(struct qib_devdata *dd)
1577 qib_stats.sps_errints++;
1578 errs = qib_read_kreg64(dd, kr_errstatus);
/* Spurious interrupt: no error bits were actually set. */
1580 qib_devinfo(dd->pcidev, "device error interrupt, "
1581 "but no error bits set!\n");
1585 /* don't report errors that are masked */
1586 errs &= dd->cspec->errormask;
1587 msg = dd->cspec->emsgbuf;
1589 /* do these first, they are most important */
1590 if (errs & QIB_E_HARDWARE) {
1592 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
/* Count EEPROM-loggable error categories. */
1594 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1595 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1596 qib_inc_eeprom_err(dd, log_idx, 1);
/* Send-side errors may leave armed PIO buffers that must be disarmed. */
1598 if (errs & QIB_E_SPKTERRS) {
1599 qib_disarm_7322_senderrbufs(dd->pport);
1600 qib_stats.sps_txerrs++;
1601 } else if (errs & QIB_E_INVALIDADDR)
1602 qib_stats.sps_txerrs++;
1603 else if (errs & QIB_E_ARMLAUNCH) {
1604 qib_stats.sps_txerrs++;
1605 qib_disarm_7322_senderrbufs(dd->pport);
/* Acknowledge everything we saw. */
1607 qib_write_kreg(dd, kr_errclear, errs);
1610 * The ones we mask off are handled specially below
1611 * or above. Also mask SDMADISABLED by default as it
1614 mask = QIB_E_HARDWARE;
1617 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1618 qib_7322error_msgs);
1621 * Getting reset is a tragedy for all ports. Mark the device
1622 * _and_ the ports as "offline" in way meaningful to each.
1624 if (errs & QIB_E_RESET) {
1627 qib_dev_err(dd, "Got reset, requires re-init "
1628 "(unload and reload driver)\n");
1629 dd->flags &= ~QIB_INITTED; /* needs re-init */
1630 /* mark as having had error */
1631 *dd->devstatusp |= QIB_STATUS_HWERROR;
1632 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1633 if (dd->pport[pidx].link_speed_supported)
1634 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1638 qib_dev_err(dd, "%s error\n", msg);
1641 * If there were hdrq or egrfull errors, wake up any processes
1642 * waiting in poll. We used to try to check which contexts had
1643 * the overflow, but given the cost of that and the chip reads
1644 * to support it, it's better to just wake everybody up if we
1645 * get an overflow; waiters can poll again if it's not them.
1647 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1648 qib_handle_urcv(dd, ~0U);
1649 if (errs & ERR_MASK(RcvEgrFullErr))
1650 qib_stats.sps_buffull++;
1652 qib_stats.sps_hdrfull++;
/* Tasklet body: process device errors, then re-enable the error mask
 * (which the interrupt path presumably cleared — confirm with caller). */
1659 static void qib_error_tasklet(unsigned long data)
1661 struct qib_devdata *dd = (struct qib_devdata *)data;
1663 handle_7322_errors(dd);
1664 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
/* Timer callback: the chase-disable period expired; resume link polling. */
1667 static void reenable_chase(unsigned long opaque)
1669 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1671 ppd->cpspec->chase_timer.expires = 0;
1672 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1673 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
/* Break out of a link-training "chase": disable the link and arm a timer
 * (reenable_chase) to re-enable polling after QIB_CHASE_DIS_TIME. */
1676 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1679 ppd->cpspec->chase_end = 0;
1684 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1685 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1686 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1687 add_timer(&ppd->cpspec->chase_timer);
/*
 * Examine the IBC link-training state on an IB status change and work
 * around SerDes issues: the training-state "chase" lockup, QDR LOS and
 * adaptation handling, and QMH/QME-specific QDR behavior.
 */
1690 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1695 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1698 * Detect and handle the state chase issue, where we can
1699 * get stuck if we are unlucky on timing on both sides of
1700 * the link. If we are, we disable, set a timer, and
/* These training states are the ones the chase can get stuck in. */
1704 case IB_7322_LT_STATE_CFGRCVFCFG:
1705 case IB_7322_LT_STATE_CFGWAITRMT:
1706 case IB_7322_LT_STATE_TXREVLANES:
1707 case IB_7322_LT_STATE_CFGENH:
1709 if (ppd->cpspec->chase_end &&
1710 time_after(tnow, ppd->cpspec->chase_end))
1711 disable_chase(ppd, tnow, ibclt);
1712 else if (!ppd->cpspec->chase_end)
1713 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1716 ppd->cpspec->chase_end = 0;
/* At QDR in (or heading to) link-up: disable LOS on non-rev-1 chips. */
1720 if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1721 ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1722 ibclt == IB_7322_LT_STATE_LINKUP) &&
1723 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1725 ppd->cpspec->qdr_reforce = 1;
1726 if (!ppd->dd->cspec->r1)
1727 serdes_7322_los_enable(ppd, 0);
1728 } else if (ppd->cpspec->qdr_reforce &&
1729 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1730 (ibclt == IB_7322_LT_STATE_CFGENH ||
1731 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1732 ibclt == IB_7322_LT_STATE_LINKUP))
/* (the action taken here, orig. lines 1733-1734, is not visible) */
1735 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1736 ppd->link_speed_enabled == QIB_IB_QDR &&
1737 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1738 ibclt == IB_7322_LT_STATE_CFGENH ||
1739 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1740 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1743 if (ibclt != IB_7322_LT_STATE_LINKUP) {
1744 u8 ltstate = qib_7322_phys_portstate(ibcst);
1745 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1747 if (!ppd->dd->cspec->r1 &&
1748 pibclt == IB_7322_LT_STATE_LINKUP &&
1749 ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1750 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1751 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1752 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1753 /* If the link went down (but not into recovery),
1754 * turn LOS back on */
1755 serdes_7322_los_enable(ppd, 1);
1756 if (!ppd->cpspec->qdr_dfe_on &&
1757 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1758 ppd->cpspec->qdr_dfe_on = 1;
1759 ppd->cpspec->qdr_dfe_time = 0;
1760 /* On link down, reenable QDR adaptation */
1761 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1762 ppd->dd->cspec->r1 ?
1763 QDR_STATIC_ADAPT_DOWN_R1 :
1764 QDR_STATIC_ADAPT_DOWN);
1765 printk(KERN_INFO QIB_DRV_NAME
1766 " IB%u:%u re-enabled QDR adaptation "
1767 "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
1772 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1775 * This is per-pport error handling.
1776 * will likely get its own MSIx interrupt (one for each port,
1777 * although just a single handler).
/*
 * NOTE(review): reads krp_errstatus, decodes/reports the bits, clears
 * them via krp_errclear, bumps rcv/tx error stats, and handles the
 * IBSTATUSCHANGED special case (serdes issues, width/speed shadow
 * update, recovery-state filtering). Register read/write ordering is
 * significant here; do not reorder statements.
 */
1779 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1782 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1783 struct qib_devdata *dd = ppd->dd;
1785 /* do this as soon as possible */
1786 fmask = qib_read_kreg64(dd, kr_act_fmask);
1788 check_7322_rxe_status(ppd);
1790 errs = qib_read_kreg_port(ppd, krp_errstatus);
1792 qib_devinfo(dd->pcidev,
1793 "Port%d error interrupt, but no error bits set!\n",
/* status-change is handled separately below; drop it from the decode */
1796 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1800 msg = ppd->cpspec->epmsgbuf;
/* complain about any bits outside the known-extant set */
1803 if (errs & ~QIB_E_P_BITSEXTANT) {
1804 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1805 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1807 snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1809 qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1810 " errors 0x%016Lx set (and %s)\n",
1811 (errs & ~QIB_E_P_BITSEXTANT), msg);
1815 if (errs & QIB_E_P_SHDR) {
1818 /* determine cause, then write to clear */
1819 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1820 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1821 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1824 /* senderrbuf cleared in SPKTERRS below */
1827 if (errs & QIB_E_P_SPKTERRS) {
1828 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1829 !(ppd->lflags & QIBL_LINKACTIVE)) {
1831 * This can happen when trying to bring the link
1832 * up, but the IB link changes state at the "wrong"
1833 * time. The IB logic then complains that the packet
1834 * isn't valid. We don't want to confuse people, so
1835 * we just don't print them, except at debug
1837 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1838 (errs & QIB_E_P_LINK_PKTERRS),
1839 qib_7322p_error_msgs);
1841 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1843 qib_disarm_7322_senderrbufs(ppd);
1844 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1845 !(ppd->lflags & QIBL_LINKACTIVE)) {
1847 * This can happen when SMA is trying to bring the link
1848 * up, but the IB link changes state at the "wrong" time.
1849 * The IB logic then complains that the packet isn't
1850 * valid. We don't want to confuse people, so we just
1851 * don't print them, except at debug
1853 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1854 qib_7322p_error_msgs);
1855 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
/* ack everything we saw, then forget the ignorable link-pkt errors */
1859 qib_write_kreg_port(ppd, krp_errclear, errs);
1861 errs &= ~ignore_this_time;
1865 if (errs & QIB_E_P_RPKTERRS)
1866 qib_stats.sps_rcverrs++;
1867 if (errs & QIB_E_P_SPKTERRS)
1868 qib_stats.sps_txerrs++;
/* iserr = anything left that is not a routine packet error */
1870 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1872 if (errs & QIB_E_P_SDMAERRS)
1873 sdma_7322_p_errors(ppd, errs);
1875 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1879 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1880 ltstate = qib_7322_phys_portstate(ibcs);
1882 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1883 handle_serdes_issues(ppd, ibcs);
1884 if (!(ppd->cpspec->ibcctrl_a &
1885 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1887 * We got our interrupt, so init code should be
1888 * happy and not try alternatives. Now squelch
1889 * other "chatter" from link-negotiation (pre Init)
1891 ppd->cpspec->ibcctrl_a |=
1892 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1893 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1894 ppd->cpspec->ibcctrl_a);
1897 /* Update our picture of width and speed from chip */
1898 ppd->link_width_active =
1899 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1900 IB_WIDTH_4X : IB_WIDTH_1X;
1901 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1902 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1903 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1904 QIB_IB_DDR : QIB_IB_SDR;
/* if user asked for DISABLED, re-assert it whenever chip leaves it */
1906 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1907 IB_PHYSPORTSTATE_DISABLED)
1908 qib_set_ib_7322_lstate(ppd, 0,
1909 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1912 * Since going into a recovery state causes the link
1913 * state to go down and since recovery is transitory,
1914 * it is better if we "miss" ever seeing the link
1915 * training state go into recovery (i.e., ignore this
1916 * transition for link state special handling purposes)
1917 * without updating lastibcstat.
1919 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1920 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1921 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1922 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1923 qib_handle_e_ibstatuschanged(ppd, ibcs);
1926 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
/* wake anyone waiting in qib_wait_linkstate() for this transition */
1928 if (ppd->state_wanted & ppd->lflags)
1929 wake_up_interruptible(&ppd->state_wait);
1934 /* enable/disable chip from delivering interrupts */
/*
 * NOTE(review): enable path writes the full enable mask, then pokes
 * kr_intclear / kr_intgranted so anything pending is re-delivered;
 * disable path simply writes a zero mask. No-op on QIB_BADINTR chips.
 */
1935 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1938 if (dd->flags & QIB_BADINTR)
1940 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1941 /* cause any pending enabled interrupts to be re-delivered */
1942 qib_write_kreg(dd, kr_intclear, 0ULL);
1943 if (dd->cspec->num_msix_entries) {
1944 /* and same for MSIx */
1945 u64 val = qib_read_kreg64(dd, kr_intgranted);
/* write-back of granted bits retriggers those MSIx vectors */
1947 qib_write_kreg(dd, kr_intgranted, val);
1950 qib_write_kreg(dd, kr_intmask, 0ULL);
1954 * Try to cleanup as much as possible for anything that might have gone
1955 * wrong while in freeze mode, such as pio buffers being written by user
1956 * processes (causing armlaunch), send errors due to going into freeze mode,
1957 * etc., and try to avoid causing extra interrupts while doing so.
1958 * Forcibly update the in-memory pioavail register copies after cleanup
1959 * because the chip won't do it while in freeze mode (the register values
1960 * themselves are kept correct).
1961 * Make sure that we don't lose any important interrupts by using the chip
1962 * feature that says that writing 0 to a bit in *clear that is set in
1963 * *status will cause an interrupt to be generated again (if allowed by
1965 * This is in chip-specific code because of all of the register accesses,
1966 * even though the details are similar on most chips.
1968 static void qib_7322_clear_freeze(struct qib_devdata *dd)
1972 /* disable error interrupts, to avoid confusion */
1973 qib_write_kreg(dd, kr_errmask, 0ULL);
/* silence each active port's error mask too */
1975 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1976 if (dd->pport[pidx].link_speed_supported)
1977 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
1980 /* also disable interrupts; errormask is sometimes overwritten */
1981 qib_7322_set_intr_state(dd, 0);
1983 /* clear the freeze, and be sure chip saw it */
1984 qib_write_kreg(dd, kr_control, dd->control);
1985 qib_read_kreg32(dd, kr_scratch);
1988 * Force new interrupt if any hwerr, error or interrupt bits are
1989 * still set, and clear "safe" send packet errors related to freeze
1990 * and cancelling sends. Re-enable error interrupts before possible
1991 * force of re-interrupt on pending interrupts.
1993 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1994 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1995 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1996 /* We need to purge per-port errs and reset mask, too */
1997 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1998 if (!dd->pport[pidx].link_speed_supported)
2000 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2001 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2003 qib_7322_set_intr_state(dd, 1);
2006 /* no error handling to speak of */
2008 * qib_7322_handle_hwerrors - display hardware errors.
2009 * @dd: the qlogic_ib device
2010 * @msg: the output buffer
2011 * @msgl: the size of the output buffer
2013 * Use same msg buffer as regular errors to avoid excessive stack
2014 * use. Most hardware errors are catastrophic, but for right now,
2015 * we'll print them and continue. We reuse the same message buffer as
2016 * qib_handle_errors() to avoid excessive stack usage.
2018 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2025 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
/* all-ones read means the chip/PCIe is gone; nothing useful to do */
2028 if (hwerrs == ~0ULL) {
2029 qib_dev_err(dd, "Read of hardware error status failed "
2030 "(all bits set); ignoring\n");
2033 qib_stats.sps_hwerrs++;
2035 /* Always clear the error status register, except BIST fail */
2036 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2037 ~HWE_MASK(PowerOnBISTFailed));
/* only act on errors we haven't masked off */
2039 hwerrs &= dd->cspec->hwerrmask;
2041 /* no EEPROM logging, yet */
2044 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
2045 "(cleared)\n", (unsigned long long) hwerrs);
2047 ctrl = qib_read_kreg32(dd, kr_control);
2048 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2050 * No recovery yet...
2052 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2053 dd->cspec->stay_in_freeze) {
2055 * If any set that we aren't ignoring only make the
2056 * complaint once, in case it's stuck or recurring,
2057 * and we get here multiple times
2058 * Force link down, so switch knows, and
2059 * LEDs are turned off.
2061 if (dd->flags & QIB_INITTED)
/* benign freeze (e.g. LATriggered only): just unfreeze */
2064 qib_7322_clear_freeze(dd);
2067 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2069 strlcpy(msg, "[Memory BIST test failed, "
2070 "InfiniPath hardware unusable]", msgl);
2071 /* ignore from now on, so disable until driver reloaded */
2072 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2073 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2076 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2078 /* Ignore esoteric PLL failures et al. */
2080 qib_dev_err(dd, "%s hardware error\n", msg);
2082 if (isfatal && !dd->diag_client) {
2083 qib_dev_err(dd, "Fatal Hardware Error, no longer"
2084 " usable, SN %.16s\n", dd->serial);
2086 * for /sys status file and user programs to print; if no
2087 * trailing brace is copied, we'll know it was truncated.
2090 snprintf(dd->freezemsg, dd->freezelen,
2092 qib_disable_after_error(dd);
2098 * qib_7322_init_hwerrors - enable hardware errors
2099 * @dd: the qlogic_ib device
2101 * now that we have finished initializing everything that might reasonably
2102 * cause a hardware error, and cleared those errors bits as they occur,
2103 * we can enable hardware errors in the mask (potentially enabling
2104 * freeze mode), and enable hardware errors as errors (along with
2105 * everything else) in errormask
2107 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
/* check that memory BIST either finished or was explicitly disabled */
2112 extsval = qib_read_kreg64(dd, kr_extstatus);
2113 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2114 QIB_EXTS_MEMBIST_ENDTEST)))
2115 qib_dev_err(dd, "MemBIST did not complete!\n");
2117 /* never clear BIST failure, so reported on each driver load */
2118 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2119 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2122 qib_write_kreg(dd, kr_errclear, ~0ULL);
2123 /* enable errors that are masked, at least this first time. */
2124 qib_write_kreg(dd, kr_errmask, ~0ULL);
/* read back so the shadow reflects what the chip actually supports */
2125 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2126 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2127 if (dd->pport[pidx].link_speed_supported)
2128 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2133 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
2134 * on chips that are count-based, rather than trigger-based. There is no
2135 * reference counting, but that's also fine, given the intended use.
2136 * Only chip-specific because it's all register accesses
2138 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
/* enable path: clear any stale armlaunch error before re-arming it */
2141 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2142 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2144 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
/* push the updated shadow mask to the chip in either case */
2145 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2149 * Formerly took parameter <which> in pre-shifted,
2150 * pre-merged form with LinkCmd and LinkInitCmd
2151 * together, and assuming the zero was NOP.
/*
 * NOTE(review): issues a link command and/or link-init command to the
 * IBC, maintaining the QIBL_IB_LINK_DISABLED lflag under lflags_lock so
 * link-recovery code knows whether to try to bring the link back up.
 */
2153 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2157 struct qib_devdata *dd = ppd->dd;
2158 unsigned long flags;
2160 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2162 * If we are told to disable, note that so link-recovery
2163 * code does not attempt to bring us back up.
2164 * Also reset everything that we can, so we start
2165 * completely clean when re-enabled (before we
2166 * actually issue the disable to the IBC)
2168 qib_7322_mini_pcs_reset(ppd);
2169 spin_lock_irqsave(&ppd->lflags_lock, flags);
2170 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2171 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2172 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2174 * Any other linkinitcmd will lead to LINKDOWN and then
2175 * to INIT (if all is well), so clear flag to let
2176 * link-recovery code attempt to bring us back up.
2178 spin_lock_irqsave(&ppd->lflags_lock, flags);
2179 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2180 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2182 * Clear status change interrupt reduction so the
2183 * new state is seen.
2185 ppd->cpspec->ibcctrl_a &=
2186 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
/* merge both commands into the one-shot ibcctrl_a write below */
2189 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2190 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2192 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2194 /* write to chip to prevent back-to-back writes of ibc reg */
2195 qib_write_kreg(dd, kr_scratch, 0);
2200 * The total RCV buffer memory is 64KB, used for both ports, and is
2201 * in units of 64 bytes (same as IB flow control credit unit).
2202 * The consumedVL unit in the same registers are in 32 byte units!
2203 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2204 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2205 * in krp_rxcreditvl15, rather than 10.
2207 #define RCV_BUF_UNITSZ 64
2208 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
/*
 * NOTE(review): distributes the port's receive-buffer credits across the
 * operational VLs (VL15 gets a fixed 2-packet allowance, VL0 gets the
 * rounding remainder, unused VLs get zero), then pulses CREDIT_CHANGE
 * so the IBC recomputes credits, and updates NumVLane in ibcctrl_a.
 */
2210 static void set_vls(struct qib_pportdata *ppd)
2212 int i, numvls, totcred, cred_vl, vl0extra;
2213 struct qib_devdata *dd = ppd->dd;
2216 numvls = qib_num_vls(ppd->vls_operational);
2219 * Set up per-VL credits. Below is kluge based on these assumptions:
2220 * 1) port is disabled at the time early_init is called.
2221 * 2) give VL15 17 credits, for two max-plausible packets.
2222 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2224 /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2225 totcred = NUM_RCV_BUF_UNITS(dd);
2226 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2228 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2229 cred_vl = totcred / numvls;
2230 vl0extra = totcred - cred_vl * numvls;
2231 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2232 for (i = 1; i < numvls; i++)
2233 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2234 for (; i < 8; i++) /* no buffer space for other VLs */
2235 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2237 /* Notify IBC that credits need to be recalculated */
2238 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2239 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2240 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2241 qib_write_kreg(dd, kr_scratch, 0ULL);
2242 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2243 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
/* read back the credit registers (values unused; forces completion) */
2245 for (i = 0; i < numvls; i++)
2246 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2247 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2249 /* Change the number of operational VLs */
2250 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2251 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2252 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2253 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2254 qib_write_kreg(dd, kr_scratch, 0ULL);
2258 * The code that deals with actual SerDes is in serdes_7322_init().
2259 * Compared to the code for iba7220, it is minimal.
2261 static int serdes_7322_init(struct qib_pportdata *ppd);
2264 * qib_7322_bringup_serdes - bring up the serdes
2265 * @ppd: physical port on the qlogic_ib device
/*
 * NOTE(review): holds IBC in reset while configuring ibcctrl_a/b/c
 * (flow control, max packet length, speed/width enables), runs the
 * serdes init, sets the heartbeat GUID, then enables the link in
 * DISABLED state and turns on receive + IBSTATUSCHG interrupts.
 * kr_scratch writes between IBC register writes are required flushes.
 */
2267 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2269 struct qib_devdata *dd = ppd->dd;
2271 unsigned long flags;
2275 * SerDes model not in Pd, but still need to
2276 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2279 /* Put IBC in reset, sends disabled (should be in reset already) */
2280 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2281 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2282 qib_write_kreg(dd, kr_scratch, 0ULL);
/* snapshot error counters so DDR-negotiation noise can be backed out */
2284 if (qib_compat_ddr_negotiate) {
2285 ppd->cpspec->ibdeltainprog = 1;
2286 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2288 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2289 crp_iblinkerrrecov);
2292 /* flowcontrolwatermark is in units of KBytes */
2293 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2295 * Flow control is sent this often, even if no changes in
2296 * buffer space occur. Units are 128ns for this chip.
2299 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2300 /* max error tolerance */
2301 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2302 /* IB credit flow control. */
2303 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2305 * set initial max size pkt IBC will send, including ICRC; it's the
2306 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2308 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2309 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2310 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2313 * Reset the PCS interface to the serdes (and also ibc, which is still
2314 * in reset from above). Writes new value of ibcctrl_a as last step.
2316 qib_7322_mini_pcs_reset(ppd);
2318 if (!ppd->cpspec->ibcctrl_b) {
2319 unsigned lse = ppd->link_speed_enabled;
2322 * Not on re-init after reset, establish shadow
2323 * and force initial config.
2325 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2327 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2328 IBA7322_IBC_SPEED_DDR |
2329 IBA7322_IBC_SPEED_SDR |
2330 IBA7322_IBC_WIDTH_AUTONEG |
2331 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2332 if (lse & (lse - 1)) /* Multiple speeds enabled */
2333 ppd->cpspec->ibcctrl_b |=
2334 (lse << IBA7322_IBC_SPEED_LSB) |
2335 IBA7322_IBC_IBTA_1_2_MASK |
2336 IBA7322_IBC_MAX_SPEED_MASK;
2338 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2339 IBA7322_IBC_SPEED_QDR |
2340 IBA7322_IBC_IBTA_1_2_MASK :
2341 (lse == QIB_IB_DDR) ?
2342 IBA7322_IBC_SPEED_DDR :
2343 IBA7322_IBC_SPEED_SDR;
2344 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2345 (IB_WIDTH_1X | IB_WIDTH_4X))
2346 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2348 ppd->cpspec->ibcctrl_b |=
2349 ppd->link_width_enabled == IB_WIDTH_4X ?
2350 IBA7322_IBC_WIDTH_4X_ONLY :
2351 IBA7322_IBC_WIDTH_1X_ONLY;
2353 /* always enable these on driver reload, not sticky */
2354 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2355 IBA7322_IBC_HRTBT_MASK);
2357 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2359 /* setup so we have more time at CFGTEST to change H1 */
2360 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2361 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2362 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2363 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2365 serdes_7322_init(ppd);
/* derive a per-port GUID from the base GUID if none was set yet */
2367 guid = be64_to_cpu(ppd->guid);
2370 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2371 ppd->guid = cpu_to_be64(guid);
2374 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2375 /* write to chip to prevent back-to-back writes of ibc reg */
2376 qib_write_kreg(dd, kr_scratch, 0);
2379 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2382 /* initially come up DISABLED, without sending anything. */
2383 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2384 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2385 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2386 qib_write_kreg(dd, kr_scratch, 0ULL);
2387 /* clear the linkinit cmds */
2388 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2390 /* be paranoid against later code motion, etc. */
2391 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2392 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2393 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2394 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2396 /* Also enable IBSTATUSCHG interrupt. */
2397 val = qib_read_kreg_port(ppd, krp_errmask);
2398 qib_write_kreg_port(ppd, krp_errmask,
2399 val | ERR_MASK_N(IBStatusChanged));
2401 /* Always zero until we start messing with SerDes for real */
2406 * qib_7322_quiet_serdes - set serdes to txidle
2407 * @dd: the qlogic_ib device
2408 * Called when driver is being unloaded
2410 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2413 unsigned long flags;
2415 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
/* stop autoneg/IPG workers before tearing the link down */
2417 spin_lock_irqsave(&ppd->lflags_lock, flags);
2418 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2419 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2420 wake_up(&ppd->cpspec->autoneg_wait);
2421 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2422 if (ppd->dd->cspec->r1)
2423 cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2425 ppd->cpspec->chase_end = 0;
2426 if (ppd->cpspec->chase_timer.data) /* if initted */
2427 del_timer_sync(&ppd->cpspec->chase_timer);
2430 * Despite the name, actually disables IBC as well. Do it when
2431 * we are as sure as possible that no more packets can be
2432 * received, following the down and the PCS reset.
2433 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2434 * along with the PCS being reset.
2436 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2437 qib_7322_mini_pcs_reset(ppd);
2440 * Update the adjusted counters so the adjustment persists
2441 * across driver reload.
2443 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2444 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2445 struct qib_devdata *dd = ppd->dd;
2448 /* enable counter writes */
2449 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2450 qib_write_kreg(dd, kr_hwdiagctrl,
2451 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2453 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2454 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
/* "val -= val - snap" == "val = snap": roll back to snapshot */
2455 if (ppd->cpspec->ibdeltainprog)
2456 val -= val - ppd->cpspec->ibsymsnap;
2457 val -= ppd->cpspec->ibsymdelta;
2458 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2460 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2461 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2462 if (ppd->cpspec->ibdeltainprog)
2463 val -= val - ppd->cpspec->iblnkerrsnap;
2464 val -= ppd->cpspec->iblnkerrdelta;
2465 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2467 if (ppd->cpspec->iblnkdowndelta) {
2468 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2469 val += ppd->cpspec->iblnkdowndelta;
2470 write_7322_creg_port(ppd, crp_iblinkdown, val);
2473 * No need to save ibmalfdelta since IB perfcounters
2474 * are cleared on driver reload.
2477 /* and disable counter writes */
2478 qib_write_kreg(dd, kr_hwdiagctrl, diagc)
2483 * qib_setup_7322_setextled - set the state of the two external LEDs
2484 * @ppd: physical port on the qlogic_ib device
2485 * @on: whether the link is up or not
2487 * The exact combo of LEDs if on is true is determined by looking
2490 * These LEDs indicate the physical and logical state of IB link.
2491 * For this chip (at least with recommended board pinouts), LED1
2492 * is Yellow (logical state) and LED2 is Green (physical state),
2494 * Note: We try to match the Mellanox HCA LED behavior as best
2495 * we can. Green indicates physical link state is OK (something is
2496 * plugged in, and we can train).
2497 * Amber indicates the link is logically up (ACTIVE).
2498 * Mellanox further blinks the amber LED to indicate data packet
2499 * activity, but we have no hardware support for that, so it would
2500 * require waking up every 10-20 msecs and checking the counters
2501 * on the chip, and then turning the LED off if appropriate. That's
2502 * visible overhead, so not something we will do.
2504 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2506 struct qib_devdata *dd = ppd->dd;
2507 u64 extctl, ledblink = 0, val;
2508 unsigned long flags;
2512 * The diags use the LED to indicate diag info, so we leave
2513 * the external LED alone when the diags are running.
2515 if (dd->diag_client)
2518 /* Allow override of LED display for, e.g. Locating system in rack */
2519 if (ppd->led_override) {
2520 grn = (ppd->led_override & QIB_LED_PHYS);
2521 yel = (ppd->led_override & QIB_LED_LOG);
/* otherwise derive LED state from the live IBC status register */
2523 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2524 grn = qib_7322_phys_portstate(val) ==
2525 IB_PHYSPORTSTATE_LINKUP;
2526 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
/* gpio_lock protects the shared extctrl shadow for both ports */
2532 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2533 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2534 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2536 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2538 * Counts are in chip clock (4ns) periods.
2539 * This is 1/16 sec (66.6ms) on,
2540 * 3/16 sec (187.5 ms) off, with packets rcvd.
2542 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2543 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2546 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2547 dd->cspec->extctrl = extctl;
2548 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2549 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2551 if (ledblink) /* blink the LED on packet receive */
2552 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2556 * Disable MSIx interrupt if enabled, call generic MSIx code
2557 * to cleanup, and clear pending MSIx interrupts.
2558 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2560 static void qib_7322_nomsix(struct qib_devdata *dd)
/* mask everything in the main handler while vectors are torn down */
2565 dd->cspec->main_int_mask = ~0ULL;
2566 n = dd->cspec->num_msix_entries;
/* zero the count first so concurrent paths see "no MSIx" */
2570 dd->cspec->num_msix_entries = 0;
2571 for (i = 0; i < n; i++) {
2572 irq_set_affinity_hint(
2573 dd->cspec->msix_entries[i].msix.vector, NULL);
2574 free_cpumask_var(dd->cspec->msix_entries[i].mask);
2575 free_irq(dd->cspec->msix_entries[i].msix.vector,
2576 dd->cspec->msix_entries[i].arg);
2580 /* make sure no MSIx interrupts are left pending */
2581 intgranted = qib_read_kreg64(dd, kr_intgranted);
2583 qib_write_kreg(dd, kr_intgranted, intgranted);
/* Release the INTx IRQ if one was requested, then tear down MSIx. */
2586 static void qib_7322_free_irq(struct qib_devdata *dd)
2588 if (dd->cspec->irq) {
2589 free_irq(dd->cspec->irq, dd);
2592 qib_7322_nomsix(dd);
/*
 * Chip-specific teardown: free IRQs and per-device buffers, then for
 * each port free counters, mask off QSFP presence-change GPIO
 * interrupts, deinit QSFP state, and destroy any cached SMI AH.
 */
2595 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2599 qib_7322_free_irq(dd);
2600 kfree(dd->cspec->cntrs);
2601 kfree(dd->cspec->sendchkenable);
2602 kfree(dd->cspec->sendgrhchk);
2603 kfree(dd->cspec->sendibchk);
2604 kfree(dd->cspec->msix_entries);
2605 for (i = 0; i < dd->num_pports; i++) {
2606 unsigned long flags;
2607 u32 mask = QSFP_GPIO_MOD_PRS_N |
2608 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2610 kfree(dd->pport[i].cpspec->portcntrs);
2611 if (dd->flags & QIB_HAS_QSFP) {
2612 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2613 dd->cspec->gpio_mask &= ~mask;
2614 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2615 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2616 qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2618 if (dd->pport[i].ibport_data.smi_ah)
2619 ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2623 /* handle SDMA interrupts */
/* Demultiplexes the combined istat into per-port SDMA events. */
2624 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2626 struct qib_pportdata *ppd0 = &dd->pport[0];
2627 struct qib_pportdata *ppd1 = &dd->pport[1];
2628 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2629 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2630 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2631 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2634 qib_sdma_intr(ppd0);
2636 qib_sdma_intr(ppd1);
/* cleanup-done is signaled separately and drives the SDMA state machine */
2638 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2639 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2640 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2641 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2645 * Set or clear the Send buffer available interrupt enable bit.
2647 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2649 unsigned long flags;
/* sendctrl shadow is protected by sendctrl_lock */
2651 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2653 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2655 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2656 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
/* scratch write flushes the sendctrl update to the chip */
2657 qib_write_kreg(dd, kr_scratch, 0ULL);
2658 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2662 * Somehow got an interrupt with reserved bits set in interrupt status.
2663 * Print a message so we know it happened, then clear them.
2664 * keep mainline interrupt handler cache-friendly
2666 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2671 kills = istat & ~QIB_I_BITSEXTANT;
2672 qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:"
2673 " %s\n", (unsigned long long) kills, msg);
/* mask the offending bits so they cannot fire again */
2674 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2677 /* keep mainline interrupt handler cache-friendly */
/*
 * Handle GPIO interrupts: the only expected source is QSFP module
 * presence-change pins; anything else is a programming error or chip
 * problem and gets masked off after being reported.
 */
2678 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2685 * Boards for this chip currently don't use GPIO interrupts,
2686 * so clear by writing GPIOstatus to GPIOclear, and complain
2687 * to developer. To avoid endless repeats, clear
2688 * the bits in the mask, since there is some kind of
2689 * programming error or chip problem.
2691 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2693 * In theory, writing GPIOstatus to GPIOclear could
2694 * have a bad side-effect on some diagnostic that wanted
2695 * to poll for a status-change, but the various shadows
2696 * make that problematic at best. Diags will just suppress
2697 * all GPIO interrupts during such tests.
2699 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2701 * Check for QSFP MOD_PRS changes
2702 * only works for single port if IB1 != pidx1
2704 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2706 struct qib_pportdata *ppd;
2707 struct qib_qsfp_data *qd;
2709 if (!dd->pport[pidx].link_speed_supported)
2711 mask = QSFP_GPIO_MOD_PRS_N;
2712 ppd = dd->pport + pidx;
2713 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2714 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2716 qd = &ppd->cpspec->qsfp_data;
2717 gpiostatus &= ~mask;
/* pin low means a module is now present; record insert time */
2718 pins = qib_read_kreg64(dd, kr_extstatus);
2719 pins >>= SYM_LSB(EXTStatus, GPIOIn);
2720 if (!(pins & mask)) {
2722 qd->t_insert = jiffies;
2723 queue_work(ib_wq, &qd->work);
/* any leftover bits were not QSFP: mask them off permanently */
2728 if (gpiostatus && !handled) {
2729 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2730 u32 gpio_irq = mask & gpiostatus;
2733 * Clear any troublemakers, and update chip from shadow
2735 dd->cspec->gpio_mask &= ~gpio_irq;
2736 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2741 * Handle errors and unusual events first, separate function
2742 * to improve cache hits for fast path interrupt handling.
2744 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2746 if (istat & ~QIB_I_BITSEXTANT)
2747 unknown_7322_ibits(dd, istat);
2748 if (istat & QIB_I_GPIO)
2749 unknown_7322_gpio_intr(dd);
2750 if (istat & QIB_I_C_ERROR) {
/* mask device errors; error_tasklet re-enables after handling */
2751 qib_write_kreg(dd, kr_errmask, 0ULL);
2752 tasklet_schedule(&dd->error_tasklet);
/* per-port errors are handled inline via ctxt 0/1's port */
2754 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2755 handle_7322_p_errors(dd->rcd[0]->ppd);
2756 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2757 handle_7322_p_errors(dd->rcd[1]->ppd);
2761 * Dynamically adjust the rcv int timeout for a context based on incoming
/* Doubles the timeout on busy contexts, halves it on idle ones. */
2764 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2766 struct qib_devdata *dd = rcd->dd;
2767 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2770 * Dynamically adjust idle timeout on chip
2771 * based on number of packets processed.
2773 if (npkts < rcv_int_count && timeout > 2)
2775 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2776 timeout = min(timeout << 1, rcv_int_timeout)
/* remember the new value and program the per-context register */
2780 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2781 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2785 * This is the main interrupt handler.
2786 * It will normally only be used for low frequency interrupts but may
2787 * have to handle all interrupts if INTx is enabled or fewer than normal
2788 * MSIx interrupts were allocated.
2789 * This routine should ignore the interrupt bits for any of the
2790 * dedicated MSIx handlers.
2792 static irqreturn_t qib_7322intr(int irq, void *data)
2794 struct qib_devdata *dd = data;
/* bail if the chip is absent or interrupts are known-bad */
2802 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2804 * This return value is not great, but we do not want the
2805 * interrupt core code to remove our interrupt handler
2806 * because we don't appear to be handling an interrupt
2807 * during a chip reset.
2813 istat = qib_read_kreg64(dd, kr_intstatus);
2815 if (unlikely(istat == ~0ULL)) {
2816 qib_bad_intrstatus(dd);
2817 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2818 /* don't know if it was our interrupt or not */
/* ignore bits owned by dedicated MSIx handlers */
2823 istat &= dd->cspec->main_int_mask;
2824 if (unlikely(!istat)) {
2825 /* already handled, or shared and not us */
2830 qib_stats.sps_ints++;
2831 if (dd->int_counter != (u32) -1)
2834 /* handle "errors" of various kinds first, device ahead of port */
2835 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2836 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2837 INT_MASK_P(Err, 1))))
2838 unlikely_7322_intr(dd, istat);
2841 * Clear the interrupt bits we found set, relatively early, so we
2842 * "know" the chip will have seen this by the time we process
2843 * the queue, and will re-interrupt if necessary. The processor
2844 * itself won't take the interrupt again until we return.
2846 qib_write_kreg(dd, kr_intclear, istat);
2849 * Handle kernel receive queues before checking for pio buffers
2850 * available since receives can overflow; piobuf waiters can afford
2851 * a few extra cycles, since they were waiting anyway.
2853 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2855 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2856 (1ULL << QIB_I_RCVURG_LSB);
2857 for (i = 0; i < dd->first_user_ctxt; i++) {
2858 if (ctxtrbits & rmask) {
2859 ctxtrbits &= ~rmask;
2861 qib_kreceive(dd->rcd[i], NULL, &npkts);
/* remaining bits belong to user contexts */
2866 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2867 (ctxtrbits >> QIB_I_RCVURG_LSB);
2868 qib_handle_urcv(dd, ctxtrbits);
2872 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2873 sdma_7322_intr(dd, istat);
2875 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2876 qib_ib_piobufavail(dd);
2884 * Dedicated receive packet available interrupt handler.
2885 * Each kernel receive context gets its own MSIx vector; @data is the
2886 * qib_ctxtdata for that context.
2886 static irqreturn_t qib_7322pintr(int irq, void *data)
2888 struct qib_ctxtdata *rcd = data;
2889 struct qib_devdata *dd = rcd->dd;
/* Bail unless the chip is present and interrupts are believed good. */
2892 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2894 * This return value is not great, but we do not want the
2895 * interrupt core code to remove our interrupt handler
2896 * because we don't appear to be handling an interrupt
2897 * during a chip reset.
2901 qib_stats.sps_ints++;
/* (u32)-1 is a sentinel; the increment body is elided in this extract */
2902 if (dd->int_counter != (u32) -1)
2905 /* Clear the interrupt bit we expect to be set. */
/* Both the avail and urgent bits for this context are cleared at once. */
2906 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
2907 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
/* Drain this context's receive queue. */
2909 qib_kreceive(rcd, NULL, &npkts);
2915 * Dedicated Send buffer available interrupt handler.
2917 static irqreturn_t qib_7322bufavail(int irq, void *data)
2919 struct qib_devdata *dd = data;
/* Bail unless the chip is present and interrupts are believed good. */
2921 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2923 * This return value is not great, but we do not want the
2924 * interrupt core code to remove our interrupt handler
2925 * because we don't appear to be handling an interrupt
2926 * during a chip reset.
2930 qib_stats.sps_ints++;
/* (u32)-1 is a sentinel; the increment body is elided in this extract */
2931 if (dd->int_counter != (u32) -1)
2934 /* Clear the interrupt bit we expect to be set. */
2935 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
2937 /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
2938 if (dd->flags & QIB_INITTED)
2939 qib_ib_piobufavail(dd);
/*
 * NOTE(review): presumably the not-yet-initted path, where we just stop
 * asking for this interrupt — the branch keyword is elided; confirm.
 */
2941 qib_wantpiobuf_7322_intr(dd, 0);
2947 * Dedicated Send DMA interrupt handler.
2948 * @data is the qib_pportdata for the port whose SDMA engine fired.
2949 static irqreturn_t sdma_intr(int irq, void *data)
2951 struct qib_pportdata *ppd = data;
2952 struct qib_devdata *dd = ppd->dd;
/* Bail unless the chip is present and interrupts are believed good. */
2954 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2956 * This return value is not great, but we do not want the
2957 * interrupt core code to remove our interrupt handler
2958 * because we don't appear to be handling an interrupt
2959 * during a chip reset.
2963 qib_stats.sps_ints++;
/* (u32)-1 is a sentinel; the increment body is elided in this extract */
2964 if (dd->int_counter != (u32) -1)
2967 /* Clear the interrupt bit we expect to be set. */
/* hw_pidx selects which port's SDma bit to clear. */
2968 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2969 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
2976 * Dedicated Send DMA idle interrupt handler.
2977 * @data is the qib_pportdata for the port whose SDMA engine went idle.
2978 static irqreturn_t sdma_idle_intr(int irq, void *data)
2980 struct qib_pportdata *ppd = data;
2981 struct qib_devdata *dd = ppd->dd;
/* Bail unless the chip is present and interrupts are believed good. */
2983 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2985 * This return value is not great, but we do not want the
2986 * interrupt core code to remove our interrupt handler
2987 * because we don't appear to be handling an interrupt
2988 * during a chip reset.
2992 qib_stats.sps_ints++;
/* (u32)-1 is a sentinel; the increment body is elided in this extract */
2993 if (dd->int_counter != (u32) -1)
2996 /* Clear the interrupt bit we expect to be set. */
/* hw_pidx selects which port's SDmaIdle bit to clear. */
2997 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2998 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3005 * Dedicated Send DMA progress interrupt handler.
3006 * @data is the qib_pportdata for the port reporting SDMA progress.
3007 static irqreturn_t sdma_progress_intr(int irq, void *data)
3009 struct qib_pportdata *ppd = data;
3010 struct qib_devdata *dd = ppd->dd;
/* Bail unless the chip is present and interrupts are believed good. */
3012 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3014 * This return value is not great, but we do not want the
3015 * interrupt core code to remove our interrupt handler
3016 * because we don't appear to be handling an interrupt
3017 * during a chip reset.
3021 qib_stats.sps_ints++;
/* (u32)-1 is a sentinel; the increment body is elided in this extract */
3022 if (dd->int_counter != (u32) -1)
3025 /* Clear the interrupt bit we expect to be set. */
/* hw_pidx selects which port's SDmaProgress bit to clear. */
3026 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3027 INT_MASK_P(SDmaProgress, 1) :
3028 INT_MASK_P(SDmaProgress, 0));
3035 * Dedicated Send DMA cleanup interrupt handler.
3036 * @data is the qib_pportdata for the port that finished SDMA cleanup.
3037 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3039 struct qib_pportdata *ppd = data;
3040 struct qib_devdata *dd = ppd->dd;
/* Bail unless the chip is present and interrupts are believed good. */
3042 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3044 * This return value is not great, but we do not want the
3045 * interrupt core code to remove our interrupt handler
3046 * because we don't appear to be handling an interrupt
3047 * during a chip reset.
3051 qib_stats.sps_ints++;
/* (u32)-1 is a sentinel; the increment body is elided in this extract */
3052 if (dd->int_counter != (u32) -1)
3055 /* Clear the interrupt bit we expect to be set. */
/* Cleanup-done uses the masked (PM) per-port bit, unlike the others. */
3056 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3057 INT_MASK_PM(SDmaCleanupDone, 1) :
3058 INT_MASK_PM(SDmaCleanupDone, 0));
/* Kick the SDMA state machine now that hardware cleanup completed. */
3059 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3065 * Set up our chip-specific interrupt handler.
3066 * The interrupt type has already been setup, so
3067 * we just need to do the registration and error checking.
3068 * If we are using MSIx interrupts, we may fall back to
3069 * INTx later, if the interrupt handler doesn't get called
3070 * within 1/2 second (see verify_interrupt()).
3071 * @clearpend: when set, clear any pending interrupt state first.
3072 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3074 int ret, i, msixnum;
3077 const struct cpumask *local_mask;
3078 int firstcpu, secondcpu = 0, currrcvcpu = 0;
/* Nothing to do if no ports were configured. */
3080 if (!dd->num_pports)
3085 * if not switching interrupt types, be sure interrupts are
3086 * disabled, and then clear anything pending at this point,
3087 * because we are starting clean.
3089 qib_7322_set_intr_state(dd, 0);
3091 /* clear the reset error, init error/hwerror mask */
3092 qib_7322_init_hwerrors(dd);
3094 /* clear any interrupt bits that might be set */
3095 qib_write_kreg(dd, kr_intclear, ~0ULL);
3097 /* make sure no pending MSIx intr, and clear diag reg */
3098 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3099 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
/* No MSIx vectors allocated: register the shared INTx handler instead. */
3102 if (!dd->cspec->num_msix_entries) {
3103 /* Try to get INTx interrupt */
3105 if (!dd->pcidev->irq) {
3106 qib_dev_err(dd, "irq is 0, BIOS error? "
3107 "Interrupts won't work\n");
3110 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3111 IRQF_SHARED, QIB_DRV_NAME, dd);
3113 qib_dev_err(dd, "Couldn't setup INTx "
3114 "interrupt (irq=%d): %d\n",
3115 dd->pcidev->irq, ret);
3118 dd->cspec->irq = dd->pcidev->irq;
/* INTx: the main handler must watch every interrupt bit. */
3119 dd->cspec->main_int_mask = ~0ULL;
3123 /* Try to get MSIx interrupts */
3124 memset(redirect, 0, sizeof redirect);
/*
 * Pick CPUs close to the device for IRQ affinity. If the device's
 * bus has no local CPUs (or is local to all of them), fall back to
 * the first core's siblings. firstcpu takes non-receive vectors;
 * receive vectors round-robin starting at secondcpu.
 */
3127 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3128 firstcpu = cpumask_first(local_mask);
3129 if (firstcpu >= nr_cpu_ids ||
3130 cpumask_weight(local_mask) == num_online_cpus()) {
3131 local_mask = topology_core_cpumask(0);
3132 firstcpu = cpumask_first(local_mask);
3134 if (firstcpu < nr_cpu_ids) {
3135 secondcpu = cpumask_next(firstcpu, local_mask);
3136 if (secondcpu >= nr_cpu_ids)
3137 secondcpu = firstcpu;
3138 currrcvcpu = secondcpu;
/*
 * Assign one handler per MSIx vector: irq_table entries first
 * (errors, SDMA, buffer-available), then one per kernel receive
 * context.
 */
3140 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3141 irq_handler_t handler;
/* Ensure the vector name buffer stays NUL-terminated. */
3146 dd->cspec->msix_entries[msixnum].
3147 name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3149 if (i < ARRAY_SIZE(irq_table)) {
3150 if (irq_table[i].port) {
3151 /* skip if for a non-configured port */
3152 if (irq_table[i].port > dd->num_pports)
3154 arg = dd->pport + irq_table[i].port - 1;
3157 lsb = irq_table[i].lsb;
3158 handler = irq_table[i].handler;
3159 snprintf(dd->cspec->msix_entries[msixnum].name,
3160 sizeof(dd->cspec->msix_entries[msixnum].name)
3162 QIB_DRV_NAME "%d%s", dd->unit,
3167 ctxt = i - ARRAY_SIZE(irq_table);
3168 /* per krcvq context receive interrupt */
3169 arg = dd->rcd[ctxt];
/* Module param: leave contexts 0/1 on the main handler; confirm. */
3172 if (qib_krcvq01_no_msi && ctxt < 2)
3174 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3175 handler = qib_7322pintr;
3176 snprintf(dd->cspec->msix_entries[msixnum].name,
3177 sizeof(dd->cspec->msix_entries[msixnum].name)
3179 QIB_DRV_NAME "%d (kctx)", dd->unit);
/* Register this vector (request_irq call partially elided here). */
3182 dd->cspec->msix_entries[msixnum].msix.vector,
3183 handler, 0, dd->cspec->msix_entries[msixnum].name,
3187 * Shouldn't happen since the enable said we could
3188 * have as many as we are trying to setup here.
3190 qib_dev_err(dd, "Couldn't setup MSIx "
3191 "interrupt (vec=%d, irq=%d): %d\n", msixnum,
3192 dd->cspec->msix_entries[msixnum].msix.vector,
/* On failure, fall back from MSIx entirely. */
3194 qib_7322_nomsix(dd);
3197 dd->cspec->msix_entries[msixnum].arg = arg;
/*
 * Program the redirect table so this interrupt source raises
 * vector msixnum, and drop its bit from the main handler's mask.
 */
3199 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3200 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3201 SYM_LSB(IntRedirect0, vec1);
3202 mask &= ~(1ULL << lsb);
3203 redirect[reg] |= ((u64) msixnum) << sh;
3205 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3206 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
/* Set CPU affinity hints: receive vectors spread over local CPUs. */
3207 if (firstcpu < nr_cpu_ids &&
3209 &dd->cspec->msix_entries[msixnum].mask,
3211 if (handler == qib_7322pintr) {
3212 cpumask_set_cpu(currrcvcpu,
3213 dd->cspec->msix_entries[msixnum].mask);
3214 currrcvcpu = cpumask_next(currrcvcpu,
3216 if (currrcvcpu >= nr_cpu_ids)
3217 currrcvcpu = secondcpu;
3219 cpumask_set_cpu(firstcpu,
3220 dd->cspec->msix_entries[msixnum].mask);
3222 irq_set_affinity_hint(
3223 dd->cspec->msix_entries[msixnum].msix.vector,
3224 dd->cspec->msix_entries[msixnum].mask);
3228 /* Initialize the vector mapping */
3229 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3230 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
/* Whatever bits remain are serviced by the main handler. */
3231 dd->cspec->main_int_mask = mask;
3232 tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3238 * qib_7322_boardname - fill in the board name and note features
3239 * @dd: the qlogic_ib device
3241 * info will be based on the board revision register
3242 * Returns the feature bits for this board (dual/single port, speed caps).
3243 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3245 /* Will need enumeration of board-types here */
3247 u32 boardid, namelen;
3248 unsigned features = DUAL_PORT_CAP;
3250 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
/*
 * Map BoardID to a name and per-board flags; QSFP-cabled boards get
 * QIB_HAS_QSFP. (The switch/case labels are elided in this extract.)
 */
3254 n = "InfiniPath_QLE7342_Emulation";
3257 n = "InfiniPath_QLE7340";
3258 dd->flags |= QIB_HAS_QSFP;
/* QLE7340 is a single-port board; speed cap only. */
3259 features = PORT_SPD_CAP;
3262 n = "InfiniPath_QLE7342";
3263 dd->flags |= QIB_HAS_QSFP;
3266 n = "InfiniPath_QMI7342";
3269 n = "InfiniPath_Unsupported7342";
3270 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3274 n = "InfiniPath_QMH7342";
3278 n = "InfiniPath_QME7342";
3281 n = "InfiniPath_QME7362";
3282 dd->flags |= QIB_HAS_QSFP;
3285 n = "InfiniPath_QLE7342_TEST";
3286 dd->flags |= QIB_HAS_QSFP;
3289 n = "InfiniPath_QLE73xy_UNKNOWN";
3290 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3293 dd->board_atten = 1; /* index into txdds_Xdr */
/* Copy the chosen name into an allocated buffer on dd. */
3295 namelen = strlen(n) + 1;
3296 dd->boardname = kmalloc(namelen, GFP_KERNEL);
3298 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3300 snprintf(dd->boardname, namelen, "%s", n);
3302 snprintf(dd->boardversion, sizeof(dd->boardversion),
3303 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3304 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3305 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3306 dd->majrev, dd->minrev,
3307 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
/* Module param can force a speed-capable dual-port board to one port. */
3309 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3310 qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3311 " by module parameter\n", dd->unit);
3312 features &= PORT_SPD_CAP;
3319 * This routine sleeps, so it can only be called from user context, not
3320 * from interrupt context.
3322 static int qib_do_7322_reset(struct qib_devdata *dd)
3326 int i, msix_entries, ret = 1;
3328 u8 int_line, clinesz;
3329 unsigned long flags;
3331 /* Use dev_err so it shows up in logs, etc. */
3332 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3334 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3336 msix_entries = dd->cspec->num_msix_entries;
3338 /* no interrupts till re-initted */
3339 qib_7322_set_intr_state(dd, 0);
3342 qib_7322_nomsix(dd);
3343 /* can be up to 512 bytes, too big for stack */
3344 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3345 sizeof(u64), GFP_KERNEL);
3347 qib_dev_err(dd, "No mem to save MSIx data\n");
3349 msix_vecsave = NULL;
3352 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3353 * info that is set up by the BIOS, so we have to save and restore
3354 * it ourselves. There is some risk something could change it,
3355 * after we save it, but since we have disabled the MSIx, it
3356 * shouldn't be touched...
3358 for (i = 0; i < msix_entries; i++) {
3359 u64 vecaddr, vecdata;
3360 vecaddr = qib_read_kreg64(dd, 2 * i +
3361 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3362 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3363 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3365 msix_vecsave[2 * i] = vecaddr;
3366 /* save it without the masked bit set */
3367 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3371 dd->pport->cpspec->ibdeltainprog = 0;
3372 dd->pport->cpspec->ibsymdelta = 0;
3373 dd->pport->cpspec->iblnkerrdelta = 0;
3374 dd->pport->cpspec->ibmalfdelta = 0;
3375 dd->int_counter = 0; /* so we check interrupts work again */
3378 * Keep chip from being accessed until we are ready. Use
3379 * writeq() directly, to allow the write even though QIB_PRESENT
3382 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3383 dd->flags |= QIB_DOING_RESET;
3384 val = dd->control | QLOGIC_IB_C_RESET;
3385 writeq(val, &dd->kregbase[kr_control]);
3387 for (i = 1; i <= 5; i++) {
3389 * Allow MBIST, etc. to complete; longer on each retry.
3390 * We sometimes get machine checks from bus timeout if no
3391 * response, so for now, make it *really* long.
3393 msleep(1000 + (1 + i) * 3000);
3395 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3398 * Use readq directly, so we don't need to mark it as PRESENT
3399 * until we get a successful indication that all is well.
3401 val = readq(&dd->kregbase[kr_revision]);
3402 if (val == dd->revision)
3405 qib_dev_err(dd, "Failed to initialize after reset, "
3412 dd->flags |= QIB_PRESENT; /* it's back */
3415 /* restore the MSIx vector address and data if saved above */
3416 for (i = 0; i < msix_entries; i++) {
3417 dd->cspec->msix_entries[i].msix.entry = i;
3418 if (!msix_vecsave || !msix_vecsave[2 * i])
3420 qib_write_kreg(dd, 2 * i +
3421 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3422 msix_vecsave[2 * i]);
3423 qib_write_kreg(dd, 1 + 2 * i +
3424 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3425 msix_vecsave[1 + 2 * i]);
3429 /* initialize the remaining registers. */
3430 for (i = 0; i < dd->num_pports; ++i)
3431 write_7322_init_portregs(&dd->pport[i]);
3432 write_7322_initregs(dd);
3434 if (qib_pcie_params(dd, dd->lbus_width,
3435 &dd->cspec->num_msix_entries,