/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>

#include "qib.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
                              struct qpn_map *map, unsigned off)
{
        return (map - qpt->map) * BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
                                        struct qpn_map *map, unsigned off,
                                        unsigned n)
{
        if (qpt->mask) {
                off++;
                if (((off & qpt->mask) >> 1) >= n)
                        off = (off | qpt->mask) + 2;
        } else
                off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
        return off;
}
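
/*
 * Illustrative sketch, not part of the driver: how a QPN maps onto the
 * bitmap pages above.  The helper name is hypothetical and the block is
 * compiled out; it only restates the arithmetic of mk_qpn().
 */
#if 0
static void example_qpn_decompose(struct qib_qpn_table *qpt, u32 qpn)
{
        struct qpn_map *map = &qpt->map[qpn / BITS_PER_PAGE]; /* owning page */
        unsigned off = qpn & BITS_PER_PAGE_MASK;              /* bit in page */

        /* mk_qpn() is the inverse of this decomposition */
        WARN_ON(mk_qpn(qpt, map, off) != qpn);
}
#endif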

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};
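
/*
 * Illustrative sketch, not part of the driver: decoding the 5-bit AETH
 * credit code.  Code 0x1F (QIB_AETH_CREDIT_INVAL) means "unlimited";
 * every other code indexes credit_table[].  Hypothetical helper,
 * compiled out.
 */
#if 0
static u32 example_aeth_to_credits(u32 aeth)
{
        u32 code = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        if (code == QIB_AETH_CREDIT_INVAL)
                return ~0U;             /* sender may transmit freely */
        return credit_table[code];      /* e.g. code 0x7 -> 12 RWQEs */
}
#endif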

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);
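        /*
         * Note: get_zeroed_page() returns 0 on failure; free_page(0) is
         * a no-op below and alloc_qpn() rechecks map->page afterwards.
         */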

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
                     enum ib_qp_type type, u8 port)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        int ret;

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + 2;
        if (qpn >= QPN_MAX)
                qpn = 2;
        if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
                qpn = (qpn | qpt->mask) + 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset = find_next_offset(qpt, map, offset,
                                dd->n_krcv_queues);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
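
/*
 * Illustrative usage, not part of the driver: an alloc/free round trip.
 * Hypothetical caller, compiled out; error handling abbreviated.
 */
#if 0
static void example_qpn_round_trip(struct qib_devdata *dd,
                                   struct qib_qpn_table *qpt)
{
        int qpn = alloc_qpn(dd, qpt, IB_QPT_RC, 1); /* RC QP on port 1 */

        if (qpn < 0)
                return;         /* -EINVAL (SMI/GSI taken) or -ENOMEM */
        /* ... hand the QPN to a new QP ... */
        free_qpn(qpt, qpn);     /* clear the bit so the QPN can be reused */
}
#endif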

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
        return jhash_1word(qpn, dev->qp_rnd) &
                (dev->qp_table_size - 1);
}
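
/*
 * Illustrative note, not part of the driver: folding the Jenkins hash
 * with "& (qp_table_size - 1)" only yields a valid bucket index when
 * the table size is a power of two.  Hypothetical check, compiled out.
 */
#if 0
static void example_bucket_bound(struct qib_ibdev *dev, u32 qpn)
{
        unsigned n = qpn_hash(dev, qpn);

        WARN_ON(!is_power_of_2(dev->qp_table_size));    /* <linux/log2.h> */
        WARN_ON(n >= dev->qp_table_size);               /* always in range */
}
#endif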

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned long flags;
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

        spin_lock_irqsave(&dev->qpt_lock, flags);
        atomic_inc(&qp->refcount);

        if (qp->ibqp.qp_num == 0)
                rcu_assign_pointer(ibp->qp0, qp);
        else if (qp->ibqp.qp_num == 1)
                rcu_assign_pointer(ibp->qp1, qp);
        else {
                qp->next = dev->qp_table[n];
                rcu_assign_pointer(dev->qp_table[n], qp);
        }

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        synchronize_rcu();
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
        unsigned long flags;

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (rcu_dereference_protected(ibp->qp0,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
                atomic_dec(&qp->refcount);
                rcu_assign_pointer(ibp->qp0, NULL);
        } else if (rcu_dereference_protected(ibp->qp1,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
                atomic_dec(&qp->refcount);
                rcu_assign_pointer(ibp->qp1, NULL);
        } else {
                struct qib_qp *q;
                struct qib_qp __rcu **qpp;

                qpp = &dev->qp_table[n];
                q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&dev->qpt_lock));
                for (; q; qpp = &q->next) {
                        if (q == qp) {
                                atomic_dec(&qp->refcount);
                                *qpp = qp->next;
                                rcu_assign_pointer(qp->next, NULL);
                                q = rcu_dereference_protected(*qpp,
                                        lockdep_is_held(&dev->qpt_lock));
                                break;
                        }
                        q = rcu_dereference_protected(*qpp,
                                lockdep_is_held(&dev->qpt_lock));
                }
        }

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        synchronize_rcu();
}
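
/*
 * Illustrative sketch, not part of the driver: the writer-side RCU
 * pattern used by insert_qp()/remove_qp() above, on a hypothetical
 * singly linked list.  Publication happens with rcu_assign_pointer()
 * under a lock; retirement additionally waits in synchronize_rcu() so
 * no reader can still hold the unlinked node.  Compiled out.
 */
#if 0
struct example_node {
        struct example_node __rcu *next;
};

static void example_rcu_unlink(struct example_node __rcu **head,
                               struct example_node *victim,
                               spinlock_t *lock)
{
        struct example_node *n;

        spin_lock(lock);
        n = rcu_dereference_protected(*head, lockdep_is_held(lock));
        for (; n; head = &n->next,
             n = rcu_dereference_protected(*head, lockdep_is_held(lock)))
                if (n == victim) {
                        *head = victim->next;   /* unlink */
                        break;
                }
        spin_unlock(lock);
        synchronize_rcu();      /* no RCU reader can see 'victim' now */
}
#endif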

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
        struct qib_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
        struct qib_qp *qp;
        unsigned n, qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                if (!qib_mcast_tree_empty(ibp))
                        qp_inuse++;
                rcu_read_lock();
                if (rcu_dereference(ibp->qp0))
                        qp_inuse++;
                if (rcu_dereference(ibp->qp1))
                        qp_inuse++;
                rcu_read_unlock();
        }

        spin_lock_irqsave(&dev->qpt_lock, flags);
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->qp_table[n],
                        lockdep_is_held(&dev->qpt_lock));
                rcu_assign_pointer(dev->qp_table[n], NULL);

                for (; qp; qp = rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&dev->qpt_lock)))
                        qp_inuse++;
        }
        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        synchronize_rcu();

        return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port the QP is attached to
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
        struct qib_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                rcu_read_lock();
                if (qpn == 0)
                        qp = rcu_dereference(ibp->qp0);
                else
                        qp = rcu_dereference(ibp->qp1);
        } else {
                struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
                unsigned n = qpn_hash(dev, qpn);

                rcu_read_lock();
                for (qp = rcu_dereference(dev->qp_table[n]); qp;
                        qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
                                break;
        }
        if (qp)
                if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
                        qp = NULL;

        rcu_read_unlock();
        return qp;
}
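
/*
 * Illustrative usage, not part of the driver: a lookup/put pair.  On
 * success the caller owns a reference and must drop it; the destroy
 * path sleeps on qp->wait until the count reaches zero.  Hypothetical
 * caller, compiled out.
 */
#if 0
static void example_lookup_and_put(struct qib_ibport *ibp, u32 qpn)
{
        struct qib_qp *qp = qib_lookup_qpn(ibp, qpn);

        if (!qp)
                return;         /* no such QP, or it is being destroyed */
        /* ... use qp while holding the reference ... */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
#endif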

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        atomic_set(&qp->s_dma_busy, 0);
        qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
        unsigned n;

        if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                qib_put_ss(&qp->s_rdma_read_sge);

        qib_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
                        unsigned i;

                        for (i = 0; i < wqe->wr.num_sge; i++) {
                                struct qib_sge *sge = &wqe->sg_list[i];

                                qib_put_mr(sge->mr);
                        }
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                }
                if (qp->s_rdma_mr) {
                        qib_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        if (qp->ibqp.qp_type != IB_QPT_RC)
                return;

        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
                struct qib_ack_entry *e = &qp->s_ack_queue[n];

                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                    e->rdma_sge.mr) {
                        qib_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
                qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

        spin_lock(&dev->pending_lock);
        if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
                qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
                list_del_init(&qp->iowait);
        }
        spin_unlock(&dev->pending_lock);

        if (!(qp->s_flags & QIB_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        qib_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                if (qp->s_tx) {
                        qib_put_txreq(qp->s_tx);
                        qp->s_tx = NULL;
                }
        }

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                qib_schedule_send(qp);

        clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct qib_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)
                ret = 1;

bail:
        return ret;
}
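
/*
 * Illustrative caller, not part of the driver: qib_error_qp() must be
 * entered with r_lock and s_lock held and interrupts disabled, and the
 * "last WQE reached" event it requests is dispatched only after the
 * locks drop (mirroring qib_modify_qp() below).  Hypothetical helper,
 * compiled out.
 */
#if 0
static void example_force_error(struct qib_qp *qp)
{
        int lastwqe;

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);
        lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (lastwqe) {
                struct ib_event ev = {
                        .device = qp->ibqp.device,
                        .element.qp = &qp->ibqp,
                        .event = IB_EVENT_QP_LAST_WQE_REACHED,
                };

                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}
#endif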

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int ret;
        u32 pmtu = 0; /* for gcc warning only */

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                        goto inval;
                if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                        goto inval;
                if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > QIB_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                struct qib_devdata *dd = dd_from_dev(dev);
                int mtu, pidx = qp->port_num - 1;

                mtu = ib_mtu_enum_to_int(attr->path_mtu);
                if (mtu == -1)
                        goto inval;
                if (mtu > dd->pport[pidx].ibmtu) {
                        switch (dd->pport[pidx].ibmtu) {
                        case 4096:
                                pmtu = IB_MTU_4096;
                                break;
                        case 2048:
                                pmtu = IB_MTU_2048;
                                break;
                        case 1024:
                                pmtu = IB_MTU_1024;
                                break;
                        case 512:
                                pmtu = IB_MTU_512;
                                break;
                        case 256:
                                pmtu = IB_MTU_256;
                                break;
                        default:
                                pmtu = IB_MTU_2048;
                        }
                } else
                        pmtu = attr->path_mtu;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else
                        goto inval;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->iowait))
                                list_del_init(&qp->iowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
                        cancel_work_sync(&qp->s_work);
                        del_timer_sync(&qp->s_timer);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        if (qp->s_tx) {
                                qib_put_txreq(qp->s_tx);
                                qp->s_tx = NULL;
                        }
                        remove_qp(dev, qp);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
                        spin_lock_irq(&qp->r_lock);
                        spin_lock(&qp->s_lock);
                        clear_mr_refs(qp, 1);
                        qib_reset_qp(qp, ibqp->qp_type);
                }
                break;

        case IB_QPS_RTR:
                /* Allow event to retrigger if QP set to RTR more than once */
                qp->r_flags &= ~QIB_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->path_mtu = pmtu;
                qp->pmtu = ib_mtu_enum_to_int(pmtu);
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
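                /*
                 * IBTA local ACK timeout is 4.096 usec * 2^timeout:
                 * 4096 * 2^timeout below is in nanoseconds, and /1000
                 * converts to microseconds for usecs_to_jiffies().
                 */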
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                insert_qp(dev, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);
        ret = -EINVAL;

bail:
        return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct qib_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
        attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
        u32 aeth = qp->r_msn & QIB_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct qib_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << QIB_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}
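
/*
 * Illustrative sketch, not part of the driver: the binary search above
 * rounds down, picking the largest credit code whose table entry does
 * not exceed the available credits (e.g. 100 free RWQEs encode as code
 * 0xD, worth 96).  Hypothetical helper, compiled out.
 */
#if 0
static u32 example_credits_to_code(u32 credits)
{
        u32 min = 0, max = 31, x;

        for (;;) {
                x = (min + max) / 2;
                if (credit_table[x] == credits)
                        break;
                if (credit_table[x] > credits)
                        max = x;
                else if (min == x)
                        break;
                else
                        min = x;
        }
        return x;       /* example_credits_to_code(100) == 0xD */
}
#endif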

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct qib_qp *qp;
        int err;
        struct qib_swqe *swq = NULL;
        struct qib_ibdev *dev;
        struct qib_devdata *dd;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret;

        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
                    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
        }

        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
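                /* FALLTHROUGH -- SMI/GSI share the allocation path below */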
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = sizeof(struct qib_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct qib_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct qib_srq *srq = to_isrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                RCU_INIT_POINTER(qp->next, NULL);
                qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
                if (!qp->s_hdr) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_qp;
                }
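                /* 4.096 usec * 2^timeout; qp->timeout is still 0 (kzalloc) */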
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
                if (init_attr->srq)
                        sz = 0;
                else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct qib_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
                                                   qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&qp->wait_dma);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_WORK(&qp->s_work, qib_do_send);
                INIT_LIST_HEAD(&qp->iowait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = QIB_S_SIGNAL_REQ_WR;
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
                                init_attr->port_num);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
                        goto bail_qp;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                qib_reset_qp(qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
                goto bail;
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                } else {
                        u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

                        qp->ip = qib_create_mmap_info(dev, s,
                                                      ibpd->uobject->context,
                                                      qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_ip;
                        }

                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_qib_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &qp->ibqp;
        goto bail;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, qib_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
        kfree(qp->s_hdr);
        kfree(qp);
bail_swq:
        vfree(swq);
bail:
        return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->iowait))
                        list_del_init(&qp->iowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                spin_unlock_irq(&qp->s_lock);
                cancel_work_sync(&qp->s_work);
                del_timer_sync(&qp->s_timer);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                if (qp->s_tx) {
                        qib_put_txreq(qp->s_tx);
                        qp->s_tx = NULL;
                }
                remove_qp(dev, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
                clear_mr_refs(qp, 1);
        } else
                spin_unlock_irq(&qp->s_lock);

        /* all users cleaned up, mark the QPN available */
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, qib_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp->s_hdr);
        kfree(qp);
        return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device data
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
        spin_lock_init(&qpt->lock);
        qpt->last = 1;          /* start with QPN 2 */
        qpt->nmaps = 1;
        qpt->mask = dd->qpn_mask;
}
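
/*
 * Illustrative note, not part of the driver: when dd->qpn_mask is set,
 * QPN bits above bit 0 steer packets to a kernel receive context, which
 * is why find_next_offset() and alloc_qpn() compare
 * ((qpn & mask) >> 1) against dd->n_krcv_queues.  Hypothetical decoder,
 * compiled out.
 */
#if 0
static unsigned example_qpn_to_ctxt(struct qib_qpn_table *qpt, u32 qpn)
{
        return (qpn & qpt->mask) >> 1;  /* receive context selector */
}
#endif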

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                if (qpt->map[i].page)
                        free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - update the QP's send credit state from an AETH
 * @qp: the QP whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == QIB_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
                if (qib_cmp24(credit, qp->s_lsn) > 0) {
                        qp->s_lsn = credit;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        }
}