infiniband: ipoib: Sanitize neighbour handling in ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_main.c (~shefty/rdma-dev.git)
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

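/*
 * Both ring sizes are read-only module parameters (0444), so they must be
 * set at load time, for example (assuming the module is named ib_ipoib):
 *
 *     modprobe ib_ipoib send_queue_size=256 recv_queue_size=512
 *
 * ipoib_init_module() below rounds both values up to a power of two and
 * clamps them to [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE].
 */
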
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

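/*
 * Default 20-byte IPoIB hardware address of the IPv4 broadcast group:
 * one reserved/flags byte plus the 0xffffff multicast QPN, followed by
 * the broadcast MGID ff12:401b::ffff:ffff.  The two P_Key bytes of the
 * MGID (offsets 8 and 9 of the address) are filled in per port in
 * ipoib_add_port().
 */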
static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                goto err_disable;

        if (ipoib_ib_dev_up(dev))
                goto err_stop;

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;

err_stop:
        ipoib_ib_dev_stop(dev, 1);

err_disable:
        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        ipoib_ib_dev_down(dev, 0);
        ipoib_ib_dev_stop(dev, 0);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
                features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

        return features;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* dev->mtu > 2K ==> connected mode */
        if (ipoib_cm_admin_enabled(dev)) {
                if (new_mtu > ipoib_cm_max_mtu(dev))
                        return -EINVAL;

                if (new_mtu > priv->mcast_mtu)
                        ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
                                   priv->mcast_mtu);

                dev->mtu = new_mtu;
                return 0;
        }

        if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

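/*
 * Path records are cached in an rb-tree (priv->path_tree), keyed by the
 * 16-byte destination GID and protected by priv->lock.  __path_find() and
 * __path_add() below must be called with that lock held.
 */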
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);

                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;

        spin_lock_irq(&priv->lock);

        list_for_each_entry_safe(path, tp, &priv->path_list, list) {
                ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
                        be16_to_cpu(path->pathrec.dlid),
                        path->pathrec.dgid.raw);
                path->valid = 0;
        }

        spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_splice_init(&priv->path_list, &remove_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                wait_for_completion(&path->done);
                path_free(dev, path);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

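/*
 * Completion handler for the SA path record query started by
 * path_rec_start().  On success it builds an address handle from the
 * returned path, hands a reference to every neighbour waiting on this
 * path, and retransmits any packets that were queued while the path was
 * unresolved.
 */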
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_ah *old_ah = NULL;
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (!status)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
                          be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
        else
                ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
                          status, path->pathrec.dgid.raw);

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av;

                if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
                        ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        if (!IS_ERR_OR_NULL(ah)) {
                path->pathrec = *pathrec;

                old_ah   = path->ah;
                path->ah = ah;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                        if (neigh->ah) {
                                WARN_ON(neigh->ah != old_ah);
                                /*
                                 * Dropping the ah reference inside
                                 * priv->lock is safe here, because we
                                 * will hold one more reference from
                                 * the original value of path->ah (ie
                                 * old_ah).
                                 */
                                ipoib_put_ah(neigh->ah);
                        }
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                               sizeof(union ib_gid));

                        if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                                if (!ipoib_cm_get(neigh))
                                        ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
                                                                               path,
                                                                               neigh));
                                if (!ipoib_cm_get(neigh)) {
                                        list_del(&neigh->list);
                                        if (neigh->ah)
                                                ipoib_put_ah(neigh->ah);
                                        ipoib_neigh_free(dev, neigh);
                                        continue;
                                }
                        }

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
                path->valid = 1;
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (old_ah)
                ipoib_put_ah(old_ah);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        if (!priv->broadcast)
                return NULL;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
        path->pathrec.sgid          = priv->local_gid;
        path->pathrec.pkey          = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path     = 1;
        path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

        return path;
}

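/*
 * Kick off an asynchronous SA path record query for this path; the result
 * is delivered to path_rec_completion().  Callers hold priv->lock, so the
 * query is issued with GFP_ATOMIC.
 */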
static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for %pI6\n",
                  path->pathrec.dgid.raw);

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_TRAFFIC_CLASS |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
                path->query = NULL;
                complete(&path->done);
                return path->query_id;
        }

        return 0;
}

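/*
 * The 20-byte IPoIB hardware address (n->ha / phdr->hwaddr) is laid out as
 * one reserved/flags byte, a 3-byte QPN and the 16-byte destination GID,
 * which is why the GID is taken at offset 4 and multicast is detected by
 * the 0xff GID prefix at byte 4.
 */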
/* called with rcu_read_lock */
static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
        unsigned long flags;

        neigh = ipoib_neigh_alloc(n, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        spin_lock_irqsave(&priv->lock, flags);

        path = __path_find(dev, n->ha + 4);
        if (!path) {
                path = path_rec_create(dev, n->ha + 4);
                if (!path)
                        goto err_path;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;
                memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                       sizeof(union ib_gid));

                if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                        if (!ipoib_cm_get(neigh))
                                ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
                        if (!ipoib_cm_get(neigh)) {
                                list_del(&neigh->list);
                                if (neigh->ah)
                                        ipoib_put_ah(neigh->ah);
                                ipoib_neigh_free(dev, neigh);
                                goto err_drop;
                        }
                        if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
                                __skb_queue_tail(&neigh->queue, skb);
                        else {
                                ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
                                           skb_queue_len(&neigh->queue));
                                goto err_drop;
                        }
                } else {
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_send(dev, skb, path->ah, IPOIB_QPN(n->ha));
                        return;
                }
        } else {
                neigh->ah  = NULL;

                if (!path->query && path_rec_start(dev, path))
                        goto err_list;

                __skb_queue_tail(&neigh->queue, skb);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        return;

err_list:
        list_del(&neigh->list);

err_path:
        ipoib_neigh_free(dev, neigh);
err_drop:
        ++dev->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock_irqrestore(&priv->lock, flags);
}

/* called with rcu_read_lock */
static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (n->ha[4] != 0xff) {
                neigh_add_path(skb, n, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        n->ha[8] = (priv->pkey >> 8) & 0xff;
        n->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, n->ha + 4, skb);
}

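/*
 * Send a packet that arrived in ipoib_start_xmit() without a neighbour
 * entry, using the destination stashed in the pseudoheader by
 * ipoib_hard_header().  In practice these are ARP or RARP replies sent to
 * a unicast GID.
 */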
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        path = __path_find(dev, phdr->hwaddr + 4);
        if (!path || !path->valid) {
                int new_path = 0;

                if (!path) {
                        path = path_rec_create(dev, phdr->hwaddr + 4);
                        new_path = 1;
                }
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (!path->query && path_rec_start(dev, path)) {
                                spin_unlock_irqrestore(&priv->lock, flags);
                                if (new_path)
                                        path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
                return;
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

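/*
 * Transmit entry point.  Packets with a dst neighbour go through the
 * per-neighbour state (creating it via ipoib_path_lookup() on first use);
 * packets without one carry an ipoib_pseudoheader with the destination
 * hardware address and are routed by GID instead.
 */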
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        struct neighbour *n = NULL;
        unsigned long flags;

        rcu_read_lock();
        if (likely(skb_dst(skb))) {
                n = dst_get_neighbour_noref(skb_dst(skb));
                if (!n) {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        goto unlock;
                }
        }
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
                        ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }

                neigh = *to_ipoib_neigh(n);

                if (unlikely((memcmp(&neigh->dgid.raw,
                                     n->ha + 4,
                                     sizeof(union ib_gid))) ||
                             (neigh->dev != dev))) {
                        spin_lock_irqsave(&priv->lock, flags);
                        /*
                         * It's safe to call ipoib_put_ah() inside
                         * priv->lock here, because we know that
                         * path->ah will always hold one more reference,
                         * so ipoib_put_ah() will never do more than
                         * decrement the ref count.
                         */
                        if (neigh->ah)
                                ipoib_put_ah(neigh->ah);
                        list_del(&neigh->list);
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }

                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
                                goto unlock;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
                        goto unlock;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock_irqsave(&priv->lock, flags);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
                                           skb_dst(skb) ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           IPOIB_QPN(phdr->hwaddr),
                                           phdr->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
                                goto unlock;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }
unlock:
        rcu_read_unlock();
        return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             const void *daddr, const void *saddr, unsigned len)
{
        struct ipoib_header *header;
        struct dst_entry *dst;
        struct neighbour *n;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
                n = dst_get_neighbour_noref_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

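/*
 * neigh_cleanup hook (installed via ipoib_neigh_setup_dev()): called by the
 * core neighbour code when a neighbour entry is destroyed, so we tear down
 * the attached ipoib_neigh and drop its address handle reference.
 */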
static void ipoib_neigh_cleanup(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        neigh = *to_ipoib_neigh(n);
        if (neigh)
                priv = netdev_priv(neigh->dev);
        else
                return;
        ipoib_dbg(priv,
                  "neigh_cleanup for %06x %pI6\n",
                  IPOIB_QPN(n->ha),
                  n->ha + 4);

        spin_lock_irqsave(&priv->lock, flags);

        if (neigh->ah)
                ah = neigh->ah;
        list_del(&neigh->list);
        ipoib_neigh_free(n->dev, neigh);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
                                      struct net_device *dev)
{
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh)
                return NULL;

        neigh->neighbour = neighbour;
        neigh->dev = dev;
        memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
        *to_ipoib_neigh(neighbour) = neigh;
        skb_queue_head_init(&neigh->queue);
        ipoib_cm_set(neigh, NULL);

        return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
        struct sk_buff *skb;
        *to_ipoib_neigh(neigh->neighbour) = NULL;
        while ((skb = __skb_dequeue(&neigh->queue))) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
        if (ipoib_cm_get(neigh))
                ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
        kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_cleanup = ipoib_neigh_cleanup;

        return 0;
}

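/*
 * Per-port initialization: allocate the RX ring (kzalloc) and TX ring
 * (vzalloc), then set up the IB resources via ipoib_ib_dev_init(); on
 * failure everything allocated so far is freed again.
 */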
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
                goto out;
        }

        priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head, tx_tail & tx_outstanding are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        vfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        vfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
        .ndo_open                = ipoib_open,
        .ndo_stop                = ipoib_stop,
        .ndo_change_mtu          = ipoib_change_mtu,
        .ndo_fix_features        = ipoib_fix_features,
        .ndo_start_xmit          = ipoib_start_xmit,
        .ndo_tx_timeout          = ipoib_timeout,
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
        .ndo_neigh_setup         = ipoib_neigh_setup_dev,
};

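/*
 * net_device setup routine used by alloc_netdev() in ipoib_intf_alloc():
 * wires up the ops, NAPI, link-layer parameters (ARPHRD_INFINIBAND, 20-byte
 * addresses) and initializes the private state and work items.
 */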
static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->netdev_ops          = &ipoib_netdev_ops;
        dev->header_ops          = &ipoib_header_ops;

        ipoib_set_ethtool_ops(dev);

        netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

        dev->watchdog_timeo      = HZ;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
        dev->features            = (NETIF_F_VLAN_CHALLENGED     |
                                    NETIF_F_HIGHDMA);
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);

        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
        INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
        INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
        INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
        INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
        INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
        unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

        if (umcast_val > 0) {
                set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
                ipoib_warn(priv, "ignoring multicast groups joined directly "
                                "by userspace\n");
        } else
                clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

        return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

        return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
        struct ib_device_attr *device_attr;
        int result = -ENOMEM;

        device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
        if (!device_attr) {
                printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
                       hca->name, sizeof *device_attr);
                return result;
        }

        result = ib_query_device(hca, device_attr);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
                       hca->name, result);
                kfree(device_attr);
                return result;
        }
        priv->hca_caps = device_attr->device_cap_flags;

        kfree(device_attr);

        if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
                priv->dev->hw_features = NETIF_F_SG |
                        NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (priv->hca_caps & IB_DEVICE_UD_TSO)
                        priv->dev->hw_features |= NETIF_F_TSO;

                priv->dev->features |= priv->dev->hw_features;
        }

        return 0;
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        struct ib_port_attr attr;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);
        priv->dev->dev_id = port - 1;

        if (!ib_query_port(hca, port, &attr))
                priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
        else {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
                       hca->name, port);
                goto device_init_failed;
        }

        /* MTU will be reset when mcast join happens */
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;

        priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        if (ipoib_set_dev_features(priv, hca))
                goto device_init_failed;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_cm_add_mode_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_umcast_attr(priv->dev))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_workqueue(ipoib_workqueue);

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

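/*
 * ib_client add callback: create one "ib%d" net_device per IB port of the
 * device (switches expose only port 0) and remember them in a per-device
 * list stored as client data, so ipoib_remove_one() can tear them down
 * again.
 */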
static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
                        continue;
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);

                rtnl_lock();
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();

                flush_workqueue(ipoib_workqueue);

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
        int ret;

        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

        /*
         * When copying small received packets, we only copy from the
         * linear data part of the SKB, so we rely on this condition.
         */
        BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ib_sa_register_client(&ipoib_sa_client);

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_sa;

        return 0;

err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);