/*
 * (gitweb capture header, preserved for provenance)
 * rdma.m4: check if debugfs_create_bool() uses a bool or u32 pointer
 * [~tnikolova/compat/.git] / compat / compat-3.9.c
 */
1 /*
2  * Copyright 2013  Mellanox Technologies Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * Compatibility file for Linux RDMA for kernels 3.9.
9  */
10
11 #include <linux/skbuff.h>
12 #include <linux/export.h>
13 #include <linux/ip.h>
14 #include <linux/ipv6.h>
15 #include <linux/if_vlan.h>
16 #include <net/ip.h>
17 #include <net/ipv6.h>
18 #include <linux/igmp.h>
19 #include <linux/icmp.h>
20 #include <linux/sctp.h>
21 #include <linux/dccp.h>
22 #include <linux/if_tunnel.h>
23 #include <linux/if_pppox.h>
24 #include <linux/ppp_defs.h>
25 #include <net/flow_keys.h>
26
#ifdef CONFIG_XPS
/*
 * Salt mixed into the XPS flow hash in get_xps_queue() below.
 * NOTE(review): nothing in this file ever seeds this value, so it keeps
 * its zero static initializer unless set elsewhere; upstream seeds its
 * equivalent with a random value at init time — confirm this is intended.
 */
static u32 hashrnd __read_mostly;
#endif
30
#ifndef CONFIG_COMPAT_NETIF_HAS_PICK_TX
#define get_xps_queue LINUX_BACKPORT(get_xps_queue)
/*
 * get_xps_queue() - select a TX queue for @skb via XPS (transmit packet
 * steering), i.e. the per-CPU queue maps attached to @dev. Backport for
 * kernels that lack __netdev_pick_tx().
 *
 * Returns the chosen queue index, or -1 when XPS is compiled out, no map
 * is installed for the current CPU, or the mapped index is out of range.
 */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	/* dev->xps_maps and the per-CPU map are RCU-protected. */
	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				/* Single queue mapped to this CPU. */
				queue_index = map->queues[0];
			else {
				u32 hash;
				/* Prefer the socket's flow hash; fall back
				 * to protocol ^ rxhash for unhashed skbs. */
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				/* Scale the 32-bit hash into [0, map->len)
				 * without a modulo: (hash * len) >> 32. */
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			/* The device's queue count may have shrunk since
			 * the map was built; drop stale indices. */
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;	/* XPS compiled out: caller must fall back. */
#endif
}
70
#define __netdev_pick_tx LINUX_BACKPORT(__netdev_pick_tx)
/*
 * __netdev_pick_tx() - choose a TX queue for @skb. Reuses the queue
 * cached on the socket when it is still in range; otherwise selects via
 * XPS (get_xps_queue()) and falls back to skb_tx_hash(), caching the new
 * choice on the socket when its dst still matches the skb's.
 *
 * Backport of the 3.9 net-core helper for older kernels.
 */
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	int new_index;
#ifdef CONFIG_COMPAT_SOCK_HAS_QUEUE
	struct sock *sk = skb->sk;
	/* sk_tx_queue_get() returns -1 when no queue is cached. */
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) {
#ifdef CONFIG_COMPAT_NETIF_IS_XPS
		/* ooo_okay set means the flow tolerates reordering, so a
		 * fresh (possibly different) queue may be picked below. */
		if (!skb->ooo_okay)
#endif /* CONFIG_COMPAT_NETIF_IS_XPS */
			return queue_index;
	}
#endif /* CONFIG_COMPAT_SOCK_HAS_QUEUE */

	new_index = get_xps_queue(dev, skb);
	if (new_index < 0)
		new_index = skb_tx_hash(dev, skb);

#ifdef CONFIG_COMPAT_SOCK_HAS_QUEUE
	/* Cache the decision on the socket, but only while the socket's
	 * dst is still the one this skb was routed through.
	 * NOTE(review): sk->sk_dst_cache is read with rcu_dereference()
	 * without a visible rcu_read_lock() here; upstream uses
	 * rcu_dereference_check() relying on the socket lock — confirm
	 * callers provide the required context. */
	if (queue_index != new_index && sk) {
		struct dst_entry *dst = rcu_dereference(sk->sk_dst_cache);
		if (dst && skb_dst(skb) == dst)
			sk_tx_queue_set(sk, new_index);
	}
#endif /* CONFIG_COMPAT_SOCK_HAS_QUEUE */

	return new_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
#endif /* CONFIG_COMPAT_NETIF_HAS_PICK_TX */
103
#ifndef CONFIG_COMPAT_NETIF_HAS_SET_XPS_QUEUE
#define netif_set_xps_queue LINUX_BACKPORT(netif_set_xps_queue)
/*
 * netif_set_xps_queue() - backport that programs the XPS CPU mask @msk
 * for TX queue @idx by locating the queue kobject's "xps_cpus" sysfs
 * attribute and invoking its ->store() with the printed bitmap, just as
 * a userspace write to the sysfs file would.
 *
 * Returns 0 on success, -ENOMEM/-EINVAL on failure, or -1 when the
 * running kernel has no XPS maps (HAVE_XPS_MAP undefined).
 */
int netif_set_xps_queue(struct net_device *dev, struct cpumask *msk, u16 idx)
{
#ifdef HAVE_XPS_MAP
	int i, len, err;
	char buf[MAX_XPS_BUFFER_SIZE];
	struct attribute *attr = NULL;
	struct kobj_type *ktype = NULL;
	struct mlx4_en_netq_attribute *xps_attr = NULL;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, idx);

#ifdef HAVE_NET_DEVICE_EXTENDED_TX_EXT
	/* RHEL-style extended netdev: per-queue kobjects live in _tx_ext. */
	struct netdev_tx_queue_extended *txq_ext =
					netdev_extended(dev)->_tx_ext + idx;
	ktype = txq_ext->kobj.ktype;
#else /* HAVE_NET_DEVICE_EXTENDED_TX_EXT */
	ktype = txq->kobj.ktype;
#endif /* HAVE_NET_DEVICE_EXTENDED_TX_EXT */
	if (!ktype)
		return -ENOMEM;

	/* Scan the queue kobject's default attributes for "xps_cpus". */
	for (i = 0; (attr = ktype->default_attrs[i]); i++) {
		if (!strcmp("xps_cpus", attr->name))
			break;
	}
	if (!attr)
		return -EINVAL;

	/* Render the cpumask as the hex bitmap string sysfs expects. */
	len = bitmap_scnprintf(buf, MAX_XPS_BUFFER_SIZE,
			       cpumask_bits(msk), MAX_XPS_CPUS);
	if (!len)
		return -ENOMEM;

	xps_attr = to_netq_attr(attr);
	err = xps_attr->store(txq, xps_attr, buf, len);
	/* NOTE(review): sysfs-style ->store() implementations usually
	 * return the consumed byte count (> 0) on success and a negative
	 * errno on failure; "if (err)" would then treat success as
	 * failure. Confirm this store() returns 0 on success before
	 * relying on the result. */
	if (err)
		return -EINVAL;

	return 0;
#else /* HAVE_XPS_MAP */
	return -1;	/* No XPS map support on this kernel. */
#endif /* HAVE_XPS_MAP */
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif /* CONFIG_COMPAT_NETIF_HAS_SET_XPS_QUEUE */