1 #ifndef LINUX_26_27_COMPAT_H
2 #define LINUX_26_27_COMPAT_H
4 #include <linux/version.h>
6 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
8 #include <linux/debugfs.h>
9 #include <linux/list.h>
10 #include <linux/pci.h>
11 #include <linux/dma-mapping.h>
12 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
13 #include <linux/mmc/sdio.h>
14 #include <linux/mmc/sdio_func.h>
15 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */
16 #include <linux/netdevice.h>
17 #include <linux/workqueue.h>
18 #include <net/iw_handler.h>
19 #include <asm-generic/bug.h>
20 #include <linux/wireless.h>
21 #include <linux/skbuff.h>
22 #include <net/sch_generic.h>
23 #include <linux/ethtool.h>
25 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
31 * Backports 378a2f09 and c27f339a
32 * This may need a bit more work.
34 enum net_xmit_qdisc_t {
35 __NET_XMIT_STOLEN = 0x00010000,
36 __NET_XMIT_BYPASS = 0x00020000,
44 static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
46 return (struct qdisc_skb_cb *)skb->cb;
49 static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
51 return qdisc_skb_cb(skb)->pkt_len;
/* Bit offset of the PME-support field in the PCI PM capabilities (PMC)
 * register; this constant only appears upstream in kernels >= 2.6.27,
 * so it is provided here for the older kernels this header targets. */
54 #define PCI_PM_CAP_PME_SHIFT 11
56 /* I can't find a more suitable replacement... */
/* NOTE(review): cancel_work_sync() cancels pending work rather than
 * waiting for it to run, so on pre-2.6.27 kernels callers relying on
 * true flush semantics may see queued work silently dropped. */
57 #define flush_work(work) cancel_work_sync(work)
66 * On older kernels we do not have net_device Multi Queue support, but
67 * since we no longer use MQ on mac80211 we can simply use the 0 queue.
68 * Note that if other fullmac drivers make use of this they then need
69 * to be backported somehow or deal with just 1 queue from MQ.
71 static inline void netif_tx_wake_all_queues(struct net_device *dev)
73 netif_wake_queue(dev);
75 static inline void netif_tx_start_all_queues(struct net_device *dev)
77 netif_start_queue(dev);
79 static inline void netif_tx_stop_all_queues(struct net_device *dev)
81 netif_stop_queue(dev);
84 /* Are all TX queues of the device empty? */
85 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
87 return skb_queue_empty(&dev->qdisc->q);
90 #define pci_pme_capable LINUX_BACKPORT(pci_pme_capable)
91 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
94 * The net_device has a spin_lock on newer kernels; on older kernels we're out of luck
96 #define netif_addr_lock_bh(dev)
97 #define netif_addr_unlock_bh(dev)
100 * To port this properly we'd have to port warn_slowpath_null(),
101 * which I'm too lazy to do, so just do a regular print for now. If you
102 * want to port this read kernel/panic.c
104 #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
106 /* This is ported directly as-is on newer kernels */
108 #define WARN(condition, format...) ({ \
109 int __ret_warn_on = !!(condition); \
110 if (unlikely(__ret_warn_on)) \
111 __WARN_printf(format); \
112 unlikely(__ret_warn_on); \
116 /* On 2.6.27 a second argument was added, on older kernels we ignore it */
/* The self-reference below is deliberate and safe: inside a function-like
 * macro's own expansion the macro name is painted blue and not expanded
 * again, so this resolves to the kernel's original one-argument
 * dma_mapping_error(). */
117 #define dma_mapping_error(pdev, dma_addr) dma_mapping_error(dma_addr)
118 #define pci_dma_mapping_error(pdev, dma_addr) dma_mapping_error(pdev, dma_addr)
120 /* This is from include/linux/ieee80211.h */
121 #define IEEE80211_HT_CAP_DSSSCCK40 0x1000
123 /* New link list changes added as of 2.6.27, needed for ath9k */
125 static inline void __list_cut_position(struct list_head *list,
126 struct list_head *head, struct list_head *entry)
128 struct list_head *new_first = entry->next;
129 list->next = head->next;
130 list->next->prev = list;
133 head->next = new_first;
134 new_first->prev = head;
138 * list_cut_position - cut a list into two
139 * @list: a new list to add all removed entries
140 * @head: a list with entries
141 * @entry: an entry within head, could be the head itself
142 * and if so we won't cut the list
144 * This helper moves the initial part of @head, up to and
145 * including @entry, from @head to @list. You should
146 * pass on @entry an element you know is on @head. @list
147 * should be an empty list or a list you do not care about
151 static inline void list_cut_position(struct list_head *list,
152 struct list_head *head, struct list_head *entry)
154 if (list_empty(head))
156 if (list_is_singular(head) &&
157 (head->next != entry && head != entry))
160 INIT_LIST_HEAD(list);
162 __list_cut_position(list, head, entry);
166 /* __list_splice as re-implemented on 2.6.27, we backport it */
167 static inline void __compat_list_splice_new_27(const struct list_head *list,
168 struct list_head *prev,
169 struct list_head *next)
171 struct list_head *first = list->next;
172 struct list_head *last = list->prev;
182 * list_splice_tail - join two lists, each list being a queue
183 * @list: the new list to add.
184 * @head: the place to add it in the first list.
186 static inline void list_splice_tail(struct list_head *list,
187 struct list_head *head)
189 if (!list_empty(list))
190 __compat_list_splice_new_27(list, head->prev, head);
194 * list_splice_tail_init - join two lists and reinitialise the emptied list
195 * @list: the new list to add.
196 * @head: the place to add it in the first list.
198 * Each of the lists is a queue.
199 * The list at @list is reinitialised
201 static inline void list_splice_tail_init(struct list_head *list,
202 struct list_head *head)
204 if (!list_empty(list)) {
205 __compat_list_splice_new_27(list, head->prev, head);
206 INIT_LIST_HEAD(list);
210 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
211 #define mmc_align_data_size LINUX_BACKPORT(mmc_align_data_size)
212 extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
213 #define sdio_align_size LINUX_BACKPORT(sdio_align_size)
214 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
215 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */
/* 2.6.27 added a struct iw_request_info * as the first argument of the
 * iwe_stream_add_*() helpers.  On older kernels we drop the `info'
 * argument and forward to the original functions; the macro name inside
 * its own expansion is not re-expanded, so no recursion occurs. */
217 #define iwe_stream_add_value(info, event, value, ends, iwe, event_len) iwe_stream_add_value(event, value, ends, iwe, event_len)
218 #define iwe_stream_add_point(info, stream, ends, iwe, extra) iwe_stream_add_point(stream, ends, iwe, extra)
219 #define iwe_stream_add_event(info, stream, ends, iwe, event_len) iwe_stream_add_event(stream, ends, iwe, event_len)
221 /* Flags available in struct iw_request_info */
222 #define IW_REQUEST_FLAG_COMPAT 0x0001 /* Compat ioctl call */
224 static inline int iwe_stream_lcp_len(struct iw_request_info *info)
227 if (info->flags & IW_REQUEST_FLAG_COMPAT)
228 return IW_EV_COMPAT_LCP_LEN;
230 return IW_EV_LCP_LEN;
236 * The caller asks to handle a range between offset and offset + size,
237 * but we process a larger range from 0 to offset + size due to lack of
241 static inline void dma_sync_single_range_for_cpu(struct device *dev,
242 dma_addr_t handle, unsigned long offset, size_t size,
243 enum dma_data_direction dir)
245 dma_sync_single_for_cpu(dev, handle, offset + size, dir);
248 static inline void dma_sync_single_range_for_device(struct device *dev,
249 dma_addr_t handle, unsigned long offset, size_t size,
250 enum dma_data_direction dir)
252 dma_sync_single_for_device(dev, handle, offset + size, dir);
/* debugfs_remove_recursive() only exists upstream from 2.6.27 on.
 * LINUX_BACKPORT() (defined elsewhere in this backport tree) mangles the
 * symbol name — presumably to avoid clashes with distro kernels that
 * backported it themselves; verify against the LINUX_BACKPORT definition. */
257 #define debugfs_remove_recursive LINUX_BACKPORT(debugfs_remove_recursive)
259 #if defined(CONFIG_DEBUG_FS)
/* Implementation is provided out-of-line by the compat module. */
260 void debugfs_remove_recursive(struct dentry *dentry);
262 static inline void debugfs_remove_recursive(struct dentry *dentry)
266 #define device_create(cls, parent, devt, drvdata, fmt, ...) \
268 struct device *_dev; \
269 _dev = (device_create)(cls, parent, devt, fmt, __VA_ARGS__); \
270 dev_set_drvdata(_dev, drvdata); \
274 #define dev_name(dev) dev_name((struct device *)dev)
276 static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
279 ep->speed = (__u16)speed;
282 static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
288 * lower_32_bits - return bits 0-31 of a number
289 * @n: the number we're accessing
291 #define lower_32_bits(n) ((u32)(n))
/* No netif_wake_subqueue() before 2.6.27; since this backport only uses
 * a single queue (see the MQ note earlier in this file), aliasing it to
 * netif_start_subqueue is sufficient here. */
293 #define netif_wake_subqueue netif_start_subqueue
295 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) */
297 #endif /* LINUX_26_27_COMPAT_H */