#ifndef LINUX_26_28_COMPAT_H
#define LINUX_26_28_COMPAT_H

#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28))

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/usb.h>
#include <linux/types.h>
#include <linux/cpumask.h>

#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */

#include <linux/pci.h>

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } compat_cpumask_t;

#if defined(CONFIG_X86) || defined(CONFIG_X86_64) || defined(CONFIG_PPC)
/*
 * CONFIG_PHYS_ADDR_T_64BIT was added as new to all architectures
 * as of 2.6.28, but x86 and ppc already had it. x86 only gained
 * phys_addr_t as of 2.6.25, and that is backported in compat-2.6.25.h.
 */
#else
#if defined(CONFIG_64BIT) || defined(CONFIG_X86_PAE) || defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT)
#define CONFIG_PHYS_ADDR_T_64BIT 1
typedef u64 phys_addr_t;
#else
typedef u32 phys_addr_t;
#endif

#endif /* non x86 and ppc */

#ifndef WARN_ONCE
#define WARN_ONCE(condition, format...) ({			\
	static int __warned;					\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once))				\
		if (WARN(!__warned, format))			\
			__warned = 1;				\
	unlikely(__ret_warn_once);				\
})
#endif /* From include/asm-generic/bug.h */

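/*
 * Usage sketch (not from any particular driver): the backported WARN_ONCE()
 * evaluates to the truth value of the condition, so it can be tested
 * directly, and the splat is printed at most once. "max_len" is a made-up
 * name for illustration.
 *
 *	if (WARN_ONCE(len > max_len, "oversized frame: %d\n", len))
 *		return -EINVAL;
 */
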
#define pci_ioremap_bar LINUX_BACKPORT(pci_ioremap_bar)
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);

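/*
 * Illustrative sketch only: pci_ioremap_bar() maps an entire BAR and returns
 * NULL on failure, so a probe path typically looks like the lines below
 * ("priv" and BAR 0 are assumptions, not taken from a real driver).
 *
 *	priv->regs = pci_ioremap_bar(pdev, 0);
 *	if (!priv->regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(priv->regs);
 */
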
/**
 * skb_queue_is_last - check if skb is the last entry in the queue
 * @list: queue head
 * @skb: buffer
 *
 * Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff *) list);
}

/**
 * skb_queue_next - return the next packet in the queue
 * @list: queue head
 * @skb: current buffer
 *
 * Return the next packet in @list after @skb.  It is only valid to
 * call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

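/*
 * Minimal sketch showing how the two helpers above pair up to walk a queue
 * without dequeueing anything ("inspect" is a hypothetical helper):
 *
 *	struct sk_buff *skb = skb_peek(&queue);
 *
 *	while (skb) {
 *		inspect(skb);
 *		if (skb_queue_is_last(&queue, skb))
 *			break;
 *		skb = skb_queue_next(&queue, skb);
 *	}
 */
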
/**
 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 * @list: queue to initialize
 *
 * This initializes only the list and queue length aspects of
 * an sk_buff_head object.  It allows the list aspects of an
 * sk_buff_head to be initialized without reinitializing things
 * like the spinlock.  It can also be used for on-stack sk_buff_head
 * objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 * skb_queue_splice - join two skb lists, this is designed for stacks
 * @list: the new list to add
 * @head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 * @list: the new list to add
 * @head: the place to add it in the first list
 *
 * The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 * @list: the new list to add
 * @head: the place to add it in the first list
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
} /* From include/linux/skbuff.h */

/**
 * skb_queue_splice_tail - join two skb lists, each list being a queue
 * @list: the new list to add
 * @head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

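/*
 * Rough usage sketch for the splice helpers: frames are batched on an
 * on-stack queue and appended to a shared queue under its lock in one step.
 * "local" and "dev_queue" are illustrative names only.
 *
 *	struct sk_buff_head local;
 *
 *	__skb_queue_head_init(&local);
 *	... __skb_queue_tail(&local, skb) for each collected frame ...
 *
 *	spin_lock_bh(&dev_queue.lock);
 *	skb_queue_splice_tail_init(&local, &dev_queue);
 *	spin_unlock_bh(&dev_queue.lock);
 */
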
#define skb_queue_walk_from(queue, skb)					\
	for (; skb != (struct sk_buff *)(queue);			\
	     skb = skb->next)

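/*
 * Hypothetical example of skb_queue_walk_from(): resume iteration from a
 * previously remembered position instead of from the head.  The macro
 * assumes "skb" already points into the queue (or at the queue head itself);
 * "priv->resume_skb", "priv->txq" and account() are made-up names.
 *
 *	struct sk_buff *skb = priv->resume_skb;
 *
 *	skb_queue_walk_from(&priv->txq, skb)
 *		account(skb);
 */
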
#ifndef DECLARE_TRACE

#define TP_PROTO(args...)	args
#define TP_ARGS(args...)	args

#define DECLARE_TRACE(name, proto, args)				\
	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
	{ }								\
	static inline void trace_##name(proto)				\
	{ }								\
	static inline int register_trace_##name(void (*probe)(proto))	\
	{								\
		return -ENOSYS;						\
	}								\
	static inline int unregister_trace_##name(void (*probe)(proto))	\
	{								\
		return -ENOSYS;						\
	}

#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)

#endif /* DECLARE_TRACE */

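/*
 * On pre-tracepoint kernels these stubs let tracepoint users compile while
 * doing nothing.  A caller would still use the normal pattern (the "drv_tx"
 * name is purely illustrative):
 *
 *	DECLARE_TRACE(drv_tx, TP_PROTO(struct sk_buff *skb), TP_ARGS(skb));
 *
 *	trace_drv_tx(skb);               (expands to a no-op here)
 *	register_trace_drv_tx(probe);    (returns -ENOSYS here)
 */
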
#define round_jiffies_up LINUX_BACKPORT(round_jiffies_up)
unsigned long round_jiffies_up(unsigned long j);

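/*
 * Sketch only: like round_jiffies(), but the backported round_jiffies_up()
 * never rounds the timeout below the requested value, which matters for
 * minimum-delay timers.  "priv->watchdog" is an assumed timer.
 *
 *	mod_timer(&priv->watchdog, round_jiffies_up(jiffies + 5 * HZ));
 */
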
extern void v2_6_28_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
				    int off, int size);

#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))

#define n_tty_ioctl_helper LINUX_BACKPORT(n_tty_ioctl_helper)
extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
			      unsigned int cmd, unsigned long arg);

#define pci_wake_from_d3 LINUX_BACKPORT(pci_wake_from_d3)
int pci_wake_from_d3(struct pci_dev *dev, bool enable);

#define alloc_workqueue(name, flags, max_active) __create_workqueue(name, flags, max_active)

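/*
 * Note that this simply passes "flags" and "max_active" through to the old
 * __create_workqueue() constructor, so only simple callers translate
 * cleanly.  A hedged example (queue name and work item are arbitrary):
 *
 *	struct workqueue_struct *wq = alloc_workqueue("compat_wq", 0, 1);
 *
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &priv->work);
 *	destroy_workqueue(wq);
 */
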
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)) */

#endif /* LINUX_26_28_COMPAT_H */