#ifndef LINUX_26_36_COMPAT_H
#define LINUX_26_36_COMPAT_H

#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))

#include <linux/pm_qos_params.h>
#include <linux/smp_lock.h>
#ifndef kparam_block_sysfs_write
#define kparam_block_sysfs_write(a)
#endif
#ifndef kparam_unblock_sysfs_write
#define kparam_unblock_sysfs_write(a)
#endif
/* mask va_format as RHEL6 backports this */
#define va_format compat_va_format

/* device_rename() takes a non-const name on older kernels */
#define device_rename(dev, new_name) device_rename(dev, (char *)new_name)

struct pm_qos_request_list {
	u32 qos;
	void *request;
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))

#define pm_qos_add_request(_req, _class, _value) do {			\
	(_req)->request = #_req;					\
	(_req)->qos = _class;						\
	pm_qos_add_requirement((_class), (_req)->request, (_value));	\
    } while (0)

#define pm_qos_update_request(_req, _value)				\
	pm_qos_update_requirement((_req)->qos, (_req)->request, (_value))

#define pm_qos_remove_request(_req)				\
	pm_qos_remove_requirement((_req)->qos, (_req)->request)
#else

#define pm_qos_add_request(_req, _class, _value) do {			\
	(_req)->request = pm_qos_add_request((_class), (_value));	\
    } while (0)

#define pm_qos_update_request(_req, _value)				\
	pm_qos_update_request((_req)->request, (_value))

#define pm_qos_remove_request(_req)				\
	pm_qos_remove_request((_req)->request)

#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)) */
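/*
 * Illustrative usage sketch (a hedged example, not part of the original
 * header): a driver written against the 2.6.35+ request-based PM QoS API
 * can use the same calls on both branches above, e.g. (foo_qos_req is a
 * hypothetical name):
 *
 *	static struct pm_qos_request_list foo_qos_req;
 *
 *	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 100);
 *	pm_qos_update_request(&foo_qos_req, 50);
 *	pm_qos_remove_request(&foo_qos_req);
 */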
#ifdef CONFIG_COMPAT_NO_PRINTK_NEEDED
/*
 * Dummy printk for disabled debugging statements to use whilst maintaining
 * gcc's format and side-effect checking.
 */
static inline __attribute__ ((format (printf, 1, 2)))
int no_printk(const char *s, ...) { return 0; }
#endif /* CONFIG_COMPAT_NO_PRINTK_NEEDED */
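/*
 * Illustrative usage sketch (a hedged example, not part of the original
 * header): a driver's debug macro can fall back to no_printk() when
 * debugging is compiled out, keeping gcc's format-string checking, e.g.
 * (foo_dbg is a hypothetical macro):
 *
 *	#ifdef DEBUG
 *	#define foo_dbg(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
 *	#else
 *	#define foo_dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 *	#endif
 */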
#ifndef alloc_workqueue
#define alloc_workqueue(name, flags, max_active) __create_workqueue(name, flags, max_active, 0)
#endif

#define EXTPROC		0200000
#define TIOCPKT_IOCTL	64
static inline void tty_lock(void) __acquires(kernel_lock)
{
#ifdef CONFIG_LOCK_KERNEL
	/* kernel_locked is 1 for !CONFIG_LOCK_KERNEL */
	WARN_ON(kernel_locked());
#endif
	lock_kernel();
}

static inline void tty_unlock(void) __releases(kernel_lock)
{
	unlock_kernel();
}

#define tty_locked()		(kernel_locked())

/* usleep_range() takes microseconds; approximate it with msleep() */
#define usleep_range(_min, _max)	msleep((_max) / 1000)
static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#define skb_tx_timestamp LINUX_BACKPORT(skb_tx_timestamp)
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
}
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 */
#define system_wq LINUX_BACKPORT(system_wq)
extern struct workqueue_struct *system_wq;
#define system_long_wq LINUX_BACKPORT(system_long_wq)
extern struct workqueue_struct *system_long_wq;
#define system_nrt_wq LINUX_BACKPORT(system_nrt_wq)
extern struct workqueue_struct *system_nrt_wq;
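/*
 * Illustrative usage sketch (a hedged example, not part of the original
 * header): with the backported system workqueues a driver queues work the
 * same way as on 2.6.36+, e.g. (my_work/my_work_fn are hypothetical):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(system_nrt_wq, &my_work);
 *	flush_workqueue(system_nrt_wq);
 */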
int backport_system_workqueue_create(void);
void backport_system_workqueue_destroy(void);

#define schedule_work LINUX_BACKPORT(schedule_work)
int schedule_work(struct work_struct *work);
#define schedule_work_on LINUX_BACKPORT(schedule_work_on)
int schedule_work_on(int cpu, struct work_struct *work);
#define schedule_delayed_work LINUX_BACKPORT(schedule_delayed_work)
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay);
#define schedule_delayed_work_on LINUX_BACKPORT(schedule_delayed_work_on)
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork,
			     unsigned long delay);
#define flush_scheduled_work LINUX_BACKPORT(flush_scheduled_work)
void flush_scheduled_work(void);
#ifndef CONFIG_COMPAT_IS_WORK_BUSY
enum {
	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
#endif

#define work_busy LINUX_BACKPORT(work_busy)
extern unsigned int work_busy(struct work_struct *work);

#define br_port_exists(dev)	(dev->br_port)
#else

static inline int backport_system_workqueue_create(void)
{
	return 0;
}

static inline void backport_system_workqueue_destroy(void)
{
}
/*
 * This is not part of the 2.6.37 kernel yet, but we use it to optimize
 * the backport code we need to implement. Instead of using ifdefs to
 * check which kernel version we are on, we just replace all such checks
 * in current code with this. I'll submit this upstream too; that way all
 * we'd have to do is implement it for older kernels, and we would not
 * have to edit the upstream code for backport efforts.
 */
#define br_port_exists(dev)	(dev->priv_flags & IFF_BRIDGE_PORT)
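/*
 * Illustrative usage sketch (a hedged example, not part of the original
 * header): callers can then test for a bridge port the same way on every
 * kernel, e.g.:
 *
 *	if (br_port_exists(dev))
 *		return -EBUSY;
 */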
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) */

#endif /* LINUX_26_36_COMPAT_H */