2 * Copyright 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * Compatibility file for Linux wireless for kernels 2.6.28.
11 #include <linux/compat.h>
12 #include <linux/usb.h>
13 #include <linux/tty.h>
16 /* 2.6.28 compat code goes here */
18 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
19 #if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
21 * Compat-wireless notes for USB backport stuff:
23 * urb->reject exists on 2.6.27, the poison/unpoison helpers
24 * did not though. The anchor poison does not exist so we cannot use them.
26 * USB anchor poisoning seems to exist to prevent future driver submissions
27 * of usb_anchor_urb() to an anchor marked as poisoned. For older kernels
28 * we cannot use that, so new usb_anchor_urb()s will be anchored. The down
29 * side to this should be submission of URBs will continue being anchored
30 * on an anchor instead of having them being rejected immediately when the
31 * driver realized we needed to stop. For ar9170 we poison URBs upon the
32 * ar9170 mac80211 stop callback(), don't think this should be so bad.
33 * It means there is a period of time in older kernels for which we continue
34 * to anchor new URBs to a known stopped anchor. We have two anchors
/*
 * NOTE(review): the leading numbers on each line are residual line numbers
 * from the original listing, and they jump (62 -> 65 -> 67 ...).  The
 * function braces, the early return after the guard, the urb->reject
 * bookkeeping and the #else/#endif halves of the version guards were
 * dropped.  Restore the full body from the upstream compat-wireless
 * compat-2.6.28.c before attempting to build this file.
 */
40 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
41 * @urb: pointer to URB describing a previously submitted request,
44 * This routine cancels an in-progress request. It is guaranteed that
45 * upon return all completion handlers will have finished and the URB
46 * will be totally idle and cannot be reused. These features make
47 * this an ideal way to stop I/O in a disconnect() callback.
48 * If the request has not already finished or been unlinked
49 * the completion handler will see urb->status == -ENOENT.
51 * After and while the routine runs, attempts to resubmit the URB will fail
52 * with error -EPERM. Thus even if the URB's completion handler always
53 * tries to resubmit, it will not succeed and the URB will become idle.
55 * This routine may not be used in an interrupt context (such as a bottom
56 * half or a completion handler), or when holding a spinlock, or in other
57 * situations where the caller can't schedule().
59 * This routine should not be called by a driver after its disconnect
60 * method has returned.
62 void usb_poison_urb(struct urb *urb)
/* guard: only operate on a URB that has a device and an endpoint attached */
65 if (!(urb && urb->dev && urb->ep))
67 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
68 spin_lock_irq(&usb_reject_lock);
71 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
72 spin_unlock_irq(&usb_reject_lock);
75 * XXX: usb_hcd_unlink_urb() needs backporting... this is defined
76 * on usb hcd.c but urb.c gets access to it. That is, older kernels
77 * have usb_hcd_unlink_urb() but its not exported, nor can we
78 * re-implement it exactly. This essentially dequeues the urb from
79 * hw, we need to figure out a way to backport this.
81 //usb_hcd_unlink_urb(urb, -ENOENT);
/* block until every in-flight user of the URB has dropped its reference */
83 wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
85 EXPORT_SYMBOL_GPL(usb_poison_urb);
87 #endif /* CONFIG_USB */
89 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
91 #include <pcmcia/ds.h>
/*
 * Per-call scratch state for pcmcia_loop_config().  NOTE(review): the
 * listing jumps from line 92 to 96, so the remaining members (tuple,
 * parse buffer, etc. — confirm against upstream) and the closing brace
 * are missing here.
 */
92 struct pcmcia_cfg_mem {
/* last CISTPL_CFTABLE_DEFAULT entry seen; handed to conf_check as fallback */
96 cistpl_cftable_entry_t dflt;
99 * pcmcia_loop_config() - loop over configuration options
100 * @p_dev: the struct pcmcia_device which we need to loop for.
101 * @conf_check: function to call for each configuration option.
102 * It gets passed the struct pcmcia_device, the CIS data
103 * describing the configuration option, and private data
104 * being passed to pcmcia_loop_config()
105 * @priv_data: private data to be passed to the conf_check function.
107 * pcmcia_loop_config() loops over all configuration options, and calls
108 * the driver-specific conf_check() for each one, checking whether
109 * it is a valid one. Returns 0 on success or errorcode otherwise.
/*
 * NOTE(review): interior lines were dropped from this listing (numbers
 * jump 114 -> 119, 125 -> 129, 139 -> 141, ...): the vcc/ret/tuple
 * declarations, the NULL check after kzalloc, the loop header, the
 * per-entry error handling and the cleanup/return tail are all missing.
 * Restore from upstream before building.
 */
111 int pcmcia_loop_config(struct pcmcia_device *p_dev,
112 int (*conf_check) (struct pcmcia_device *p_dev,
113 cistpl_cftable_entry_t *cfg,
114 cistpl_cftable_entry_t *dflt,
119 struct pcmcia_cfg_mem *cfg_mem;
125 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
129 /* get the current Vcc setting */
130 vcc = p_dev->socket->socket.Vcc;
/* walk the CISTPL_CFTABLE_ENTRY tuples, using cfg_mem->buf as data area */
132 tuple = &cfg_mem->tuple;
133 tuple->TupleData = cfg_mem->buf;
134 tuple->TupleDataMax = 255;
135 tuple->TupleOffset = 0;
136 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
137 tuple->Attributes = 0;
139 ret = pcmcia_get_first_tuple(p_dev, tuple);
141 cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry;
143 if (pcmcia_get_tuple_data(p_dev, tuple))
146 if (pcmcia_parse_tuple(tuple, &cfg_mem->parse))
/* remember default entries so later entries can fall back on them */
150 p_dev->conf.ConfigIndex = cfg->index;
151 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
152 cfg_mem->dflt = *cfg;
154 ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data);
159 ret = pcmcia_get_next_tuple(p_dev, tuple);
164 EXPORT_SYMBOL_GPL(pcmcia_loop_config);
166 #endif /* CONFIG_PCMCIA */
168 #if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
/*
 * Undo usb_poison_urb() so the URB can be submitted again.
 * NOTE(review): the body is gutted in this listing (numbers jump
 * 170 -> 172 -> 179 ...): the braces, the "flags" declaration used by
 * the irqsave pair, the guard for a non-poisoned URB and — presumably —
 * the urb->reject decrement are missing; verify against upstream.
 */
170 void usb_unpoison_urb(struct urb *urb)
172 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
179 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
180 spin_lock_irqsave(&usb_reject_lock, flags);
183 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
184 spin_unlock_irqrestore(&usb_reject_lock, flags);
187 EXPORT_SYMBOL_GPL(usb_unpoison_urb);
192 * usb_poison_anchored_urbs - cease all traffic from an anchor
193 * @anchor: anchor the requests are bound to
195 * this allows all outstanding URBs to be poisoned starting
196 * from the back of the queue. Newly added URBs will also be
199 * This routine should not be called by a driver after its disconnect
200 * method has returned.
/*
 * NOTE(review): truncated listing — the opening brace, the "victim"
 * declaration, the get/put reference handling around usb_poison_urb()
 * and the loop's closing brace were dropped (numbers jump 206,
 * 209 -> 211 -> 213).  Restore from upstream before building.
 */
202 void usb_poison_anchored_urbs(struct usb_anchor *anchor)
206 spin_lock_irq(&anchor->lock);
207 // anchor->poisoned = 1; /* XXX: Cannot backport */
/* pop URBs off the tail of the anchor list and poison each in turn */
208 while (!list_empty(&anchor->urb_list)) {
209 victim = list_entry(anchor->urb_list.prev, struct urb,
211 /* we must make sure the URB isn't freed before we kill it*/
/* drop the anchor lock while poisoning: usb_poison_urb() waits (sleeps) */
213 spin_unlock_irq(&anchor->lock);
214 /* this will unanchor the URB */
215 usb_poison_urb(victim);
217 spin_lock_irq(&anchor->lock);
219 spin_unlock_irq(&anchor->lock);
221 EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
225 * usb_anchor_empty - is an anchor empty
226 * @anchor: the anchor you want to query
228 * returns 1 if the anchor has no urbs associated with it
/* NOTE(review): the function braces were dropped from this listing;
 * the body is the single list_empty() check below. */
230 int usb_anchor_empty(struct usb_anchor *anchor)
232 return list_empty(&anchor->urb_list);
235 EXPORT_SYMBOL_GPL(usb_anchor_empty);
236 #endif /* CONFIG_USB */
/*
 * Backport of pci_ioremap_bar(): ioremap a PCI BAR after confirming it is
 * a memory (not I/O) resource.  NOTE(review): the opening brace, the
 * body of the failed-check branch (presumably a warning plus "return
 * NULL" — verify upstream) and the closing brace are missing from this
 * listing (numbers jump 239 -> 242 -> 244 -> 248).
 */
239 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
242 * Make sure the BAR is actually a memory resource, not an IO resource
244 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
248 return ioremap_nocache(pci_resource_start(pdev, bar),
249 pci_resource_len(pdev, bar));
251 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
/*
 * Round a jiffies value toward a whole second, skewed per-CPU so all CPUs
 * do not fire timers simultaneously; when the (missing) force_up flag is
 * set, never round down.  NOTE(review): this block is heavily truncated —
 * the parameter list tail, the "rem" declaration, the skew/rounding
 * arithmetic and every return statement were dropped (numbers jump
 * 253 -> 257 -> 260, 278 -> 283 -> 286); only comments and two guards
 * survive.  Restore from upstream before building.
 */
253 static unsigned long round_jiffies_common(unsigned long j, int cpu,
257 unsigned long original = j;
260 * We don't want all cpus firing their timers at once hitting the
261 * same lock or cachelines, so we skew each extra cpu with an extra
262 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
264 * The skew is done by adding 3*cpunr, then round, then subtract this
265 * extra offset again.
272 * If the target jiffie is just after a whole second (which can happen
273 * due to delays of the timer irq, long irq off times etc etc) then
274 * we should round down to the whole second, not up. Use 1/4th second
275 * as cutoff for this rounding as an extreme upper bound for this.
276 * But never round down if @force_up is set.
278 if (rem < HZ/4 && !force_up) /* round down */
283 /* now that we have rounded, subtract the extra skew again */
/* if rounding pushed the deadline into the past, fall back ("original"
 * was saved above for exactly this case) */
286 if (j <= jiffies) /* rounding ate our timeout entirely; */
292 * round_jiffies_up - function to round jiffies up to a full second
293 * @j: the time in (absolute) jiffies that should be rounded
295 * This is the same as round_jiffies() except that it will never
296 * round down. This is useful for timeouts for which the exact time
297 * of firing does not matter too much, as long as they don't fire too
/* NOTE(review): function braces are missing from this truncated listing.
 * Delegates to round_jiffies_common() with the force-up flag set. */
300 unsigned long round_jiffies_up(unsigned long j)
302 return round_jiffies_common(j, raw_smp_processor_id(), true);
304 EXPORT_SYMBOL_GPL(round_jiffies_up);
/*
 * Backport of skb_add_rx_frag(): attach a page fragment to @skb and grow
 * its byte accounting.  NOTE(review): truncated listing — the parameter
 * list tail ("int size"), the braces, and original line 310 are missing;
 * line 310 presumably did "skb->len += size" (the numbers jump
 * 309 -> 311) — verify against upstream before using.
 */
306 void v2_6_28_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
309 skb_fill_page_desc(skb, i, page, off, size);
311 skb->data_len += size;
312 skb->truesize += size;
314 EXPORT_SYMBOL_GPL(v2_6_28_skb_add_rx_frag);
/*
 * Release the tty atomic-write mutex and wake any POLLOUT waiters on the
 * write queue.  NOTE(review): the function braces were dropped from this
 * truncated listing.
 */
316 void tty_write_unlock(struct tty_struct *tty)
318 mutex_unlock(&tty->atomic_write_lock);
319 wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
/*
 * Acquire the tty atomic-write mutex.  NOTE(review): truncated listing —
 * the ndelay early-return inside the trylock-failure branch, the error
 * return from the interruptible lock, the success return and the braces
 * are all missing; restore from upstream before building.
 */
322 int tty_write_lock(struct tty_struct *tty, int ndelay)
/* fast path failed: either bail out (ndelay callers) or block interruptibly */
324 if (!mutex_trylock(&tty->atomic_write_lock)) {
327 if (mutex_lock_interruptible(&tty->atomic_write_lock))
334 * send_prio_char - send priority character
336 * Send a high priority character to the tty even if stopped
338 * Locking: none for xchar method, write ordering for write method.
/*
 * NOTE(review): truncated listing — the braces, return statements, the
 * #else/#endif lines pairing the 2.6.26 version guards, and the handling
 * around "was_stopped" are missing; only the skeleton survives.
 */
341 static int send_prio_char(struct tty_struct *tty, char ch)
343 int was_stopped = tty->stopped;
/* preferred path: a driver-provided send_xchar needs no write lock */
345 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
346 if (tty->ops->send_xchar) {
347 tty->ops->send_xchar(tty, ch);
349 if (tty->driver->send_xchar) {
350 tty->driver->send_xchar(tty, ch);
/* fallback: serialize with other writers and push the char via write() */
355 if (tty_write_lock(tty, 0) < 0)
360 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
361 tty->ops->write(tty, &ch, 1);
363 tty->driver->write(tty, &ch, 1);
367 tty_write_unlock(tty);
/*
 * Shared ioctl helper for N_TTY-style line disciplines: flow control,
 * STOP/START priority characters, flush, and pty packet mode, falling
 * back to tty_mode_ioctl() for anything else.  NOTE(review): heavily
 * truncated — the switch(cmd) and its case labels, local declarations
 * (retval, pktmode, flags), braces and most return statements were
 * dropped from this listing; verify every path against the upstream
 * compat file before building.
 */
371 int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
372 unsigned int cmd, unsigned long arg)
374 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
/* flow-control requests must pass the job-control check first */
381 retval = tty_check_change(tty);
/* toggle the flow_stopped flag only on an actual state change */
386 if (!tty->flow_stopped) {
387 tty->flow_stopped = 1;
392 if (tty->flow_stopped) {
393 tty->flow_stopped = 0;
/* software flow control via the termios STOP/START characters, when enabled */
398 if (STOP_CHAR(tty) != __DISABLED_CHAR)
399 return send_prio_char(tty, STOP_CHAR(tty));
402 if (START_CHAR(tty) != __DISABLED_CHAR)
403 return send_prio_char(tty, START_CHAR(tty));
410 return tty_perform_flush(tty, arg);
/* packet mode is only meaningful on a pty master */
415 if (tty->driver->type != TTY_DRIVER_TYPE_PTY ||
416 tty->driver->subtype != PTY_TYPE_MASTER)
418 if (get_user(pktmode, (int __user *) arg))
420 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
421 spin_lock_irqsave(&tty->ctrl_lock, flags);
/* reset any pending control status on the peer when toggling packet mode */
426 tty->link->ctrl_status = 0;
430 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
431 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
436 /* Try the mode commands */
437 return tty_mode_ioctl(tty, file, cmd, arg);
440 EXPORT_SYMBOL_GPL(n_tty_ioctl_helper);
443 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
444 * @dev: PCI device to prepare
445 * @enable: True to enable wake-up event generation; false to disable
447 * Many drivers want the device to wake up the system from D3_hot or D3_cold
448 * and this function allows them to set that up cleanly - pci_enable_wake()
449 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
450 * ordering constraints.
452 * This function only returns error code if the device is not capable of
453 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
454 * enable wake-up power for it.
/* NOTE(review): the function braces were dropped from this truncated listing. */
456 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
/* prefer the deepest state the device can signal PME# from (D3cold first) */
458 return pci_pme_capable(dev, PCI_D3cold) ?
459 pci_enable_wake(dev, PCI_D3cold, enable) :
460 pci_enable_wake(dev, PCI_D3hot, enable);
462 EXPORT_SYMBOL_GPL(pci_wake_from_d3);