/*
 * Copyright 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Compatibility file for Linux wireless for kernels 2.6.28.
 */
#include <linux/compat.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28))

#include <linux/usb.h>
#include <linux/tty.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* 2.6.28 compat code goes here */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
/*
 * Compat-wireless notes for USB backport stuff:
 *
 * urb->reject exists on 2.6.27, but the poison/unpoison helpers
 * did not. The anchor poison does not exist there either, so we
 * cannot use it.
 *
 * USB anchor poisoning seems to exist to prevent future driver
 * submissions of usb_anchor_urb() to an anchor marked as poisoned.
 * For older kernels we cannot use that, so new usb_anchor_urb()s will
 * still be anchored. The downside is that submitted URBs keep being
 * anchored on an anchor instead of being rejected immediately once the
 * driver realizes it needs to stop. For ar9170 we poison URBs in the
 * ar9170 mac80211 stop() callback, so this should not be too bad. It
 * does mean there is a period of time on older kernels during which we
 * continue to anchor new URBs to a known stopped anchor. We have two
 * anchors (TX and RX).
 */
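/*
 * Illustrative sketch only (kept out of the build): roughly how an
 * ar9170-style driver is expected to interact with the two anchors under
 * this backport. The names below (struct foo_priv, foo_tx, foo_stop) are
 * hypothetical and not taken from any driver.
 */
#if 0
struct foo_priv {
	struct usb_anchor tx_anchored;
	struct usb_anchor rx_anchored;
};

static int foo_tx(struct foo_priv *priv, struct urb *urb)
{
	int ret;

	/* on older kernels this still succeeds even after foo_stop() */
	usb_anchor_urb(urb, &priv->tx_anchored);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_unanchor_urb(urb);
	return ret;
}

static void foo_stop(struct foo_priv *priv)
{
	/* upstream this would also reject future usb_anchor_urb() calls */
	usb_poison_anchored_urbs(&priv->tx_anchored);
	usb_poison_anchored_urbs(&priv->rx_anchored);
}
#endif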
/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
	spin_lock_irq(&usb_reject_lock);
#endif
	++urb->reject;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
	spin_unlock_irq(&usb_reject_lock);
#endif
	/*
	 * XXX: usb_hcd_unlink_urb() needs backporting... this is defined
	 * in usb hcd.c but urb.c gets access to it. That is, older kernels
	 * have usb_hcd_unlink_urb() but it is not exported, nor can we
	 * re-implement it exactly. This essentially dequeues the urb from
	 * hw; we need to figure out a way to backport this.
	 */
	/* usb_hcd_unlink_urb(urb, -ENOENT); */

	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);
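/*
 * Illustrative sketch only (kept out of the build): stopping I/O from a
 * disconnect() callback with the backported usb_poison_urb(). The names
 * foo_disconnect, struct foo_priv and priv->cmd_urb are hypothetical.
 */
#if 0
static void foo_disconnect(struct usb_interface *intf)
{
	struct foo_priv *priv = usb_get_intfdata(intf);

	/* on return the URB is idle and its completion has finished */
	usb_poison_urb(priv->cmd_urb);
	usb_free_urb(priv->cmd_urb);
}
#endif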
#endif /* CONFIG_USB */

#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
#include <pcmcia/ds.h>

struct pcmcia_cfg_mem {
	tuple_t tuple;
	cisparse_t parse;
	u8 buf[256];
	cistpl_cftable_entry_t dflt;
};
/**
 * pcmcia_loop_config() - loop over configuration options
 * @p_dev: the struct pcmcia_device which we need to loop for.
 * @conf_check: function to call for each configuration option.
 *	It gets passed the struct pcmcia_device, the CIS data
 *	describing the configuration option, and private data
 *	being passed to pcmcia_loop_config()
 * @priv_data: private data to be passed to the conf_check function.
 *
 * pcmcia_loop_config() loops over all configuration options, and calls
 * the driver-specific conf_check() for each one, checking whether
 * it is a valid one. Returns 0 on success or an error code otherwise.
 */
int pcmcia_loop_config(struct pcmcia_device *p_dev,
		       int (*conf_check)(struct pcmcia_device *p_dev,
					 cistpl_cftable_entry_t *cfg,
					 cistpl_cftable_entry_t *dflt,
					 unsigned int vcc,
					 void *priv_data),
		       void *priv_data)
{
	struct pcmcia_cfg_mem *cfg_mem;
	tuple_t *tuple;
	int ret = -ENODEV;
	unsigned int vcc;

	cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
	if (cfg_mem == NULL)
		return -ENOMEM;

	/* get the current Vcc setting */
	vcc = p_dev->socket->socket.Vcc;

	tuple = &cfg_mem->tuple;
	tuple->TupleData = cfg_mem->buf;
	tuple->TupleDataMax = 255;
	tuple->TupleOffset = 0;
	tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
	tuple->Attributes = 0;

	ret = pcmcia_get_first_tuple(p_dev, tuple);
	while (!ret) {
		cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry;

		if (pcmcia_get_tuple_data(p_dev, tuple))
			goto next_entry;
		if (pcmcia_parse_tuple(tuple, &cfg_mem->parse))
			goto next_entry;

		/* default values */
		p_dev->conf.ConfigIndex = cfg->index;
		if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
			cfg_mem->dflt = *cfg;

		ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data);
		if (!ret)
			break;
next_entry:
		ret = pcmcia_get_next_tuple(p_dev, tuple);
	}

	kfree(cfg_mem);
	return ret;
}
EXPORT_SYMBOL(pcmcia_loop_config);
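/*
 * Illustrative sketch only (kept out of the build): how a PCMCIA driver's
 * config path would use the backported pcmcia_loop_config(). The names
 * foo_check_config and foo_config are hypothetical; returning 0 from the
 * callback accepts the configuration entry and stops the loop.
 */
#if 0
static int foo_check_config(struct pcmcia_device *p_dev,
			    cistpl_cftable_entry_t *cfg,
			    cistpl_cftable_entry_t *dflt,
			    unsigned int vcc,
			    void *priv_data)
{
	if (cfg->index == 0)
		return -ENODEV;
	return 0;	/* take the first usable entry */
}

static int foo_config(struct pcmcia_device *link)
{
	return pcmcia_loop_config(link, foo_check_config, NULL);
}
#endif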
#endif /* CONFIG_PCMCIA */

#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
void usb_unpoison_urb(struct urb *urb)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
	unsigned long flags;
#endif
	if (!urb)
		return;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
	spin_lock_irqsave(&usb_reject_lock, flags);
#endif
	--urb->reject;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
	spin_unlock_irqrestore(&usb_reject_lock, flags);
#endif
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Upstream, newly added URBs would also
 * be poisoned; this backport cannot mark the anchor itself (see the
 * note at the top of this file).
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	/* anchor->poisoned = 1; XXX: Cannot backport */
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		usb_unanchor_urb(victim);
	} else {
		spin_unlock_irqrestore(&anchor->lock, flags);
		victim = NULL;
	}

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);
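/*
 * Illustrative sketch only (kept out of the build): draining an anchor one
 * URB at a time. foo_flush() is hypothetical; the usb_free_urb() drops the
 * reference usb_get_from_anchor() took above.
 */
#if 0
static void foo_flush(struct usb_anchor *anchor)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(anchor)) != NULL) {
		usb_kill_urb(urb);
		usb_free_urb(urb);
	}
}
#endif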
/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this may free the URB */
		usb_unanchor_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);

#endif /* CONFIG_USB */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/* Make sure the BAR is actually a memory resource, not an IO resource */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
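/*
 * Illustrative sketch only (kept out of the build): the usual probe()-time
 * use of pci_ioremap_bar(). foo_probe() and the chosen BAR are hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		return -ENOMEM;
	/* ... later: iounmap(regs); */
	return 0;
}
#endif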
static unsigned long round_jiffies_common(unsigned long j, int cpu,
					  bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
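/*
 * Illustrative sketch only (kept out of the build): round_jiffies_up() takes
 * an absolute jiffies value and suits timeouts that must not fire early.
 * foo_arm_timeout() and the 500 ms value are hypothetical.
 */
#if 0
static void foo_arm_timeout(struct timer_list *timer)
{
	mod_timer(timer, round_jiffies_up(jiffies + msecs_to_jiffies(500)));
}
#endif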
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
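/*
 * Illustrative sketch only (kept out of the build): attaching a page-based
 * receive buffer as fragment 0 of an skb; len, data_len and truesize are all
 * bumped by the helper. foo_rx_page() and its parameters are hypothetical.
 */
#if 0
static void foo_rx_page(struct sk_buff *skb, struct page *page,
			int offset, int len)
{
	skb_add_rx_frag(skb, 0, page, offset, len);
}
#endif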
void tty_write_unlock(struct tty_struct *tty)
{
	mutex_unlock(&tty->atomic_write_lock);
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}

int tty_write_lock(struct tty_struct *tty, int ndelay)
{
	if (!mutex_trylock(&tty->atomic_write_lock)) {
		if (ndelay)
			return -EAGAIN;
		if (mutex_lock_interruptible(&tty->atomic_write_lock))
			return -ERESTARTSYS;
	}
	return 0;
}
/**
 * send_prio_char - send priority character
 *
 * Send a high priority character to the tty even if stopped.
 *
 * Locking: none for xchar method, write ordering for write method.
 */
static int send_prio_char(struct tty_struct *tty, char ch)
{
	int was_stopped = tty->stopped;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
	if (tty->ops->send_xchar) {
		tty->ops->send_xchar(tty, ch);
#else
	if (tty->driver->send_xchar) {
		tty->driver->send_xchar(tty, ch);
#endif
		return 0;
	}

	if (tty_write_lock(tty, 0) < 0)
		return -ERESTARTSYS;

	if (was_stopped)
		start_tty(tty);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
	tty->ops->write(tty, &ch, 1);
#else
	tty->driver->write(tty, &ch, 1);
#endif
	if (was_stopped)
		stop_tty(tty);
	tty_write_unlock(tty);
	return 0;
}
int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
	unsigned long flags;
#endif
	int retval;

	switch (cmd) {
	case TCXONC:
		retval = tty_check_change(tty);
		if (retval)
			return retval;
		switch (arg) {
		case TCOOFF:
			if (!tty->flow_stopped) {
				tty->flow_stopped = 1;
				stop_tty(tty);
			}
			break;
		case TCOON:
			if (tty->flow_stopped) {
				tty->flow_stopped = 0;
				start_tty(tty);
			}
			break;
		case TCIOFF:
			if (STOP_CHAR(tty) != __DISABLED_CHAR)
				return send_prio_char(tty, STOP_CHAR(tty));
			break;
		case TCION:
			if (START_CHAR(tty) != __DISABLED_CHAR)
				return send_prio_char(tty, START_CHAR(tty));
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case TCFLSH:
		return tty_perform_flush(tty, arg);
	case TIOCPKT:
	{
		int pktmode;

		if (tty->driver->type != TTY_DRIVER_TYPE_PTY ||
		    tty->driver->subtype != PTY_TYPE_MASTER)
			return -ENOTTY;
		if (get_user(pktmode, (int __user *) arg))
			return -EFAULT;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
		spin_lock_irqsave(&tty->ctrl_lock, flags);
#endif
		if (pktmode) {
			if (!tty->packet) {
				tty->packet = 1;
				tty->link->ctrl_status = 0;
			}
		} else
			tty->packet = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
#endif
		return 0;
	}
	default:
		/* Try the mode commands */
		return tty_mode_ioctl(tty, file, cmd, arg);
	}
}
EXPORT_SYMBOL(n_tty_ioctl_helper);
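/*
 * Illustrative sketch only (kept out of the build): a line discipline
 * falling back to the backported n_tty_ioctl_helper() for ioctls it does
 * not handle itself. foo_ldisc_ioctl() is hypothetical.
 */
#if 0
static int foo_ldisc_ioctl(struct tty_struct *tty, struct file *file,
			   unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	/* driver-specific ioctls would be handled here */
	default:
		return n_tty_ioctl_helper(tty, file, cmd, arg);
	}
}
#endif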
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) */