Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 29 Jul 2009 19:28:49 +0000 (12:28 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 29 Jul 2009 19:28:49 +0000 (12:28 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6:
  staging: udlfb: Add vmalloc.h include
  staging: remove aten2011 driver
  Staging: android: lowmemorykiller.c: fix it for "oom: move oom_adj value from task_struct to mm_struct"
  Staging: serqt_usb2: fix memory leak in error case
  Staging: serqt_usb2: add missing calls to tty_kref_put()

41 files changed:
drivers/char/n_tty.c
drivers/char/pty.c
drivers/char/tty_buffer.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/smsc47m1.c
drivers/i2c/chips/tsl2550.c
drivers/isdn/mISDN/l1oip_core.c
drivers/usb/core/config.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ohci-omap.c
drivers/usb/host/xhci-dbg.c
drivers/usb/host/xhci-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/Kconfig
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_regs.h
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/storage/transport.c
fs/btrfs/async-thread.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode.c
fs/btrfs/print-tree.c
fs/btrfs/relocation.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ecryptfs/keystore.c
include/linux/tty.h

diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ff47907ff1bf9f9ae8ddc3e073c06ae4f3a615df..973be2f441951ed0e68d658c1192c94524f33aff 100644
@@ -1583,6 +1583,7 @@ static int n_tty_open(struct tty_struct *tty)
 
 static inline int input_available_p(struct tty_struct *tty, int amt)
 {
+       tty_flush_to_ldisc(tty);
        if (tty->icanon) {
                if (tty->canon_data)
                        return 1;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 3850a68f265ae56ef8fd601d1ca36bf12b339f31..6e6942c45f5b6cb76d6c1aaf91ed96fee9185ae7 100644
@@ -52,7 +52,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
                return;
        tty->link->packet = 0;
        set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
-       tty_flip_buffer_push(tty->link);
        wake_up_interruptible(&tty->link->read_wait);
        wake_up_interruptible(&tty->link->write_wait);
        if (tty->driver->subtype == PTY_TYPE_MASTER) {
@@ -208,7 +207,6 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
        clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
        set_bit(TTY_THROTTLED, &tty->flags);
        retval = 0;
-       tty->low_latency = 1;
 out:
        return retval;
 }
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 810ee25d66a48fa5042c84b7f4cf47f0a870ef4e..3108991c5c8b60f312742ca4eaeb37e4c812ad4c 100644
@@ -461,6 +461,19 @@ static void flush_to_ldisc(struct work_struct *work)
        tty_ldisc_deref(disc);
 }
 
+/**
+ *     tty_flush_to_ldisc
+ *     @tty: tty to push
+ *
+ *     Push the terminal flip buffers to the line discipline.
+ *
+ *     Must not be called from IRQ context.
+ */
+void tty_flush_to_ldisc(struct tty_struct *tty)
+{
+       flush_to_ldisc(&tty->buf.work.work);
+}
+
 /**
  *     tty_flip_buffer_push    -       terminal
  *     @tty: tty to push
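The helper added above gives callers a way to drain the flip buffers synchronously instead of waiting for the deferred work item; the n_tty.c hunk uses it so input_available_p() sees pending data immediately, which is what lets the pty.c hunk drop both the low_latency flag and the explicit tty_flip_buffer_push() on close. A minimal user-space sketch of the pattern, with hypothetical names (the real machinery is tty->buf and its delayed work):

    #include <stdio.h>

    /* Stand-in for the flip-buffer machinery: bytes land in a staging
     * area and are normally pushed to the reader by a deferred work
     * item.  A synchronous "flush now" entry point simply runs the same
     * handler directly, which is all tty_flush_to_ldisc() does.
     */
    struct flip_buf {
            int staged;     /* bytes waiting in the flip buffer */
            int delivered;  /* bytes already pushed to the reader */
    };

    static void flush_work_fn(struct flip_buf *fb)  /* deferred handler */
    {
            fb->delivered += fb->staged;
            fb->staged = 0;
    }

    static int input_available(struct flip_buf *fb)
    {
            flush_work_fn(fb);              /* flush synchronously first */
            return fb->delivered > 0;
    }

    int main(void)
    {
            struct flip_buf fb = { .staged = 2, .delivered = 0 };
            printf("available: %d\n", input_available(&fb));  /* 1 */
            return 0;
    }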
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index bff0103610c17ada5bf070cb49a5f95871967678..fe4fa29c9219b9884e2e55065728920240a55e14 100644
@@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
        sensor->data = data;
        sensor->id = flags->integer.value;
        sensor->limit1 = limit1->integer.value;
-       sensor->limit2 = limit2->integer.value;
+       if (data->old_interface)
+               sensor->limit2 = limit2->integer.value;
+       else
+               /* The upper limit is expressed as delta from lower limit */
+               sensor->limit2 = sensor->limit1 + limit2->integer.value;
 
        snprintf(sensor->input_attr_name, ATTR_NAME_SIZE,
                        "%s%d_input", base_name, start + *num);
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index a92dbb97ee999255e1ad3d79da106894e83282ef..ba75bfcf14ceb9a0f4d550091d6804e6bf2da081 100644
@@ -86,6 +86,7 @@ superio_exit(void)
 #define SUPERIO_REG_ACT                0x30
 #define SUPERIO_REG_BASE       0x60
 #define SUPERIO_REG_DEVID      0x20
+#define SUPERIO_REG_DEVREV     0x21
 
 /* Logical device registers */
 
@@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr,
         * The LPC47M292 (device id 0x6B) is somewhat compatible, but it
         * supports a 3rd fan, and the pin configuration registers are
         * unfortunately different.
+        * The LPC47M233 has the same device id (0x6B) but is not compatible.
+        * We check the high bit of the device revision register to
+        * differentiate them.
         */
        switch (val) {
        case 0x51:
@@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr,
                sio_data->type = smsc47m1;
                break;
        case 0x6B:
+               if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) {
+                       pr_debug(DRVNAME ": "
+                                "Found SMSC LPC47M233, unsupported\n");
+                       superio_exit();
+                       return -ENODEV;
+               }
+
                pr_info(DRVNAME ": Found SMSC LPC47M292\n");
                sio_data->type = smsc47m2;
                break;
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
index 1a9cc135219f4a5ea613c8da9819676cf19da0f4..b96f3025e588245fc498fcaddb78e706bfe03c6e 100644
@@ -27,7 +27,7 @@
 #include <linux/delay.h>
 
 #define TSL2550_DRV_NAME       "tsl2550"
-#define DRIVER_VERSION         "1.1.1"
+#define DRIVER_VERSION         "1.1.2"
 
 /*
  * Defines
@@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
        u8 r = 128;
 
        /* Avoid division by 0 and count 1 cannot be greater than count 0 */
-       if (c0 && (c1 <= c0))
-               r = c1 * 128 / c0;
+       if (c1 <= c0)
+               if (c0) {
+                       r = c1 * 128 / c0;
+
+                       /* Calculate LUX */
+                       lux = ((c0 - c1) * ratio_lut[r]) / 256;
+               } else
+                       lux = 0;
        else
-               return -1;
-
-       /* Calculate LUX */
-       lux = ((c0 - c1) * ratio_lut[r]) / 256;
+               return -EAGAIN;
 
        /* LUX range check */
        return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
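The rewrite separates two cases the old code conflated: c0 == 0 together with c1 == 0 just means darkness and should read as 0 lux, while c1 > c0 is impossible for this sensor and now returns -EAGAIN so callers can retry instead of misreading a bare -1 as a lux value. A standalone rendering of the fixed flow, with the driver's ratio_lut table stubbed out:

    #include <stdio.h>

    #define MAX_LUX    1846   /* TSL2550_MAX_LUX in the driver */
    #define ERR_AGAIN  (-11)  /* stand-in for -EAGAIN */

    /* Stub: the real table maps r = c1 * 128 / c0 to a scaled factor. */
    static int ratio_lut(unsigned r) { return 100; }

    static int calculate_lux(unsigned c0, unsigned c1)
    {
            int lux;

            if (c1 > c0)              /* count 1 can never exceed count 0 */
                    return ERR_AGAIN; /* bad reading: let the caller retry */
            if (c0)
                    lux = (int)((c0 - c1) * ratio_lut(c1 * 128 / c0) / 256);
            else
                    lux = 0;          /* both channels dark: legitimately 0 lux */

            return lux > MAX_LUX ? MAX_LUX : lux;
    }

    int main(void)
    {
            printf("%d\n", calculate_lux(0, 0));    /* 0, no longer an error */
            printf("%d\n", calculate_lux(200, 50)); /* 58 with the stub table */
            return 0;
    }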
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 990e6a7e6674dce0c866083fc576ce8acb1d9e52..c3b661a666cbb97478033040bb2f6bf245129208 100644
@@ -731,10 +731,10 @@ l1oip_socket_thread(void *data)
        while (!signal_pending(current)) {
                struct kvec iov = {
                        .iov_base = recvbuf,
-                       .iov_len = sizeof(recvbuf),
+                       .iov_len = recvbuf_size,
                };
                recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
-                                        sizeof(recvbuf), 0);
+                                        recvbuf_size, 0);
                if (recvlen > 0) {
                        l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
                } else {
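This is the classic sizeof-on-a-pointer bug: once recvbuf stopped being an on-stack array (it is presumably heap-allocated now, given the separate recvbuf_size variable), sizeof(recvbuf) silently became the size of the pointer, so receives were capped at 4 or 8 bytes. A standalone illustration:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char onstack[1500];
            char *onheap = malloc(1500);

            printf("sizeof(onstack) = %zu\n", sizeof(onstack)); /* 1500 */
            printf("sizeof(onheap)  = %zu\n", sizeof(onheap));  /* 4 or 8 */

            free(onheap);
            return 0;
    }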
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 24dfb33f90cb0fbc2268572ee0ed90ecc15efbb5..a16c538d0132958ab330b6993267663d45d4c4ef 100644
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
        int max_tx;
        int i;
 
-       /* Allocate space for the SS endpoint companion descriptor */
-       ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
-                       GFP_KERNEL);
-       if (!ep->ss_ep_comp)
-               return -ENOMEM;
        desc = (struct usb_ss_ep_comp_descriptor *) buffer;
        if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
                dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
                                " interface %d altsetting %d ep %d: "
                                "using minimum values\n",
                                cfgno, inum, asnum, ep->desc.bEndpointAddress);
-               ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
-               ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
-               ep->ss_ep_comp->desc.bMaxBurst = 0;
-               /*
-                * Leave bmAttributes as zero, which will mean no streams for
-                * bulk, and isoc won't support multiple bursts of packets.
-                * With bursts of only one packet, and a Mult of 1, the max
-                * amount of data moved per endpoint service interval is one
-                * packet.
-                */
-               if (usb_endpoint_xfer_isoc(&ep->desc) ||
-                               usb_endpoint_xfer_int(&ep->desc))
-                       ep->ss_ep_comp->desc.wBytesPerInterval =
-                               ep->desc.wMaxPacketSize;
                /*
                 * The next descriptor is for an Endpoint or Interface,
                 * no extra descriptors to copy into the companion structure,
                 * and we didn't eat up any of the buffer.
                 */
-               retval = 0;
-               goto valid;
+               return 0;
        }
        memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
        desc = &ep->ss_ep_comp->desc;
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                buffer += i;
                size -= i;
 
+               /* Allocate space for the SS endpoint companion descriptor */
+               endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+                               GFP_KERNEL);
+               if (!endpoint->ss_ep_comp)
+                       return -ENOMEM;
+
+               /* Fill in some default values (may be overwritten later) */
+               endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+               endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+               endpoint->ss_ep_comp->desc.bMaxBurst = 0;
+               /*
+                * Leave bmAttributes as zero, which will mean no streams for
+                * bulk, and isoc won't support multiple bursts of packets.
+                * With bursts of only one packet, and a Mult of 1, the max
+                * amount of data moved per endpoint service interval is one
+                * packet.
+                */
+               if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
+                               usb_endpoint_xfer_int(&endpoint->desc))
+                       endpoint->ss_ep_comp->desc.wBytesPerInterval =
+                               endpoint->desc.wMaxPacketSize;
+
                if (size > 0) {
                        retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
                                        inum, asnum, endpoint, num_ep, buffer,
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                                retval = buffer - buffer0;
                        }
                } else {
+                       dev_warn(ddev, "config %d interface %d altsetting %d "
+                               "endpoint 0x%X has no "
+                               "SuperSpeed companion descriptor\n",
+                               cfgno, inum, asnum, d->bEndpointAddress);
                        retval = buffer - buffer0;
                }
        } else {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index dc2ac613a9d1c34e2942ff55bf639fcb9d0be29b..1d283e1b2b8de543e56f4e695e333383ee5d116d 100644
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        int retval;
 
+       ehci_reset(ehci);
        retval = ehci_halt(ehci);
        if (retval)
                return retval;
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 
        hcd->has_tt = 1;
 
-       ehci_reset(ehci);
        ehci_port_power(ehci, 0);
 
        return retval;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f3aaba35e912fba4e021e3a6fcf7dd1c22d1153f..83cbecd2a1ed7177029d8f52b7a512b959f9e2b1 100644
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 static void ohci_omap_stop(struct usb_hcd *hcd)
 {
        dev_dbg(hcd->self.controller, "stopping USB Controller\n");
+       ohci_stop(hcd);
        omap_ohci_clock_power(0);
 }
 
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2501c571f855263f21200265888d58602deac86f..705e34324156f069ce4d56f7e596732bbc2fcc93 100644
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
        void *addr;
        u32 temp;
+       u64 temp_64;
 
        addr = &ir_set->irq_pending;
        temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
                xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
                                addr, (unsigned int)temp);
 
-       addr = &ir_set->erst_base[0];
-       temp = xhci_readl(xhci, addr);
-       xhci_dbg(xhci, "  %p: ir_set.erst_base[0] = 0x%x\n",
-                       addr, (unsigned int) temp);
-
-       addr = &ir_set->erst_base[1];
-       temp = xhci_readl(xhci, addr);
-       xhci_dbg(xhci, "  %p: ir_set.erst_base[1] = 0x%x\n",
-                       addr, (unsigned int) temp);
+       addr = &ir_set->erst_base;
+       temp_64 = xhci_read_64(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
+                       addr, temp_64);
 
-       addr = &ir_set->erst_dequeue[0];
-       temp = xhci_readl(xhci, addr);
-       xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[0] = 0x%x\n",
-                       addr, (unsigned int) temp);
-
-       addr = &ir_set->erst_dequeue[1];
-       temp = xhci_readl(xhci, addr);
-       xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[1] = 0x%x\n",
-                       addr, (unsigned int) temp);
+       addr = &ir_set->erst_dequeue;
+       temp_64 = xhci_read_64(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
+                       addr, temp_64);
 }
 
 void xhci_print_run_regs(struct xhci_hcd *xhci)
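Most of the xhci changes in this merge follow the pattern above: paired 32-bit accesses to 64-bit registers (erst_base[0]/erst_base[1] and friends) become single xhci_read_64()/xhci_write_64() calls, with lower_32_bits()/upper_32_bits() used when the halves are printed separately. Presumably the read helper composes the value from two 32-bit reads, low word first; the arithmetic, demonstrated over an in-memory stand-in for the register pair:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine two consecutive 32-bit words, low word first -- the same
     * composition a 64-bit register-read helper performs on hardware
     * that only guarantees 32-bit access granularity.
     */
    static uint64_t read_64(const volatile uint32_t *reg)
    {
            uint64_t lo = reg[0];
            uint64_t hi = reg[1];
            return lo | (hi << 32);
    }

    int main(void)
    {
            uint32_t fake_erst_base[2] = { 0xfffff000, 0x00000001 };
            printf("erst_base = 0x%016llx\n",
                   (unsigned long long)read_64(fake_erst_base));
            /* prints 0x00000001fffff000 */
            return 0;
    }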
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
                xhci_dbg(xhci, "Link TRB:\n");
                xhci_print_trb_offsets(xhci, trb);
 
-               address = trb->link.segment_ptr[0] +
-                       (((u64) trb->link.segment_ptr[1]) << 32);
+               address = trb->link.segment_ptr;
                xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 
                xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
                                (unsigned int) (trb->link.control & TRB_NO_SNOOP));
                break;
        case TRB_TYPE(TRB_TRANSFER):
-               address = trb->trans_event.buffer[0] +
-                       (((u64) trb->trans_event.buffer[1]) << 32);
+               address = trb->trans_event.buffer;
                /*
                 * FIXME: look at flags to figure out if it's an address or if
                 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
                xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
                break;
        case TRB_TYPE(TRB_COMPLETION):
-               address = trb->event_cmd.cmd_trb[0] +
-                       (((u64) trb->event_cmd.cmd_trb[1]) << 32);
+               address = trb->event_cmd.cmd_trb;
                xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
                xhci_dbg(xhci, "Completion status = %u\n",
                                (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
        for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
                trb = &seg->trbs[i];
                xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-                               (unsigned int) trb->link.segment_ptr[0],
-                               (unsigned int) trb->link.segment_ptr[1],
+                               lower_32_bits(trb->link.segment_ptr),
+                               upper_32_bits(trb->link.segment_ptr),
                                (unsigned int) trb->link.intr_target,
                                (unsigned int) trb->link.control);
                addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
                entry = &erst->entries[i];
                xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
                                (unsigned int) addr,
-                               (unsigned int) entry->seg_addr[0],
-                               (unsigned int) entry->seg_addr[1],
+                               lower_32_bits(entry->seg_addr),
+                               upper_32_bits(entry->seg_addr),
                                (unsigned int) entry->seg_size,
                                (unsigned int) entry->rsvd);
                addr += sizeof(*entry);
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-       u32 val;
+       u64 val;
 
-       val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-       xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
-       val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
-       xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+       val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+       xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+                       lower_32_bits(val));
+       xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+                       upper_32_bits(val));
 }
 
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+       int i;
+       for (i = 0; i < 4; ++i) {
+               xhci_dbg(xhci, "@%p (virt) @%08llx "
+                        "(dma) %#08llx - rsvd64[%d]\n",
+                        &ctx[4 + i], (unsigned long long)dma,
+                        ctx[4 + i], i);
+               dma += 8;
+       }
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
-       int i, j;
-       int last_ep_ctx = 31;
        /* Fields are 32 bits wide, DMA addresses are in bytes */
        int field_size = 32 / 8;
+       int i;
 
-       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-                       &ctx->drop_flags, (unsigned long long)dma,
-                       ctx->drop_flags);
-       dma += field_size;
-       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-                       &ctx->add_flags, (unsigned long long)dma,
-                       ctx->add_flags);
-       dma += field_size;
-       for (i = 0; i > 6; ++i) {
-               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                               &ctx->rsvd[i], (unsigned long long)dma,
-                               ctx->rsvd[i], i);
-               dma += field_size;
-       }
+       struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+       dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
+       int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
 
        xhci_dbg(xhci, "Slot Context:\n");
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-                       &ctx->slot.dev_info,
-                       (unsigned long long)dma, ctx->slot.dev_info);
+                       &slot_ctx->dev_info,
+                       (unsigned long long)dma, slot_ctx->dev_info);
        dma += field_size;
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-                       &ctx->slot.dev_info2,
-                       (unsigned long long)dma, ctx->slot.dev_info2);
+                       &slot_ctx->dev_info2,
+                       (unsigned long long)dma, slot_ctx->dev_info2);
        dma += field_size;
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-                       &ctx->slot.tt_info,
-                       (unsigned long long)dma, ctx->slot.tt_info);
+                       &slot_ctx->tt_info,
+                       (unsigned long long)dma, slot_ctx->tt_info);
        dma += field_size;
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-                       &ctx->slot.dev_state,
-                       (unsigned long long)dma, ctx->slot.dev_state);
+                       &slot_ctx->dev_state,
+                       (unsigned long long)dma, slot_ctx->dev_state);
        dma += field_size;
-       for (i = 0; i > 4; ++i) {
+       for (i = 0; i < 4; ++i) {
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                               &ctx->slot.reserved[i], (unsigned long long)dma,
-                               ctx->slot.reserved[i], i);
+                               &slot_ctx->reserved[i], (unsigned long long)dma,
+                               slot_ctx->reserved[i], i);
                dma += field_size;
        }
 
+       if (csz)
+               dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+                    struct xhci_container_ctx *ctx,
+                    unsigned int last_ep)
+{
+       int i, j;
+       int last_ep_ctx = 31;
+       /* Fields are 32 bits wide, DMA addresses are in bytes */
+       int field_size = 32 / 8;
+       int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
        if (last_ep < 31)
                last_ep_ctx = last_ep + 1;
        for (i = 0; i < last_ep_ctx; ++i) {
+               struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+               dma_addr_t dma = ctx->dma +
+                       ((unsigned long)ep_ctx - (unsigned long)ctx);
+
                xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-                               &ctx->ep[i].ep_info,
-                               (unsigned long long)dma, ctx->ep[i].ep_info);
+                               &ep_ctx->ep_info,
+                               (unsigned long long)dma, ep_ctx->ep_info);
                dma += field_size;
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-                               &ctx->ep[i].ep_info2,
-                               (unsigned long long)dma, ctx->ep[i].ep_info2);
-               dma += field_size;
-               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
-                               &ctx->ep[i].deq[0],
-                               (unsigned long long)dma, ctx->ep[i].deq[0]);
-               dma += field_size;
-               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
-                               &ctx->ep[i].deq[1],
-                               (unsigned long long)dma, ctx->ep[i].deq[1]);
+                               &ep_ctx->ep_info2,
+                               (unsigned long long)dma, ep_ctx->ep_info2);
                dma += field_size;
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+                               &ep_ctx->deq,
+                               (unsigned long long)dma, ep_ctx->deq);
+               dma += 2*field_size;
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-                               &ctx->ep[i].tx_info,
-                               (unsigned long long)dma, ctx->ep[i].tx_info);
+                               &ep_ctx->tx_info,
+                               (unsigned long long)dma, ep_ctx->tx_info);
                dma += field_size;
                for (j = 0; j < 3; ++j) {
                        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                                       &ctx->ep[i].reserved[j],
+                                       &ep_ctx->reserved[j],
                                        (unsigned long long)dma,
-                                       ctx->ep[i].reserved[j], j);
+                                       ep_ctx->reserved[j], j);
+                       dma += field_size;
+               }
+
+               if (csz)
+                       dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+       }
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+                 struct xhci_container_ctx *ctx,
+                 unsigned int last_ep)
+{
+       int i;
+       /* Fields are 32 bits wide, DMA addresses are in bytes */
+       int field_size = 32 / 8;
+       struct xhci_slot_ctx *slot_ctx;
+       dma_addr_t dma = ctx->dma;
+       int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+       if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+               struct xhci_input_control_ctx *ctrl_ctx =
+                       xhci_get_input_control_ctx(xhci, ctx);
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+                        &ctrl_ctx->drop_flags, (unsigned long long)dma,
+                        ctrl_ctx->drop_flags);
+               dma += field_size;
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+                        &ctrl_ctx->add_flags, (unsigned long long)dma,
+                        ctrl_ctx->add_flags);
+               dma += field_size;
+               for (i = 0; i < 6; ++i) {
+                       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+                                &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+                                ctrl_ctx->rsvd2[i], i);
                        dma += field_size;
                }
+
+               if (csz)
+                       dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
        }
+
+       slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+       xhci_dbg_slot_ctx(xhci, ctx);
+       xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index dba3e07ccd09a1098660fb5c4a17c921deefc483..816c39caca1cf6bf5e1ca9b4a9c8c5d03684fcf3 100644
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci)
        u32 state;
 
        state = xhci_readl(xhci, &xhci->op_regs->status);
-       BUG_ON((state & STS_HALT) == 0);
+       if ((state & STS_HALT) == 0) {
+               xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
+               return 0;
+       }
 
        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd)
 static void xhci_work(struct xhci_hcd *xhci)
 {
        u32 temp;
+       u64 temp_64;
 
        /*
         * Clear the op reg interrupt status first,
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci)
        /* FIXME this should be a delayed service routine that clears the EHB */
        xhci_handle_event(xhci);
 
-       /* Clear the event handler busy flag; the event ring should be empty. */
-       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-       xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+       /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
+       temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+       xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
        /* Flush posted writes -- FIXME is this necessary? */
        xhci_readl(xhci, &xhci->ir_set->irq_pending);
 }
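The "(RW1C)" note is the whole fix: the event-handler-busy bit is write-1-to-clear, so the old code's "temp & ~ERST_EHB" wrote a 0 to the bit, which RW1C hardware ignores, and the flag was never cleared; ORing ERST_EHB in is what actually clears it. A software model of the rule (bit position illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ERST_EHB (1u << 3)

    /* Register where only EHB is RW1C: ordinary bits take the written
     * value; the RW1C bit clears when a 1 is written and ignores a 0.
     */
    static uint32_t rw1c_write(uint32_t reg, uint32_t val)
    {
            reg = (reg & ERST_EHB) | (val & ~ERST_EHB);
            reg &= ~(val & ERST_EHB);
            return reg;
    }

    int main(void)
    {
            uint32_t reg = ERST_EHB | 0x7;  /* busy flag set */

            /* old code: EHB written as 0 -- a no-op under RW1C */
            printf("EHB=%d\n", !!(rw1c_write(reg, reg & ~ERST_EHB) & ERST_EHB));
            /* fixed code: EHB written as 1 -- this clears it */
            printf("EHB=%d\n", !!(rw1c_write(reg, reg | ERST_EHB) & ERST_EHB));
            return 0;
    }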
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 temp, temp2;
+       union xhci_trb *trb;
 
        spin_lock(&xhci->lock);
+       trb = xhci->event_ring->dequeue;
        /* Check if the xHC generated the interrupt, or the irq is shared */
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       if (temp == 0xffffffff && temp2 == 0xffffffff)
+               goto hw_died;
+
        if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
                spin_unlock(&xhci->lock);
                return IRQ_NONE;
        }
+       xhci_dbg(xhci, "op reg status = %08x\n", temp);
+       xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
+       xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+       xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+                       (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+                       lower_32_bits(trb->link.segment_ptr),
+                       upper_32_bits(trb->link.segment_ptr),
+                       (unsigned int) trb->link.intr_target,
+                       (unsigned int) trb->link.control);
 
        if (temp & STS_FATAL) {
                xhci_warn(xhci, "WARNING: Host System Error\n");
                xhci_halt(xhci);
+hw_died:
                xhci_to_hcd(xhci)->state = HC_STATE_HALT;
                spin_unlock(&xhci->lock);
                return -ESHUTDOWN;
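The new all-ones test catches a controller that has died or been hot-removed: MMIO reads from a PCI device that has dropped off the bus return 0xffffffff, so two unrelated registers both reading as all-ones is a cheap liveness check, and the hw_died label routes it into the existing halt-and-shutdown path.

    #include <stdint.h>
    #include <stdio.h>

    /* Reads from a surprise-removed PCI device return all-ones; two
     * independent registers agreeing on 0xffffffff means the hardware
     * is gone, not that both registers legitimately hold that value.
     */
    static int hc_died(uint32_t status, uint32_t irq_pending)
    {
            return status == 0xffffffff && irq_pending == 0xffffffff;
    }

    int main(void)
    {
            printf("%d\n", hc_died(0xffffffff, 0xffffffff)); /* 1: dead */
            printf("%d\n", hc_died(0x00000008, 0x00000003)); /* 0: alive */
            return 0;
    }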
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg)
 {
        unsigned long flags;
        int temp;
+       u64 temp_64;
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
        int i, j;
 
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg)
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-       temp &= ERST_PTR_MASK;
-       xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+       temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+       temp_64 &= ~ERST_PTR_MASK;
+       xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg)
 int xhci_run(struct usb_hcd *hcd)
 {
        u32 temp;
+       u64 temp_64;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        void (*doorbell)(struct xhci_hcd *) = NULL;
 
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd)
        add_timer(&xhci->event_ring_timer);
 #endif
 
+       xhci_dbg(xhci, "Command ring memory map follows:\n");
+       xhci_debug_ring(xhci, xhci->cmd_ring);
+       xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+       xhci_dbg_cmd_ptrs(xhci);
+
+       xhci_dbg(xhci, "ERST memory map follows:\n");
+       xhci_dbg_erst(xhci, &xhci->erst);
+       xhci_dbg(xhci, "Event ring:\n");
+       xhci_debug_ring(xhci, xhci->event_ring);
+       xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+       temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+       temp_64 &= ~ERST_PTR_MASK;
+       xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+
        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd)
        if (NUM_TEST_NOOPS > 0)
                doorbell = xhci_setup_one_noop(xhci);
 
-       xhci_dbg(xhci, "Command ring memory map follows:\n");
-       xhci_debug_ring(xhci, xhci->cmd_ring);
-       xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-       xhci_dbg_cmd_ptrs(xhci);
-
-       xhci_dbg(xhci, "ERST memory map follows:\n");
-       xhci_dbg_erst(xhci, &xhci->erst);
-       xhci_dbg(xhci, "Event ring:\n");
-       xhci_debug_ring(xhci, xhci->event_ring);
-       xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-       temp &= ERST_PTR_MASK;
-       xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
-       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
-       xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
-
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                goto exit;
        }
        if (usb_endpoint_xfer_control(&urb->ep->desc))
-               ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+               /* We have a spinlock and interrupts disabled, so we must pass
+                * atomic context to this function, which may allocate memory.
+                */
+               ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
        else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-               ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+               ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
        else
                ret = -EINVAL;
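The switch from mem_flags to GFP_ATOMIC follows from the locking: urb enqueue runs under xhci->lock with interrupts disabled, and an allocation there must never sleep, which GFP_KERNEL-style flags may do. A minimal sketch of the rule as a kernel-module fragment (hypothetical lock and function names):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void *alloc_under_lock(size_t size)
    {
            unsigned long flags;
            void *buf;

            spin_lock_irqsave(&demo_lock, flags);
            /* Spinlock held, interrupts off: the allocation must not
             * sleep, so GFP_ATOMIC is mandatory here; GFP_KERNEL could
             * schedule while atomic.
             */
            buf = kmalloc(size, GFP_ATOMIC);
            spin_unlock_irqrestore(&demo_lock, flags);
            return buf;
    }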
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                goto done;
 
        xhci_dbg(xhci, "Cancel URB %p\n", urb);
+       xhci_dbg(xhci, "Event ring:\n");
+       xhci_debug_ring(xhci, xhci->event_ring);
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+       xhci_dbg(xhci, "Endpoint ring:\n");
+       xhci_debug_ring(xhci, ep_ring);
        td = (struct xhci_td *) urb->hcpriv;
 
        ep_ring->cancels_pending++;
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
-       struct xhci_device_control *in_ctx;
+       struct xhci_container_ctx *in_ctx, *out_ctx;
+       struct xhci_input_control_ctx *ctrl_ctx;
+       struct xhci_slot_ctx *slot_ctx;
        unsigned int last_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        }
 
        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+       out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
-       ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+       ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-                       in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+                       ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                                __func__, ep);
                return 0;
        }
 
-       in_ctx->drop_flags |= drop_flag;
-       new_drop_flags = in_ctx->drop_flags;
+       ctrl_ctx->drop_flags |= drop_flag;
+       new_drop_flags = ctrl_ctx->drop_flags;
 
-       in_ctx->add_flags = ~drop_flag;
-       new_add_flags = in_ctx->add_flags;
+       ctrl_ctx->add_flags = ~drop_flag;
+       new_add_flags = ctrl_ctx->add_flags;
 
-       last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+       last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+       slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we deleted the last one */
-       if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-               in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-               in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+       if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+               slot_ctx->dev_info &= ~LAST_CTX_MASK;
+               slot_ctx->dev_info |= LAST_CTX(last_ctx);
        }
-       new_slot_info = in_ctx->slot.dev_info;
+       new_slot_info = slot_ctx->dev_info;
 
        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
-       struct xhci_device_control *in_ctx;
+       struct xhci_container_ctx *in_ctx, *out_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
+       struct xhci_slot_ctx *slot_ctx;
+       struct xhci_input_control_ctx *ctrl_ctx;
        u32 added_ctxs;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret = 0;
 
        ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-       if (ret <= 0)
+       if (ret <= 0) {
+               /* So we won't queue a reset ep command for a root hub */
+               ep->hcpriv = NULL;
                return ret;
+       }
        xhci = hcd_to_xhci(hcd);
 
        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        }
 
        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+       out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
-       ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+       ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
-       if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+       if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                                __func__, ep);
                return 0;
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                return -ENOMEM;
        }
 
-       in_ctx->add_flags |= added_ctxs;
-       new_add_flags = in_ctx->add_flags;
+       ctrl_ctx->add_flags |= added_ctxs;
+       new_add_flags = ctrl_ctx->add_flags;
 
        /* If xhci_endpoint_disable() was called for this endpoint, but the
         * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         * descriptors.  We must drop and re-add this endpoint, so we leave the
         * drop flags alone.
         */
-       new_drop_flags = in_ctx->drop_flags;
+       new_drop_flags = ctrl_ctx->drop_flags;
 
+       slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we just added one past */
-       if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-               in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-               in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+       if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+               slot_ctx->dev_info &= ~LAST_CTX_MASK;
+               slot_ctx->dev_info |= LAST_CTX(last_ctx);
        }
-       new_slot_info = in_ctx->slot.dev_info;
+       new_slot_info = slot_ctx->dev_info;
+
+       /* Store the usb_device pointer for later use */
+       ep->hcpriv = udev;
 
        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        return 0;
 }
 
-static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
 {
+       struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
+       struct xhci_slot_ctx *slot_ctx;
        int i;
 
        /* When a device's add flag and drop flag are zero, any subsequent
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
         * untouched.  Make sure we don't leave any old state in the input
         * endpoint contexts.
         */
-       virt_dev->in_ctx->drop_flags = 0;
-       virt_dev->in_ctx->add_flags = 0;
-       virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       ctrl_ctx->drop_flags = 0;
+       ctrl_ctx->add_flags = 0;
+       slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+       slot_ctx->dev_info &= ~LAST_CTX_MASK;
        /* Endpoint 0 is always valid */
-       virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+       slot_ctx->dev_info |= LAST_CTX(1);
        for (i = 1; i < 31; ++i) {
-               ep_ctx = &virt_dev->in_ctx->ep[i];
+               ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
                ep_ctx->ep_info = 0;
                ep_ctx->ep_info2 = 0;
-               ep_ctx->deq[0] = 0;
-               ep_ctx->deq[1] = 0;
+               ep_ctx->deq = 0;
                ep_ctx->tx_info = 0;
        }
 }
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        unsigned long flags;
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;
+       struct xhci_input_control_ctx *ctrl_ctx;
+       struct xhci_slot_ctx *slot_ctx;
 
        ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
        if (ret <= 0)
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        virt_dev = xhci->devs[udev->slot_id];
 
        /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-       virt_dev->in_ctx->add_flags |= SLOT_FLAG;
-       virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
-       virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
-       virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       ctrl_ctx->add_flags |= SLOT_FLAG;
+       ctrl_ctx->add_flags &= ~EP0_FLAG;
+       ctrl_ctx->drop_flags &= ~SLOT_FLAG;
+       ctrl_ctx->drop_flags &= ~EP0_FLAG;
        xhci_dbg(xhci, "New Input Control Context:\n");
-       xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
-                       LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+       slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+       xhci_dbg_ctx(xhci, virt_dev->in_ctx,
+                       LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
        spin_lock_irqsave(&xhci->lock, flags);
-       ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+       ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
                        udev->slot_id);
        if (ret < 0) {
                spin_unlock_irqrestore(&xhci->lock, flags);
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        }
 
        xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
-       xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
-                       LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+       xhci_dbg_ctx(xhci, virt_dev->out_ctx,
+                       LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-       xhci_zero_in_ctx(virt_dev);
+       xhci_zero_in_ctx(xhci, virt_dev);
        /* Free any old rings */
        for (i = 1; i < 31; ++i) {
                if (virt_dev->new_ep_rings[i]) {
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
                        virt_dev->new_ep_rings[i] = NULL;
                }
        }
-       xhci_zero_in_ctx(virt_dev);
+       xhci_zero_in_ctx(xhci, virt_dev);
+}
+
+/* Deal with stalled endpoints.  The core should have sent the control message
+ * to clear the halt condition.  However, we need to make the xHCI hardware
+ * reset its sequence number, since a device will expect a sequence number of
+ * zero after the halt condition is cleared.
+ * Context: in_interrupt
+ */
+void xhci_endpoint_reset(struct usb_hcd *hcd,
+               struct usb_host_endpoint *ep)
+{
+       struct xhci_hcd *xhci;
+       struct usb_device *udev;
+       unsigned int ep_index;
+       unsigned long flags;
+       int ret;
+       struct xhci_dequeue_state deq_state;
+       struct xhci_ring *ep_ring;
+
+       xhci = hcd_to_xhci(hcd);
+       udev = (struct usb_device *) ep->hcpriv;
+       /* Called with a root hub endpoint (or an endpoint that wasn't added
+        * with xhci_add_endpoint()
+        */
+       if (!ep->hcpriv)
+               return;
+       ep_index = xhci_get_endpoint_index(&ep->desc);
+       ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
+       if (!ep_ring->stopped_td) {
+               xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
+                               ep->desc.bEndpointAddress);
+               return;
+       }
+
+       xhci_dbg(xhci, "Queueing reset endpoint command\n");
+       spin_lock_irqsave(&xhci->lock, flags);
+       ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+       /*
+        * Can't change the ring dequeue pointer until it's transitioned to the
+        * stopped state, which is only upon a successful reset endpoint
+        * command.  Better hope that last command worked!
+        */
+       if (!ret) {
+               xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+               /* We need to move the HW's dequeue pointer past this TD,
+                * or it will attempt to resend it on the next doorbell ring.
+                */
+               xhci_find_new_dequeue_state(xhci, udev->slot_id,
+                               ep_index, ep_ring->stopped_td, &deq_state);
+               xhci_dbg(xhci, "Queueing new dequeue state\n");
+               xhci_queue_new_dequeue_state(xhci, ep_ring,
+                               udev->slot_id,
+                               ep_index, &deq_state);
+               kfree(ep_ring->stopped_td);
+               xhci_ring_cmd_db(xhci);
+       }
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       if (ret)
+               xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }
 
 /*
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_virt_device *virt_dev;
        int ret = 0;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       u32 temp;
+       struct xhci_slot_ctx *slot_ctx;
+       struct xhci_input_control_ctx *ctrl_ctx;
+       u64 temp_64;
 
        if (!udev->slot_id) {
                xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        if (!udev->config)
                xhci_setup_addressable_virt_dev(xhci, udev);
        /* Otherwise, assume the core has the device configured how it wants */
+       xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+       xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
        spin_lock_irqsave(&xhci->lock, flags);
-       ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
-                       udev->slot_id);
+       ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
+                                       udev->slot_id);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", virt_dev->cmd_status);
+               xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+               xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
                ret = -EINVAL;
                break;
        }
        if (ret) {
                return ret;
        }
-       temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
-       xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
-       temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
-       xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-       xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
-                       udev->slot_id,
-                       &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
-                       xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-       xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+       temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+       xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
+       xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
                        udev->slot_id,
-                       &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
-                       xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+                       &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+                       (unsigned long long)
+                               xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
        xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
-                       (unsigned long long)virt_dev->out_ctx_dma);
+                       (unsigned long long)virt_dev->out_ctx->dma);
        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-       xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+       xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
        xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-       xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+       xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
        /*
         * USB core uses address 1 for the roothubs, so we add one to the
         * address given back to us by the HC.
         */
-       udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+       slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+       udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
        /* Zero the input context control for later use */
-       virt_dev->in_ctx->add_flags = 0;
-       virt_dev->in_ctx->drop_flags = 0;
-       /* Mirror flags in the output context for future ep enable/disable */
-       virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
-       virt_dev->out_ctx->drop_flags = 0;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       ctrl_ctx->add_flags = 0;
+       ctrl_ctx->drop_flags = 0;
 
        xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
        /* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void)
        /* xhci_device_control has eight fields, and also
         * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
         */
-       BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
        BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de1c5087c58a9fd7f5fba56d9073c1af040..e6b9a1c6002d3da5b43c65161cca881d18a8e214 100644
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                return;
        prev->next = next;
        if (link_trbs) {
-               prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+               prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
 
                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,6 +189,63 @@ fail:
        return 0;
 }
 
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+                                                   int type, gfp_t flags)
+{
+       struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+       if (!ctx)
+               return NULL;
+
+       BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+       ctx->type = type;
+       ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+       if (type == XHCI_CTX_TYPE_INPUT)
+               ctx->size += CTX_SIZE(xhci->hcc_params);
+
+       ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+       memset(ctx->bytes, 0, ctx->size);
+       return ctx;
+}
+
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+                            struct xhci_container_ctx *ctx)
+{
+       dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+       kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+                                             struct xhci_container_ctx *ctx)
+{
+       BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+       return (struct xhci_input_control_ctx *)ctx->bytes;
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+                                       struct xhci_container_ctx *ctx)
+{
+       if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+               return (struct xhci_slot_ctx *)ctx->bytes;
+
+       return (struct xhci_slot_ctx *)
+               (ctx->bytes + CTX_SIZE(xhci->hcc_params));
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+                                   struct xhci_container_ctx *ctx,
+                                   unsigned int ep_index)
+{
+       /* increment ep index by offset of start of ep ctx array */
+       ep_index++;
+       if (ctx->type == XHCI_CTX_TYPE_INPUT)
+               ep_index++;
+
+       return (struct xhci_ep_ctx *)
+               (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
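The getters above encode the container layout that replaces the old xhci_device_control struct: a device context is [slot][ep0]..[ep30], an input context prepends one input control context, and every entry is CTX_SIZE bytes -- 32, or 64 when HCC_64BYTE_CONTEXT is set -- which is also why xhci_alloc_container_ctx() sizes the buffer as 1024/2048 plus one extra entry for input contexts. The index arithmetic, as a standalone check:

    #include <stdio.h>

    enum ctx_type { CTX_DEVICE, CTX_INPUT };

    /* Mirror of xhci_get_ep_ctx(): skip the slot context, and for input
     * contexts also skip the leading input control context.
     */
    static unsigned ep_ctx_offset(enum ctx_type type, unsigned ep_index,
                                  unsigned ctx_size)
    {
            unsigned idx = ep_index + 1;
            if (type == CTX_INPUT)
                    idx++;
            return idx * ctx_size;
    }

    int main(void)
    {
            printf("device ctx, ep0: byte %u\n", ep_ctx_offset(CTX_DEVICE, 0, 32)); /* 32 */
            printf("input ctx,  ep0: byte %u\n", ep_ctx_offset(CTX_INPUT, 0, 32));  /* 64 */
            printf("input ctx,  ep0, 64-byte ctxs: byte %u\n",
                   ep_ctx_offset(CTX_INPUT, 0, 64));                                /* 128 */
            return 0;
    }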
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
                return;
 
        dev = xhci->devs[slot_id];
-       xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-       xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+       xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;
 
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
                        xhci_ring_free(xhci, dev->ep_rings[i]);
 
        if (dev->in_ctx)
-               dma_pool_free(xhci->device_pool,
-                               dev->in_ctx, dev->in_ctx_dma);
+               xhci_free_container_ctx(xhci, dev->in_ctx);
        if (dev->out_ctx)
-               dma_pool_free(xhci->device_pool,
-                               dev->out_ctx, dev->out_ctx_dma);
+               xhci_free_container_ctx(xhci, dev->out_ctx);
+
        kfree(xhci->devs[slot_id]);
        xhci->devs[slot_id] = 0;
 }
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
 {
-       dma_addr_t      dma;
        struct xhci_virt_device *dev;
 
        /* Slot ID 0 is reserved */
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                return 0;
        dev = xhci->devs[slot_id];
 
-       /* Allocate the (output) device context that will be used in the HC */
-       dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+       /* Allocate the (output) device context that will be used in the HC. */
+       dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dev->out_ctx)
                goto fail;
-       dev->out_ctx_dma = dma;
+
        xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-                       (unsigned long long)dma);
-       memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+                       (unsigned long long)dev->out_ctx->dma);
 
        /* Allocate the (input) device context for address device command */
-       dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+       dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
        if (!dev->in_ctx)
                goto fail;
-       dev->in_ctx_dma = dma;
+
        xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-                       (unsigned long long)dma);
-       memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+                       (unsigned long long)dev->in_ctx->dma);
 
        /* Allocate endpoint 0 ring */
        dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
        init_completion(&dev->cmd_completion);
 
-       /*
-        * Point to output device context in dcbaa; skip the output control
-        * context, which is eight 32 bit fields (or 32 bytes long)
-        */
-       xhci->dcbaa->dev_context_ptrs[2*slot_id] =
-               (u32) dev->out_ctx_dma + (32);
+       /* Point to output device context in dcbaa. */
+       xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
        xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
                        slot_id,
-                       &xhci->dcbaa->dev_context_ptrs[2*slot_id],
-                       (unsigned long long)dev->out_ctx_dma);
-       xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+                       &xhci->dcbaa->dev_context_ptrs[slot_id],
+                       (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
 
        return 1;
 fail:
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx      *ep0_ctx;
        struct usb_device       *top_dev;
+       struct xhci_slot_ctx    *slot_ctx;
+       struct xhci_input_control_ctx *ctrl_ctx;
 
        dev = xhci->devs[udev->slot_id];
        /* Slot ID 0 is reserved */
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
                                udev->slot_id);
                return -EINVAL;
        }
-       ep0_ctx = &dev->in_ctx->ep[0];
+       ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
+       slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
        /* 2) New slot context and endpoint 0 context are valid */
-       dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+       ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 
        /* 3) Only the control endpoint is valid - one endpoint context */
-       dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+       slot_ctx->dev_info |= LAST_CTX(1);
 
        switch (udev->speed) {
        case USB_SPEED_SUPER:
-               dev->in_ctx->slot.dev_info |= (u32) udev->route;
-               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+               slot_ctx->dev_info |= (u32) udev->route;
+               slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
                break;
        case USB_SPEED_HIGH:
-               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+               slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
                break;
        case USB_SPEED_FULL:
-               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+               slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
                break;
        case USB_SPEED_LOW:
-               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+               slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
-       dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+       slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
        xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
 
        /* Is this a LS/FS device under a HS hub? */
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
         */
        if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
                        udev->tt) {
-               dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
-               dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+               slot_ctx->tt_info = udev->tt->hub->slot_id;
+               slot_ctx->tt_info |= udev->ttport << 8;
        }
        xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
        xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        ep0_ctx->ep_info2 |= MAX_BURST(0);
        ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
-       ep0_ctx->deq[0] =
+       ep0_ctx->deq =
                dev->ep_rings[0]->first_seg->dma;
-       ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-       ep0_ctx->deq[1] = 0;
+       ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
 
        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        unsigned int max_burst;
 
        ep_index = xhci_get_endpoint_index(&ep->desc);
-       ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+       ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
        /* Set up the endpoint ring */
        virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
        if (!virt_dev->new_ep_rings[ep_index])
                return -ENOMEM;
        ep_ring = virt_dev->new_ep_rings[ep_index];
-       ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-       ep_ctx->deq[1] = 0;
+       ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
        ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
 
        /* FIXME dig Mult and streams info out of ep companion desc */
 
-       /* Allow 3 retries for everything but isoc */
+       /* Allow 3 retries for everything but isoc;
+        * error count = 0 means infinite retries.
+        */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
                ep_ctx->ep_info2 = ERROR_COUNT(3);
        else
-               ep_ctx->ep_info2 = ERROR_COUNT(0);
+               ep_ctx->ep_info2 = ERROR_COUNT(1);
 
        ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
 
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                max_packet = ep->desc.wMaxPacketSize;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                /* dig out max burst from ep companion desc */
-               max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+               if (!ep->ss_ep_comp) {
+                       xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+                       max_packet = 0;
+               } else {
+                       max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+               }
                ep_ctx->ep_info2 |= MAX_BURST(max_packet);
                break;
        case USB_SPEED_HIGH:
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
        struct xhci_ep_ctx *ep_ctx;
 
        ep_index = xhci_get_endpoint_index(&ep->desc);
-       ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+       ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = 0;
-       ep_ctx->deq[0] = 0;
-       ep_ctx->deq[1] = 0;
+       ep_ctx->deq = 0;
        ep_ctx->tx_info = 0;
        /* Don't free the endpoint ring until the set interface or configuration
         * request succeeds.
         */
 }
 
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+       int i;
+       struct device *dev = xhci_to_hcd(xhci)->self.controller;
+       int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+       xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+
+       if (!num_sp)
+               return 0;
+
+       xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
+       if (!xhci->scratchpad)
+               goto fail_sp;
+
+       xhci->scratchpad->sp_array =
+               pci_alloc_consistent(to_pci_dev(dev),
+                                    num_sp * sizeof(u64),
+                                    &xhci->scratchpad->sp_dma);
+       if (!xhci->scratchpad->sp_array)
+               goto fail_sp2;
+
+       xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
+       if (!xhci->scratchpad->sp_buffers)
+               goto fail_sp3;
+
+       xhci->scratchpad->sp_dma_buffers =
+               kzalloc(sizeof(dma_addr_t) * num_sp, flags);
+
+       if (!xhci->scratchpad->sp_dma_buffers)
+               goto fail_sp4;
+
+       xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+       for (i = 0; i < num_sp; i++) {
+               dma_addr_t dma;
+               void *buf = pci_alloc_consistent(to_pci_dev(dev),
+                                                xhci->page_size, &dma);
+               if (!buf)
+                       goto fail_sp5;
+
+               xhci->scratchpad->sp_array[i] = dma;
+               xhci->scratchpad->sp_buffers[i] = buf;
+               xhci->scratchpad->sp_dma_buffers[i] = dma;
+       }
+
+       return 0;
+
+ fail_sp5:
+       for (i = i - 1; i >= 0; i--) {
+               pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+                                   xhci->scratchpad->sp_buffers[i],
+                                   xhci->scratchpad->sp_dma_buffers[i]);
+       }
+       kfree(xhci->scratchpad->sp_dma_buffers);
+
+ fail_sp4:
+       kfree(xhci->scratchpad->sp_buffers);
+
+ fail_sp3:
+       pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+                           xhci->scratchpad->sp_array,
+                           xhci->scratchpad->sp_dma);
+
+ fail_sp2:
+       kfree(xhci->scratchpad);
+       xhci->scratchpad = NULL;
+
+ fail_sp:
+       return -ENOMEM;
+}
+
+static void scratchpad_free(struct xhci_hcd *xhci)
+{
+       int num_sp;
+       int i;
+       struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+       if (!xhci->scratchpad)
+               return;
+
+       num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+       for (i = 0; i < num_sp; i++) {
+               pci_free_consistent(pdev, xhci->page_size,
+                                   xhci->scratchpad->sp_buffers[i],
+                                   xhci->scratchpad->sp_dma_buffers[i]);
+       }
+       kfree(xhci->scratchpad->sp_dma_buffers);
+       kfree(xhci->scratchpad->sp_buffers);
+       pci_free_consistent(pdev, num_sp * sizeof(u64),
+                           xhci->scratchpad->sp_array,
+                           xhci->scratchpad->sp_dma);
+       kfree(xhci->scratchpad);
+       xhci->scratchpad = NULL;
+}
+
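
A quick worked example of the scratchpad sizing, using the HCS_MAX_SCRATCHPAD() macro this patch adds to xhci.h (the register value here is hypothetical):

#include <assert.h>
#include <stdint.h>

#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)    /* bits 31:27, as in xhci.h */

int main(void)
{
        uint32_t hcs_params2 = 0x28000000;      /* hypothetical HCSPARAMS2 value */

        /* 0x28000000 >> 27 == 5, so scratchpad_alloc() would map five
         * page-sized buffers plus the sp_array page that DCBAA entry 0
         * points at; scratchpad_free() tears down the same five-plus-one.
         */
        assert(HCS_MAX_SCRATCHPAD(hcs_params2) == 5);
        return 0;
}
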
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+       xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+       xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci->event_ring = NULL;
        xhci_dbg(xhci, "Freed event ring\n");
 
-       xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-       xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+       xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci->device_pool = NULL;
        xhci_dbg(xhci, "Freed device context pool\n");
 
-       xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-       xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+       xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        if (xhci->dcbaa)
                pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
        xhci->page_size = 0;
        xhci->page_shift = 0;
+       scratchpad_free(xhci);
 }
 
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        dma_addr_t      dma;
        struct device   *dev = xhci_to_hcd(xhci)->self.controller;
        unsigned int    val, val2;
+       u64             val_64;
        struct xhci_segment     *seg;
        u32 page_size;
        int i;
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        xhci->dcbaa->dma = dma;
        xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
                        (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-       xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-       xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+       xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
        /*
         * Initialize the ring segment pool.  The ring must be a contiguous
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         */
        xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
                        SEGMENT_SIZE, 64, xhci->page_size);
+
        /* See Table 46 and Note on Figure 55 */
-       /* FIXME support 64-byte contexts */
        xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
-                       sizeof(struct xhci_device_control),
-                       64, xhci->page_size);
+                       2112, 64, xhci->page_size);
        if (!xhci->segment_pool || !xhci->device_pool)
                goto fail;
 
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
                        (unsigned long long)xhci->cmd_ring->first_seg->dma);
 
        /* Set the address in the Command Ring Control register */
-       val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-       val = (val & ~CMD_RING_ADDR_MASK) |
-               (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+       val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+       val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+               (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
-       xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-       xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-       xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-       xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+       xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+       xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
 
        val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        /* set ring base address and size for each segment table entry */
        for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
                struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-               entry->seg_addr[0] = seg->dma;
-               entry->seg_addr[1] = 0;
+               entry->seg_addr = seg->dma;
                entry->seg_size = TRBS_PER_SEGMENT;
                entry->rsvd = 0;
                seg = seg->next;
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        /* set the segment table base address */
        xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
                        (unsigned long long)xhci->erst.erst_dma_addr);
-       val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-       val &= ERST_PTR_MASK;
-       val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-       xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+       val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+       val_64 &= ERST_PTR_MASK;
+       val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+       xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
 
        /* Set the event ring dequeue address */
        xhci_set_hc_event_deq(xhci);
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                xhci->devs[i] = 0;
 
+       if (scratchpad_alloc(xhci, flags))
+               goto fail;
+
        return 0;
+
 fail:
        xhci_warn(xhci, "Couldn't initialize memory\n");
        xhci_mem_cleanup(xhci);
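
The xhci_read_64()/xhci_write_64() helpers used throughout this file are defined outside the hunks shown here. A plausible sketch, consistent with the access-ordering note this patch removes from xhci.h (low dword first, then high, so controllers without 64-bit support can ignore the high word):

/* Sketch only -- not the literal helpers from this patch. */
static inline u64 read_64_sketch(u32 __iomem *regs)
{
        u64 lo = readl(regs);
        u64 hi = readl(regs + 1);

        return lo | (hi << 32);
}

static inline void write_64_sketch(u64 val, u32 __iomem *regs)
{
        writel(lower_32_bits(val), regs);       /* low dword first... */
        writel(upper_32_bits(val), regs + 1);   /* ...then the high dword */
}
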
index 1462709e26c0411cfc69d13b93641f2764eeb096..592fe7e623f7a0d8f863a5d72a67dae42c4ea7aa 100644 (file)
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
        .free_dev =             xhci_free_dev,
        .add_endpoint =         xhci_add_endpoint,
        .drop_endpoint =        xhci_drop_endpoint,
+       .endpoint_reset =       xhci_endpoint_reset,
        .check_bandwidth =      xhci_check_bandwidth,
        .reset_bandwidth =      xhci_reset_bandwidth,
        .address_device =       xhci_address_device,
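
The new endpoint_reset hook ties together the helpers this patch exports (xhci_queue_reset_ep(), xhci_find_new_dequeue_state(), xhci_queue_new_dequeue_state()) and the EP_HALTED ring state. The body of xhci_endpoint_reset() is not among the hunks shown; a hedged sketch of the recovery sequence it presumably performs:

/* Sketch of halt recovery, built only from helpers visible in this patch. */
static void endpoint_reset_sketch(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index)
{
        struct xhci_ring *ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
        struct xhci_dequeue_state deq_state;

        /* 1) Ask the HC to clear its internal halt state for the endpoint. */
        xhci_queue_reset_ep(xhci, slot_id, ep_index);

        /* 2) Move the HC's dequeue pointer past the stalled TD that
         * handle_tx_event() parked in ep_ring->stopped_td/stopped_trb.
         */
        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                        ep_ring->stopped_td, &deq_state);
        xhci_queue_new_dequeue_state(xhci, ep_ring, slot_id, ep_index,
                        &deq_state);

        /* 3) xhci_queue_new_dequeue_state() sets SET_DEQ_PENDING and rings
         * the command doorbell; EP_HALTED is cleared later by
         * handle_reset_ep_completion(), which restarts the ring.
         */
}
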
index 02d81985c4544071b2abc9899a6661d802629b6b..aa88a067148bb70584bdaaaf9c56ccb39b32d3c3 100644 (file)
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci,
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 {
        union xhci_trb *next = ++(ring->dequeue);
+       unsigned long long addr;
 
        ring->deq_updates++;
        /* Update the dequeue pointer further if that was a link TRB or we're at
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
                ring->dequeue = ring->deq_seg->trbs;
                next = ring->dequeue;
        }
+       addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+       if (ring == xhci->event_ring)
+               xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
+       else if (ring == xhci->cmd_ring)
+               xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
+       else
+               xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 {
        u32 chain;
        union xhci_trb *next;
+       unsigned long long addr;
 
        chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
        next = ++(ring->enqueue);
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }
+       addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+       if (ring == xhci->event_ring)
+               xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
+       else if (ring == xhci->cmd_ring)
+               xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
+       else
+               xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
 void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
-       u32 temp;
+       u64 temp;
        dma_addr_t deq;
 
        deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
                xhci_warn(xhci, "WARN something wrong with SW event ring "
                                "dequeue ptr.\n");
        /* Update HC event ring dequeue pointer */
-       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+       temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp &= ERST_PTR_MASK;
-       if (!in_interrupt())
-               xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
-       xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
-                       &xhci->ir_set->erst_dequeue[0]);
+       /* Don't clear the EHB bit (which is RW1C) because
+        * there might be more events to service.
+        */
+       temp &= ~ERST_EHB;
+       xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
+       xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+                       &xhci->ir_set->erst_dequeue);
 }
 
 /* Ring the host controller doorbell after placing a command on the ring */
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         */
-       if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+       if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
+                       && !(ep_ring->state & EP_HALTED)) {
                field = xhci_readl(xhci, db_addr) & DB_MASK;
                xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
                /* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
        return cur_seg;
 }
 
-struct dequeue_state {
-       struct xhci_segment *new_deq_seg;
-       union xhci_trb *new_deq_ptr;
-       int new_cycle_state;
-};
-
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -336,24 +349,30 @@ struct dequeue_state {
  *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
  *    if we've moved it past a link TRB with the toggle cycle bit set.
  */
-static void find_new_dequeue_state(struct xhci_hcd *xhci,
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
-               struct xhci_td *cur_td, struct dequeue_state *state)
+               struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
        struct xhci_generic_trb *trb;
+       struct xhci_ep_ctx *ep_ctx;
+       dma_addr_t addr;
 
        state->new_cycle_state = 0;
+       xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        ep_ring->stopped_trb,
                        &state->new_cycle_state);
        if (!state->new_deq_seg)
                BUG();
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
-       state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+       xhci_dbg(xhci, "Finding endpoint context\n");
+       ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+       state->new_cycle_state = 0x1 & ep_ctx->deq;
 
        state->new_deq_ptr = cur_td->last_trb;
+       xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
        /* Don't update the ring cycle state for the producer (us). */
+       xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+                       state->new_deq_seg);
+       addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+       xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+                       (unsigned long long) addr);
+       xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
        ep_ring->dequeue = state->new_deq_ptr;
        ep_ring->deq_seg = state->new_deq_seg;
 }
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, struct xhci_segment *deq_seg,
                union xhci_trb *deq_ptr, u32 cycle_state);
 
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+               struct xhci_ring *ep_ring, unsigned int slot_id,
+               unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+{
+       xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+                       "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+                       deq_state->new_deq_seg,
+                       (unsigned long long)deq_state->new_deq_seg->dma,
+                       deq_state->new_deq_ptr,
+                       (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
+                       deq_state->new_cycle_state);
+       queue_set_tr_deq(xhci, slot_id, ep_index,
+                       deq_state->new_deq_seg,
+                       deq_state->new_deq_ptr,
+                       (u32) deq_state->new_cycle_state);
+       /* Stop the TD queueing code from ringing the doorbell until
+        * this command completes.  The HC won't set the dequeue pointer
+        * if the ring is running, and ringing the doorbell starts the
+        * ring running.
+        */
+       ep_ring->state |= SET_DEQ_PENDING;
+       xhci_ring_cmd_db(xhci);
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
        struct xhci_td *cur_td = 0;
        struct xhci_td *last_unlinked_td;
 
-       struct dequeue_state deq_state;
+       struct xhci_dequeue_state deq_state;
 #ifdef CONFIG_USB_HCD_STAT
        ktime_t stop_time = ktime_get();
 #endif
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                if (cur_td == ep_ring->stopped_td)
-                       find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+                       xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                                        &deq_state);
                else
                        td_to_noop(xhci, ep_ring, cur_td);
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-               xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-                               "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
-                               deq_state.new_deq_seg,
-                               (unsigned long long)deq_state.new_deq_seg->dma,
-                               deq_state.new_deq_ptr,
-                               (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
-                               deq_state.new_cycle_state);
-               queue_set_tr_deq(xhci, slot_id, ep_index,
-                               deq_state.new_deq_seg,
-                               deq_state.new_deq_ptr,
-                               (u32) deq_state.new_cycle_state);
-               /* Stop the TD queueing code from ringing the doorbell until
-                * this command completes.  The HC won't set the dequeue pointer
-                * if the ring is running, and ringing the doorbell starts the
-                * ring running.
-                */
-               ep_ring->state |= SET_DEQ_PENDING;
-               xhci_ring_cmd_db(xhci);
+               xhci_queue_new_dequeue_state(xhci, ep_ring,
+                               slot_id, ep_index, &deq_state);
        } else {
                /* Otherwise just ring the doorbell to restart the ring */
                ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
+       struct xhci_ep_ctx *ep_ctx;
+       struct xhci_slot_ctx *slot_ctx;
 
        slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
        ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
        dev = xhci->devs[slot_id];
        ep_ring = dev->ep_rings[ep_index];
+       ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+       slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
        if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
                unsigned int ep_state;
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
                case COMP_CTX_STATE:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                                        "to incorrect slot or ep state.\n");
-                       ep_state = dev->out_ctx->ep[ep_index].ep_info;
+                       ep_state = ep_ctx->ep_info;
                        ep_state &= EP_STATE_MASK;
-                       slot_state = dev->out_ctx->slot.dev_state;
+                       slot_state = slot_ctx->dev_state;
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                                        slot_state, ep_state);
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
                 * cancelling URBs, which might not be an error...
                 */
        } else {
-               xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
-                               "deq[1] = 0x%x.\n",
-                               dev->out_ctx->ep[ep_index].deq[0],
-                               dev->out_ctx->ep[ep_index].deq[1]);
+               xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+                               ep_ctx->deq);
        }
 
        ep_ring->state &= ~SET_DEQ_PENDING;
        ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
+static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+               struct xhci_event_cmd *event,
+               union xhci_trb *trb)
+{
+       int slot_id;
+       unsigned int ep_index;
+
+       slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+       ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+       /* This command will only fail if the endpoint wasn't halted,
+        * but we don't care.
+        */
+       xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+                       (unsigned int) GET_COMP_CODE(event->status));
+
+       /* Clear our internal halted state and restart the ring */
+       xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
+       ring_ep_doorbell(xhci, slot_id, ep_index);
+}
 
 static void handle_cmd_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
        u64 cmd_dma;
        dma_addr_t cmd_dequeue_dma;
 
-       cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+       cmd_dma = event->cmd_trb;
        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                        xhci->cmd_ring->dequeue);
        /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
        case TRB_TYPE(TRB_CMD_NOOP):
                ++xhci->noops_handled;
                break;
+       case TRB_TYPE(TRB_RESET_EP):
+               handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+               break;
        default:
                /* Skip over unknown commands on the event ring */
                xhci->error_bitmask |= 1 << 6;
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        union xhci_trb *event_trb;
        struct urb *urb = 0;
        int status = -EINPROGRESS;
+       struct xhci_ep_ctx *ep_ctx;
 
+       xhci_dbg(xhci, "In %s\n", __func__);
        xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
        if (!xdev) {
                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
        /* Endpoint ID is 1 based, our index is zero based */
        ep_index = TRB_TO_EP_ID(event->flags) - 1;
+       xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
        ep_ring = xdev->ep_rings[ep_index];
-       if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+       ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+       if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
                xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
                return -ENODEV;
        }
 
-       event_dma = event->buffer[0];
-       if (event->buffer[1] != 0)
-               xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
-
+       event_dma = event->buffer;
        /* This TRB should be in the TD at the head of this ring's TD list */
+       xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
        if (list_empty(&ep_ring->td_list)) {
                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
                                TRB_TO_SLOT_ID(event->flags), ep_index);
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                urb = NULL;
                goto cleanup;
        }
+       xhci_dbg(xhci, "%s - getting list entry\n", __func__);
        td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
 
        /* Is this a TRB in the currently executing TD? */
+       xhci_dbg(xhci, "%s - looking for TD\n", __func__);
        event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                        td->last_trb, event_dma);
+       xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
        if (!event_seg) {
                /* HC is busted, give up! */
                xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
        xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
                        (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-       xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
-                       (unsigned int) event->buffer[0]);
-       xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n",
-                       (unsigned int) event->buffer[1]);
+       xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+                       lower_32_bits(event->buffer));
+       xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+                       upper_32_bits(event->buffer));
        xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
                        (unsigned int) event->transfer_len);
        xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                break;
        case COMP_STALL:
                xhci_warn(xhci, "WARN: Stalled endpoint\n");
+               ep_ring->state |= EP_HALTED;
                status = -EPIPE;
                break;
        case COMP_TRB_ERR:
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                xhci_warn(xhci, "WARN: transfer error on endpoint\n");
                status = -EPROTO;
                break;
+       case COMP_BABBLE:
+               xhci_warn(xhci, "WARN: babble error on endpoint\n");
+               status = -EOVERFLOW;
+               break;
        case COMP_DB_ERR:
                xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
                status = -ENOSR;
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                if (event_trb != ep_ring->dequeue) {
                        /* The event was for the status stage */
                        if (event_trb == td->last_trb) {
-                               td->urb->actual_length =
-                                       td->urb->transfer_buffer_length;
+                               if (td->urb->actual_length != 0) {
+                                       /* Don't overwrite a previously set error code */
+                                       if (status == -EINPROGRESS || status == 0)
+                                               /* Did we already see a short data stage? */
+                                               status = -EREMOTEIO;
+                               } else {
+                                       td->urb->actual_length =
+                                               td->urb->transfer_buffer_length;
+                               }
                        } else {
                        /* Maybe the event was for the data stage? */
-                               if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+                               if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
                                        /* We didn't stop on a link TRB in the middle */
                                        td->urb->actual_length =
                                                td->urb->transfer_buffer_length -
                                                TRB_LEN(event->transfer_len);
+                                       xhci_dbg(xhci, "Waiting for status stage event\n");
+                                       urb = NULL;
+                                       goto cleanup;
+                               }
                        }
                }
        } else {
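
The control-transfer handling above now spreads one URB across two transfer events. As a timeline (short control IN transfer, illustrative only):

/* Sketch: short control IN transfer, two events for one TD.
 *
 * Event 1 - data stage TRB (not td->last_trb), completion COMP_SHORT_TX:
 *      record urb->actual_length = transfer_buffer_length - TRB_LEN(event),
 *      then "urb = NULL; goto cleanup;" to wait for the status stage.
 *
 * Event 2 - status stage TRB (event_trb == td->last_trb):
 *      actual_length is already nonzero, so it is kept and the URB
 *      completes with -EREMOTEIO (unless an error status was set earlier).
 */
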
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                                        TRB_LEN(event->transfer_len));
                                        td->urb->actual_length = 0;
                                }
-                               if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-                                       status = -EREMOTEIO;
-                               else
-                                       status = 0;
+                               /* Don't overwrite a previously set error code */
+                               if (status == -EINPROGRESS) {
+                                       if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                                               status = -EREMOTEIO;
+                                       else
+                                               status = 0;
+                               }
                        } else {
                                td->urb->actual_length = td->urb->transfer_buffer_length;
                                /* Ignore a short packet completion if the
                                 * untransferred length was zero.
                                 */
-                               status = 0;
+                               if (status == -EREMOTEIO)
+                                       status = 0;
                        }
                } else {
                        /* Slow path - walk the list, starting from the dequeue
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                        TRB_LEN(event->transfer_len);
                }
        }
-       /* The Endpoint Stop Command completion will take care of
-        * any stopped TDs.  A stopped TD may be restarted, so don't update the
-        * ring dequeue pointer or take this TD off any lists yet.
-        */
        if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
                        GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+               /* The Endpoint Stop Command completion will take care of any
+                * stopped TDs.  A stopped TD may be restarted, so don't update
+                * the ring dequeue pointer or take this TD off any lists yet.
+                */
                ep_ring->stopped_td = td;
                ep_ring->stopped_trb = event_trb;
        } else {
-               /* Update ring dequeue pointer */
-               while (ep_ring->dequeue != td->last_trb)
+               if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+                       /* The transfer is completed from the driver's
+                        * perspective, but we need to issue a set dequeue
+                        * command for this stalled endpoint to move the dequeue
+                        * pointer past the TD.  We can't do that here because
+                        * the halt condition must be cleared first.
+                        */
+                       ep_ring->stopped_td = td;
+                       ep_ring->stopped_trb = event_trb;
+               } else {
+                       /* Update ring dequeue pointer */
+                       while (ep_ring->dequeue != td->last_trb)
+                               inc_deq(xhci, ep_ring, false);
                        inc_deq(xhci, ep_ring, false);
-               inc_deq(xhci, ep_ring, false);
+               }
 
                /* Clean up the endpoint's TD list */
                urb = td->urb;
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        list_del(&td->cancelled_td_list);
                        ep_ring->cancels_pending--;
                }
-               kfree(td);
+               /* Leave the TD around for the reset endpoint function to use */
+               if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+                       kfree(td);
+               }
                urb->hcpriv = NULL;
        }
 cleanup:
@@ -997,6 +1093,8 @@ cleanup:
        /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
        if (urb) {
                usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+               xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
+                               urb, urb->actual_length, status);
                spin_unlock(&xhci->lock);
                usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
                spin_lock(&xhci->lock);
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
        int update_ptrs = 1;
        int ret;
 
+       xhci_dbg(xhci, "In %s\n", __func__);
        if (!xhci->event_ring || !xhci->event_ring->dequeue) {
                xhci->error_bitmask |= 1 << 1;
                return;
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci)
                xhci->error_bitmask |= 1 << 2;
                return;
        }
+       xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
        /* FIXME: Handle more event types. */
        switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
        case TRB_TYPE(TRB_COMPLETION):
+               xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
                handle_cmd_completion(xhci, &event->event_cmd);
+               xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
                break;
        case TRB_TYPE(TRB_PORT_STATUS):
+               xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
                handle_port_status(xhci, event);
+               xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
                update_ptrs = 0;
                break;
        case TRB_TYPE(TRB_TRANSFER):
+               xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
                ret = handle_tx_event(xhci, &event->trans_event);
+               xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
                if (ret < 0)
                        xhci->error_bitmask |= 1 << 9;
                else
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                 */
                xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
                return -ENOENT;
-       case EP_STATE_HALTED:
        case EP_STATE_ERROR:
-               xhci_warn(xhci, "WARN waiting for halt or error on ep "
-                               "to be cleared\n");
+               xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
                /* FIXME event handling code for error needs to clear it */
                /* XXX not sure if this should be -ENOENT or not */
                return -EINVAL;
+       case EP_STATE_HALTED:
+               xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
        case EP_STATE_STOPPED:
        case EP_STATE_RUNNING:
                break;
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
                gfp_t mem_flags)
 {
        int ret;
-
+       struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
        ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
-                       xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+                       ep_ctx->ep_info & EP_STATE_MASK,
                        num_trbs, mem_flags);
        if (ret)
                return ret;
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* Queue the first TRB, even if it's zero-length */
        do {
                u32 field = 0;
+               u32 length_field = 0;
 
                /* Don't change the cycle bit of the first TRB until later */
                if (first_trb)
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                                        (unsigned int) addr + trb_buff_len);
                }
+               length_field = TRB_LEN(trb_buff_len) |
+                       TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+                       TRB_INTR_TARGET(0);
                queue_trb(xhci, ep_ring, false,
-                               (u32) addr,
-                               (u32) ((u64) addr >> 32),
-                               TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+                               lower_32_bits(addr),
+                               upper_32_bits(addr),
+                               length_field,
                                /* We always want to know if the TRB was short,
                                 * or we won't get an event when it completes.
                                 * (Unless we use event data TRBs, which are a
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct xhci_generic_trb *start_trb;
        bool first_trb;
        int start_cycle;
-       u32 field;
+       u32 field, length_field;
 
        int running_total, trb_buff_len, ret;
        u64 addr;
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        td->last_trb = ep_ring->enqueue;
                        field |= TRB_IOC;
                }
+               length_field = TRB_LEN(trb_buff_len) |
+                       TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+                       TRB_INTR_TARGET(0);
                queue_trb(xhci, ep_ring, false,
-                               (u32) addr,
-                               (u32) ((u64) addr >> 32),
-                               TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+                               lower_32_bits(addr),
+                               upper_32_bits(addr),
+                               length_field,
                                /* We always want to know if the TRB was short,
                                 * or we won't get an event when it completes.
                                 * (Unless we use event data TRBs, which are a
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct usb_ctrlrequest *setup;
        struct xhci_generic_trb *start_trb;
        int start_cycle;
-       u32 field;
+       u32 field, length_field;
        struct xhci_td *td;
 
        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        /* If there's data, queue data TRBs */
        field = 0;
+       length_field = TRB_LEN(urb->transfer_buffer_length) |
+               TD_REMAINDER(urb->transfer_buffer_length) |
+               TRB_INTR_TARGET(0);
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
                        field |= TRB_DIR_IN;
                queue_trb(xhci, ep_ring, false,
                                lower_32_bits(urb->transfer_dma),
                                upper_32_bits(urb->transfer_dma),
-                               TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+                               length_field,
                                /* Event on short tx */
                                field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
        }
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
 {
-       return queue_command(xhci, in_ctx_ptr, 0, 0,
+       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+                       upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
 
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
 {
-       return queue_command(xhci, in_ctx_ptr, 0, 0,
+       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+                       upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }
 
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
        u32 type = TRB_TYPE(TRB_SET_DEQ);
 
        addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
-       if (addr == 0)
+       if (addr == 0) {
                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                                deq_seg, deq_ptr);
-       return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+               return 0;
+       }
+       return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+                       upper_32_bits(addr), 0,
                        trb_slot_id | trb_ep_index | type);
 }
+
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index)
+{
+       u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+       u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+       u32 type = TRB_TYPE(TRB_RESET_EP);
+
+       return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+}
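
The queuing code above now splits 64-bit DMA addresses across the two 32-bit TRB parameter words with lower_32_bits()/upper_32_bits() from <linux/kernel.h> (newly included by xhci.h below). The arithmetic, spelled out with a hypothetical address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t addr = 0x0000000123456040ULL;  /* hypothetical in_ctx DMA address */
        uint32_t lo = (uint32_t)(addr & 0xffffffffULL); /* lower_32_bits() */
        uint32_t hi = (uint32_t)(addr >> 32);           /* upper_32_bits() */

        assert(lo == 0x23456040u);
        assert(hi == 0x00000001u);
        return 0;
}
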
index 8936eeb5588b9d39a9625c2323285db4aa9dbfa9..d31d32206ba318fbf28ea23a105a0bfa81f82f92 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <linux/usb.h>
 #include <linux/timer.h>
+#include <linux/kernel.h>
 
 #include "../core/hcd.h"
 /* Code sharing between pci-quirks and xhci hcd */
  * xHCI register interface.
  * This corresponds to the eXtensible Host Controller Interface (xHCI)
  * Revision 0.95 specification
- *
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers.  Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
  */
 
 /**
@@ -96,6 +89,7 @@ struct xhci_cap_regs {
 #define HCS_ERST_MAX(p)                (((p) >> 4) & 0xf)
 /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
 /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
 
 /* HCSPARAMS3 - hcs_params3 - bitmasks */
 /* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -166,10 +160,10 @@ struct xhci_op_regs {
        u32     reserved1;
        u32     reserved2;
        u32     dev_notification;
-       u32     cmd_ring[2];
+       u64     cmd_ring;
        /* rsvd: offset 0x20-2F */
        u32     reserved3[4];
-       u32     dcbaa_ptr[2];
+       u64     dcbaa_ptr;
        u32     config_reg;
        /* rsvd: offset 0x3C-3FF */
        u32     reserved4[241];
@@ -254,7 +248,7 @@ struct xhci_op_regs {
 #define CMD_RING_RUNNING       (1 << 3)
 /* bits 4:5 reserved and should be preserved */
 /* Command Ring pointer - mask for the reserved low-order bits */
-#define CMD_RING_ADDR_MASK     (0xffffffc0)
+#define CMD_RING_RSVD_BITS     (0x3f)
 
 /* CONFIG - Configure Register - config_reg bitmasks */
 /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -382,8 +376,8 @@ struct xhci_intr_reg {
        u32     irq_control;
        u32     erst_size;
        u32     rsvd;
-       u32     erst_base[2];
-       u32     erst_dequeue[2];
+       u64     erst_base;
+       u64     erst_dequeue;
 };
 
 /* irq_pending bitmasks */
@@ -452,6 +446,27 @@ struct xhci_doorbell_array {
 #define EPI_TO_DB(p)           (((p) + 1) & 0xff)
 
 
+/**
+ * struct xhci_container_ctx
+ * @type: Type of context.  Used to calculate offsets to contained contexts.
+ * @size: Size of the context data
+ * @bytes: The raw context data given to HW
+ * @dma: dma address of the bytes
+ *
+ * Represents either a Device or Input context.  Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct xhci_container_ctx {
+       unsigned type;
+#define XHCI_CTX_TYPE_DEVICE  0x1
+#define XHCI_CTX_TYPE_INPUT   0x2
+
+       int size;
+
+       u8 *bytes;
+       dma_addr_t dma;
+};
+
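
These sizes explain the 2112-byte dma_pool created in xhci_mem_init() above. A worked check (illustrative), assuming the 64-byte-context case:

#include <assert.h>

int main(void)
{
        int ctx_size = 64;                      /* 64-byte-context HC; 32 otherwise */
        int dev_ctx = (1 + 31) * ctx_size;      /* slot context + 31 ep contexts */
        int input_ctx = dev_ctx + ctx_size;     /* plus one input control context */

        assert(dev_ctx == 2048);
        assert(input_ctx == 2112);      /* matches dma_pool_create() above */
        return 0;
}
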
 /**
  * struct xhci_slot_ctx
  * @dev_info:  Route string, device speed, hub info, and last valid endpoint
@@ -538,7 +553,7 @@ struct xhci_slot_ctx {
 struct xhci_ep_ctx {
        u32     ep_info;
        u32     ep_info2;
-       u32     deq[2];
+       u64     deq;
        u32     tx_info;
        /* offset 0x14 - 0x1f reserved for HC internal use */
        u32     reserved[3];
@@ -589,18 +604,16 @@ struct xhci_ep_ctx {
 
 
 /**
- * struct xhci_device_control
- * Input/Output context; see section 6.2.5.
+ * struct xhci_input_control_ctx
+ * Input control context; see section 6.2.5.
  *
  * @drop_context:      set the bit of the endpoint context you want to disable
  * @add_context:       set the bit of the endpoint context you want to enable
  */
-struct xhci_device_control {
+struct xhci_input_control_ctx {
        u32     drop_flags;
        u32     add_flags;
-       u32     rsvd[6];
-       struct xhci_slot_ctx    slot;
-       struct xhci_ep_ctx      ep[31];
+       u32     rsvd2[6];
 };
 
 /* drop context bitmasks */
@@ -608,7 +621,6 @@ struct xhci_device_control {
 /* add context bitmasks */
 #define        ADD_EP(x)       (0x1 << x)
 
-
 struct xhci_virt_device {
        /*
         * Commands to the hardware are passed an "input context" that
@@ -618,11 +630,10 @@ struct xhci_virt_device {
         * track of input and output contexts separately because
         * these commands might fail and we don't trust the hardware.
         */
-       struct xhci_device_control      *out_ctx;
-       dma_addr_t                      out_ctx_dma;
+       struct xhci_container_ctx       *out_ctx;
        /* Used for addressing devices and configuration changes */
-       struct xhci_device_control      *in_ctx;
-       dma_addr_t                      in_ctx_dma;
+       struct xhci_container_ctx       *in_ctx;
+
        /* FIXME when stream support is added */
        struct xhci_ring                *ep_rings[31];
        /* Temporary storage in case the configure endpoint command fails and we
@@ -641,7 +652,7 @@ struct xhci_virt_device {
  */
 struct xhci_device_context_array {
        /* 64-bit device addresses; we only write 32-bit addresses */
-       u32                     dev_context_ptrs[2*MAX_HC_SLOTS];
+       u64                     dev_context_ptrs[MAX_HC_SLOTS];
        /* private xHCD pointers */
        dma_addr_t      dma;
 };
@@ -654,7 +665,7 @@ struct xhci_device_context_array {
 
 struct xhci_stream_ctx {
        /* 64-bit stream ring address, cycle state, and stream type */
-       u32     stream_ring[2];
+       u64     stream_ring;
        /* offset 0x14 - 0x1f reserved for HC internal use */
        u32     reserved[2];
 };
@@ -662,7 +673,7 @@ struct xhci_stream_ctx {
 
 struct xhci_transfer_event {
        /* 64-bit buffer address, or immediate data */
-       u32     buffer[2];
+       u64     buffer;
        u32     transfer_len;
        /* This field is interpreted differently based on the type of TRB */
        u32     flags;
@@ -744,7 +755,7 @@ struct xhci_transfer_event {
 
 struct xhci_link_trb {
        /* 64-bit segment pointer*/
-       u32 segment_ptr[2];
+       u64 segment_ptr;
        u32 intr_target;
        u32 control;
 };
@@ -755,7 +766,7 @@ struct xhci_link_trb {
 /* Command completion event TRB */
 struct xhci_event_cmd {
        /* Pointer to command TRB, or the value passed by the event data trb */
-       u32 cmd_trb[2];
+       u64 cmd_trb;
        u32 status;
        u32 flags;
 };
@@ -848,8 +859,8 @@ union xhci_trb {
 #define TRB_CONFIG_EP          12
 /* Evaluate Context Command */
 #define TRB_EVAL_CONTEXT       13
-/* Reset Transfer Ring Command */
-#define TRB_RESET_RING         14
+/* Reset Endpoint Command */
+#define TRB_RESET_EP           14
 /* Stop Transfer Ring Command */
 #define TRB_STOP_RING          15
 /* Set Transfer Ring Dequeue Pointer Command */
@@ -929,6 +940,7 @@ struct xhci_ring {
        unsigned int            cancels_pending;
        unsigned int            state;
 #define SET_DEQ_PENDING                (1 << 0)
+#define EP_HALTED              (1 << 1)
        /* The TRB that was last reported in a stopped endpoint ring */
        union xhci_trb          *stopped_trb;
        struct xhci_td          *stopped_td;
@@ -940,9 +952,15 @@ struct xhci_ring {
        u32                     cycle_state;
 };
 
+struct xhci_dequeue_state {
+       struct xhci_segment *new_deq_seg;
+       union xhci_trb *new_deq_ptr;
+       int new_cycle_state;
+};
+
 struct xhci_erst_entry {
        /* 64-bit event ring segment address */
-       u32     seg_addr[2];
+       u64     seg_addr;
        u32     seg_size;
        /* Set to zero */
        u32     rsvd;
@@ -957,6 +975,13 @@ struct xhci_erst {
        unsigned int            erst_size;
 };
 
+struct xhci_scratchpad {
+       u64 *sp_array;
+       dma_addr_t sp_dma;
+       void **sp_buffers;
+       dma_addr_t *sp_dma_buffers;
+};
+
 /*
  * Each segment table entry is 4*32 bits long.  1K seems like an OK size:
  * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
@@ -1011,6 +1036,9 @@ struct xhci_hcd {
        struct xhci_ring        *cmd_ring;
        struct xhci_ring        *event_ring;
        struct xhci_erst        erst;
+       /* Scratchpad */
+       struct xhci_scratchpad  *scratchpad;
+
        /* slot enabling and address device helpers */
        struct completion       addr_dev;
        int slot_id;
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 static inline void xhci_writel(struct xhci_hcd *xhci,
                const unsigned int val, __u32 __iomem *regs)
 {
-       if (!in_interrupt())
-               xhci_dbg(xhci,
-                        "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
-                        regs, val);
+       xhci_dbg(xhci,
+                       "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+                       regs, val);
        writel(val, regs);
 }
 
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+               __u64 __iomem *regs)
+{
+       __u32 __iomem *ptr = (__u32 __iomem *) regs;
+       u64 val_lo = readl(ptr);
+       u64 val_hi = readl(ptr + 1);
+       return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+               const u64 val, __u64 __iomem *regs)
+{
+       __u32 __iomem *ptr = (__u32 __iomem *) regs;
+       u32 val_lo = lower_32_bits(val);
+       u32 val_hi = upper_32_bits(val);
+
+       xhci_dbg(xhci,
+                       "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
+                       regs, (long unsigned int) val);
+       writel(val_lo, ptr);
+       writel(val_hi, ptr + 1);
+}
+
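
A usage sketch for the new 64-bit accessors (not in this hunk): swapping in a new 64-byte aligned command ring pointer while preserving the reserved/flag bits 0:5 masked by CMD_RING_RSVD_BITS above. The op_regs pointer is assumed from the elided portion of struct xhci_hcd.

	static void set_cmd_ring_ptr(struct xhci_hcd *xhci, dma_addr_t ring_dma)
	{
		u64 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);

		/* keep bits 0:5, replace the 64-byte aligned ring pointer */
		val = (val & (u64) CMD_RING_RSVD_BITS) |
		      (ring_dma & ~(u64) CMD_RING_RSVD_BITS);
		xhci_write_64(xhci, val, &xhci->op_regs->cmd_ring);
	}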
 /* xHCI debugging */
 void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
 
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
                int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+               unsigned int slot_id, unsigned int ep_index,
+               struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+               struct xhci_ring *ep_ring, unsigned int slot_id,
+               unsigned int ep_index, struct xhci_dequeue_state *deq_state);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
                char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+
 #endif /* __LINUX_XHCI_HCD_H */
index a68d91a11bee32422dd1296ff539b7b036228b73..abe3aa67ed0033d59a6735d9d61e3ec97529ad01 100644 (file)
@@ -220,7 +220,7 @@ config USB_IOWARRIOR
 
 config USB_TEST
        tristate "USB testing driver"
-       depends on USB && USB_DEVICEFS
+       depends on USB
        help
          This driver is for testing host controller software.  It is used
          with specialized device firmware for regression and stress testing,
index 554a414f65d1104cbbbeeaed2885f2a209faf42d..c7c1ca0494cda359096b1eeeb281a55b80f2c994 100644 (file)
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
        int             i;
 
        /* log core options (read using indexed model) */
-       musb_ep_select(mbase, 0);
        reg = musb_read_configdata(mbase);
 
        strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
@@ -1990,7 +1989,7 @@ bad_config:
        if (status < 0)
                goto fail2;
 
-#ifdef CONFIG_USB_OTG
+#ifdef CONFIG_USB_MUSB_OTG
        setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
 #endif
 
index 40ed50ecedff6ed84a72d4342616a6f07e36f362..7a6778675ad3a8d5a76d1f8de9477a8c48309d12 100644 (file)
@@ -407,7 +407,7 @@ stall:
                                        csr |= MUSB_RXCSR_P_SENDSTALL
                                                | MUSB_RXCSR_FLUSHFIFO
                                                | MUSB_RXCSR_CLRDATATOG
-                                               | MUSB_TXCSR_P_WZC_BITS;
+                                               | MUSB_RXCSR_P_WZC_BITS;
                                        musb_writew(regs, MUSB_RXCSR,
                                                        csr);
                                }
index de3b2f18db4469943b0e7ce3b437ff98d53e0022..fbfd3fd9ce1f876f4949b15b1de016e7877d1348 100644 (file)
@@ -323,6 +323,7 @@ static inline void  musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
 
 static inline u8 musb_read_configdata(void __iomem *mbase)
 {
+       musb_writeb(mbase, MUSB_INDEX, 0);
        return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
 }
 
index e9a40b820fd4b28eac8053e40efa4082f7880b46..985cbcf48bda83317d235395fbbca689f5649ee1 100644 (file)
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
        { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
        { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
+       { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
        { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+       { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
        { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+       { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
index 60c64cc5be2ae9dbbcef5720dc537af008839f67..b574878c78b27b633a056d96cf3eb36a25d098b6 100644 (file)
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+       { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
index c9fbd74150929d6849b773bf10d5cb8b7d92ffe1..24dbd99e87d722a95beac152af27a475c46cac2b 100644 (file)
 
 #define FTDI_TURTELIZER_PID    0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
 
+/*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID      0x0c33  /* Vendor ID */
+#define AURICAL_USB_PID                0x0010  /* Aurical USB Audiometer */
+
 /*
  *   BmRequestType:  1100 0000b
  *   bRequest:       FTDI_E2_READ
index c31940a307f8d3fbdb9ad223380fd36262e4927e..270009afdf77043f2e60034b8d29182dad053edf 100644 (file)
 #define BANDB_DEVICE_ID_USOPTL4_4       0xAC44
 #define BANDB_DEVICE_ID_USOPTL4_2       0xAC42
 
-/* This driver also supports the ATEN UC2324 device since it is mos7840 based
- *  - if I knew the device id it would also support the ATEN UC2322 */
+/* This driver also supports
+ * ATEN UC2324 device using Moschip MCS7840
+ * ATEN UC2322 device using Moschip MCS7820
+ */
 #define USB_VENDOR_ID_ATENINTL         0x0557
 #define ATENINTL_DEVICE_ID_UC2324      0x2011
+#define ATENINTL_DEVICE_ID_UC2322      0x7820
 
 /* Interrupt Routine Defines    */
 
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+       {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
        {}                      /* terminating entry */
 };
 
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+       {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
        {}                      /* terminating entry */
 };
 
index 98262dd552bb58c8b4d3b487b7dfb285cc9d64da..c784ddbe7b614cf13b4e57ab1fbd6ad2efdf3121 100644 (file)
@@ -66,8 +66,10 @@ static int  option_tiocmget(struct tty_struct *tty, struct file *file);
 static int  option_tiocmset(struct tty_struct *tty, struct file *file,
                                unsigned int set, unsigned int clear);
 static int  option_send_setup(struct usb_serial_port *port);
+#ifdef CONFIG_PM
 static int  option_suspend(struct usb_serial *serial, pm_message_t message);
 static int  option_resume(struct usb_serial *serial);
+#endif
 
 /* Vendor and product IDs */
 #define OPTION_VENDOR_ID                       0x0AF0
@@ -205,6 +207,7 @@ static int  option_resume(struct usb_serial *serial);
 #define NOVATELWIRELESS_PRODUCT_MC727          0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D         0x4400
 #define NOVATELWIRELESS_PRODUCT_U727           0x5010
+#define NOVATELWIRELESS_PRODUCT_MC727_NEW      0x5100
 #define NOVATELWIRELESS_PRODUCT_MC760          0x6000
 #define NOVATELWIRELESS_PRODUCT_OVMC760                0x6002
 
@@ -259,11 +262,6 @@ static int  option_resume(struct usb_serial *serial);
 #define AXESSTEL_VENDOR_ID                     0x1726
 #define AXESSTEL_PRODUCT_MV110H                        0x1000
 
-#define ONDA_VENDOR_ID                         0x19d2
-#define ONDA_PRODUCT_MSA501HS                  0x0001
-#define ONDA_PRODUCT_ET502HS                   0x0002
-#define ONDA_PRODUCT_MT503HS                   0x2000
-
 #define BANDRICH_VENDOR_ID                     0x1A8D
 #define BANDRICH_PRODUCT_C100_1                        0x1002
 #define BANDRICH_PRODUCT_C100_2                        0x1003
@@ -301,6 +299,7 @@ static int  option_resume(struct usb_serial *serial);
 #define ZTE_PRODUCT_MF628                      0x0015
 #define ZTE_PRODUCT_MF626                      0x0031
 #define ZTE_PRODUCT_CDMA_TECH                  0xfffe
+#define ZTE_PRODUCT_AC8710                     0xfff1
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -322,6 +321,11 @@ static int  option_resume(struct usb_serial *serial);
 #define ALINK_VENDOR_ID                                0x1e0e
 #define ALINK_PRODUCT_3GU                      0x9200
 
+/* ALCATEL PRODUCTS */
+#define ALCATEL_VENDOR_ID                      0x1bbb
+#define ALCATEL_PRODUCT_X060S                  0x0000
+
+
 static struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
        { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
-       { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
-       { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x000e) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) },
-       { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) },
-       { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) },
        { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
        { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
        { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
-       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
-       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
-       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
-       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
        { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
        { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = {
        .name       = "option",
        .probe      = usb_serial_probe,
        .disconnect = usb_serial_disconnect,
+#ifdef CONFIG_PM
        .suspend    = usb_serial_suspend,
        .resume     = usb_serial_resume,
+#endif
        .id_table   = option_ids,
        .no_dynamic_id =        1,
 };
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = {
        .disconnect        = option_disconnect,
        .release           = option_release,
        .read_int_callback = option_instat_callback,
+#ifdef CONFIG_PM
        .suspend           = option_suspend,
        .resume            = option_resume,
+#endif
 };
 
 static int debug;
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb)
        int status = urb->status;
        struct usb_serial_port *port =  urb->context;
        struct option_port_private *portdata = usb_get_serial_port_data(port);
-       struct usb_serial *serial = port->serial;
 
        dbg("%s", __func__);
        dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp)
 {
        struct option_port_private *portdata;
-       struct usb_serial *serial = port->serial;
        int i, err;
        struct urb *urb;
 
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial)
        }
 }
 
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message)
 {
        dbg("%s entered", __func__);
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial)
        }
        return 0;
 }
+#endif
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
index fcb320217218109d60147b5bb5dc297f65b96da0..e20dc525d17763642f65a07835a4a2ca462f1a4a 100644 (file)
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
                                 US_BULK_GET_MAX_LUN, 
                                 USB_DIR_IN | USB_TYPE_CLASS | 
                                 USB_RECIP_INTERFACE,
-                                0, us->ifnum, us->iobuf, 1, HZ);
+                                0, us->ifnum, us->iobuf, 1, 10*HZ);
 
        US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", 
                  result, us->iobuf[0]);
index 6e4f6c50a120dfcb78eb5f26dc4301936589cd0f..019e8af449abfeb1679c1a20c801d6b0082edf8b 100644 (file)
@@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work)
         * list
         */
        if (worker->idle) {
-               spin_lock_irqsave(&worker->workers->lock, flags);
+               spin_lock(&worker->workers->lock);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
-               spin_unlock_irqrestore(&worker->workers->lock, flags);
+               spin_unlock(&worker->workers->lock);
        }
        if (!worker->working) {
                wake = 1;
index 60a45f3a4e916fe9af5a8c644b3a8eb9b5b73f81..3fdcc0512d3ab62f95d42708ca0d6a049340b877 100644 (file)
@@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
 
        btrfs_disk_key_to_cpu(&k1, disk);
 
-       if (k1.objectid > k2->objectid)
-               return 1;
-       if (k1.objectid < k2->objectid)
-               return -1;
-       if (k1.type > k2->type)
-               return 1;
-       if (k1.type < k2->type)
-               return -1;
-       if (k1.offset > k2->offset)
-               return 1;
-       if (k1.offset < k2->offset)
-               return -1;
-       return 0;
+       return btrfs_comp_cpu_keys(&k1, k2);
 }
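
For reference, btrfs_comp_cpu_keys() performs the same three-way comparison that the removed lines above open-coded; the real helper lives elsewhere in ctree, but a sketch consistent with those removed lines is:

	static int comp_cpu_keys_sketch(struct btrfs_key *k1, struct btrfs_key *k2)
	{
		if (k1->objectid > k2->objectid)
			return 1;
		if (k1->objectid < k2->objectid)
			return -1;
		if (k1->type > k2->type)
			return 1;
		if (k1->type < k2->type)
			return -1;
		if (k1->offset > k2->offset)
			return 1;
		if (k1->offset < k2->offset)
			return -1;
		return 0;
	}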
 
 /*
@@ -1052,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
            BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
                return 0;
 
-       if (btrfs_header_nritems(mid) > 2)
-               return 0;
-
        if (btrfs_header_nritems(mid) < 2)
                err_on_enospc = 1;
 
@@ -1701,6 +1686,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
        struct extent_buffer *b;
        int slot;
        int ret;
+       int err;
        int level;
        int lowest_unlock = 1;
        u8 lowest_level = 0;
@@ -1737,8 +1723,6 @@ again:
                        p->locks[level] = 1;
 
                if (cow) {
-                       int wret;
-
                        /*
                         * if we don't really need to cow this block
                         * then we don't want to set the path blocking,
@@ -1749,12 +1733,12 @@ again:
 
                        btrfs_set_path_blocking(p);
 
-                       wret = btrfs_cow_block(trans, root, b,
-                                              p->nodes[level + 1],
-                                              p->slots[level + 1], &b);
-                       if (wret) {
+                       err = btrfs_cow_block(trans, root, b,
+                                             p->nodes[level + 1],
+                                             p->slots[level + 1], &b);
+                       if (err) {
                                free_extent_buffer(b);
-                               ret = wret;
+                               ret = err;
                                goto done;
                        }
                }
@@ -1793,41 +1777,45 @@ cow_done:
                ret = bin_search(b, key, level, &slot);
 
                if (level != 0) {
-                       if (ret && slot > 0)
+                       int dec = 0;
+                       if (ret && slot > 0) {
+                               dec = 1;
                                slot -= 1;
+                       }
                        p->slots[level] = slot;
-                       ret = setup_nodes_for_search(trans, root, p, b, level,
+                       err = setup_nodes_for_search(trans, root, p, b, level,
                                                     ins_len);
-                       if (ret == -EAGAIN)
+                       if (err == -EAGAIN)
                                goto again;
-                       else if (ret)
+                       if (err) {
+                               ret = err;
                                goto done;
+                       }
                        b = p->nodes[level];
                        slot = p->slots[level];
 
                        unlock_up(p, level, lowest_unlock);
 
-                       /* this is only true while dropping a snapshot */
                        if (level == lowest_level) {
-                               ret = 0;
+                               if (dec)
+                                       p->slots[level]++;
                                goto done;
                        }
 
-                       ret = read_block_for_search(trans, root, p,
+                       err = read_block_for_search(trans, root, p,
                                                    &b, level, slot, key);
-                       if (ret == -EAGAIN)
+                       if (err == -EAGAIN)
                                goto again;
-
-                       if (ret == -EIO)
+                       if (err) {
+                               ret = err;
                                goto done;
+                       }
 
                        if (!p->skip_locking) {
-                               int lret;
-
                                btrfs_clear_path_blocking(p, NULL);
-                               lret = btrfs_try_spin_lock(b);
+                               err = btrfs_try_spin_lock(b);
 
-                               if (!lret) {
+                               if (!err) {
                                        btrfs_set_path_blocking(p);
                                        btrfs_tree_lock(b);
                                        btrfs_clear_path_blocking(p, b);
@@ -1837,16 +1825,14 @@ cow_done:
                        p->slots[level] = slot;
                        if (ins_len > 0 &&
                            btrfs_leaf_free_space(root, b) < ins_len) {
-                               int sret;
-
                                btrfs_set_path_blocking(p);
-                               sret = split_leaf(trans, root, key,
-                                                     p, ins_len, ret == 0);
+                               err = split_leaf(trans, root, key,
+                                                p, ins_len, ret == 0);
                                btrfs_clear_path_blocking(p, NULL);
 
-                               BUG_ON(sret > 0);
-                               if (sret) {
-                                       ret = sret;
+                               BUG_ON(err > 0);
+                               if (err) {
+                                       ret = err;
                                        goto done;
                                }
                        }
@@ -3807,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                }
 
                /* delete the leaf if it is mostly empty */
-               if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) {
+               if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
                        /* push_leaf_left fixes the path.
                         * make sure the path still points to our leaf
                         * for possible call to del_ptr below
@@ -4042,10 +4028,9 @@ out:
  * calling this function.
  */
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
-                       struct btrfs_key *key, int lowest_level,
+                       struct btrfs_key *key, int level,
                        int cache_only, u64 min_trans)
 {
-       int level = lowest_level;
        int slot;
        struct extent_buffer *c;
 
@@ -4058,11 +4043,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
                c = path->nodes[level];
 next:
                if (slot >= btrfs_header_nritems(c)) {
-                       level++;
-                       if (level == BTRFS_MAX_LEVEL)
+                       int ret;
+                       int orig_lowest;
+                       struct btrfs_key cur_key;
+                       if (level + 1 >= BTRFS_MAX_LEVEL ||
+                           !path->nodes[level + 1])
                                return 1;
-                       continue;
+
+                       if (path->locks[level + 1]) {
+                               level++;
+                               continue;
+                       }
+
+                       slot = btrfs_header_nritems(c) - 1;
+                       if (level == 0)
+                               btrfs_item_key_to_cpu(c, &cur_key, slot);
+                       else
+                               btrfs_node_key_to_cpu(c, &cur_key, slot);
+
+                       orig_lowest = path->lowest_level;
+                       btrfs_release_path(root, path);
+                       path->lowest_level = level;
+                       ret = btrfs_search_slot(NULL, root, &cur_key, path,
+                                               0, 0);
+                       path->lowest_level = orig_lowest;
+                       if (ret < 0)
+                               return ret;
+
+                       c = path->nodes[level];
+                       slot = path->slots[level];
+                       if (ret == 0)
+                               slot++;
+                       goto next;
                }
+
                if (level == 0)
                        btrfs_item_key_to_cpu(c, key, slot);
                else {
@@ -4146,7 +4160,8 @@ again:
         * advance the path if there are now more items available.
         */
        if (nritems > 0 && path->slots[0] < nritems - 1) {
-               path->slots[0]++;
+               if (ret == 0)
+                       path->slots[0]++;
                ret = 0;
                goto done;
        }
@@ -4278,10 +4293,10 @@ int btrfs_previous_item(struct btrfs_root *root,
                        path->slots[0]--;
 
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-               if (found_key.type == type)
-                       return 0;
                if (found_key.objectid < min_objectid)
                        break;
+               if (found_key.type == type)
+                       return 0;
                if (found_key.objectid == min_objectid &&
                    found_key.type < type)
                        break;
index 98a87383871722fbddc36590cae9e60a512ef53a..215ef8cae823ce53cf91e559ebfe7ed602eda214 100644 (file)
@@ -481,7 +481,7 @@ struct btrfs_shared_data_ref {
 
 struct btrfs_extent_inline_ref {
        u8 type;
-       u64 offset;
+       __le64 offset;
 } __attribute__ ((__packed__));
 
 /* old style backrefs item */
@@ -689,6 +689,7 @@ struct btrfs_space_info {
        struct list_head block_groups;
        spinlock_t lock;
        struct rw_semaphore groups_sem;
+       atomic_t caching_threads;
 };
 
 /*
@@ -707,6 +708,9 @@ struct btrfs_free_cluster {
        /* first extent starting offset */
        u64 window_start;
 
+       /* if this cluster simply points at a bitmap in the block group */
+       bool points_to_bitmap;
+
        struct btrfs_block_group_cache *block_group;
        /*
         * when a cluster is allocated from a block group, we put the
@@ -716,24 +720,37 @@ struct btrfs_free_cluster {
        struct list_head block_group_list;
 };
 
+enum btrfs_caching_type {
+       BTRFS_CACHE_NO          = 0,
+       BTRFS_CACHE_STARTED     = 1,
+       BTRFS_CACHE_FINISHED    = 2,
+};
+
 struct btrfs_block_group_cache {
        struct btrfs_key key;
        struct btrfs_block_group_item item;
+       struct btrfs_fs_info *fs_info;
        spinlock_t lock;
-       struct mutex cache_mutex;
        u64 pinned;
        u64 reserved;
        u64 flags;
-       int cached;
+       u64 sectorsize;
+       int extents_thresh;
+       int free_extents;
+       int total_bitmaps;
        int ro;
        int dirty;
 
+       /* cache tracking stuff */
+       wait_queue_head_t caching_q;
+       int cached;
+
        struct btrfs_space_info *space_info;
 
        /* free space cache stuff */
        spinlock_t tree_lock;
-       struct rb_root free_space_bytes;
        struct rb_root free_space_offset;
+       u64 free_space;
 
        /* block group cache stuff */
        struct rb_node cache_node;
@@ -942,6 +959,9 @@ struct btrfs_root {
        /* the node lock is held while changing the node pointer */
        spinlock_t node_lock;
 
+       /* taken when updating the commit root */
+       struct rw_semaphore commit_root_sem;
+
        struct extent_buffer *commit_root;
        struct btrfs_root *log_root;
        struct btrfs_root *reloc_root;
@@ -1988,6 +2008,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
                                 u64 bytes);
 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
                              u64 bytes);
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                     int level, int *slot);
index d28d29c95f7ca23a91e9ccf3c306840484ea29ed..7dcaa8138864b455ea0314bcf78d17fe820c2f5d 100644 (file)
@@ -909,6 +909,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        spin_lock_init(&root->inode_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
+       init_rwsem(&root->commit_root_sem);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
@@ -1799,6 +1800,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                                           btrfs_super_chunk_root(disk_super),
                                           blocksize, generation);
        BUG_ON(!chunk_root->node);
+       if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+               printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
+                      sb->s_id);
+               goto fail_chunk_root;
+       }
        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
        chunk_root->commit_root = btrfs_root_node(chunk_root);
 
@@ -1826,6 +1832,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                                          blocksize, generation);
        if (!tree_root->node)
                goto fail_chunk_root;
+       if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+               printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
+                      sb->s_id);
+               goto fail_tree_root;
+       }
        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
        tree_root->commit_root = btrfs_root_node(tree_root);
 
@@ -2322,6 +2333,9 @@ int close_ctree(struct btrfs_root *root)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }
 
+       fs_info->closing = 2;
+       smp_mb();
+
        if (fs_info->delalloc_bytes) {
                printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
                       (unsigned long long)fs_info->delalloc_bytes);
@@ -2343,6 +2357,7 @@ int close_ctree(struct btrfs_root *root)
        free_extent_buffer(root->fs_info->csum_root->commit_root);
 
        btrfs_free_block_groups(root->fs_info);
+       btrfs_free_pinned_extents(root->fs_info);
 
        del_fs_roots(fs_info);
 
index a5aca3997d426da09c87e33642916284eca135b6..fadf69a2764b316828f82d31be8490c0dcf57739 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/blkdev.h>
 #include <linux/sort.h>
 #include <linux/rcupdate.h>
+#include <linux/kthread.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
 
+static noinline int
+block_group_cache_done(struct btrfs_block_group_cache *cache)
+{
+       smp_mb();
+       return cache->cached == BTRFS_CACHE_FINISHED;
+}
+
 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 {
        return (cache->flags & bits) == bits;
@@ -145,21 +153,71 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
        return ret;
 }
 
+/*
+ * We always set EXTENT_LOCKED for the super mirror extents so we don't
+ * overwrite them; those bits need to be unset here.  Also, if we are
+ * unmounting with pinned extents still sitting there because a block group
+ * was still caching, we need to clear those now, since we are done.
+ */
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
+{
+       u64 start, end, last = 0;
+       int ret;
+
+       while (1) {
+               ret = find_first_extent_bit(&info->pinned_extents, last,
+                                           &start, &end,
+                                           EXTENT_LOCKED|EXTENT_DIRTY);
+               if (ret)
+                       break;
+
+               clear_extent_bits(&info->pinned_extents, start, end,
+                                 EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
+               last = end+1;
+       }
+}
+
+static int remove_sb_from_cache(struct btrfs_root *root,
+                               struct btrfs_block_group_cache *cache)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       u64 bytenr;
+       u64 *logical;
+       int stripe_len;
+       int i, nr, ret;
+
+       for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+               bytenr = btrfs_sb_offset(i);
+               ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+                                      cache->key.objectid, bytenr,
+                                      0, &logical, &nr, &stripe_len);
+               BUG_ON(ret);
+               while (nr--) {
+                       try_lock_extent(&fs_info->pinned_extents,
+                                       logical[nr],
+                                       logical[nr] + stripe_len - 1, GFP_NOFS);
+               }
+               kfree(logical);
+       }
+
+       return 0;
+}
+
 /*
  * this is only called by caching_kthread; since we could have freed extents,
  * we need to check the pinned_extents for any extents that can't be used yet
  * since their free space will be released as soon as the transaction commits.
  */
-static int add_new_free_space(struct btrfs_block_group_cache *block_group,
+static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
 {
-       u64 extent_start, extent_end, size;
+       u64 extent_start, extent_end, size, total_added = 0;
        int ret;
 
        while (start < end) {
                ret = find_first_extent_bit(&info->pinned_extents, start,
                                            &extent_start, &extent_end,
-                                           EXTENT_DIRTY);
+                                           EXTENT_DIRTY|EXTENT_LOCKED);
                if (ret)
                        break;
 
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
+                       total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
@@ -178,84 +237,79 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
 
        if (start < end) {
                size = end - start;
+               total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }
 
-       return 0;
+       return total_added;
 }
 
-static int remove_sb_from_cache(struct btrfs_root *root,
-                               struct btrfs_block_group_cache *cache)
-{
-       u64 bytenr;
-       u64 *logical;
-       int stripe_len;
-       int i, nr, ret;
-
-       for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
-               bytenr = btrfs_sb_offset(i);
-               ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
-                                      cache->key.objectid, bytenr, 0,
-                                      &logical, &nr, &stripe_len);
-               BUG_ON(ret);
-               while (nr--) {
-                       btrfs_remove_free_space(cache, logical[nr],
-                                               stripe_len);
-               }
-               kfree(logical);
-       }
-       return 0;
-}
-
-static int cache_block_group(struct btrfs_root *root,
-                            struct btrfs_block_group_cache *block_group)
+static int caching_kthread(void *data)
 {
+       struct btrfs_block_group_cache *block_group = data;
+       struct btrfs_fs_info *fs_info = block_group->fs_info;
+       u64 last = 0;
        struct btrfs_path *path;
        int ret = 0;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        int slot;
-       u64 last;
-
-       if (!block_group)
-               return 0;
+       u64 total_found = 0;
 
-       root = root->fs_info->extent_root;
-
-       if (block_group->cached)
-               return 0;
+       BUG_ON(!fs_info);
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       path->reada = 2;
+       atomic_inc(&block_group->space_info->caching_threads);
+       last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+again:
+       /* need to make sure the commit_root doesn't disappear */
+       down_read(&fs_info->extent_root->commit_root_sem);
+
        /*
-        * we get into deadlocks with paths held by callers of this function.
-        * since the alloc_mutex is protecting things right now, just
-        * skip the locking here
+        * We don't want to deadlock with somebody trying to allocate a new
+        * extent for the extent root while also trying to search the extent
+        * root to add free space.  So we skip locking and search the commit
+        * root, since it's read-only.
         */
        path->skip_locking = 1;
-       last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+       path->search_commit_root = 1;
+       path->reada = 2;
+
        key.objectid = last;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
-       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
 
        while (1) {
+               smp_mb();
+               if (block_group->fs_info->closing > 1) {
+                       last = (u64)-1;
+                       break;
+               }
+
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
-                       ret = btrfs_next_leaf(root, path);
+                       ret = btrfs_next_leaf(fs_info->extent_root, path);
                        if (ret < 0)
                                goto err;
-                       if (ret == 0)
-                               continue;
-                       else
+                       else if (ret)
                                break;
+
+                       if (need_resched()) {
+                               btrfs_release_path(fs_info->extent_root, path);
+                               up_read(&fs_info->extent_root->commit_root_sem);
+                               cond_resched();
+                               goto again;
+                       }
+
+                       continue;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
@@ -266,24 +320,59 @@ static int cache_block_group(struct btrfs_root *root,
                        break;
 
                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
-                       add_new_free_space(block_group, root->fs_info, last,
-                                          key.objectid);
-
+                       total_found += add_new_free_space(block_group,
+                                                         fs_info, last,
+                                                         key.objectid);
                        last = key.objectid + key.offset;
                }
+
+               if (total_found > (1024 * 1024 * 2)) {
+                       total_found = 0;
+                       wake_up(&block_group->caching_q);
+               }
 next:
                path->slots[0]++;
        }
+       ret = 0;
 
-       add_new_free_space(block_group, root->fs_info, last,
-                          block_group->key.objectid +
-                          block_group->key.offset);
+       total_found += add_new_free_space(block_group, fs_info, last,
+                                         block_group->key.objectid +
+                                         block_group->key.offset);
+
+       spin_lock(&block_group->lock);
+       block_group->cached = BTRFS_CACHE_FINISHED;
+       spin_unlock(&block_group->lock);
 
-       block_group->cached = 1;
-       remove_sb_from_cache(root, block_group);
-       ret = 0;
 err:
        btrfs_free_path(path);
+       up_read(&fs_info->extent_root->commit_root_sem);
+       atomic_dec(&block_group->space_info->caching_threads);
+       wake_up(&block_group->caching_q);
+
+       return 0;
+}
+
+static int cache_block_group(struct btrfs_block_group_cache *cache)
+{
+       struct task_struct *tsk;
+       int ret = 0;
+
+       spin_lock(&cache->lock);
+       if (cache->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&cache->lock);
+               return ret;
+       }
+       cache->cached = BTRFS_CACHE_STARTED;
+       spin_unlock(&cache->lock);
+
+       tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
+                         cache->key.objectid);
+       if (IS_ERR(tsk)) {
+               ret = PTR_ERR(tsk);
+               printk(KERN_ERR "error running thread %d\n", ret);
+               BUG();
+       }
+
        return ret;
 }
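
The consumers of this async caching scheme fall outside the hunk; a hedged sketch of how an allocation path would kick caching off and wait for it, built only from the caching_q wait queue and the block_group_cache_done() helper shown above:

	static void wait_for_block_group_cache(struct btrfs_block_group_cache *cache)
	{
		/* returns immediately if caching already started or finished */
		cache_block_group(cache);
		wait_event(cache->caching_q, block_group_cache_done(cache));
	}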
 
@@ -2387,13 +2476,29 @@ fail:
 
 }
 
+static struct btrfs_block_group_cache *
+next_block_group(struct btrfs_root *root,
+                struct btrfs_block_group_cache *cache)
+{
+       struct rb_node *node;
+       spin_lock(&root->fs_info->block_group_cache_lock);
+       node = rb_next(&cache->cache_node);
+       btrfs_put_block_group(cache);
+       if (node) {
+               cache = rb_entry(node, struct btrfs_block_group_cache,
+                                cache_node);
+               atomic_inc(&cache->count);
+       } else
+               cache = NULL;
+       spin_unlock(&root->fs_info->block_group_cache_lock);
+       return cache;
+}
+
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
 {
-       struct btrfs_block_group_cache *cache, *entry;
-       struct rb_node *n;
+       struct btrfs_block_group_cache *cache;
        int err = 0;
-       int werr = 0;
        struct btrfs_path *path;
        u64 last = 0;
 
@@ -2402,39 +2507,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        while (1) {
-               cache = NULL;
-               spin_lock(&root->fs_info->block_group_cache_lock);
-               for (n = rb_first(&root->fs_info->block_group_cache_tree);
-                    n; n = rb_next(n)) {
-                       entry = rb_entry(n, struct btrfs_block_group_cache,
-                                        cache_node);
-                       if (entry->dirty) {
-                               cache = entry;
-                               break;
-                       }
+               if (last == 0) {
+                       err = btrfs_run_delayed_refs(trans, root,
+                                                    (unsigned long)-1);
+                       BUG_ON(err);
                }
-               spin_unlock(&root->fs_info->block_group_cache_lock);
 
-               if (!cache)
-                       break;
+               cache = btrfs_lookup_first_block_group(root->fs_info, last);
+               while (cache) {
+                       if (cache->dirty)
+                               break;
+                       cache = next_block_group(root, cache);
+               }
+               if (!cache) {
+                       if (last == 0)
+                               break;
+                       last = 0;
+                       continue;
+               }
 
                cache->dirty = 0;
-               last += cache->key.offset;
+               last = cache->key.objectid + cache->key.offset;
 
-               err = write_one_cache_group(trans, root,
-                                           path, cache);
-               /*
-                * if we fail to write the cache group, we want
-                * to keep it marked dirty in hopes that a later
-                * write will work
-                */
-               if (err) {
-                       werr = err;
-                       continue;
-               }
+               err = write_one_cache_group(trans, root, path, cache);
+               BUG_ON(err);
+               btrfs_put_block_group(cache);
        }
+
        btrfs_free_path(path);
-       return werr;
+       return 0;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -2484,6 +2585,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        found->force_alloc = 0;
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
+       atomic_set(&found->caching_threads, 0);
        return 0;
 }
 
@@ -2947,13 +3049,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *fs_info = root->fs_info;
 
-       if (pin) {
+       if (pin)
                set_extent_dirty(&fs_info->pinned_extents,
                                bytenr, bytenr + num - 1, GFP_NOFS);
-       } else {
-               clear_extent_dirty(&fs_info->pinned_extents,
-                               bytenr, bytenr + num - 1, GFP_NOFS);
-       }
 
        while (num > 0) {
                cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2969,14 +3067,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
                        spin_unlock(&cache->space_info->lock);
                        fs_info->total_pinned += len;
                } else {
+                       int unpin = 0;
+
+                       /*
+                        * in order not to race with the block group caching, we
+                        * only want to unpin the extent if we are cached.  If
+                        * we aren't cached, we want to start async caching this
+                        * block group so we can free the extent the next time
+                        * around.
+                        */
                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
-                       cache->pinned -= len;
-                       cache->space_info->bytes_pinned -= len;
+                       unpin = (cache->cached == BTRFS_CACHE_FINISHED);
+                       if (likely(unpin)) {
+                               cache->pinned -= len;
+                               cache->space_info->bytes_pinned -= len;
+                               fs_info->total_pinned -= len;
+                       }
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
-                       fs_info->total_pinned -= len;
-                       if (cache->cached)
+
+                       if (likely(unpin))
+                               clear_extent_dirty(&fs_info->pinned_extents,
+                                                  bytenr, bytenr + len - 1,
+                                                  GFP_NOFS);
+                       else
+                               cache_block_group(cache);
+
+                       if (unpin)
                                btrfs_add_free_space(cache, bytenr, len);
                }
                btrfs_put_block_group(cache);
@@ -3030,6 +3148,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
                                            &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;
+
                set_extent_dirty(copy, start, end, GFP_NOFS);
                last = end + 1;
        }
@@ -3058,6 +3177,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
                cond_resched();
        }
+
        return ret;
 }
 
@@ -3435,6 +3555,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
        return ret;
 }
 
+/*
+ * when we wait for progress in the block group caching, it's because
+ * our allocation attempt failed at least once.  So, we must sleep
+ * and let some progress happen before we try again.
+ *
+ * This function will sleep at least once waiting for new free space to
+ * show up, and then it will check the block group free space numbers
+ * for our min num_bytes.  Another option is to have it go ahead
+ * and look in the rbtree for a free extent of a given size, but this
+ * is a good start.
+ */
+static noinline int
+wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
+                               u64 num_bytes)
+{
+       DEFINE_WAIT(wait);
+
+       prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
+
+       if (block_group_cache_done(cache)) {
+               finish_wait(&cache->caching_q, &wait);
+               return 0;
+       }
+       schedule();
+       finish_wait(&cache->caching_q, &wait);
+
+       wait_event(cache->caching_q, block_group_cache_done(cache) ||
+                  (cache->free_space >= num_bytes));
+       return 0;
+}
+
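
Because wait_block_group_cache_progress() is new here, it may help to see its shape outside the kernel: sleep at least once so the caching thread can make headway, then block until caching finishes or enough space shows up. A rough pthread analogue, with hypothetical names, assuming the worker broadcasts on progress:

        #include <pthread.h>
        #include <stdbool.h>

        /* rough pthread analogue of wait_block_group_cache_progress();
         * names and layout are hypothetical */
        struct bg {
                pthread_mutex_t lock;
                pthread_cond_t progress;        /* broadcast by the caching worker */
                bool done;
                unsigned long long free_space;
        };

        static void wait_for_progress(struct bg *bg, unsigned long long need)
        {
                pthread_mutex_lock(&bg->lock);

                /* sleep at least once so the worker can make headway */
                if (!bg->done)
                        pthread_cond_wait(&bg->progress, &bg->lock);

                /* then wait until caching finished or enough space appeared */
                while (!bg->done && bg->free_space < need)
                        pthread_cond_wait(&bg->progress, &bg->lock);

                pthread_mutex_unlock(&bg->lock);
        }
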
+enum btrfs_loop_type {
+       LOOP_CACHED_ONLY = 0,
+       LOOP_CACHING_NOWAIT = 1,
+       LOOP_CACHING_WAIT = 2,
+       LOOP_ALLOC_CHUNK = 3,
+       LOOP_NO_EMPTY_SIZE = 4,
+};
+
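
The enum above orders the allocation passes from cheapest to most drastic; find_free_extent() below bumps the level and retries whenever a pass comes up empty. A toy, self-contained model of that escalation (try_alloc() is a stand-in, not a real kernel function):

        #include <stdbool.h>
        #include <stdio.h>

        enum loop_type {
                LOOP_CACHED_ONLY,
                LOOP_CACHING_NOWAIT,
                LOOP_CACHING_WAIT,
                LOOP_ALLOC_CHUNK,
                LOOP_NO_EMPTY_SIZE,
        };

        /* stand-in for one pass over the block groups; the real code relaxes
         * its requirements (and pays more) as the effort level grows */
        static bool try_alloc(enum loop_type effort)
        {
                return effort >= LOOP_CACHING_WAIT;     /* pretend we succeed here */
        }

        int main(void)
        {
                enum loop_type loop;

                for (loop = LOOP_CACHED_ONLY; loop <= LOOP_NO_EMPTY_SIZE; loop++) {
                        if (try_alloc(loop)) {
                                printf("allocated at effort level %d\n", (int)loop);
                                return 0;
                        }
                }
                printf("ENOSPC\n");
                return 1;
        }
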
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -3460,6 +3619,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_space_info *space_info;
        int last_ptr_loop = 0;
        int loop = 0;
+       bool found_uncached_bg = false;
 
        WARN_ON(num_bytes < root->sectorsize);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3491,15 +3651,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        search_start = max(search_start, first_logical_byte(root, 0));
        search_start = max(search_start, hint_byte);
 
-       if (!last_ptr) {
+       if (!last_ptr)
                empty_cluster = 0;
-               loop = 1;
-       }
 
        if (search_start == hint_byte) {
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
-               if (block_group && block_group_bits(block_group, data)) {
+               /*
+                * we don't want to use the block group if it doesn't match our
+                * allocation bits, or if its not cached.
+                */
+               if (block_group && block_group_bits(block_group, data) &&
+                   block_group_cache_done(block_group)) {
                        down_read(&space_info->groups_sem);
                        if (list_empty(&block_group->list) ||
                            block_group->ro) {
@@ -3522,21 +3685,35 @@ search:
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups, list) {
                u64 offset;
+               int cached;
 
                atomic_inc(&block_group->count);
                search_start = block_group->key.objectid;
 
 have_block_group:
-               if (unlikely(!block_group->cached)) {
-                       mutex_lock(&block_group->cache_mutex);
-                       ret = cache_block_group(root, block_group);
-                       mutex_unlock(&block_group->cache_mutex);
-                       if (ret) {
-                               btrfs_put_block_group(block_group);
-                               break;
+               if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+                       /*
+                        * we want to start caching kthreads, but not too many
+                        * right off the bat so we don't overwhelm the system,
+                        * so only start them if there are fewer than 2 and we're
+                        * in the initial allocation phase.
+                        */
+                       if (loop > LOOP_CACHING_NOWAIT ||
+                           atomic_read(&space_info->caching_threads) < 2) {
+                               ret = cache_block_group(block_group);
+                               BUG_ON(ret);
                        }
                }
 
+               cached = block_group_cache_done(block_group);
+               if (unlikely(!cached)) {
+                       found_uncached_bg = true;
+
+                       /* if we only want cached bgs, loop */
+                       if (loop == LOOP_CACHED_ONLY)
+                               goto loop;
+               }
+
                if (unlikely(block_group->ro))
                        goto loop;
 
@@ -3615,14 +3792,21 @@ refill_cluster:
                                        spin_unlock(&last_ptr->refill_lock);
                                        goto checks;
                                }
+                       } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
+                               spin_unlock(&last_ptr->refill_lock);
+
+                               wait_block_group_cache_progress(block_group,
+                                      num_bytes + empty_cluster + empty_size);
+                               goto have_block_group;
                        }
+
                        /*
                         * at this point we either didn't find a cluster
                         * or we weren't able to allocate a block from our
                         * cluster.  Free the cluster we've been trying
                         * to use, and go to the next block group
                         */
-                       if (loop < 2) {
+                       if (loop < LOOP_NO_EMPTY_SIZE) {
                                btrfs_return_cluster_to_free_space(NULL,
                                                                   last_ptr);
                                spin_unlock(&last_ptr->refill_lock);
@@ -3633,11 +3817,17 @@ refill_cluster:
 
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
-               if (!offset)
+               if (!offset && (cached || (!cached &&
+                                          loop == LOOP_CACHING_NOWAIT))) {
                        goto loop;
+               } else if (!offset && (!cached &&
+                                      loop > LOOP_CACHING_NOWAIT)) {
+                       wait_block_group_cache_progress(block_group,
+                                       num_bytes + empty_size);
+                       goto have_block_group;
+               }
 checks:
                search_start = stripe_align(root, offset);
-
                /* move on to the next group */
                if (search_start + num_bytes >= search_end) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
@@ -3683,13 +3873,26 @@ loop:
        }
        up_read(&space_info->groups_sem);
 
-       /* loop == 0, try to find a clustered alloc in every block group
-        * loop == 1, try again after forcing a chunk allocation
-        * loop == 2, set empty_size and empty_cluster to 0 and try again
+       /* LOOP_CACHED_ONLY, only search fully cached block groups
+        * LOOP_CACHING_NOWAIT, search partially cached block groups, but
+        *                      don't wait for them to finish caching
+        * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
+        * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
+        * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
+        *                      again
         */
-       if (!ins->objectid && loop < 3 &&
-           (empty_size || empty_cluster || allowed_chunk_alloc)) {
-               if (loop >= 2) {
+       if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
+           (found_uncached_bg || empty_size || empty_cluster ||
+            allowed_chunk_alloc)) {
+               if (found_uncached_bg) {
+                       found_uncached_bg = false;
+                       if (loop < LOOP_CACHING_WAIT) {
+                               loop++;
+                               goto search;
+                       }
+               }
+
+               if (loop == LOOP_ALLOC_CHUNK) {
                        empty_size = 0;
                        empty_cluster = 0;
                }
@@ -3702,7 +3905,7 @@ loop:
                        space_info->force_alloc = 1;
                }
 
-               if (loop < 3) {
+               if (loop < LOOP_NO_EMPTY_SIZE) {
                        loop++;
                        goto search;
                }
@@ -3798,7 +4001,7 @@ again:
                               num_bytes, data, 1);
                goto again;
        }
-       if (ret) {
+       if (ret == -ENOSPC) {
                struct btrfs_space_info *sinfo;
 
                sinfo = __find_space_info(root->fs_info, data);
@@ -3806,7 +4009,6 @@ again:
                       "wanted %llu\n", (unsigned long long)data,
                       (unsigned long long)num_bytes);
                dump_space_info(sinfo, num_bytes);
-               BUG();
        }
 
        return ret;
@@ -3844,7 +4046,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
        ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
                                     empty_size, hint_byte, search_end, ins,
                                     data);
-       update_reserved_extents(root, ins->objectid, ins->offset, 1);
+       if (!ret)
+               update_reserved_extents(root, ins->objectid, ins->offset, 1);
+
        return ret;
 }
 
@@ -4006,9 +4210,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_cache *block_group;
 
        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-       mutex_lock(&block_group->cache_mutex);
-       cache_block_group(root, block_group);
-       mutex_unlock(&block_group->cache_mutex);
+       cache_block_group(block_group);
+       wait_event(block_group->caching_q,
+                  block_group_cache_done(block_group));
 
        ret = btrfs_remove_free_space(block_group, ins->objectid,
                                      ins->offset);
@@ -4039,7 +4243,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
        ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
                                     empty_size, hint_byte, search_end,
                                     ins, 0);
-       BUG_ON(ret);
+       if (ret)
+               return ret;
 
        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
                if (parent == 0)
@@ -6955,11 +7160,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                         &info->block_group_cache_tree);
                spin_unlock(&info->block_group_cache_lock);
 
-               btrfs_remove_free_space_cache(block_group);
                down_write(&block_group->space_info->groups_sem);
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);
 
+               if (block_group->cached == BTRFS_CACHE_STARTED)
+                       wait_event(block_group->caching_q,
+                                  block_group_cache_done(block_group));
+
+               btrfs_remove_free_space_cache(block_group);
+
                WARN_ON(atomic_read(&block_group->count) != 1);
                kfree(block_group);
 
@@ -7025,9 +7235,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                atomic_set(&cache->count, 1);
                spin_lock_init(&cache->lock);
                spin_lock_init(&cache->tree_lock);
-               mutex_init(&cache->cache_mutex);
+               cache->fs_info = info;
+               init_waitqueue_head(&cache->caching_q);
                INIT_LIST_HEAD(&cache->list);
                INIT_LIST_HEAD(&cache->cluster_list);
+
+               /*
+                * we only want to have 32k of RAM per block group for keeping
+                * track of free space, and if we pass 1/2 of that we want to
+                * start converting things over to using bitmaps
+                */
+               cache->extents_thresh = ((1024 * 32) / 2) /
+                       sizeof(struct btrfs_free_space);
+
                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
@@ -7036,6 +7256,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, path);
                cache->flags = btrfs_block_group_flags(&cache->item);
+               cache->sectorsize = root->sectorsize;
+
+               remove_sb_from_cache(root, cache);
+
+               /*
+                * check for two cases, either we are full, and therefore
+                * don't need to bother with the caching work since we won't
+                * find any space, or we are empty, and we can just add all
+                * the space in and be done with it.  This saves us a lot of
+                * time, particularly in the full case.
+                */
+               if (found_key.offset == btrfs_block_group_used(&cache->item)) {
+                       cache->cached = BTRFS_CACHE_FINISHED;
+               } else if (btrfs_block_group_used(&cache->item) == 0) {
+                       cache->cached = BTRFS_CACHE_FINISHED;
+                       add_new_free_space(cache, root->fs_info,
+                                          found_key.objectid,
+                                          found_key.objectid +
+                                          found_key.offset);
+               }
 
                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
@@ -7079,10 +7319,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+       cache->sectorsize = root->sectorsize;
+
+       /*
+        * we only want to have 32k of RAM per block group for keeping track
+        * of free space, and if we pass 1/2 of that we want to start
+        * converting things over to using bitmaps
+        */
+       cache->extents_thresh = ((1024 * 32) / 2) /
+               sizeof(struct btrfs_free_space);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        spin_lock_init(&cache->tree_lock);
-       mutex_init(&cache->cache_mutex);
+       init_waitqueue_head(&cache->caching_q);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
 
@@ -7091,6 +7340,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache->flags = type;
        btrfs_set_block_group_flags(&cache->item, type);
 
+       cache->cached = BTRFS_CACHE_FINISHED;
+       remove_sb_from_cache(root, cache);
+
+       add_new_free_space(cache, root->fs_info, chunk_offset,
+                          chunk_offset + size);
+
        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);
@@ -7149,7 +7404,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
        spin_unlock(&root->fs_info->block_group_cache_lock);
-       btrfs_remove_free_space_cache(block_group);
+
        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so people can check to see if they
@@ -7158,11 +7413,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        list_del_init(&block_group->list);
        up_write(&block_group->space_info->groups_sem);
 
+       if (block_group->cached == BTRFS_CACHE_STARTED)
+               wait_event(block_group->caching_q,
+                          block_group_cache_done(block_group));
+
+       btrfs_remove_free_space_cache(block_group);
+
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        spin_unlock(&block_group->space_info->lock);
-       block_group->space_info->full = 0;
+
+       btrfs_clear_space_info_full(root->fs_info);
 
        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);
index 4538e48581a5171b33af2d3d2c632dbff4e412e2..af99b78b288e8c5e8a14e44b27b3005e5676c26c 100644 (file)
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/pagemap.h>
 #include <linux/sched.h>
+#include <linux/math64.h>
 #include "ctree.h"
 #include "free-space-cache.h"
 #include "transaction.h"
 
-struct btrfs_free_space {
-       struct rb_node bytes_index;
-       struct rb_node offset_index;
-       u64 offset;
-       u64 bytes;
-};
+#define BITS_PER_BITMAP                (PAGE_CACHE_SIZE * 8)
+#define MAX_CACHE_BYTES_PER_GIG        (32 * 1024)
 
-static int tree_insert_offset(struct rb_root *root, u64 offset,
-                             struct rb_node *node)
+static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+                                         u64 offset)
 {
-       struct rb_node **p = &root->rb_node;
-       struct rb_node *parent = NULL;
-       struct btrfs_free_space *info;
+       BUG_ON(offset < bitmap_start);
+       offset -= bitmap_start;
+       return (unsigned long)(div64_u64(offset, sectorsize));
+}
 
-       while (*p) {
-               parent = *p;
-               info = rb_entry(parent, struct btrfs_free_space, offset_index);
+static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+{
+       return (unsigned long)(div64_u64(bytes, sectorsize));
+}
 
-               if (offset < info->offset)
-                       p = &(*p)->rb_left;
-               else if (offset > info->offset)
-                       p = &(*p)->rb_right;
-               else
-                       return -EEXIST;
-       }
+static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+                                  u64 offset)
+{
+       u64 bitmap_start;
+       u64 bytes_per_bitmap;
 
-       rb_link_node(node, parent, p);
-       rb_insert_color(node, root);
+       bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
+       bitmap_start = offset - block_group->key.objectid;
+       bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
+       bitmap_start *= bytes_per_bitmap;
+       bitmap_start += block_group->key.objectid;
 
-       return 0;
+       return bitmap_start;
 }
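
To make the bitmap geometry concrete: assuming 4K pages and a 4K sectorsize (assumptions, not guarantees), BITS_PER_BITMAP is 32768, so one bitmap spans 32768 * 4096 bytes = 128MB of the block group. A standalone restatement of the rounding math in offset_to_bitmap() and offset_to_bit():

        #include <stdint.h>
        #include <stdio.h>

        #define SECTORSIZE      4096ULL         /* assumption */
        #define BITS_PER_BMP    (4096ULL * 8)   /* PAGE_CACHE_SIZE * 8, assuming 4K pages */

        /* userspace restatement of offset_to_bitmap(): round an offset down
         * to the start of the bitmap-sized slot it falls in */
        static uint64_t bitmap_start_for(uint64_t bg_start, uint64_t offset)
        {
                uint64_t span = BITS_PER_BMP * SECTORSIZE;      /* 128MB per bitmap */

                return bg_start + ((offset - bg_start) / span) * span;
        }

        int main(void)
        {
                uint64_t bg_start = 1ULL << 30;                         /* block group at 1GB */
                uint64_t offset = bg_start + 200ULL * 1024 * 1024;      /* +200MB */
                uint64_t start = bitmap_start_for(bg_start, offset);

                /* prints "slot +128MB, bit 18432" */
                printf("slot +%lluMB, bit %llu\n",
                       (unsigned long long)((start - bg_start) >> 20),
                       (unsigned long long)((offset - start) / SECTORSIZE));
                return 0;
        }
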
 
-static int tree_insert_bytes(struct rb_root *root, u64 bytes,
-                            struct rb_node *node)
+static int tree_insert_offset(struct rb_root *root, u64 offset,
+                             struct rb_node *node, int bitmap)
 {
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
 
        while (*p) {
                parent = *p;
-               info = rb_entry(parent, struct btrfs_free_space, bytes_index);
+               info = rb_entry(parent, struct btrfs_free_space, offset_index);
 
-               if (bytes < info->bytes)
+               if (offset < info->offset) {
                        p = &(*p)->rb_left;
-               else
+               } else if (offset > info->offset) {
                        p = &(*p)->rb_right;
+               } else {
+                       /*
+                        * we could have a bitmap entry and an extent entry
+                        * share the same offset.  If this is the case, we want
+                        * the extent entry to always be found first if we do a
+                        * linear search through the tree, since we want to have
+                        * the quickest allocation time, and allocating from an
+                        * extent is faster than allocating from a bitmap.  So
+                        * if we're inserting a bitmap and we find an entry at
+                        * this offset, we want to go right, or after this entry
+                        * logically.  If we are inserting an extent and we've
+                        * found a bitmap, we want to go left, or before
+                        * logically.
+                        */
+                       if (bitmap) {
+                               WARN_ON(info->bitmap);
+                               p = &(*p)->rb_right;
+                       } else {
+                               WARN_ON(!info->bitmap);
+                               p = &(*p)->rb_left;
+                       }
+               }
        }
 
        rb_link_node(node, parent, p);
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
 /*
  * searches the tree for the given offset.
  *
- * fuzzy == 1: this is used for allocations where we are given a hint of where
- * to look for free space.  Because the hint may not be completely on an offset
- * mark, or the hint may no longer point to free space we need to fudge our
- * results a bit.  So we look for free space starting at or after offset with at
- * least bytes size.  We prefer to find as close to the given offset as we can.
- * Also if the offset is within a free space range, then we will return the free
- * space that contains the given offset, which means we can return a free space
- * chunk with an offset before the provided offset.
- *
- * fuzzy == 0: this is just a normal tree search.  Give us the free space that
- * starts at the given offset which is at least bytes size, and if its not there
- * return NULL.
+ * fuzzy - If this is set, then we are trying to make an allocation, and we just
+ * want a section that is at least 'bytes' in size and comes at or after the given
+ * offset.
  */
-static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
-                                                  u64 offset, u64 bytes,
-                                                  int fuzzy)
+static struct btrfs_free_space *
+tree_search_offset(struct btrfs_block_group_cache *block_group,
+                  u64 offset, int bitmap_only, int fuzzy)
 {
-       struct rb_node *n = root->rb_node;
-       struct btrfs_free_space *entry, *ret = NULL;
+       struct rb_node *n = block_group->free_space_offset.rb_node;
+       struct btrfs_free_space *entry, *prev = NULL;
+
+       /* find entry that is closest to the 'offset' */
+       while (1) {
+               if (!n) {
+                       entry = NULL;
+                       break;
+               }
 
-       while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
+               prev = entry;
 
-               if (offset < entry->offset) {
-                       if (fuzzy &&
-                           (!ret || entry->offset < ret->offset) &&
-                           (bytes <= entry->bytes))
-                               ret = entry;
+               if (offset < entry->offset)
                        n = n->rb_left;
-               } else if (offset > entry->offset) {
-                       if (fuzzy &&
-                           (entry->offset + entry->bytes - 1) >= offset &&
-                           bytes <= entry->bytes) {
-                               ret = entry;
-                               break;
-                       }
+               else if (offset > entry->offset)
                        n = n->rb_right;
-               } else {
-                       if (bytes > entry->bytes) {
-                               n = n->rb_right;
-                               continue;
-                       }
-                       ret = entry;
+               else
                        break;
-               }
        }
 
-       return ret;
-}
-
-/*
- * return a chunk at least bytes size, as close to offset that we can get.
- */
-static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
-                                                 u64 offset, u64 bytes)
-{
-       struct rb_node *n = root->rb_node;
-       struct btrfs_free_space *entry, *ret = NULL;
+       if (bitmap_only) {
+               if (!entry)
+                       return NULL;
+               if (entry->bitmap)
+                       return entry;
 
-       while (n) {
-               entry = rb_entry(n, struct btrfs_free_space, bytes_index);
+               /*
+                * a bitmap entry and an extent entry may share the same offset;
+                * in that case, the bitmap entry comes after the extent entry.
+                */
+               n = rb_next(n);
+               if (!n)
+                       return NULL;
+               entry = rb_entry(n, struct btrfs_free_space, offset_index);
+               if (entry->offset != offset)
+                       return NULL;
 
-               if (bytes < entry->bytes) {
+               WARN_ON(!entry->bitmap);
+               return entry;
+       } else if (entry) {
+               if (entry->bitmap) {
                        /*
-                        * We prefer to get a hole size as close to the size we
-                        * are asking for so we don't take small slivers out of
-                        * huge holes, but we also want to get as close to the
-                        * offset as possible so we don't have a whole lot of
-                        * fragmentation.
+                        * if previous extent entry covers the offset,
+                        * we should return it instead of the bitmap entry
                         */
-                       if (offset <= entry->offset) {
-                               if (!ret)
-                                       ret = entry;
-                               else if (entry->bytes < ret->bytes)
-                                       ret = entry;
-                               else if (entry->offset < ret->offset)
-                                       ret = entry;
+                       n = &entry->offset_index;
+                       while (1) {
+                               n = rb_prev(n);
+                               if (!n)
+                                       break;
+                               prev = rb_entry(n, struct btrfs_free_space,
+                                               offset_index);
+                               if (!prev->bitmap) {
+                                       if (prev->offset + prev->bytes > offset)
+                                               entry = prev;
+                                       break;
+                               }
                        }
-                       n = n->rb_left;
-               } else if (bytes > entry->bytes) {
-                       n = n->rb_right;
+               }
+               return entry;
+       }
+
+       if (!prev)
+               return NULL;
+
+       /* find last entry before the 'offset' */
+       entry = prev;
+       if (entry->offset > offset) {
+               n = rb_prev(&entry->offset_index);
+               if (n) {
+                       entry = rb_entry(n, struct btrfs_free_space,
+                                       offset_index);
+                       BUG_ON(entry->offset > offset);
                } else {
-                       /*
-                        * Ok we may have multiple chunks of the wanted size,
-                        * so we don't want to take the first one we find, we
-                        * want to take the one closest to our given offset, so
-                        * keep searching just in case theres a better match.
-                        */
-                       n = n->rb_right;
-                       if (offset > entry->offset)
-                               continue;
-                       else if (!ret || entry->offset < ret->offset)
-                               ret = entry;
+                       if (fuzzy)
+                               return entry;
+                       else
+                               return NULL;
                }
        }
 
-       return ret;
+       if (entry->bitmap) {
+               n = &entry->offset_index;
+               while (1) {
+                       n = rb_prev(n);
+                       if (!n)
+                               break;
+                       prev = rb_entry(n, struct btrfs_free_space,
+                                       offset_index);
+                       if (!prev->bitmap) {
+                               if (prev->offset + prev->bytes > offset)
+                                       return prev;
+                               break;
+                       }
+               }
+               if (entry->offset + BITS_PER_BITMAP *
+                   block_group->sectorsize > offset)
+                       return entry;
+       } else if (entry->offset + entry->bytes > offset)
+               return entry;
+
+       if (!fuzzy)
+               return NULL;
+
+       while (1) {
+               if (entry->bitmap) {
+                       if (entry->offset + BITS_PER_BITMAP *
+                           block_group->sectorsize > offset)
+                               break;
+               } else {
+                       if (entry->offset + entry->bytes > offset)
+                               break;
+               }
+
+               n = rb_next(&entry->offset_index);
+               if (!n)
+                       return NULL;
+               entry = rb_entry(n, struct btrfs_free_space, offset_index);
+       }
+       return entry;
 }
 
 static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
 {
        rb_erase(&info->offset_index, &block_group->free_space_offset);
-       rb_erase(&info->bytes_index, &block_group->free_space_bytes);
+       block_group->free_extents--;
+       block_group->free_space -= info->bytes;
 }
 
 static int link_free_space(struct btrfs_block_group_cache *block_group,
@@ -190,17 +246,314 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 {
        int ret = 0;
 
-
-       BUG_ON(!info->bytes);
+       BUG_ON(!info->bitmap && !info->bytes);
        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
-                                &info->offset_index);
+                                &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;
 
-       ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
-                               &info->bytes_index);
-       if (ret)
-               return ret;
+       block_group->free_space += info->bytes;
+       block_group->free_extents++;
+       return ret;
+}
+
+static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+{
+       u64 max_bytes, possible_bytes;
+
+       /*
+        * The goal is to keep the total amount of memory used per 1GB of space
+        * at or below 32k, so we need to adjust how much memory we allow to be
+        * used by extent based free space tracking
+        */
+       max_bytes = MAX_CACHE_BYTES_PER_GIG *
+               (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+
+       possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
+               (sizeof(struct btrfs_free_space) *
+                block_group->extents_thresh);
+
+       if (possible_bytes > max_bytes) {
+               int extent_bytes = max_bytes -
+                       (block_group->total_bitmaps * PAGE_CACHE_SIZE);
+
+               if (extent_bytes <= 0) {
+                       block_group->extents_thresh = 0;
+                       return;
+               }
+
+               block_group->extents_thresh = extent_bytes /
+                       (sizeof(struct btrfs_free_space));
+       }
+}
+
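
Plugging assumed numbers into the budget above: with 4K pages and a guessed 48-byte struct btrfs_free_space (the real size is build-dependent), a 1GB block group gets max_bytes = 32768; two bitmaps pin 8192 bytes of that, leaving 24576 / 48 = 512 extent entries before the threshold clamps down. The same arithmetic as a tiny program:

        #include <stdio.h>

        /* assumptions: 4K pages; 48 bytes per struct btrfs_free_space
         * (the real size is build-dependent) */
        int main(void)
        {
                unsigned long long max_bytes = 32768;   /* 32k budget for a 1GB group */
                unsigned long long bitmaps = 2;         /* pages pinned by bitmaps */
                unsigned long long entry_size = 48;
                unsigned long long left = max_bytes - bitmaps * 4096;

                /* prints "room for 512 extent entries" */
                printf("room for %llu extent entries\n", left / entry_size);
                return 0;
        }
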
+static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+                             struct btrfs_free_space *info, u64 offset,
+                             u64 bytes)
+{
+       unsigned long start, end;
+       unsigned long i;
+
+       start = offset_to_bit(info->offset, block_group->sectorsize, offset);
+       end = start + bytes_to_bits(bytes, block_group->sectorsize);
+       BUG_ON(end > BITS_PER_BITMAP);
+
+       for (i = start; i < end; i++)
+               clear_bit(i, info->bitmap);
+
+       info->bytes -= bytes;
+       block_group->free_space -= bytes;
+}
+
+static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+                           struct btrfs_free_space *info, u64 offset,
+                           u64 bytes)
+{
+       unsigned long start, end;
+       unsigned long i;
+
+       start = offset_to_bit(info->offset, block_group->sectorsize, offset);
+       end = start + bytes_to_bits(bytes, block_group->sectorsize);
+       BUG_ON(end > BITS_PER_BITMAP);
+
+       for (i = start; i < end; i++)
+               set_bit(i, info->bitmap);
+
+       info->bytes += bytes;
+       block_group->free_space += bytes;
+}
+
+static int search_bitmap(struct btrfs_block_group_cache *block_group,
+                        struct btrfs_free_space *bitmap_info, u64 *offset,
+                        u64 *bytes)
+{
+       unsigned long found_bits = 0;
+       unsigned long bits, i;
+       unsigned long next_zero;
+
+       i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+                         max_t(u64, *offset, bitmap_info->offset));
+       bits = bytes_to_bits(*bytes, block_group->sectorsize);
+
+       for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
+            i < BITS_PER_BITMAP;
+            i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
+               next_zero = find_next_zero_bit(bitmap_info->bitmap,
+                                              BITS_PER_BITMAP, i);
+               if ((next_zero - i) >= bits) {
+                       found_bits = next_zero - i;
+                       break;
+               }
+               i = next_zero;
+       }
+
+       if (found_bits) {
+               *offset = (u64)(i * block_group->sectorsize) +
+                       bitmap_info->offset;
+               *bytes = (u64)(found_bits) * block_group->sectorsize;
+               return 0;
+       }
+
+       return -1;
+}
+
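
search_bitmap() is essentially a run-finder: jump to the next set bit, measure the run of consecutive set bits, and stop once the run is long enough. A simplified userspace version over a plain bool array (find_run() is hypothetical):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        /* toy model of search_bitmap(): return the index of the first run of
         * at least 'want' consecutive set bits, or -1 if there is none */
        static long find_run(const bool *bits, size_t nbits, size_t want)
        {
                size_t i = 0;

                while (i < nbits) {
                        size_t start = i, run = 0;

                        while (i < nbits && bits[i]) {  /* measure this run of 1s */
                                run++;
                                i++;
                        }
                        if (run >= want)
                                return (long)start;
                        i++;                            /* skip the 0 that ended it */
                }
                return -1;
        }

        int main(void)
        {
                bool bits[] = { 1, 0, 1, 1, 0, 1, 1, 1, 1 };

                printf("%ld\n", find_run(bits, 9, 3));  /* prints 5 */
                return 0;
        }
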
+static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
+                                               *block_group, u64 *offset,
+                                               u64 *bytes, int debug)
+{
+       struct btrfs_free_space *entry;
+       struct rb_node *node;
+       int ret;
+
+       if (!block_group->free_space_offset.rb_node)
+               return NULL;
+
+       entry = tree_search_offset(block_group,
+                                  offset_to_bitmap(block_group, *offset),
+                                  0, 1);
+       if (!entry)
+               return NULL;
+
+       for (node = &entry->offset_index; node; node = rb_next(node)) {
+               entry = rb_entry(node, struct btrfs_free_space, offset_index);
+               if (entry->bytes < *bytes)
+                       continue;
+
+               if (entry->bitmap) {
+                       ret = search_bitmap(block_group, entry, offset, bytes);
+                       if (!ret)
+                               return entry;
+                       continue;
+               }
+
+               *offset = entry->offset;
+               *bytes = entry->bytes;
+               return entry;
+       }
+
+       return NULL;
+}
+
+static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+                          struct btrfs_free_space *info, u64 offset)
+{
+       u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+       int max_bitmaps = (int)div64_u64(block_group->key.offset +
+                                        bytes_per_bg - 1, bytes_per_bg);
+       BUG_ON(block_group->total_bitmaps >= max_bitmaps);
+
+       info->offset = offset_to_bitmap(block_group, offset);
+       link_free_space(block_group, info);
+       block_group->total_bitmaps++;
+
+       recalculate_thresholds(block_group);
+}
+
+static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+                             struct btrfs_free_space *bitmap_info,
+                             u64 *offset, u64 *bytes)
+{
+       u64 end;
+
+again:
+       end = bitmap_info->offset +
+               (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+
+       if (*offset > bitmap_info->offset && *offset + *bytes > end) {
+               bitmap_clear_bits(block_group, bitmap_info, *offset,
+                                 end - *offset + 1);
+               *bytes -= end - *offset + 1;
+               *offset = end + 1;
+       } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
+               bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+               *bytes = 0;
+       }
+
+       if (*bytes) {
+               if (!bitmap_info->bytes) {
+                       unlink_free_space(block_group, bitmap_info);
+                       kfree(bitmap_info->bitmap);
+                       kfree(bitmap_info);
+                       block_group->total_bitmaps--;
+                       recalculate_thresholds(block_group);
+               }
+
+               bitmap_info = tree_search_offset(block_group,
+                                                offset_to_bitmap(block_group,
+                                                                 *offset),
+                                                1, 0);
+               if (!bitmap_info)
+                       return -EINVAL;
+
+               if (!bitmap_info->bitmap)
+                       return -EAGAIN;
+
+               goto again;
+       } else if (!bitmap_info->bytes) {
+               unlink_free_space(block_group, bitmap_info);
+               kfree(bitmap_info->bitmap);
+               kfree(bitmap_info);
+               block_group->total_bitmaps--;
+               recalculate_thresholds(block_group);
+       }
+
+       return 0;
+}
+
+static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
+                             struct btrfs_free_space *info)
+{
+       struct btrfs_free_space *bitmap_info;
+       int added = 0;
+       u64 bytes, offset, end;
+       int ret;
+
+       /*
+        * If we are below the extents threshold then we can add this as an
+        * extent, and don't have to deal with the bitmap
+        */
+       if (block_group->free_extents < block_group->extents_thresh &&
+           info->bytes > block_group->sectorsize * 4)
+               return 0;
+
+       /*
+        * some block groups are so tiny they can't be enveloped by a bitmap, so
+        * don't even bother to create a bitmap for them
+        */
+       if (BITS_PER_BITMAP * block_group->sectorsize >
+           block_group->key.offset)
+               return 0;
+
+       bytes = info->bytes;
+       offset = info->offset;
+
+again:
+       bitmap_info = tree_search_offset(block_group,
+                                        offset_to_bitmap(block_group, offset),
+                                        1, 0);
+       if (!bitmap_info) {
+               BUG_ON(added);
+               goto new_bitmap;
+       }
+
+       end = bitmap_info->offset +
+               (u64)(BITS_PER_BITMAP * block_group->sectorsize);
+
+       if (offset >= bitmap_info->offset && offset + bytes > end) {
+               bitmap_set_bits(block_group, bitmap_info, offset,
+                               end - offset);
+               bytes -= end - offset;
+               offset = end;
+               added = 0;
+       } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
+               bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+               bytes = 0;
+       } else {
+               BUG();
+       }
+
+       if (!bytes) {
+               ret = 1;
+               goto out;
+       } else
+               goto again;
+
+new_bitmap:
+       if (info && info->bitmap) {
+               add_new_bitmap(block_group, info, offset);
+               added = 1;
+               info = NULL;
+               goto again;
+       } else {
+               spin_unlock(&block_group->tree_lock);
+
+               /* no pre-allocated info, allocate a new one */
+               if (!info) {
+                       info = kzalloc(sizeof(struct btrfs_free_space),
+                                      GFP_NOFS);
+                       if (!info) {
+                               spin_lock(&block_group->tree_lock);
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+               }
+
+               /* allocate the bitmap */
+               info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+               spin_lock(&block_group->tree_lock);
+               if (!info->bitmap) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               goto again;
+       }
+
+out:
+       if (info) {
+               if (info->bitmap)
+                       kfree(info->bitmap);
+               kfree(info);
+       }
 
        return ret;
 }
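
The two early returns at the top of insert_into_bitmap() carry the policy: free ranges stay as extent entries while the per-group budget lasts and the range is bigger than four sectors, and block groups smaller than one bitmap's span never get bitmaps at all. Restated under assumed constants (4K sectors, 32768-bit bitmaps):

        #include <stdbool.h>

        /* toy restatement of the two early returns in insert_into_bitmap();
         * the constants are assumptions, not taken from the patch */
        static bool should_use_bitmap(unsigned long free_extents,
                                      unsigned long extents_thresh,
                                      unsigned long long bytes,
                                      unsigned long long bg_size)
        {
                const unsigned long long sectorsize = 4096;
                const unsigned long long bitmap_span = 32768ULL * sectorsize;

                /* extent budget left and a decent-sized chunk:
                 * keep it as a plain extent entry */
                if (free_extents < extents_thresh && bytes > sectorsize * 4)
                        return false;

                /* the whole block group is smaller than one bitmap's span:
                 * never bother with bitmaps */
                if (bitmap_span > bg_size)
                        return false;

                return true;
        }
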
@@ -208,8 +561,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
 {
-       struct btrfs_free_space *right_info;
-       struct btrfs_free_space *left_info;
+       struct btrfs_free_space *right_info = NULL;
+       struct btrfs_free_space *left_info = NULL;
        struct btrfs_free_space *info = NULL;
        int ret = 0;
 
@@ -227,18 +580,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
-       right_info = tree_search_offset(&block_group->free_space_offset,
-                                       offset+bytes, 0, 0);
-       left_info = tree_search_offset(&block_group->free_space_offset,
-                                      offset-1, 0, 1);
+       right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+       if (right_info && rb_prev(&right_info->offset_index))
+               left_info = rb_entry(rb_prev(&right_info->offset_index),
+                                    struct btrfs_free_space, offset_index);
+       else
+               left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-       if (right_info) {
+       /*
+        * If there was no extent directly to the left or right of this new
+        * extent then we know we're going to have to allocate a new extent, so
+        * before we do that see if we need to drop this into a bitmap
+        */
+       if ((!left_info || left_info->bitmap) &&
+           (!right_info || right_info->bitmap)) {
+               ret = insert_into_bitmap(block_group, info);
+
+               if (ret < 0) {
+                       goto out;
+               } else if (ret) {
+                       ret = 0;
+                       goto out;
+               }
+       }
+
+       if (right_info && !right_info->bitmap) {
                unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
        }
 
-       if (left_info && left_info->offset + left_info->bytes == offset) {
+       if (left_info && !left_info->bitmap &&
+           left_info->offset + left_info->bytes == offset) {
                unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
@@ -248,11 +621,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);
-
+out:
        spin_unlock(&block_group->tree_lock);
 
        if (ret) {
-               printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
+               printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
                BUG_ON(ret == -EEXIST);
        }
 
@@ -263,40 +636,65 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
 {
        struct btrfs_free_space *info;
+       struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
        spin_lock(&block_group->tree_lock);
 
-       info = tree_search_offset(&block_group->free_space_offset, offset, 0,
-                                 1);
-       if (info && info->offset == offset) {
-               if (info->bytes < bytes) {
-                       printk(KERN_ERR "Found free space at %llu, size %llu,"
-                              "trying to use %llu\n",
-                              (unsigned long long)info->offset,
-                              (unsigned long long)info->bytes,
-                              (unsigned long long)bytes);
+again:
+       info = tree_search_offset(block_group, offset, 0, 0);
+       if (!info) {
+               WARN_ON(1);
+               goto out_lock;
+       }
+
+       if (info->bytes < bytes && rb_next(&info->offset_index)) {
+               u64 end;
+               next_info = rb_entry(rb_next(&info->offset_index),
+                                            struct btrfs_free_space,
+                                            offset_index);
+
+               if (next_info->bitmap)
+                       end = next_info->offset + BITS_PER_BITMAP *
+                               block_group->sectorsize - 1;
+               else
+                       end = next_info->offset + next_info->bytes;
+
+               if (next_info->bytes < bytes ||
+                   next_info->offset > offset || offset > end) {
+                       printk(KERN_CRIT "Found free space at %llu, size %llu,"
+                             " trying to use %llu\n",
+                             (unsigned long long)info->offset,
+                             (unsigned long long)info->bytes,
+                             (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
-                       spin_unlock(&block_group->tree_lock);
-                       goto out;
+                       goto out_lock;
                }
-               unlink_free_space(block_group, info);
 
-               if (info->bytes == bytes) {
-                       kfree(info);
-                       spin_unlock(&block_group->tree_lock);
-                       goto out;
+               info = next_info;
+       }
+
+       if (info->bytes == bytes) {
+               unlink_free_space(block_group, info);
+               if (info->bitmap) {
+                       kfree(info->bitmap);
+                       block_group->total_bitmaps--;
                }
+               kfree(info);
+               goto out_lock;
+       }
 
+       if (!info->bitmap && info->offset == offset) {
+               unlink_free_space(block_group, info);
                info->offset += bytes;
                info->bytes -= bytes;
+               link_free_space(block_group, info);
+               goto out_lock;
+       }
 
-               ret = link_free_space(block_group, info);
-               spin_unlock(&block_group->tree_lock);
-               BUG_ON(ret);
-       } else if (info && info->offset < offset &&
-                  info->offset + info->bytes >= offset + bytes) {
+       if (!info->bitmap && info->offset <= offset &&
+           info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
@@ -312,7 +710,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
-                       BUG_ON(ret);
+                       WARN_ON(ret);
+                       if (ret)
+                               goto out_lock;
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
@@ -320,32 +720,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                        kfree(info);
                }
                spin_unlock(&block_group->tree_lock);
-               /* step two, insert a new info struct to cover anything
-                * before the hole
+
+               /* step two, insert a new info struct to cover
+                * anything before the hole
                 */
                ret = btrfs_add_free_space(block_group, old_start,
                                           offset - old_start);
-               BUG_ON(ret);
-       } else {
-               spin_unlock(&block_group->tree_lock);
-               if (!info) {
-                       printk(KERN_ERR "couldn't find space %llu to free\n",
-                              (unsigned long long)offset);
-                       printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
-                              block_group->cached,
-                              (unsigned long long)block_group->key.objectid,
-                              (unsigned long long)block_group->key.offset);
-                       btrfs_dump_free_space(block_group, bytes);
-               } else if (info) {
-                       printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
-                              "but wanted offset=%llu bytes=%llu\n",
-                              (unsigned long long)info->offset,
-                              (unsigned long long)info->bytes,
-                              (unsigned long long)offset,
-                              (unsigned long long)bytes);
-               }
-               WARN_ON(1);
+               WARN_ON(ret);
+               goto out;
        }
+
+       ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+       if (ret == -EAGAIN)
+               goto again;
+       BUG_ON(ret);
+out_lock:
+       spin_unlock(&block_group->tree_lock);
 out:
        return ret;
 }
@@ -361,10 +751,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
-               printk(KERN_ERR "entry offset %llu, bytes %llu\n",
+               printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
                       (unsigned long long)info->offset,
-                      (unsigned long long)info->bytes);
+                      (unsigned long long)info->bytes,
+                      (info->bitmap) ? "yes" : "no");
        }
+       printk(KERN_INFO "block group has cluster?: %s\n",
+              list_empty(&block_group->cluster_list) ? "no" : "yes");
        printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
               "\n", count);
 }
@@ -397,26 +790,35 @@ __btrfs_return_cluster_to_free_space(
 {
        struct btrfs_free_space *entry;
        struct rb_node *node;
+       bool bitmap;
 
        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;
 
+       bitmap = cluster->points_to_bitmap;
+       cluster->block_group = NULL;
        cluster->window_start = 0;
+       list_del_init(&cluster->block_group_list);
+       cluster->points_to_bitmap = false;
+
+       if (bitmap)
+               goto out;
+
        node = rb_first(&cluster->root);
-       while(node) {
+       while (node) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
-               link_free_space(block_group, entry);
+               BUG_ON(entry->bitmap);
+               tree_insert_offset(&block_group->free_space_offset,
+                                  entry->offset, &entry->offset_index, 0);
        }
-       list_del_init(&cluster->block_group_list);
-
-       btrfs_put_block_group(cluster->block_group);
-       cluster->block_group = NULL;
        cluster->root.rb_node = NULL;
+
 out:
        spin_unlock(&cluster->lock);
+       btrfs_put_block_group(block_group);
        return 0;
 }
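
Note the iteration discipline in the loop above: rb_next() is taken before rb_erase(), because erasing invalidates the node's links. A hedged userspace sketch of the same advance-then-unlink walk, with a singly linked list standing in for the rbtree:

#include <stddef.h>

struct node { struct node *next; };

/* Detach every node from src and hand it to reinsert(); the successor
 * is saved before the node is unlinked, exactly as rb_next() is taken
 * before rb_erase() in the cluster drain above. */
static void move_all(struct node **src, void (*reinsert)(struct node *))
{
        struct node *n = *src;

        while (n) {
                struct node *next = n->next;    /* advance first */
                n->next = NULL;                 /* then unlink */
                reinsert(n);
                n = next;
        }
        *src = NULL;
}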
 
@@ -425,20 +827,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
        struct btrfs_free_space *info;
        struct rb_node *node;
        struct btrfs_free_cluster *cluster;
-       struct btrfs_free_cluster *safe;
+       struct list_head *head;
 
        spin_lock(&block_group->tree_lock);
-
-       list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
-                                block_group_list) {
+       while ((head = block_group->cluster_list.next) !=
+              &block_group->cluster_list) {
+               cluster = list_entry(head, struct btrfs_free_cluster,
+                                    block_group_list);
 
                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
+               if (need_resched()) {
+                       spin_unlock(&block_group->tree_lock);
+                       cond_resched();
+                       spin_lock(&block_group->tree_lock);
+               }
        }
 
-       while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
-               info = rb_entry(node, struct btrfs_free_space, bytes_index);
+       while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
+               info = rb_entry(node, struct btrfs_free_space, offset_index);
                unlink_free_space(block_group, info);
+               if (info->bitmap)
+                       kfree(info->bitmap);
                kfree(info);
                if (need_resched()) {
                        spin_unlock(&block_group->tree_lock);
@@ -446,6 +856,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
                        spin_lock(&block_group->tree_lock);
                }
        }
+
        spin_unlock(&block_group->tree_lock);
 }
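
btrfs_remove_free_space_cache() above drains both the cluster list and the entry tree under tree_lock, but drops the lock and reschedules between items whenever need_resched() fires, so tearing down a huge cache cannot monopolize a CPU. A rough userspace analogue of that batched drain, assuming pthreads, with sched_yield() standing in for cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct node { struct node *next; };

static void drain(struct node **head, pthread_mutex_t *lock)
{
        pthread_mutex_lock(lock);
        while (*head) {
                struct node *n = *head;

                *head = n->next;
                free(n);
                /* periodically drop the lock so waiters can make progress;
                 * the kernel only does this when need_resched() is set */
                pthread_mutex_unlock(lock);
                sched_yield();
                pthread_mutex_lock(lock);
        }
        pthread_mutex_unlock(lock);
}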
 
@@ -453,25 +864,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
 {
        struct btrfs_free_space *entry = NULL;
+       u64 bytes_search = bytes + empty_size;
        u64 ret = 0;
 
        spin_lock(&block_group->tree_lock);
-       entry = tree_search_offset(&block_group->free_space_offset, offset,
-                                  bytes + empty_size, 1);
+       entry = find_free_space(block_group, &offset, &bytes_search, 0);
        if (!entry)
-               entry = tree_search_bytes(&block_group->free_space_bytes,
-                                         offset, bytes + empty_size);
-       if (entry) {
+               goto out;
+
+       ret = offset;
+       if (entry->bitmap) {
+               bitmap_clear_bits(block_group, entry, offset, bytes);
+               if (!entry->bytes) {
+                       unlink_free_space(block_group, entry);
+                       kfree(entry->bitmap);
+                       kfree(entry);
+                       block_group->total_bitmaps--;
+                       recalculate_thresholds(block_group);
+               }
+       } else {
                unlink_free_space(block_group, entry);
-               ret = entry->offset;
                entry->offset += bytes;
                entry->bytes -= bytes;
-
                if (!entry->bytes)
                        kfree(entry);
                else
                        link_free_space(block_group, entry);
        }
+
+out:
        spin_unlock(&block_group->tree_lock);
 
        return ret;
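
For a plain extent entry, the allocator above simply advances the entry past the carved-out range and only frees it once empty; bitmap entries instead clear bits and drop the whole bitmap when its byte count hits zero. A simplified sketch of the extent branch (same toy struct as earlier, without the unlink/relink bookkeeping):

#include <stdint.h>

struct extent { uint64_t offset; uint64_t bytes; };

/* Returns the allocated offset, or 0 when the entry is too small;
 * the kernel function signals failure with 0 the same way. */
static uint64_t alloc_front(struct extent *e, uint64_t bytes)
{
        uint64_t ret;

        if (e->bytes < bytes)
                return 0;
        ret = e->offset;
        e->offset += bytes;     /* trim the allocation off the front */
        e->bytes -= bytes;
        return ret;
}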
@@ -517,6 +938,47 @@ int btrfs_return_cluster_to_free_space(
        return ret;
 }
 
+static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+                                  struct btrfs_free_cluster *cluster,
+                                  u64 bytes, u64 min_start)
+{
+       struct btrfs_free_space *entry;
+       int err;
+       u64 search_start = cluster->window_start;
+       u64 search_bytes = bytes;
+       u64 ret = 0;
+
+       spin_lock(&block_group->tree_lock);
+       spin_lock(&cluster->lock);
+
+       if (!cluster->points_to_bitmap)
+               goto out;
+
+       if (cluster->block_group != block_group)
+               goto out;
+
+       entry = tree_search_offset(block_group, search_start, 0, 0);
+
+       if (!entry || !entry->bitmap)
+               goto out;
+
+       search_start = min_start;
+       search_bytes = bytes;
+
+       err = search_bitmap(block_group, entry, &search_start,
+                           &search_bytes);
+       if (err)
+               goto out;
+
+       ret = search_start;
+       bitmap_clear_bits(block_group, entry, ret, bytes);
+out:
+       spin_unlock(&cluster->lock);
+       spin_unlock(&block_group->tree_lock);
+
+       return ret;
+}
+
 /*
  * given a cluster, try to allocate 'bytes' from it, returns 0
  * if it couldn't find anything suitably large, or a logical disk offset
@@ -530,6 +992,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
        struct rb_node *node;
        u64 ret = 0;
 
+       if (cluster->points_to_bitmap)
+               return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
+                                              min_start);
+
        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;
@@ -567,9 +1033,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
        }
 out:
        spin_unlock(&cluster->lock);
+
        return ret;
 }
 
+static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
+                               struct btrfs_free_space *entry,
+                               struct btrfs_free_cluster *cluster,
+                               u64 offset, u64 bytes, u64 min_bytes)
+{
+       unsigned long next_zero;
+       unsigned long i;
+       unsigned long search_bits;
+       unsigned long total_bits;
+       unsigned long found_bits;
+       unsigned long start = 0;
+       unsigned long total_found = 0;
+       bool found = false;
+
+       i = offset_to_bit(entry->offset, block_group->sectorsize,
+                         max_t(u64, offset, entry->offset));
+       search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+       total_bits = bytes_to_bits(bytes, block_group->sectorsize);
+
+again:
+       found_bits = 0;
+       for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
+            i < BITS_PER_BITMAP;
+            i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
+               next_zero = find_next_zero_bit(entry->bitmap,
+                                              BITS_PER_BITMAP, i);
+               if (next_zero - i >= search_bits) {
+                       found_bits = next_zero - i;
+                       break;
+               }
+               i = next_zero;
+       }
+
+       if (!found_bits)
+               return -1;
+
+       if (!found) {
+               start = i;
+               found = true;
+       }
+
+       total_found += found_bits;
+
+       if (cluster->max_size < found_bits * block_group->sectorsize)
+               cluster->max_size = found_bits * block_group->sectorsize;
+
+       if (total_found < total_bits) {
+               i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
+               if (i - start > total_bits * 2) {
+                       total_found = 0;
+                       cluster->max_size = 0;
+                       found = false;
+               }
+               goto again;
+       }
+
+       cluster->window_start = start * block_group->sectorsize +
+               entry->offset;
+       cluster->points_to_bitmap = true;
+
+       return 0;
+}
+
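
btrfs_bitmap_cluster() above locates its window by alternating find_next_bit() and find_next_zero_bit() to measure runs of set bits, restarting (and resetting max_size) when the window grows too sparse. A self-contained sketch of the run search, with one byte per bit as a toy stand-in for the kernel bitops:

#include <stddef.h>

#define BITS 256
static unsigned char map[BITS];         /* 1 = free, 0 = used */

/* Find the first run of at least want set "bits" at or after from;
 * returns its start index or -1. Toy replacement for the
 * find_next_bit()/find_next_zero_bit() pairing in the hunk above. */
static int find_run(size_t from, size_t want)
{
        size_t i = from;

        while (i < BITS) {
                while (i < BITS && !map[i])     /* find_next_bit() */
                        i++;
                size_t start = i;
                while (i < BITS && map[i])      /* find_next_zero_bit() */
                        i++;
                if (i - start >= want)
                        return (int)start;
        }
        return -1;
}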
 /*
  * here we try to find a cluster of blocks in a block group.  The goal
  * is to find at least bytes free and up to empty_size + bytes free.
@@ -587,12 +1117,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        struct btrfs_free_space *next;
-       struct btrfs_free_space *last;
+       struct btrfs_free_space *last = NULL;
        u64 min_bytes;
        u64 window_start;
        u64 window_free;
        u64 max_extent = 0;
-       int total_retries = 0;
+       bool found_bitmap = false;
        int ret;
 
        /* for metadata, allow allocates with more holes */
@@ -620,30 +1150,79 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                goto out;
        }
 again:
-       min_bytes = min(min_bytes, bytes + empty_size);
-       entry = tree_search_bytes(&block_group->free_space_bytes,
-                                 offset, min_bytes);
+       entry = tree_search_offset(block_group, offset, found_bitmap, 1);
        if (!entry) {
                ret = -ENOSPC;
                goto out;
        }
+
+       /*
+        * If found_bitmap is true, we exhausted our search for extent entries,
+        * and we just want to search all of the bitmaps that we can find, and
+        * ignore any extent entries we find.
+        */
+       while (entry->bitmap || found_bitmap ||
+              (!entry->bitmap && entry->bytes < min_bytes)) {
+               struct rb_node *node = rb_next(&entry->offset_index);
+
+               if (entry->bitmap && entry->bytes > bytes + empty_size) {
+                       ret = btrfs_bitmap_cluster(block_group, entry, cluster,
+                                                  offset, bytes + empty_size,
+                                                  min_bytes);
+                       if (!ret)
+                               goto got_it;
+               }
+
+               if (!node) {
+                       ret = -ENOSPC;
+                       goto out;
+               }
+               entry = rb_entry(node, struct btrfs_free_space, offset_index);
+       }
+
+       /*
+        * We already searched all the extent entries from the passed in offset
+        * to the end and didn't find enough space for the cluster, and we also
+        * didn't find any bitmaps that met our criteria, just go ahead and exit
+        */
+       if (found_bitmap) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       cluster->points_to_bitmap = false;
        window_start = entry->offset;
        window_free = entry->bytes;
        last = entry;
        max_extent = entry->bytes;
 
-       while(1) {
+       while (1) {
                /* our window is just right, let's fill it */
                if (window_free >= bytes + empty_size)
                        break;
 
                node = rb_next(&last->offset_index);
                if (!node) {
+                       if (found_bitmap)
+                               goto again;
                        ret = -ENOSPC;
                        goto out;
                }
                next = rb_entry(node, struct btrfs_free_space, offset_index);
 
+               /*
+                * we found a bitmap, so if this search doesn't result in a
+                * cluster, we know to go and search again for the bitmaps and
+                * start looking for space there
+                */
+               if (next->bitmap) {
+                       if (!found_bitmap)
+                               offset = next->offset;
+                       found_bitmap = true;
+                       last = next;
+                       continue;
+               }
+
                /*
                 * we haven't filled the empty size and the window is
                 * very large.  reset and try again
@@ -655,19 +1234,6 @@ again:
                        window_free = entry->bytes;
                        last = entry;
                        max_extent = 0;
-                       total_retries++;
-                       if (total_retries % 64 == 0) {
-                               if (min_bytes >= (bytes + empty_size)) {
-                                       ret = -ENOSPC;
-                                       goto out;
-                               }
-                               /*
-                                * grow our allocation a bit, we're not having
-                                * much luck
-                                */
-                               min_bytes *= 2;
-                               goto again;
-                       }
                } else {
                        last = next;
                        window_free += next->bytes;
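
The window loop here (continuing in the next hunk) accumulates consecutive extent entries until their combined free space reaches bytes + empty_size, resets when the window spans too large a gap, and merely flags found_bitmap for the bitmap-only second pass. A simplified sketch of the accumulation over a sorted array of extents; the gap threshold is illustrative, not the kernel's exact test:

#include <stdint.h>
#include <stddef.h>

struct extent { uint64_t offset; uint64_t bytes; };

/* Grow a window over sorted extents until it holds want free bytes;
 * restart when the next extent sits more than gap_limit past the
 * previous one. Returns 0 and the window [*first, *last] on success. */
static int find_window(const struct extent *e, size_t n, uint64_t want,
                       uint64_t gap_limit, size_t *first, size_t *last)
{
        size_t start = 0;
        uint64_t free_bytes = 0;

        for (size_t i = 0; i < n; i++) {
                if (i > start &&
                    e[i].offset - (e[i - 1].offset + e[i - 1].bytes) >
                    gap_limit) {
                        start = i;              /* window too sparse: reset */
                        free_bytes = 0;
                }
                free_bytes += e[i].bytes;
                if (free_bytes >= want) {
                        *first = start;
                        *last = i;
                        return 0;
                }
        }
        return -1;
}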
@@ -685,11 +1251,19 @@ again:
         * The cluster includes an rbtree, but only uses the offset index
         * of each free space cache entry.
         */
-       while(1) {
+       while (1) {
                node = rb_next(&entry->offset_index);
-               unlink_free_space(block_group, entry);
+               if (entry->bitmap && node) {
+                       entry = rb_entry(node, struct btrfs_free_space,
+                                        offset_index);
+                       continue;
+               } else if (entry->bitmap && !node) {
+                       break;
+               }
+
+               rb_erase(&entry->offset_index, &block_group->free_space_offset);
                ret = tree_insert_offset(&cluster->root, entry->offset,
-                                        &entry->offset_index);
+                                        &entry->offset_index, 0);
                BUG_ON(ret);
 
                if (!node || entry == last)
@@ -697,8 +1271,10 @@ again:
 
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
        }
-       ret = 0;
+
        cluster->max_size = max_extent;
+got_it:
+       ret = 0;
        atomic_inc(&block_group->count);
        list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
        cluster->block_group = block_group;
@@ -718,6 +1294,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
        spin_lock_init(&cluster->refill_lock);
        cluster->root.rb_node = NULL;
        cluster->max_size = 0;
+       cluster->points_to_bitmap = false;
        INIT_LIST_HEAD(&cluster->block_group_list);
        cluster->block_group = NULL;
 }
index 266fb876405442841b31b7e8219fff82e339d821..890a8e79011b2cadc6adc545909289ed90707a27 100644 (file)
 #ifndef __BTRFS_FREE_SPACE_CACHE
 #define __BTRFS_FREE_SPACE_CACHE
 
+struct btrfs_free_space {
+       struct rb_node offset_index;
+       u64 offset;
+       u64 bytes;
+       unsigned long *bitmap;
+       struct list_head list;
+};
+
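
The new struct lets one entry type serve two roles: a NULL bitmap means offset/bytes describe a single contiguous extent, while a non-NULL bitmap means the entry covers a fixed-size region and bytes counts the free space recorded bit by bit. A hedged illustration of how a caller branches on that (helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct free_space {
        uint64_t offset;
        uint64_t bytes;
        unsigned long *bitmap;  /* NULL for a plain extent entry */
};

/* Does this entry cover [start, start + len) as one contiguous run?
 * Bitmap entries need a per-bit search instead (see the search_bitmap
 * callers in the .c hunks above); this only answers the extent case. */
static bool extent_covers(const struct free_space *e,
                          uint64_t start, uint64_t len)
{
        if (e->bitmap)
                return false;
        return start >= e->offset &&
               start + len <= e->offset + e->bytes;
}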
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 bytenr, u64 size);
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
index 791eab19e330b46f93086ac92b7f1b150b5e1513..56fe83fa60c445d365f4339d1aa472cf875490d3 100644 (file)
@@ -2603,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        if (root->ref_cows)
                btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
        path = btrfs_alloc_path();
-       path->reada = -1;
        BUG_ON(!path);
+       path->reada = -1;
 
        /* FIXME, add redo link to tree so we don't leak on crash */
        key.objectid = inode->i_ino;
index 6d6523da0a30e769c56380c37c6e6aca019217bd..0d126be22b636c239d3e7585803e331d3d6c2f5b 100644 (file)
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
        }
        printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
               (unsigned long long)btrfs_header_bytenr(c),
-              btrfs_header_level(c), nr,
+             level, nr,
               (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
        for (i = 0; i < nr; i++) {
                btrfs_node_key_to_cpu(c, &key, i);
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
                                        btrfs_level_size(root, level - 1),
                                        btrfs_node_ptr_generation(c, i));
                if (btrfs_is_leaf(next) &&
-                   btrfs_header_level(c) != 1)
+                  level != 1)
                        BUG();
                if (btrfs_header_level(next) !=
-                       btrfs_header_level(c) - 1)
+                      level - 1)
                        BUG();
                btrfs_print_tree(root, next);
                free_extent_buffer(next);
index 008397934778d457f7d90c3fbd03955fab230639..e71264d1c2c93acc3e1f418b6362c231dd61ad5f 100644 (file)
@@ -670,6 +670,8 @@ again:
                        err = ret;
                        goto out;
                }
+               if (ret > 0 && path2->slots[level] > 0)
+                       path2->slots[level]--;
 
                eb = path2->nodes[level];
                WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
@@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
                BUG_ON(level == 0);
                path->lowest_level = level;
                ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
+               path->lowest_level = 0;
                if (ret < 0) {
                        btrfs_free_path(path);
                        return ret;
index 2dbf1c1f56ee41b73068c54ee7c345a969b7f38e..e51d2bc532f8fa95a0febf2ab4682547f133b746 100644 (file)
@@ -40,6 +40,14 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
        }
 }
 
+static noinline void switch_commit_root(struct btrfs_root *root)
+{
+       down_write(&root->commit_root_sem);
+       free_extent_buffer(root->commit_root);
+       root->commit_root = btrfs_root_node(root);
+       up_write(&root->commit_root_sem);
+}
+
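
switch_commit_root() above centralizes a swap that the later hunks remove from several call sites: drop the reference on the old commit root and republish the current node, all under the write side of commit_root_sem so readers never observe a half-updated pointer. A userspace sketch of the publish pattern, with a pthread rwlock and a toy (non-atomic) refcount as stand-ins:

#include <pthread.h>
#include <stdlib.h>

struct obj { int refs; };

static void put(struct obj *o)
{
        if (o && --o->refs == 0)        /* toy refcount, not atomic */
                free(o);
}

struct root {
        pthread_rwlock_t sem;
        struct obj *commit;             /* published snapshot */
        struct obj *node;               /* current version */
};

static void switch_commit(struct root *r)
{
        pthread_rwlock_wrlock(&r->sem);
        put(r->commit);                 /* release the old snapshot */
        r->commit = r->node;
        r->commit->refs++;              /* like btrfs_root_node() taking a ref */
        pthread_rwlock_unlock(&r->sem);
}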
 /*
  * either allocate a new transaction or hop into the existing one
  */
@@ -444,9 +452,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 
        btrfs_write_dirty_block_groups(trans, root);
 
-       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-       BUG_ON(ret);
-
        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
@@ -457,13 +462,11 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);
-               btrfs_write_dirty_block_groups(trans, root);
 
-               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+               ret = btrfs_write_dirty_block_groups(trans, root);
                BUG_ON(ret);
        }
-       free_extent_buffer(root->commit_root);
-       root->commit_root = btrfs_root_node(root);
+       switch_commit_root(root);
        return 0;
 }
 
@@ -495,9 +498,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                root = list_entry(next, struct btrfs_root, dirty_list);
 
                update_cowonly_root(trans, root);
-
-               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-               BUG_ON(ret);
        }
        return 0;
 }
@@ -544,8 +544,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                        btrfs_update_reloc_root(trans, root);
 
                        if (root->commit_root != root->node) {
-                               free_extent_buffer(root->commit_root);
-                               root->commit_root = btrfs_root_node(root);
+                               switch_commit_root(root);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }
@@ -943,9 +942,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
                mutex_unlock(&root->fs_info->trans_mutex);
 
-               if (flush_on_commit || snap_pending) {
-                       if (flush_on_commit)
-                               btrfs_start_delalloc_inodes(root);
+               if (flush_on_commit) {
+                       btrfs_start_delalloc_inodes(root);
+                       ret = btrfs_wait_ordered_extents(root, 0);
+                       BUG_ON(ret);
+               } else if (snap_pending) {
                        ret = btrfs_wait_ordered_extents(root, 1);
                        BUG_ON(ret);
                }
@@ -1009,15 +1010,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
-       free_extent_buffer(root->fs_info->tree_root->commit_root);
-       root->fs_info->tree_root->commit_root =
-                               btrfs_root_node(root->fs_info->tree_root);
+       switch_commit_root(root->fs_info->tree_root);
 
        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
-       free_extent_buffer(root->fs_info->chunk_root->commit_root);
-       root->fs_info->chunk_root->commit_root =
-                               btrfs_root_node(root->fs_info->chunk_root);
+       switch_commit_root(root->fs_info->chunk_root);
 
        update_super_roots(root);
 
@@ -1057,6 +1054,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        cur_trans->commit_done = 1;
 
        root->fs_info->last_trans_committed = cur_trans->transid;
+
        wake_up(&cur_trans->commit_wait);
 
        put_transaction(cur_trans);
index c13922206d1be59196a8c59c95f9a98bae12be74..d91b0de7c502d13ef51eaeaf667b61cc0230d8a8 100644 (file)
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                return -ENOENT;
 
        inode = read_one_inode(root, key->objectid);
-       BUG_ON(!dir);
+       BUG_ON(!inode);
 
        ref_ptr = btrfs_item_ptr_offset(eb, slot);
        ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
index 3ab80e9cd76767a674e3005185ee99f3afa53433..5dbefd11b4af524f969c21b857067281eab0dc71 100644 (file)
@@ -721,7 +721,8 @@ error:
  */
 static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_device *device,
-                                        u64 num_bytes, u64 *start)
+                                        u64 num_bytes, u64 *start,
+                                        u64 *max_avail)
 {
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
@@ -758,9 +759,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
-       ret = btrfs_previous_item(root, path, 0, key.type);
-       if (ret < 0)
-               goto error;
+       if (ret > 0) {
+               ret = btrfs_previous_item(root, path, key.objectid, key.type);
+               if (ret < 0)
+                       goto error;
+               if (ret > 0)
+                       start_found = 1;
+       }
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
@@ -803,6 +808,10 @@ no_more_items:
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
+
+                       if (hole_size > *max_avail)
+                               *max_avail = hole_size;
+
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
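
The new max_avail out-parameter lets the chunk allocator learn the largest hole seen even when the search fails, so a retry can size its request realistically rather than probing blindly. A small sketch of hole scanning over a sorted list of allocated ranges, tracking the maximum gap; the flat layout is an assumption for illustration, not the on-disk dev-extent format:

#include <stdint.h>
#include <stddef.h>

struct range { uint64_t start; uint64_t len; };

/* Scan sorted, non-overlapping allocations in [0, total) for the first
 * hole >= need; *max_avail always ends up holding the largest hole,
 * even on failure, matching the intent of the hunk above. */
static int find_hole(const struct range *used, size_t n, uint64_t total,
                     uint64_t need, uint64_t *start, uint64_t *max_avail)
{
        uint64_t last_end = 0;
        int found = 0;

        *max_avail = 0;
        for (size_t i = 0; i <= n; i++) {
                uint64_t end = (i == n) ? total : used[i].start;
                uint64_t hole = end > last_end ? end - last_end : 0;

                if (hole > *max_avail)
                        *max_avail = hole;
                if (!found && hole >= need) {
                        *start = last_end;
                        found = 1;
                }
                if (i < n)
                        last_end = used[i].start + used[i].len;
        }
        return found ? 0 : -1;
}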
@@ -1621,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
        device->fs_devices->total_rw_bytes += diff;
 
        device->total_bytes = new_size;
+       device->disk_total_bytes = new_size;
        btrfs_clear_space_info_full(device->dev_root->fs_info);
 
        return btrfs_update_device(trans, device);
@@ -2007,7 +2017,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
                        goto done;
                if (ret) {
                        ret = 0;
-                       goto done;
+                       break;
                }
 
                l = path->nodes[0];
@@ -2015,7 +2025,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
                if (key.objectid != device->devid)
-                       goto done;
+                       break;
 
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);
@@ -2171,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                             max_chunk_size);
 
 again:
+       max_avail = 0;
        if (!map || map->num_stripes != num_stripes) {
                kfree(map);
                map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2219,7 +2230,8 @@ again:
 
                if (device->in_fs_metadata && avail >= min_free) {
                        ret = find_free_dev_extent(trans, device,
-                                                  min_free, &dev_offset);
+                                                  min_free, &dev_offset,
+                                                  &max_avail);
                        if (ret == 0) {
                                list_move_tail(&device->dev_alloc_list,
                                               &private_devs);
@@ -2795,26 +2807,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                }
        }
 
-       for (i = 0; i > nr; i++) {
-               struct btrfs_multi_bio *multi;
-               struct btrfs_bio_stripe *stripe;
-               int ret;
-
-               length = 1;
-               ret = btrfs_map_block(map_tree, WRITE, buf[i],
-                                     &length, &multi, 0);
-               BUG_ON(ret);
-
-               stripe = multi->stripes;
-               for (j = 0; j < multi->num_stripes; j++) {
-                       if (stripe->physical >= physical &&
-                           physical < stripe->physical + length)
-                               break;
-               }
-               BUG_ON(j >= multi->num_stripes);
-               kfree(multi);
-       }
-
        *logical = buf;
        *naddrs = nr;
        *stripe_len = map->stripe_len;
index af737bb56cb71942ebed99c9bad8eb6a34bdaeb8..259525c9abb8c0296f19fa3dc340b9ac29e70acd 100644 (file)
@@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
        }
        (*new_auth_tok)->session_key.encrypted_key_size =
                (body_size - (ECRYPTFS_SALT_SIZE + 5));
+       if ((*new_auth_tok)->session_key.encrypted_key_size
+           > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
+               printk(KERN_WARNING "Tag 3 packet contains key larger "
+                      "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
+               rc = -EINVAL;
+               goto out_free;
+       }
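
The eCryptfs hunk is the classic guard for length fields parsed from untrusted input: validate the decoded size against the fixed buffer it will later index before anything uses it. A sketch of the pattern with generic names, not the eCryptfs API:

#include <stdint.h>
#include <string.h>

#define MAX_KEY_BYTES 512

struct auth_tok { uint8_t key[MAX_KEY_BYTES]; size_t key_size; };

/* Copy a key of attacker-controlled size out of a packet body;
 * reject anything the fixed destination buffer cannot hold. */
static int parse_key(struct auth_tok *tok,
                     const uint8_t *body, size_t body_size)
{
        if (body_size > MAX_KEY_BYTES)
                return -1;              /* would overflow tok->key */
        memcpy(tok->key, body, body_size);
        tok->key_size = body_size;
        return 0;
}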