-What: /sys/bus/fcoe/ctlr_X
+What: /sys/bus/fcoe/
+Date: August 2012
+KernelVersion: TBD
+Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description: The FCoE bus. Attributes in this directory are control interfaces.
+Attributes:
+
+	ctlr_create: 'FCoE Controller' instance creation interface. Writing an
+		<ifname> to this file will allocate and populate sysfs with a
+		fcoe_ctlr_device (ctlr_X). The user can then configure any
+		per-port settings and finally write to the fcoe_ctlr_device's
+		'enabled' attribute to begin the kernel's discovery and login
+		process (see the example below).
+
+	ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a
+		fcoe_ctlr_device's sysfs name to this file will log the
+		fcoe_ctlr_device out of the fabric, or otherwise disconnect
+		it from any connected FCoE devices. It will also free all
+		kernel memory allocated for this fcoe_ctlr_device and any
+		structures associated with it, including the scsi_host.
+
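+	For example, a minimal sketch of both interfaces, following the
+	descriptions above (the interface name eth0 and the resulting
+	ctlr_0 instance name are illustrative):
+
+	# echo eth0 > /sys/bus/fcoe/ctlr_create
+	# echo ctlr_0 > /sys/bus/fcoe/ctlr_destroy
+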
+What: /sys/bus/fcoe/devices/ctlr_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
-Description: 'FCoE Controller' instances on the fcoe bus
+Description: 'FCoE Controller' instances on the fcoe bus.
+	The FCoE Controller now has a three stage creation process:
+	1) write the interface name to ctlr_create, 2) configure the
+	FCoE Controller (ctlr_X), and 3) enable the FCoE Controller
+	to begin discovery and login (see the example below). The
+	FCoE Controller is destroyed by writing its name, i.e.
+	ctlr_X, to the ctlr_destroy file.
+
+
Attributes:
	fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
this value will change the dev_loss_tmo for all
FCFs discovered by this controller.
+	mode: Display or change the FCoE Controller's mode. Possible
+	      modes are 'Fabric' and 'VN2VN'. If an FCoE Controller
+	      is started in 'Fabric' mode then FIP FCF discovery is
+	      initiated and ultimately a fabric login is attempted.
+	      If an FCoE Controller is started in 'VN2VN' mode then
+	      FIP VN2VN discovery and login is performed. An FCoE
+	      Controller only supports one mode at a time.
+
+ enabled: Whether an FCoE controller is enabled or disabled.
+ 0 if disabled, 1 if enabled. Writing either 0 or 1
+ to this file will enable or disable the FCoE controller.
+
lesb/link_fail: Link Error Status Block (LESB) link failure count.
	lesb/vlink_fail: Link Error Status Block (LESB) virtual link
			failure count.
Notes: ctlr_X (global increment starting at 0)
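+
+	Example: a sketch of the three stage flow described above
+	(eth0 and the resulting ctlr_0 instance name are illustrative;
+	the mode write is optional and only allowed while disabled):
+
+	# echo eth0 > /sys/bus/fcoe/ctlr_create
+	# echo VN2VN > /sys/bus/fcoe/devices/ctlr_0/mode
+	# echo 1 > /sys/bus/fcoe/devices/ctlr_0/enabled
+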
-What: /sys/bus/fcoe/fcf_X
+What: /sys/bus/fcoe/devices/fcf_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Release Date : Sat. Feb 9, 2013 17:00:00 PST 2013 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 06.506.00.00-rc1
+Old Version : 06.504.01.00-rc1
+ 1. Add 4k FastPath DIF support.
+	2. Don't load DevHandle unless FastPath enabled.
+ 3. Version and Changelog update.
+-------------------------------------------------------------------------------
Release Date : Mon. Oct 1, 2012 17:00:00 PST 2012 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
fib->hw_fib_va->header.StructType = FIB_MAGIC2;
fib->hw_fib_va->header.SenderFibAddress = (u32)address;
fib->hw_fib_va->header.u.TimeStamp = 0;
- BUG_ON((u32)(address >> 32) != 0L);
+ BUG_ON(upper_32_bits(address) != 0L);
address |= fibsize;
} else {
/* Calculate the amount to the fibsize bits */
address |= fibsize;
}
- src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
return 0;
static int bnx2fc_enable(struct net_device *netdev);
static int bnx2fc_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
static void bnx2fc_recv_frame(struct sk_buff *skb);
static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
-/**
- * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void bnx2fc_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = bnx2fc_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = bnx2fc_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
-
static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
return 0;
}
-static void bnx2fc_link_speed_update(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_interface *interface = port->priv;
- struct net_device *netdev = interface->netdev;
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_2500:
- lport->link_speed = FC_PORTSPEED_2GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- }
-}
static int bnx2fc_link_ok(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
port->fcoe_pending_queue_active = 0;
setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
u16 vlan_id)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
+
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
if (link_possible && !bnx2fc_link_ok(lport)) {
- /* Reset max recv frame size to default */
- fc_set_mfs(lport, BNX2FC_MFS);
- /*
- * ctlr link up will only be handled during
- * enable to avoid sending discovery solicitation
- * on a stale vlan
- */
- if (interface->enabled)
- fcoe_ctlr_link_up(ctlr);
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ /* Reset max recv frame size to default */
+ fc_set_mfs(lport, BNX2FC_MFS);
+ /*
+ * ctlr link up will only be handled during
+ * enable to avoid sending discovery
+ * solicitation on a stale vlan
+ */
+ if (interface->enabled)
+ fcoe_ctlr_link_up(ctlr);
+		}
} else if (fcoe_ctlr_link_down(ctlr)) {
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- fc_host_port_type(vport->host) =
- FC_PORTTYPE_UNKNOWN;
- mutex_unlock(&lport->lp_mutex);
- fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->stats,
- get_cpu())->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
- wait_for_upload = 1;
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+		}
}
}
mutex_unlock(&bnx2fc_dev_lock);
port = lport_priv(lport);
port->lport = lport;
port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
return rc;
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
}
/**
- * bnx2fc_create - Create bnx2fc FCoE interface
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls bnx2fc_enable or bnx2fc_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = bnx2fc_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return bnx2fc_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return bnx2fc_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+	}
+}
+
+enum bnx2fc_create_link_state {
+ BNX2FC_CREATE_LINK_DOWN,
+ BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * @buffer: The name of Ethernet interface to create on
- * @kp: The associated kernel param
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Called from sysfs.
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*
* Returns: 0 for success
*/
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _bnx2fc_create(struct net_device *netdev,
+ enum fip_state fip_mode,
+ enum bnx2fc_create_link_state link_state)
{
+ struct fcoe_ctlr_device *cdev;
struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
/* Make this master N_port */
ctlr->lp = lport;
- if (!bnx2fc_link_ok(lport)) {
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ cdev->enabled = FCOE_CTLR_ENABLED;
+ else
+ cdev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP &&
+ !bnx2fc_link_ok(lport)) {
fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
BNX2FC_HBA_DBG(lport, "create: START DISC\n");
bnx2fc_start_disc(interface);
- interface->enabled = true;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ interface->enabled = true;
+
/*
* Release from kref_init in bnx2fc_interface_setup, on success
* lport should be holding a reference taken in bnx2fc_if_create
return rc;
}
+/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+ return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+ BNX2FC_CREATE_LINK_DOWN);
+}
+
/**
* bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
*
.name = {"bnx2fc"},
.attached = false,
.list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .alloc = bnx2fc_ctlr_alloc,
.match = bnx2fc_match,
.create = bnx2fc_create,
.destroy = bnx2fc_destroy,
module_exit(bnx2fc_mod_exit);
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
- .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+ .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
.elsct_send = bnx2fc_elsct_send,
.fcp_abort_io = bnx2fc_abort_io,
.fcp_cleanup = bnx2fc_cleanup,
- .get_lesb = bnx2fc_get_lesb,
+ .get_lesb = fcoe_get_lesb,
.rport_event_callback = bnx2fc_rport_event_handler,
};
dcb->max_command = 1;
dcb->target_id = target;
dcb->target_lun = lun;
+ dcb->dev_mode = eeprom->target[target].cfg0;
#ifndef DC395x_NO_DISCONNECT
dcb->identify_msg =
IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
#else
dcb->identify_msg = IDENTIFY(0, lun);
#endif
- dcb->dev_mode = eeprom->target[target].cfg0;
dcb->inquiry7 = 0;
dcb->sync_mode = 0;
dcb->min_nego_period = clock_period[period_index];
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
-static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
unsigned int op,
void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);
-static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+ .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
port = lport_priv(lport);
port->lport = lport;
port->priv = fcoe;
+ port->get_netdev = fcoe_netdev;
port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
+ /*
+ * Need to add the lport to the hostlist
+ * so we catch NETDEV_CHANGE events.
+ */
+ fcoe_hostlist_add(lport);
+
/* configure a fc_lport including the exchange manager */
rc = fcoe_lport_config(lport);
if (rc) {
out_lp_destroy:
fc_exch_mgr_free(lport);
out_host_put:
+ fcoe_hostlist_del(lport);
scsi_host_put(lport->host);
out:
return ERR_PTR(rc);
static int fcoe_device_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
struct fcoe_ctlr *ctlr;
fcoe_link_speed_update(lport);
- if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(ctlr);
- else if (fcoe_ctlr_link_down(ctlr)) {
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_possible && !fcoe_link_ok(lport)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ fcoe_ctlr_link_up(ctlr);
+		}
+ } else if (fcoe_ctlr_link_down(ctlr)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+		}
}
out:
return rc;
* Called from fcoe transport.
*
* Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
*/
static int fcoe_disable(struct net_device *netdev)
{
return rc;
}
+/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return fcoe_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return fcoe_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+	}
+}
+
/**
* fcoe_destroy() - Destroy a FCoE interface
 * @netdev : The net_device object of the Ethernet interface to destroy on
{
struct fcoe_port *port;
struct fcoe_interface *fcoe;
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
+ shost = port->lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+
mutex_lock(&fcoe_config_mutex);
fcoe = port->priv;
#endif
}
+enum fcoe_create_link_state {
+ FCOE_CREATE_LINK_DOWN,
+ FCOE_CREATE_LINK_UP,
+};
+
/**
- * fcoe_create() - Create a fcoe interface
- * @netdev : The net_device object the Ethernet interface to create on
- * @fip_mode: The FIP mode for this creation
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * Called from fcoe transport
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Returns: 0 for success
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*/
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+ enum fcoe_create_link_state link_state)
{
int rc = 0;
struct fcoe_ctlr_device *ctlr_dev;
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
- /* add to lports list */
- fcoe_hostlist_add(lport);
-
/* start FIP Discovery and FLOGI */
lport->boot_time = jiffies;
fc_fabric_login(lport);
- if (!fcoe_link_ok(lport)) {
+
+ /*
+ * If the fcoe_ctlr_device is to be set to DISABLED
+ * it must be done after the lport is added to the
+ * hostlist, but before the rtnl_lock is released.
+ * This is because the rtnl_lock protects the
+ * hostlist that fcoe_device_notification uses. If
+ * the FCoE Controller is intended to be created
+ * DISABLED then 'enabled' needs to be considered
+	 * when handling link events. 'enabled' must be set
+ * before the lport can be found in the hostlist
+ * when a link up event is received.
+ */
+ if (link_state == FCOE_CREATE_LINK_UP)
+ ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+ else
+ ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == FCOE_CREATE_LINK_UP &&
+ !fcoe_link_ok(lport)) {
rtnl_unlock();
fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
}
/**
- * fcoe_link_speed_update() - Update the supported and actual link speeds
- * @lport: The local port to update speeds for
+ * fcoe_create() - Create a fcoe interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
*
- * Returns: 0 if the ethtool query was successful
- * -1 if the ethtool query failed
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
*/
-static int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_ctlr_alloc(struct net_device *netdev)
{
- struct net_device *netdev = fcoe_netdev(lport);
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |=
- FC_PORTSPEED_10GBIT;
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- return 0;
- }
- return -1;
+ return _fcoe_create(netdev, FIP_MODE_FABRIC,
+ FCOE_CREATE_LINK_DOWN);
}
/**
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
- if (!fcoe_link_ok(ctlr->lp))
+
+ if (cdev->enabled != FCOE_CTLR_DISABLED &&
+ !fcoe_link_ok(ctlr->lp))
fcoe_ctlr_link_up(ctlr);
return 0;
}
return 0;
}
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ *			 port from the hostlist
+ * @lport: The local port that identifies the FCoE interface to be removed
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ list_del(&fcoe->list);
+ return;
+}
static struct fcoe_transport fcoe_sw_transport = {
.name = {FCOE_TRANSPORT_DEFAULT},
.attached = false,
.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
.match = fcoe_match,
+ .alloc = fcoe_ctlr_alloc,
.create = fcoe_create,
.destroy = fcoe_destroy,
.enable = fcoe_enable,
/* releases the associated fcoe hosts */
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
- list_del(&fcoe->list);
ctlr = fcoe_to_ctlr(fcoe);
port = lport_priv(ctlr->lp);
+ fcoe_hostlist_del(port->lport);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
NULL, NULL, 3 * lport->r_a_tov);
}
-/**
- * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void fcoe_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = fcoe_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = fcoe_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
#define FCOE_DBG(fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_LOGGING, \
- printk(KERN_INFO "fcoe: " fmt, ##args);)
+ pr_info("fcoe: " fmt, ##args);)
#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
- printk(KERN_INFO "fcoe: %s: " fmt, \
- netdev->name, ##args);)
+ pr_info("fcoe: %s: " fmt, \
+ netdev->name, ##args);)
/**
* struct fcoe_interface - A FCoE interface
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id)
+ if (!fcf || !lport->port_id) {
+ /*
+	 * We are yet to select the best FCF, but we got a CVL in the
+	 * meantime. Reset the ctlr and let it rediscover the FCF.
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
return;
+ }
/*
* mask of required descriptors. Validating each one clears its bit.
fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
fcf->flogi_sent, fcf->pri);
- if (fcf->fabric_name != first->fabric_name ||
- fcf->vfid != first->vfid ||
- fcf->fc_map != first->fc_map) {
- LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
- "or FC-MAP\n");
- return NULL;
- }
- if (fcf->flogi_sent)
- continue;
if (!fcoe_ctlr_fcf_usable(fcf)) {
LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
"map %x %svalid %savailable\n",
"" : "un");
continue;
}
+ if (fcf->fabric_name != first->fabric_name ||
+ fcf->vfid != first->vfid ||
+ fcf->fc_map != first->fc_map) {
+ LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+ "or FC-MAP\n");
+ return NULL;
+ }
+ if (fcf->flogi_sent)
+ continue;
if (!best || fcf->pri < best->pri || best->flogi_sent)
best = fcf;
}
}
EXPORT_SYMBOL(fcoe_fcf_get_selected);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
mutex_lock(&ctlr->ctlr_mutex);
- switch (ctlr->mode) {
- case FIP_MODE_FABRIC:
- ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
- break;
- case FIP_MODE_VN2VN:
- ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ switch (ctlr_dev->mode) {
+ case FIP_CONN_TYPE_VN2VN:
+ ctlr->mode = FIP_MODE_VN2VN;
break;
+ case FIP_CONN_TYPE_FABRIC:
default:
- ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ ctlr->mode = FIP_MODE_FABRIC;
break;
}
+
mutex_unlock(&ctlr->ctlr_mutex);
}
-EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
+#include <linux/ctype.h>
#include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
static atomic_t ctlr_num;
static atomic_t fcf_num;
((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x) \
((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x) \
+ ((x)->enabled)
#define fcoe_fcf_state(x) \
((x)->state)
#define fcoe_fcf_fabric_name(x) \
#define fcoe_enum_name_search(title, table_type, table) \
static const char *get_fcoe_##title##_name(enum table_type table_key) \
{ \
- int i; \
- char *name = NULL; \
- \
- for (i = 0; i < ARRAY_SIZE(table); i++) { \
- if (table[i].value == table_key) { \
- name = table[i].name; \
- break; \
- } \
- } \
- return name; \
+ if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \
+ return NULL; \
+ return table[table_key]; \
+}
+
+static char *fip_conn_type_names[] = {
+ [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+ [ FIP_CONN_TYPE_FABRIC ] = "Fabric",
+ [ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+ if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+ return i;
+ }
+
+ return FIP_CONN_TYPE_UNKNOWN;
}
-static struct {
- enum fcf_state value;
- char *name;
-} fcf_state_names[] = {
- { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
- { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
- { FCOE_FCF_STATE_CONNECTED, "Connected" },
+static char *fcf_state_names[] = {
+ [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
+ [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+ [ FCOE_FCF_STATE_CONNECTED ] = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
-static struct {
- enum fip_conn_type value;
- char *name;
-} fip_conn_type_names[] = {
- { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
- { FIP_CONN_TYPE_FABRIC, "Fabric" },
- { FIP_CONN_TYPE_VN2VN, "VN2VN" },
-};
-fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-#define FCOE_CTLR_MODE_MAX_NAMELEN 50
-
+#define FCOE_MAX_MODENAME_LEN 20
static ssize_t show_ctlr_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
const char *name;
- if (ctlr->f->get_fcoe_ctlr_mode)
- ctlr->f->get_fcoe_ctlr_mode(ctlr);
-
name = get_fcoe_ctlr_mode_name(ctlr->mode);
if (!name)
return -EINVAL;
- return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+ "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+ if (count > FCOE_MAX_MODENAME_LEN)
+ return -EINVAL;
+
+ strncpy(mode, buf, count);
+
+ if (mode[count - 1] == '\n')
+ mode[count - 1] = '\0';
+ else
+ mode[count] = '\0';
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+ return -EBUSY;
+ case FCOE_CTLR_DISABLED:
+ if (!ctlr->f->set_fcoe_ctlr_mode) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Mode change not supported by LLD.");
+ return -ENOTSUPP;
+ }
+
+ ctlr->mode = fcoe_parse_mode(mode);
+ if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Unknown mode %s provided.", buf);
+ return -EINVAL;
+ }
+
+ ctlr->f->set_fcoe_ctlr_mode(ctlr);
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+
+ return count;
+ case FCOE_CTLR_UNUSED:
+ default:
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+ return -ENOTSUPP;
+	}
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+ show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ int rc;
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ if (*buf == '1')
+ return count;
+ ctlr->enabled = FCOE_CTLR_DISABLED;
+ break;
+ case FCOE_CTLR_DISABLED:
+ if (*buf == '0')
+ return count;
+ ctlr->enabled = FCOE_CTLR_ENABLED;
+ break;
+ case FCOE_CTLR_UNUSED:
+ return -ENOTSUPP;
+	}
+
+ rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+ [ FCOE_CTLR_ENABLED ] = "1",
+ [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+ ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
"%s\n", name);
}
-static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
- show_ctlr_mode, NULL);
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+ show_ctlr_enabled_state,
+ store_ctlr_enabled);
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
};
.release = fcoe_fcf_device_release,
};
+struct bus_attribute fcoe_bus_attr_group[] = {
+ __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
+ __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
+ __ATTR_NULL
+};
+
struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
+ .bus_attrs = fcoe_bus_attr_group,
};
/**
ctlr->id = atomic_inc_return(&ctlr_num) - 1;
ctlr->f = f;
+ ctlr->mode = FIP_CONN_TYPE_FABRIC;
INIT_LIST_HEAD(&ctlr->fcfs);
mutex_init(&ctlr->lock);
ctlr->dev.parent = parent;
.notifier_call = libfcoe_device_notification,
};
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |=
+ FC_PORTSPEED_10GBIT;
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_1000:
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case SPEED_10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port to update speeds for
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
void __fcoe_get_lesb(struct fc_lport *lport,
struct fc_els_lesb *fc_lesb,
struct net_device *netdev)
}
EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+void fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_get_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
{
u8 wwpn[8];
return NOTIFY_OK;
}
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ int rc = 0;
+ int err;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+ if (err) {
+ fcoe_del_netdev_mapping(netdev);
+ rc = -ENOMEM;
+ goto out_putdev;
+ }
+
+ err = fcoe_add_netdev_mapping(netdev, ft);
+ if (err) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+	LIBFCOE_TRANSPORT_DBG("transport %s succeeded to create fcoe on %s.\n",
+			      ft->name, netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc)
+ return rc;
+ return count;
+}
+
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy(netdev);
+ if (rc)
+ goto out_putdev;
+
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+ rc = count; /* required for successful return */
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
/**
* fcoe_transport_create() - Create a fcoe interface
dev_put(netdev);
out_nodev:
mutex_unlock(&ft_mutex);
-
- if (rc == -ERESTARTSYS)
- return restart_syscall();
- else
- return rc;
+ return rc;
}
/**
#define _FCOE_LIBFCOE_H_
extern unsigned int libfcoe_debug_logging;
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
do { \
#define LIBFCOE_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
+ pr_info("libfcoe: " fmt, ##args);)
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
+ pr_info("host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
- printk(KERN_INFO "%s: " fmt, \
- __func__, ##args);)
+ pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \
+ pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
#endif /* _FCOE_LIBFCOE_H_ */
fnic_res.o \
fnic_fcs.o \
fnic_scsi.o \
+ fnic_trace.o \
+ fnic_debugfs.o \
vnic_cq.o \
vnic_dev.o \
vnic_intr.o \
#include <scsi/libfcoe.h>
#include "fnic_io.h"
#include "fnic_res.h"
+#include "fnic_trace.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
#define FNIC_NO_TAG -1
+/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS 0
+#define FNIC_IO_INITIALIZED BIT(0)
+#define FNIC_IO_ISSUED BIT(1)
+#define FNIC_IO_DONE BIT(2)
+#define FNIC_IO_REQ_NULL BIT(3)
+#define FNIC_IO_ABTS_PENDING BIT(4)
+#define FNIC_IO_ABORTED BIT(5)
+#define FNIC_IO_ABTS_ISSUED BIT(6)
+#define FNIC_IO_TERM_ISSUED BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8)
+#define FNIC_IO_ABT_TERM_DONE BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11)
+#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */
+#define FNIC_DEV_RST_ISSUED BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED BIT(16)
+#define FNIC_DEV_RST_DONE BIT(17)
+#define FNIC_DEV_RST_REQ_NULL BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE BIT(19)
+#define FNIC_DEV_RST_TERM_DONE BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+
/*
* Usage of the scsi_cmnd scratchpad.
* These fields are locked by the hashed io_req_lock.
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
+/**
+ * state_flags to identify host state along with fnic's state
+ */
+#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE (0)
+#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \
+ __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 1)
+
extern unsigned int fnic_log_level;
#define FNIC_MAIN_LOGGING 0x01
struct completion *remove_wait; /* device remove thread blocks */
+ atomic_t in_flight; /* io counter */
+ u32 _reserved; /* fill hole */
+ unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+ return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
#endif /* _FNIC_H_ */
--- /dev/null
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens a debugfs file trace_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the variable fnic_tracing_enabled
+ * and stores it into the local @buf. It will start reading file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int len;
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from user buffer @ubuf to buffer @buf and
+ * sets fnic_tracing_enabled value as per user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic_tracing_enabled = val;
+ (*ppos)++;
+
+ return cnt;
+}
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt;
+ fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+ if (!fnic_dbg_prt)
+ return -ENOMEM;
+
+ fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ (3*(trace_max_pages * PAGE_SIZE)));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ file->private_data = fnic_dbg_prt;
+ return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+ loff_t offset,
+ int howto)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ loff_t pos = -1;
+
+ switch (howto) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos = file->f_pos + offset;
+ break;
+ case 2:
+ pos = fnic_dbg_prt->buffer_len - offset;
+ }
+ return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+ -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ fnic_dbg_prt->buffer,
+ fnic_dbg_prt->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+ vfree(fnic_dbg_prt->buffer);
+ kfree(fnic_dbg_prt);
+ return 0;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_debugfs_open,
+ .llseek = fnic_trace_debugfs_lseek,
+ .read = fnic_trace_debugfs_read,
+ .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory. It will create file trace to log fnic trace buffer
+ * output into debugfs and it will also create file trace_enable to
+ * control enable/disable of trace logging into trace buffer.
+ */
+int fnic_trace_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+ fnic_trace_enable = debugfs_create_file("tracing_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL, &fnic_trace_ctrl_fops);
+
+ if (!fnic_trace_enable) {
+ printk(KERN_DEBUG "Cannot create trace_enable file"
+ " under debugfs");
+ return rc;
+ }
+
+ fnic_trace_debugfs_file = debugfs_create_file("trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL,
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_trace_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create trace file under debugfs");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
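+
+/*
+ * Example usage from userspace, a sketch assuming debugfs is mounted
+ * at the conventional /sys/kernel/debug path:
+ *
+ *   # echo 1 > /sys/kernel/debug/fnic/tracing_enable
+ *   # cat /sys/kernel/debug/fnic/trace
+ */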
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+ if (fnic_trace_debugfs_file) {
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+ }
+ if (fnic_trace_enable) {
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+ }
+ if (fnic_trace_debugfs_root) {
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+ }
+}
#include <scsi/fc/fc_fcp.h>
#define FNIC_DFLT_SG_DESC_CNT 32
-#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
struct host_sg_desc {
};
enum fnic_ioreq_state {
- FNIC_IOREQ_CMD_PENDING = 0,
+ FNIC_IOREQ_NOT_INITED = 0,
+ FNIC_IOREQ_CMD_PENDING,
FNIC_IOREQ_ABTS_PENDING,
FNIC_IOREQ_ABTS_COMPLETE,
FNIC_IOREQ_CMD_COMPLETE,
u8 sgl_type; /* device DMA descriptor list type */
u8 io_completed:1; /* set to 1 when fw completes IO */
u32 port_id; /* remote port DID */
+ unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
};
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+ "for fnic trace buffer");
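+
+/*
+ * Example (a sketch): the trace buffer size can be set at module load
+ * time, e.g. "modprobe fnic fnic_trace_max_pages=32", or adjusted later
+ * via /sys/module/fnic/parameters/fnic_trace_max_pages.
+ */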
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
}
fnic->state = FNIC_IN_FC_MODE;
+ atomic_set(&fnic->in_flight, 0);
+ fnic->state_flags = FNIC_FLAGS_NONE;
+
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Allocate memory for trace buffer */
+ err = fnic_trace_buf_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Trace buffer initialization Failed "
+ "Fnic Tracing utility is disabled\n");
+ fnic_trace_free();
+ }
+
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
+ fnic_trace_free();
return err;
}
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
+ fnic_trace_free();
}
module_init(fnic_init_module);
};
static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
}
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+ unsigned long clearbits)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+ int sh_locked = spin_is_locked(host->host_lock);
+ unsigned long flags = 0;
+
+ if (!sh_locked)
+ spin_lock_irqsave(host->host_lock, flags);
+
+ if (clearbits)
+ fnic->state_flags &= ~st_flags;
+ else
+ fnic->state_flags |= st_flags;
+
+ if (!sh_locked)
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return;
+}
+
+
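The fnic_set_state_flags()/fnic_clear_state_flags() calls used in the I/O path below are presumably thin wrappers over this helper, defined in fnic.h outside these hunks; a sketch of the assumed shape:

	/* Assumed wrappers (the real definitions live in fnic.h):
	 * the third argument selects clear (non-zero) vs set (zero).
	 */
	#define fnic_set_state_flags(fnicp, st_flags)	\
		__fnic_set_state_flags(fnicp, st_flags, 0)

	#define fnic_clear_state_flags(fnicp, st_flags)	\
		__fnic_set_state_flags(fnicp, st_flags, 1)
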
/*
* fnic_fw_reset_handler
* Routine to send reset msg to fw
int ret = 0;
unsigned long flags;
+ /* indicate fwreset to io path */
+ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+ schedule_timeout(msecs_to_jiffies(1));
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
if (!ret)
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else
+ else {
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
+ }
+
return ret;
}
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_queue_wq_copy_desc failure - no descriptors\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
- struct fc_lport *lp;
+ struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
- struct fnic_io_req *io_req;
- struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fnic *fnic = lport_priv(lp);
struct vnic_wq_copy *wq;
int ret;
- int sg_count;
+ u64 cmd_trace;
+ int sg_count = 0;
unsigned long flags;
unsigned long ptr;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
return 0;
}
- lp = shost_priv(sc->device->host);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
+ atomic_inc(&fnic->in_flight);
+
/*
* Release host lock, use driver resource specific locks from here.
* Don't re-enable interrupts in case they were disabled prior to the
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
+ CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
- fnic = lport_priv(lp);
-
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
ret = SCSI_MLQUEUE_HOST_BUSY;
/* Map the data buffer */
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, sc->cmnd[0],
+ sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
/* initialize rest of io_req */
io_req->port_id = rport->port_id;
+ io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
+ CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
sc->scsi_done = done;
/* create copy wq desc and enqueue it */
* refetch the pointer under the lock.
*/
spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, 0, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
+ } else {
+ /* REVISIT: Use per IO lock in the final code */
+ CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
out:
+ cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+ sc->cmnd[5]);
+
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, io_req,
+ sg_count, cmd_trace,
+		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
fnic_flush_tx(fnic);
reset_cmpl_handler_end:
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
return ret;
}
struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
+ u64 *ox_id_tag = (u64 *)(void *)desc;
/* mark the ack state */
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
fnic->fw_ack_recd[0] = 1;
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ FNIC_TRACE(fnic_fcpio_ack_handler,
+ fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ ox_id_tag[4], ox_id_tag[5]);
}
/*
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ u64 cmd_trace;
+ unsigned long start_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl sc is null - "
+ "hdr status = %s tag = 0x%x desc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, desc);
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ fnic->lport->host->host_no, id,
+ ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+ (u64)icmnd_cmpl->_resvd0[0]),
+ ((u64)hdr_status << 16 |
+ (u64)icmnd_cmpl->scsi_status << 8 |
+ (u64)icmnd_cmpl->flags), desc,
+ (u64)icmnd_cmpl->residual, 0);
return;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl ABTS pending hdr status = %s "
+ "sc 0x%p scsi_status %x residual %d\n",
+ fnic_fcpio_status_to_str(hdr_status), sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
+ break;
+ case FCPIO_ABORTED:
+ CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ break;
+ default:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc);
+ break;
+ }
return;
}
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
spin_unlock_irqrestore(io_lock, flags);
mempool_free(io_req, fnic->io_req_pool);
+ cmd_trace = ((u64)hdr_status << 56) |
+ (u64)icmnd_cmpl->scsi_status << 48 |
+ (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+ (u64)icmnd_cmpl->_resvd0[0] << 48 |
+ jiffies_to_msecs(jiffies - start_time)),
+ desc, cmd_trace,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
-
}
/* fnic_fcpio_itmf_cmpl_handler
struct fnic_io_req *io_req;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+ if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+ fnic_fcpio_status_to_str(hdr_status), id);
return;
-
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
- if (id & FNIC_TAG_ABORT) {
+ if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+ /* Abort and terminate completion of device reset req */
+ /* REVISIT : Add asserts about various flags */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset abts cmpl recd. id %x status %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
sc->scsi_done(sc);
+ }
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Terminate pending "
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ /* Need to wait for terminate completion */
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd after time out. "
+ "id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
struct fcpio_fw_req *desc)
{
struct fnic *fnic = vnic_dev_priv(vdev);
- int ret = 0;
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
- ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
break;
case FCPIO_RESET_CMPL: /* fw completed reset */
- ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
break;
default:
break;
}
- return ret;
+ return 0;
}
/*
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
if (i == exclude_id)
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will be here only when FW completes reset
+ * without sending completions for outstanding ios.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
" DID_TRANSPORT_DISRUPTED\n");
/* Complete the command to SCSI */
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, i, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
}
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
unsigned long flags;
+ spin_lock_irqsave(host->host_lock, flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_queue_abort_io_req: failure: no descriptors\n");
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
fnic->config.ra_tov, fnic->config.ed_tov);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+
return 0;
}
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
- "fnic_rport_reset_exch called portid 0x%06x\n",
+ "fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n",
+ sc);
+ }
BUG_ON(io_req->abts_done);
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
- " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
- rport->port_name, rport->node_name,
+ " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ rport->port_name, rport->node_name, rport,
rport->port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_terminate_rport_io: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_terminate_rport_io "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+ }
BUG_ON(io_req->abts_done);
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
int ret = SUCCESS;
- u32 task_req;
+ u32 task_req = 0;
struct scsi_lun fc_lun;
+ int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
fnic = lport_priv(lp);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
- rport->port_id, sc->device->lun, sc->request->tag);
+ tag = sc->request->tag;
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+ rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ if (task_req == FCPIO_ITMF_ABT_TASK)
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
/*
* We queued an abort IO, wait for its completion.
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
/* fw did not complete abort, timed out */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
fnic_abort_cmd_end:
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Returning from abort cmd %s\n",
+ "Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
return ret;
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ spin_lock_irqsave(host->host_lock, intr_flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+ return FAILED;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "queue_dr_io_req failure - no descriptors\n");
ret = -EAGAIN;
goto lr_io_req_end;
}
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ atomic_dec(&fnic->in_flight);
return ret;
}
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc)
{
- int tag;
+ int tag, abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_lun fc_lun;
struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ enum fnic_ioreq_state old_ioreq_state;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
- BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ /*
+	 * Any pending IO issued prior to the reset is expected to be
+	 * in the abts-pending state; if it is not, set
+	 * FNIC_IOREQ_ABTS_PENDING here to mark the IO as abort pending.
+	 * When the IO completes, it will be handed back to and
+	 * handled by this function.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ BUG_ON(io_req->abts_done);
+
+ abt_tag = tag;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
ret = 1;
goto clean_pending_aborts_end;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- ret = 1;
- goto clean_pending_aborts_end;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ continue;
}
io_req->abts_done = NULL;
/* if abort is still pending with fw, fail */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
mempool_free(io_req, fnic->io_req_pool);
}
+ schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+	/* walk again to check if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+ ret = FAILED;
+
clean_pending_aborts_end:
return ret;
}
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag, ret = SCSI_NO_TAG;
+
+ BUG_ON(!bqt);
+ if (!bqt) {
+ pr_err("Tags are not supported\n");
+ goto end;
+ }
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+ if (tag >= bqt->max_depth) {
+ pr_err("Tag allocation failure\n");
+ goto end;
+ }
+ } while (test_and_set_bit(tag, bqt->tag_map));
+
+ bqt->tag_index[tag] = sc->request;
+ sc->request->tag = tag;
+ sc->tag = tag;
+ if (!sc->request->special)
+ sc->request->special = sc;
+
+ ret = tag;
+
+end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag = sc->request->tag;
+
+ if (tag == SCSI_NO_TAG)
+ return;
+
+ BUG_ON(!bqt || !bqt->tag_index[tag]);
+ if (!bqt)
+ return;
+
+ bqt->tag_index[tag] = NULL;
+ clear_bit(tag, bqt->tag_map);
+
+ return;
+}
+
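These two helpers exist because a reset command handed to the driver by the SCSI error handler may never have passed through the block layer's tag allocator. As the device reset path below shows, fnic_device_reset() detects a negative sc->request->tag, borrows a free bit from the host's blk_queue_tag map via fnic_scsi_host_start_tag(), and releases it again through fnic_scsi_host_end_tag() once the reset finishes.
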
/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
int ret = FAILED;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
+ struct scsi_lun fc_lun;
+ int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
+	int tag_gen_flag = 0;	/* to track tags allocated by fnic driver */
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%x\n",
- rport->port_id, sc->device->lun);
+ "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+ rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
if (fc_remote_port_chkready(rport))
goto fnic_device_reset_end;
+ CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ /* Allocate tag if not present */
+
+ tag = sc->request->tag;
+ if (unlikely(tag < 0)) {
+ tag = fnic_scsi_host_start_tag(fnic, sc);
+ if (unlikely(tag == SCSI_NO_TAG))
+ goto fnic_device_reset_end;
+ tag_gen_flag = 1;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
- sc->request->tag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
status = CMD_LR_STATUS(sc);
- spin_unlock_irqrestore(io_lock, flags);
/*
* If lun reset not completed, bail out with failed. io_req
if (status == FCPIO_INVALID_CODE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- goto fnic_device_reset_end;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ spin_unlock_irqrestore(io_lock, flags);
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ /*
+ * Issue abort and terminate on the device reset request.
+	 * If queuing the abort fails, retry issuing it after a delay.
+ */
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic,
+ tag | FNIC_TAG_DEV_RST,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Abort and terminate issued on Device reset "
+ "tag 0x%x sc 0x%p\n", tag, sc);
+ break;
+ }
+ }
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ break;
+ } else {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req->abts_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ }
+ } else {
+ spin_unlock_irqrestore(io_lock, flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
fnic_device_reset_end:
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ /* free tag if it is allocated */
+ if (unlikely(tag_gen_flag))
+ fnic_scsi_host_end_tag(fnic, sc);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
DECLARE_COMPLETION_ONSTACK(remove_wait);
/* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+
fnic->remove_wait = &remove_wait;
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
struct fnic *fnic = lport_priv(lp);
/* issue fw reset */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
fc_exch_mgr_reset(lp, sid, did);
}
+
+/*
+ * fnic_is_abts_pending() is a helper function that walks through the tag
+ * map to check whether any IOs are pending. It returns 1 (true) if any
+ * are, otherwise 0 (false).
+ * If @lr_sc is non-NULL, only IOs belonging to that particular LUN are
+ * checked; otherwise all IOs are checked.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_device *lun_dev = NULL;
+
+ if (lr_sc)
+ lun_dev = lr_sc->device;
+
+	/* walk the tag map to check if IOs are still pending in fw */
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+		if (!io_req || (lr_sc && sc->device != lun_dev)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+			ret = 1;
+		spin_unlock_irqrestore(io_lock, flags);
+ }
+
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Return a buffer pointer for the caller to fill with trace information
+ *
+ * Description:
+ * This routine takes the next available trace buffer entry location at
+ * @wr_idx from the allocated trace buffer pages and gives that memory
+ * location to the caller to store trace information.
+ *
+ * Return Value:
+ * This routine returns a pointer to the next available trace entry
+ * (@fnic_buf_head) for the caller to fill with trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+ unsigned long fnic_buf_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+
+ /*
+ * Get next available memory location for writing trace information
+ * at @wr_idx and increment @wr_idx
+ */
+ fnic_buf_head =
+ fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+ fnic_trace_entries.wr_idx++;
+
+ /*
+	 * If the trace buffer is full, wrap @wr_idx back to the
+	 * start of the buffer
+ */
+ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.wr_idx = 0;
+
+ /*
+	 * If the write index @wr_idx has caught up with the read index
+	 * @rd_idx, advance @rd_idx to the next entry in the trace buffer
+ */
+ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+ fnic_trace_entries.rd_idx++;
+ if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.rd_idx = 0;
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_prt: pointer to the debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+ int rd_idx;
+ int wr_idx;
+ int len = 0;
+ unsigned long flags;
+ char str[KSYM_SYMBOL_LEN];
+ struct timespec val;
+ fnic_trace_data_t *tbp;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+ rd_idx = fnic_trace_entries.rd_idx;
+ wr_idx = fnic_trace_entries.wr_idx;
+ if (wr_idx < rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+			 * If rd_idx has reached the maximum number of
+			 * trace entries, wrap it back to zero
+ */
+ if (rd_idx > (fnic_max_trace_entries-1))
+ rd_idx = 0;
+ /*
+			 * Continue dumping trace buffer entries into
+ * memory file till rd_idx reaches write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ } else if (wr_idx > rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+			 * Continue dumping trace buffer entries into
+ * memory file till rd_idx reaches write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return len;
+}
+
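The two branches of fnic_get_trace_data() above differ only in how they detect wrap-around; the per-entry formatting is identical. A possible consolidation (a sketch, not the driver's code) factors the body into one helper and walks rd_idx modulo fnic_max_trace_entries in a single loop:

	/* Sketch: format one trace entry into the debugfs buffer; mirrors the
	 * duplicated loop bodies in fnic_get_trace_data() above.  Returns the
	 * number of bytes written, or -1 if the entry pointer is unset.
	 */
	static int fnic_format_trace_entry(fnic_dbgfs_t *prt, int rd_idx, int len)
	{
		char str[KSYM_SYMBOL_LEN];
		struct timespec val;
		fnic_trace_data_t *tbp = (fnic_trace_data_t *)
			fnic_trace_entries.page_offset[rd_idx];

		if (!tbp)
			return -1;

		/* Convert function pointer to function name */
		if (sizeof(unsigned long) < 8) {
			sprint_symbol(str, tbp->fnaddr.low);
			jiffies_to_timespec(tbp->timestamp.low, &val);
		} else {
			sprint_symbol(str, tbp->fnaddr.val);
			jiffies_to_timespec(tbp->timestamp.val, &val);
		}
		return snprintf(prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				"%16llx %16llx %16llx\n", val.tv_sec,
				val.tv_nsec, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
	}

The caller would then loop for (; rd_idx != wr_idx; rd_idx = (rd_idx + 1) % fnic_max_trace_entries), accumulating len, still under fnic_trace_lock.
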
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize trace buffer data structure by allocating required memory and
+ * setting page_offset information for every trace entry by adding trace entry
+ * length to previous page_offset value.
+ */
+int fnic_trace_buf_init(void)
+{
+ unsigned long fnic_buf_head;
+ int i;
+ int err = 0;
+
+ trace_max_pages = fnic_trace_max_pages;
+ fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+ FNIC_ENTRY_SIZE_BYTES;
+
+ fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ if (!fnic_trace_buf_p) {
+ printk(KERN_ERR PFX "Failed to allocate memory "
+ "for fnic_trace_buf_p\n");
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+ fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+ sizeof(unsigned long));
+ if (!fnic_trace_entries.page_offset) {
+ printk(KERN_ERR PFX "Failed to allocate memory for"
+ " page_offset\n");
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_entries.page_offset, 0,
+ (fnic_max_trace_entries * sizeof(unsigned long)));
+ fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+ fnic_buf_head = fnic_trace_buf_p;
+
+ /*
+ * Set page_offset field of fnic_trace_entries struct by
+ * calculating memory location for every trace entry using
+ * length of each trace entry
+ */
+ for (i = 0; i < fnic_max_trace_entries; i++) {
+ fnic_trace_entries.page_offset[i] = fnic_buf_head;
+ fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+ }
+ err = fnic_trace_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+ goto err_fnic_trace_debugfs_init;
+ }
+ printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+ return err;
+err_fnic_trace_debugfs_init:
+ fnic_trace_free();
+err_fnic_trace_buf_init:
+ return err;
+}
+
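For scale: with the default fnic_trace_max_pages of 16 and a typical 4 KiB page, the ring holds 16 * 4096 / 64 = 1024 entries of FNIC_ENTRY_SIZE_BYTES each, and the page_offset table caches those 1024 entry addresses up front so that fnic_trace_get_buf() only has to index the table while holding fnic_trace_lock.
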
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+ fnic_tracing_enabled = 0;
+ fnic_trace_debugfs_terminate();
+ if (fnic_trace_entries.page_offset) {
+ vfree((void *)fnic_trace_entries.page_offset);
+ fnic_trace_entries.page_offset = NULL;
+ }
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
--- /dev/null
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+#include <linux/fs.h>	/* for simple_read_from_buffer() */
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+ int wr_idx;
+ int rd_idx;
+ unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+ int buffer_len;
+ char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ };
+ u64 val;
+ } timestamp, fnaddr;
+ u32 host_no;
+ u32 tag;
+ u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+#define FNIC_TRACE_ENTRY_SIZE \
+ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e)		\
+	do {							\
+		if (unlikely(fnic_tracing_enabled)) {		\
+			fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+			if (trace_buf) { \
+				if (sizeof(unsigned long) < 8) { \
+					trace_buf->timestamp.low = jiffies; \
+					trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+				} else { \
+					trace_buf->timestamp.val = jiffies; \
+					trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+				} \
+				trace_buf->host_no = _hn; \
+				trace_buf->tag = _t; \
+				trace_buf->data[0] = (u64)(unsigned long)_a; \
+				trace_buf->data[1] = (u64)(unsigned long)_b; \
+				trace_buf->data[2] = (u64)(unsigned long)_c; \
+				trace_buf->data[3] = (u64)(unsigned long)_d; \
+				trace_buf->data[4] = (u64)(unsigned long)_e; \
+			} \
+		} \
+	} while (0)
+
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
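
The do { } while (0) wrapper on FNIC_TRACE above matters at call sites that pair the macro with an else; with a bare if in the macro body, the else would silently bind to the macro's internal if. A quick illustrative fragment (names hypothetical):

	/* Without the do/while wrapper, this else would pair with the
	 * macro-internal 'if (unlikely(fnic_tracing_enabled))' -- a classic
	 * dangling-else bug.  The wrapper keeps it binding to 'if (io_req)'.
	 */
	if (io_req)
		FNIC_TRACE(fnic_queuecommand, host_no, tag, sc, io_req,
			   sg_count, cmd_trace, state);
	else
		pr_warn("fnic: no io_req to trace\n");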
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
return -ENOMEM;
}
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ chain_sg->Addr.lower = 0;
+ chain_sg->Addr.upper = 0;
+ return -1;
+ }
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+ return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
}
}
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
- return;
+ return 0;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ if (dma_mapping_error(&pdev->dev, addr64)) {
+ /* Prevent subsequent unmap of something never mapped */
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return -1;
+ }
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ return 0;
}
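
The pattern these hpsa hunks apply is uniform: each pci_map_single() is now followed by a dma_mapping_error() check, the descriptor is zeroed so a later unmap path cannot touch a never-mapped address, and failure is propagated as a return code instead of being ignored. A condensed sketch of the idiom (helper name hypothetical):

	/* Sketch of the mapping idiom introduced above (name hypothetical). */
	static int hpsa_map_or_bail(struct pci_dev *pdev, struct SGDescriptor *sg,
				    void *buf, size_t len, int dir)
	{
		dma_addr_t addr = pci_map_single(pdev, buf, len, dir);

		if (dma_mapping_error(&pdev->dev, addr)) {
			/* zero the descriptor so unmap paths skip it */
			sg->Addr.lower = 0;
			sg->Addr.upper = 0;
			sg->Len = 0;
			return -1;
		}
		sg->Addr.lower = (u32) (addr & 0x0FFFFFFFFULL);
		sg->Addr.upper = (u32) ((addr >> 32) & 0x0FFFFFFFFULL);
		sg->Len = len;
		return 0;
	}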
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
return -ENOMEM;
}
- fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
return -ENOMEM;
}
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no data buffer to map. */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+ NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
- fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
- buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
- hpsa_map_sg_chain_block(h, cp);
+ if (hpsa_map_sg_chain_block(h, cp)) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
return 0;
}
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
- /* Send the Test Unit Ready */
- fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
return -ENOMEM;
}
- fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no buffer to map */
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ 0, 0, scsi3addr, TYPE_MSG);
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
+ int rc = 0;
if (!argp)
return -EINVAL;
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out_kfree;
}
} else {
memset(buff, 0, iocommand.buf_size);
}
c = cmd_special_alloc(h);
if (c == NULL) {
- kfree(buff);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_kfree;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[0].Addr.lower = 0;
+ c->SG[0].Addr.upper = 0;
+ c->SG[0].Len = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
}
- kfree(buff);
+out:
cmd_special_free(h, c);
- return 0;
+out_kfree:
+ kfree(buff);
+ return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[i].Addr.lower = 0;
+ c->SG[i].Addr.upper = 0;
+ c->SG[i].Len = 0;
+ hpsa_pci_unmap(h->pdev, c, i,
+ PCI_DMA_BIDIRECTIONAL);
+ status = -ENOMEM;
+ goto cleanup1;
+ }
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ /* fill_cmd can't fail here, no data buffer to map */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
return 0;
}
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
- return;
+ return -1;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
-
- hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
- return;
+ if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ return -1;
+ return 0;
}
/*
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
- fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
- RAID_CTLR_LUNID, TYPE_CMD);
+ if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD)) {
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
+out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
- if (unlikely(hrrq->ioa_is_dead || !res)) {
+ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
goto err_nodev;
}
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ipr_trace;
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
LEAVE;
return IPR_RC_JOB_RETURN;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
wmb();
- scsi_block_requests(ioa_cfg->host);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ scsi_block_requests(ioa_cfg->host);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
ipr_fail_all_ops(ioa_cfg);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
return;
} else {
ioa_cfg->in_ioa_bringdown = 1;
{
unsigned long host_lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].removing_ioa = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
u8 allow_interrupts:1;
u8 ioa_is_dead:1;
u8 allow_cmds:1;
+ u8 removing_ioa:1;
struct blk_iopoll iopoll;
};
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
- if (fsp->state & FC_SRB_RCV_STATUS)
- fc_fcp_complete_locked(fsp);
- else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_rec(fsp);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
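
The libfc reorder above prefers issuing a REC (exchange status query) whenever the remote port supports it, and only completes on a received status as a fallback. The resulting ladder, as a runnable sketch:

#include <stdio.h>

static const char *timeout_action(int rec_supported, int status_received)
{
	if (rec_supported)
		return "fc_fcp_rec";		/* query exchange state first */
	else if (status_received)
		return "fc_fcp_complete_locked";
	else
		return "fc_fcp_recovery";
}

int main(void)
{
	printf("%s\n", timeout_action(1, 1));	/* REC now wins this case */
	printf("%s\n", timeout_action(0, 1));
	printf("%s\n", timeout_action(0, 0));
	return 0;
}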
#define FC_LIBFC_DBG(fmt, args...) \
FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
- printk(KERN_INFO "libfc: " fmt, ##args))
+ pr_info("libfc: " fmt, ##args))
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (lport)->port_id, ##args))
+ pr_info("host%u: lport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args))
-#define FC_DISC_DBG(disc, fmt, args...) \
- FC_CHECK_LOGGING(FC_DISC_LOGGING, \
- printk(KERN_INFO "host%u: disc: " fmt, \
- fc_disc_lport(disc)->host->host_no, \
- ##args))
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ pr_info("host%u: disc: " fmt, \
+ fc_disc_lport(disc)->host->host_no, \
+ ##args))
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (port_id), ##args))
+ pr_info("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (port_id), ##args))
#define FC_RPORT_DBG(rdata, fmt, args...) \
FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
if ((pkt)->seq_ptr) { \
struct fc_exch *_ep = NULL; \
_ep = fc_seq_exch((pkt)->seq_ptr); \
- printk(KERN_INFO "host%u: fcp: %6.6x: " \
+ pr_info("host%u: fcp: %6.6x: " \
"xid %04x-%04x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, \
(_ep)->oxid, (_ep)->rxid, ##args); \
} else { \
- printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
+ pr_info("host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, ##args); \
} \
#define FC_EXCH_DBG(exch, fmt, args...) \
FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
- printk(KERN_INFO "host%u: xid %4x: " fmt, \
- (exch)->lp->host->host_no, \
- exch->xid, ##args))
+ pr_info("host%u: xid %4x: " fmt, \
+ (exch)->lp->host->host_no, \
+ exch->xid, ##args))
#define FC_SCSI_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
- printk(KERN_INFO "host%u: scsi: " fmt, \
- (lport)->host->host_no, ##args))
+ pr_info("host%u: scsi: " fmt, \
+ (lport)->host->host_no, ##args))
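
These hunks replace open-coded printk(KERN_INFO ...) with pr_info(). In mainline, pr_info() expands to essentially the same call, plus an optional per-file pr_fmt() prefix, so the conversion is behavior-preserving. A runnable user-space approximation:

#include <stdio.h>

#define KERN_INFO "<6>"			/* kernel log-level prefix */
#define printk(fmt, ...) printf(fmt, ##__VA_ARGS__)
#ifndef pr_fmt
#define pr_fmt(fmt) fmt			/* files may redefine this to add a prefix */
#endif
#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("libfc: host%u ready\n", 3u);	/* prints "<6>libfc: host3 ready" */
	return 0;
}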
/*
* FC-4 Providers.
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
struct fc_frame *fp)
{
- unsigned long delay = FC_DEF_E_D_TOV;
+ unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
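
The fix above matters because FC_DEF_E_D_TOV is a millisecond value (2000 ms in libfc) while timer delays are in jiffies, so the raw value made the retry delay depend on HZ. A runnable sketch of the arithmetic, with an example tick rate assumed:

#include <stdio.h>

#define HZ 250				/* example tick rate */
#define FC_DEF_E_D_TOV 2000UL		/* milliseconds */

static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;	/* round up, as the kernel does */
}

int main(void)
{
	/* Bug: 2000 passed as jiffies is 8 s at HZ=250, not the 2 s meant. */
	printf("raw value as jiffies: %lu ms of delay\n",
	       FC_DEF_E_D_TOV * 1000 / HZ);
	printf("converted correctly : %lu jiffies (2 s)\n",
	       msecs_to_jiffies(FC_DEF_E_D_TOV));
	return 0;
}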
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
- MAILBOX_t *mb;
+ MAILBOX_t *mbx;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy, hc_copy;
psli = &phba->sli;
- mb = &pmbox->u.mb;
+ mbx = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
return MBX_BUSY;
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
- (mb->mbxCommand != MBX_KILL_BOARD)) {
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
- if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
}
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
- mb->mbxOwner = OWN_CHIP;
+ mbx->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= (uint8_t *)phba->mbox_ext
- (uint8_t *)phba->mbox;
}
pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
} else {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
pmbox->context2, pmbox->in_ext_byte_len);
}
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((uint32_t *)mb);
+ ldata = *((uint32_t *)mbx);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
}
} else {
/* First copy command data */
- lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- status = mb->mbxStatus;
+ status = mbx->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.504.01.00-rc1"
-#define MEGASAS_RELDATE "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "06.506.00.00-rc1"
+#define MEGASAS_RELDATE "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013"
/*
* Device IDs
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v06.504.01.00-rc1
+ * Version : v06.506.00.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+ io_request->EEDPBlockSize = scp->device->sector_size;
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
- io_request->DevHandle =
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
-#define MEGASAS_EEDPBLOCKSIZE 512
/*
* Raid context flags
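
The megaraid_sas change above drops the hard-coded 512-byte MEGASAS_EEDPBLOCKSIZE and takes the protection-interval size from the device itself, which is what allows DIF on 4K-sector drives. A trivial sketch of the idea, with a stand-in struct:

#include <stdio.h>

struct scsi_dev { unsigned int sector_size; };	/* stand-in for scsi_device */

/* The DIF protection interval must match the logical sector size. */
static unsigned int eedp_block_size(const struct scsi_dev *sdev)
{
	return sdev->sector_size;
}

int main(void)
{
	struct scsi_dev legacy = { 512 }, advanced = { 4096 };

	printf("%u %u\n", eedp_block_size(&legacy), eedp_block_size(&advanced));
	return 0;
}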
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25LB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ break;
default:
break;
}
"Intel(R) Integrated RAID Module RMS25KB080"
#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
"Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB080"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
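
The mpt2sas hunks extend a table of Intel PCI subsystem device IDs and the switch that prints the matching branding string. The shape of that mapping, as a runnable sketch using the IDs from the hunk:

#include <stdio.h>

#define RMS25LB040_SSDID 0x351A
#define RMS25LB080_SSDID 0x351B

static const char *branding(unsigned short ssdid)
{
	switch (ssdid) {
	case RMS25LB040_SSDID:
		return "Intel(R) Integrated RAID Module RMS25LB040";
	case RMS25LB080_SSDID:
		return "Intel(R) Integrated RAID Module RMS25LB080";
	default:
		return NULL;		/* unknown boards print nothing */
	}
}

int main(void)
{
	printf("%s\n", branding(0x351A));
	return 0;
}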
struct mvs_task_exec_info *tei)
{
int elem, rc, i;
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct domain_device *dev = task->dev;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct scatterlist *sg_req, *sg_resp;
u32 req_len, resp_len, tag = tei->tag;
void *buf_tmp;
slot->tx = mvi->tx_prod;
mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
TXQ_MODE_I | tag |
- (sas_port->phy_mask << TXQ_PHY_SHIFT));
+ (MVS_PHY_ID << TXQ_PHY_SHIFT));
hdr->flags |= flags;
hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+ (MVS_PHY_ID << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
(__mc) != 0 ; \
(++__lseq), (__mc) >>= 1)
+#define MVS_PHY_ID (1U << sas_phy->id)
#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
#define UNASSOC_D2H_FIS(id) \
((void *) mvi->rx_fis + 0x100 * id)
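
Note that MVS_PHY_ID expands to (1U << sas_phy->id), so it relies on a local named sas_phy being in scope at every use site, which is why both task-prep functions above now look the phy up; the effect is that the frame is queued on the one phy the device is attached to instead of the port-wide phy_mask. A small demo of such a context-dependent macro:

#include <stdio.h>

struct asd_phy { unsigned int id; };

/* Like MVS_PHY_ID: expands against whatever 'sas_phy' is in scope. */
#define PHY_ID_BIT (1U << sas_phy->id)

int main(void)
{
	struct asd_phy phy2 = { 2 };
	struct asd_phy *sas_phy = &phy2;	/* must exist at the use site */

	printf("tx phy mask: 0x%x\n", PHY_ID_BIT);	/* 0x4 */
	return 0;
}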
odi->osdname_len = get_attrs[a].len;
/* Avoid NULL for memcmp optimization 0-length is good enough */
odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+ if (!odi->osdname) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (odi->osdname_len)
memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
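
The osd hunk adds the allocation-failure check that was missing after kzalloc(). A user-space analog of the same pattern, keeping the "+1 for the NUL so 0-length still memcmp()s" sizing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_osdname(const char *src, size_t len, int *err)
{
	char *name = calloc(1, len + 1);	/* +1 keeps it NUL-terminated */

	if (!name) {
		*err = -1;			/* analog of ret = -ENOMEM */
		return NULL;
	}
	if (len)
		memcpy(name, src, len);
	*err = 0;
	return name;
}

int main(void)
{
	int err;
	char *name = dup_osdname("osd0", 4, &err);

	if (!err)
		printf("OSD_NAME [%s]\n", name);
	free(name);
	return 0;
}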
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
pci_free_consistent(pm8001_ha->pdev,
- pm8001_ha->memoryMap.region[i].element_size,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
pm8001_ha->memoryMap.region[i].phys_addr);
}
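
The pm8001 fix above works because a DMA region must be freed with the same size it was allocated with; the regions were allocated with total_len plus alignment slack, so freeing element_size was a mismatch. A user-space analog that makes the invariant explicit (field names taken from the hunk):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	size_t element_size;	/* size of one element */
	size_t total_len;	/* size actually allocated ... */
	size_t alignment;	/* ... plus this alignment slack */
	void *virt_ptr;
	size_t alloc_size;	/* what a sized free API must be given back */
};

static void region_alloc(struct region *r)
{
	r->alloc_size = r->total_len + r->alignment;
	r->virt_ptr = malloc(r->alloc_size);
}

static void region_free(struct region *r, size_t size)
{
	/* pci_free_consistent() must see the original allocation size. */
	assert(size == r->alloc_size);
	free(r->virt_ptr);
}

int main(void)
{
	struct region r = { .element_size = 64, .total_len = 4096,
			    .alignment = 16 };

	region_alloc(&r);
	region_free(&r, r.total_len + r.alignment);	/* the fixed call */
	puts("freed with matching size");
	return 0;
}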
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval = QLA_FUNCTION_FAILED;
- uint16_t temp, frac;
+ uint16_t temp = 0;
- if (!vha->hw->flags.thermal_supported)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (!vha->hw->thermal_support) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Thermal not supported by this card.\n");
+ goto done;
+ }
- temp = frac = 0;
- if (qla2x00_reset_active(vha))
- ql_log(ql_log_warn, vha, 0x707b,
- "ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
- if (rval != QLA_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (qla2x00_reset_active(vha)) {
+ ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+ goto done;
+ }
+
+ if (vha->hw->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+ goto done;
+ }
+
+ if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- mempool_free(sp, vha->hw->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
}
int
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_unmap_sg;
}
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_free_fcport;
}
done:
return rval;
}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ int wait, int wait2)
+{
+ int ret = 0;
+ int rval = 0;
+ uint16_t new_config[4];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ goto done_reset_internal;
+
+ memset(new_config, 0 , sizeof(new_config));
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
+ new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+
+ ha->notify_dcbx_comp = wait;
+ ha->notify_lb_portup_comp = wait2;
+
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "DCBX completion not received.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "DCBX completion received.\n");
+
+ if (wait2 &&
+ !wait_for_completion_timeout(&ha->lb_portup_comp,
+ (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x70c5,
+ "Port up completion not received.\n");
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x70c6,
+ "Port up completion received.\n");
+
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ }
+done_reset_internal:
+ return rval;
+}
+
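
The new helper arms the notify flags, pushes the port config, then waits on up to two completions (DCBX, then loopback port-up) with HZ-scaled timeouts, clearing the flags on any failure. A user-space analog of wait_for_completion_timeout(), which like the kernel primitive reports 0 on timeout (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* Returns nonzero if the event arrived, 0 on timeout. */
static int wait_timeout(struct completion *c, int seconds)
{
	struct timespec deadline;
	int arrived;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += seconds;

	pthread_mutex_lock(&c->lock);
	while (!c->done) {
		if (pthread_cond_timedwait(&c->cond, &c->lock, &deadline))
			break;			/* ETIMEDOUT */
	}
	arrived = c->done;
	pthread_mutex_unlock(&c->lock);
	return arrived;
}

int main(void)
{
	struct completion dcbx = { PTHREAD_MUTEX_INITIALIZER,
				   PTHREAD_COND_INITIALIZER, 0 };

	/* Nothing completes it, so this times out, like the
	 * DCBX_COMP_TIMEOUT error path in the hunk above. */
	if (!wait_timeout(&dcbx, 1))
		puts("DCBX completion not received.");
	return 0;
}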
/*
* Set the port configuration to enable the internal or external loopback
* depending on the loopback mode.
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+ if (!wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
ql_dbg(ql_dbg_user, vha, 0x7022,
- "State change notification not received.\n");
+ "DCBX completion not received.\n");
+ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+ /*
+ * If the reset of the loopback mode doesn't work take a FCoE
+ * dump and reset the chip.
+ */
+ if (ret) {
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
rval = -EINVAL;
} else {
if (ha->flags.idc_compl_status) {
ha->flags.idc_compl_status = 0;
} else
ql_dbg(ql_dbg_user, vha, 0x7023,
- "State change received.\n");
+ "DCBX completion received.\n");
}
ha->notify_dcbx_comp = 0;
return rval;
}
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
- int wait)
-{
- int ret = 0;
- int rval = 0;
- uint16_t new_config[4];
- struct qla_hw_data *ha = vha->hw;
-
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
- goto done_reset_internal;
-
- memset(new_config, 0 , sizeof(new_config));
- if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_INTERNAL_LOOPBACK ||
- (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_EXTERNAL_LOOPBACK) {
- new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
- ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
- (new_config[0] & INTERNAL_LOOPBACK_MASK));
- memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
- ha->notify_dcbx_comp = wait;
- ret = qla81xx_set_port_config(vha, new_config);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7025,
- "Set port config failed.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- }
-
- /* Wait for DCBX complete event */
- if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
- (20 * HZ))) {
- ql_dbg(ql_dbg_user, vha, 0x7026,
- "State change notification not received.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- } else
- ql_dbg(ql_dbg_user, vha, 0x7027,
- "State change received.\n");
-
- ha->notify_dcbx_comp = 0;
- }
-done_reset_internal:
- return rval;
-}
-
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
+
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
goto done_free_dma_rsp;
}
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+ ql_dbg(ql_dbg_user, vha, 0x70c4,
+ "Loopback operation already in "
+ "progress.\n");
+ rval = -EAGAIN;
+ goto done_free_dma_rsp;
+ }
+
ql_dbg(ql_dbg_user, vha, 0x70c0,
"elreq.options=%04x\n", elreq.options);
config, new_config, elreq.options);
else
rval = qla81xx_reset_loopback_mode(vha,
- config, 1);
+ config, 1, 0);
else
rval = qla81xx_set_loopback_mode(vha, config,
new_config, elreq.options);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
- if (new_config[0]) {
- /* Revert back to original port config
- * Also clear internal loopback
- */
- qla81xx_reset_loopback_mode(vha,
- new_config, 0);
- }
-
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
ql_log(ql_log_warn, vha, 0x7029,
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
- if (qla81xx_restart_mpi_firmware(vha) !=
- QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x702a,
- "MPI reset failed.\n");
+ if (IS_QLA81XX(ha)) {
+ if (qla81xx_restart_mpi_firmware(vha) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
+ }
}
rval = -EIO;
goto done_free_dma_rsp;
}
+
+ if (new_config[0]) {
+ int ret;
+
+ /* Revert back to original port config
+ * Also clear internal loopback
+ */
+ ret = qla81xx_reset_loopback_mode(vha,
+ new_config, 0, 1);
+ if (ret) {
+ /*
+ * If the reset of the loopback mode
+ * doesn't work take FCoE dump and then
+ * reset the chip.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+
+ }
+
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
ql_dbg(ql_dbg_user, vha, 0x702b,
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
return 0;
}
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x114f | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
+ * | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe06f | |
- * | Target Mode Management | 0xf071 | |
+ * | Target Mode | 0xe070 | |
+ * | Target Mode Management | 0xf072 | |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_nx.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
+#define QLA2XXX_MANUFACTURER "QLogic Corporation"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS 1024
+#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
uint32_t req_q_out;
uint32_t rsp_q_in;
uint32_t rsp_q_out;
+ uint32_t atio_q_in;
+ uint32_t atio_q_out;
};
typedef union {
&(reg)->u.isp2100.mailbox5 : \
&(reg)->u.isp2300.rsp_q_out)
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
#define MAILBOX_REG(ha, reg, num) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
(num < 8 ? \
#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
-#define MBC_DATA_RATE 0x5d /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */
+#define MBC_DATA_RATE 0x5d /* Data Rate */
#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */
/* Initialization Procedure */
#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */
#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
#define MBC_PORT_RESET 0x120 /* Port Reset */
#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
+#define RNID_TYPE_ASIC_TEMP 0xC
+
/*
* Firmware state codes from get firmware state mailbox command
*/
uint8_t scan_state;
} fc_port_t;
-#define QLA_FCPORT_SCAN_NONE 0
-#define QLA_FCPORT_SCAN_FOUND 1
-
/*
* Fibre channel port/lun states.
*/
uint16_t qos;
uint16_t vp_idx;
struct rsp_que *rsp;
- srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+ srb_t **outstanding_cmds;
uint32_t current_outstanding_cmd;
+ uint16_t num_outstanding_cmds;
+#define MAX_Q_DEPTH 32
int max_q_depth;
};
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
+ uint32_t __iomem *atio_q_in;
+ uint32_t __iomem *atio_q_out;
void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt *qla_tgt;
- struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
uint32_t nic_core_hung:1;
uint32_t quiesce_owner:1;
- uint32_t thermal_supported:1;
uint32_t nic_core_reset_hdlr_active:1;
uint32_t nic_core_reset_owner:1;
uint32_t isp82xx_no_md_cap:1;
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
/* HBA serial number */
uint8_t serial0;
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
+ struct completion lb_portup_comp; /* Used to wait for link up during
+ * loopback */
+#define DCBX_COMP_TIMEOUT 20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
int notify_dcbx_comp;
+ int notify_lb_portup_comp;
struct mutex selflogin_lock;
/* Basic firmware related information. */
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
uint16_t fw_xcb_count;
+ uint16_t fw_iocb_count;
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
struct work_struct idc_state_handler;
struct work_struct nic_core_unrecoverable;
+#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ)
+ unsigned long host_last_rampdown_time;
+ unsigned long host_last_rampup_time;
+ int cfg_lun_q_depth;
+
struct qlt_hw_data tgt;
+ uint16_t thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
};
/*
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
+#define HOST_RAMP_UP_QUEUE_DEPTH 23
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
#define NVRAM_DELAY() udelay(10)
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
/*
* Flash support definitions
*/
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
uint32_t prio_request_q_address[2];
uint16_t msix;
- uint8_t reserved_2[6];
+ uint16_t msix_atio;
+ uint8_t reserved_2[4];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
#define FLT_REG_FCOE_FW 0xA4
-#define FLT_REG_FCOE_VPD_0 0xA9
#define FLT_REG_FCOE_NVRAM_0 0xAA
-#define FLT_REG_FCOE_VPD_1 0xAB
#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
extern void qla84xx_put_chip(struct scsi_qla_host *);
extern void qla83xx_reset_ownership(scsi_qla_host_t *);
extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
/*
* Global Data in qla_os.c source file.
*/
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
extern int
qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
+extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
extern int
qla2x00_lip_reset(scsi_qla_host_t *);
extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
- strcpy(eiter->a.manufacturer, "QLogic Corporation");
- alen = strlen(eiter->a.manufacturer);
+ alen = strlen(QLA2XXX_MANUFACTURER);
+ strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
- alen = strlen(eiter->a.os_dev_name);
+ alen = strlen(QLA2XXX_DRIVER_NAME);
+ strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
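
The FDMI hunks measure the string first and pad its length to a 4-byte boundary, with an already-aligned string still gaining 4 bytes (room for the NUL plus padding). The arithmetic, worked through:

#include <stdio.h>
#include <string.h>

/* The padding rule from the hunk: round the string length up to the next
 * multiple of four, always adding at least one byte. */
static size_t fdmi_attr_len(const char *s)
{
	size_t alen = strlen(s);

	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
	return alen;
}

int main(void)
{
	printf("%zu\n", fdmi_attr_len("qla2xxx"));		/* 7  -> 8  */
	printf("%zu\n", fdmi_attr_len("QLogic Corporation"));	/* 18 -> 20 */
	printf("%zu\n", fdmi_attr_len("abcd"));			/* 4  -> 8  */
	return 0;
}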
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- mempool_free(sp, vha->hw->srb_mempool);
-
- QLA_VHA_MARK_NOT_BUSY(vha);
+ qla2x00_rel_sp(vha, sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->flags.thermal_supported = 1;
+ ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
return rval;
}
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+ /* Don't try to reallocate the array */
+ if (req->outstanding_cmds)
+ return QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+ (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+ else {
+ if (ha->fw_xcb_count <= ha->fw_iocb_count)
+ req->num_outstanding_cmds = ha->fw_xcb_count;
+ else
+ req->num_outstanding_cmds = ha->fw_iocb_count;
+ }
+
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ /*
+ * Try to allocate a minimal size just so we can get through
+ * initialization.
+ */
+ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ ql_log(ql_log_fatal, NULL, 0x0126,
+ "Failed to allocate memory for "
+ "outstanding_cmds for req_que %p.\n", req);
+ req->num_outstanding_cmds = 0;
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ return QLA_SUCCESS;
+}
+
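
qla2x00_alloc_outstanding_cmds() sizes the outstanding-commands array from the firmware's reported resources and, if that allocation fails, retries at a minimum size so initialization can still get through. A user-space analog of the fallback:

#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_OUTSTANDING_COMMANDS 1024
#define MIN_OUTSTANDING_COMMANDS 128

static void **alloc_cmds(unsigned int *count, unsigned int fw_limit)
{
	void **cmds;

	*count = fw_limit ? fw_limit : DEFAULT_OUTSTANDING_COMMANDS;
	cmds = calloc(*count, sizeof(*cmds));
	if (!cmds) {
		/* Fall back to a minimal array, as the driver does. */
		*count = MIN_OUTSTANDING_COMMANDS;
		cmds = calloc(*count, sizeof(*cmds));
		if (!cmds)
			*count = 0;	/* fatal: report failure to caller */
	}
	return cmds;
}

int main(void)
{
	unsigned int n;
	void **cmds = alloc_cmds(&n, 2048);

	printf("allocated %u command slots\n", n);
	free(cmds);
	return 0;
}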
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @ha: HA context
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha, NULL,
- &ha->fw_xcb_count, NULL, NULL,
+ &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
&ha->max_npiv_vports, NULL);
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha,
+ vha->req);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
if (!fw_major_version && ql2xallocfwdump
&& !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
- qlt_24xx_config_rings(vha, reg);
+ qlt_24xx_config_rings(vha);
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
} else {
ql_log(ql_log_warn, vha, 0x2009,
"Unable to get host loop ID.\n");
+ if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+ ql_log(ql_log_warn, vha, 0x1151,
+ "Doing link init.\n");
+ if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+ return rval;
+ }
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport;
+ fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- break;
+ return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /* Add new ports to existing port list */
- list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
+ /*
+ * Logout all previous fabric devices marked lost, except
+ * FCP2 devices.
+ */
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- /* Logout lost/gone fabric devices (non-FCP2) */
- if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
+ fcport->loop_id = FC_NO_LOOP_ID;
}
- continue;
}
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
- /* Login fabric devices that need a login */
- if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
- atomic_read(&vha->loop_down_timer) == 0) {
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- continue;
- }
+ }
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
+ /*
+ * Scan through our port list and login entries that need to be
+ * logged in.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
&n