diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -24,6 +24,15 @@ ifneq ($(GCOV),) EXTRA_CFLAGS += -O0 endif +EXTRA_CFLAGS += -DENABLE_DISABLE_PORT \ + -DCONFIG_LPFC_DEBUG_FS \ + -DENABLE_BG \ + -DENABLE_BG_SELECT_MODE_OF_OP \ + -DENABLE_BG_DBG \ + -DENABLE_BG_DBG_DIF_ERR_INJECT \ + -DENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR \ + -DENABLE_BG_DBG_TRACE_CMD + obj-$(CONFIG_SCSI_LPFC) := lpfc.o lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -29,8 +29,15 @@ struct lpfc_sli2_slim; #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact the NameServer before giving up. */ #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ +#ifdef ENABLE_BG +#define LPFC_DEFAULT_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ +#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ +#else #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ +#endif #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. 
*/ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -411,6 +418,9 @@ struct lpfc_hba { #define LPFC_SLI3_HBQ_ENABLED 0x02 #define LPFC_SLI3_NPIV_ENABLED 0x04 #define LPFC_SLI3_VPORT_TEARDOWN 0x08 +#ifdef ENABLE_BG +#define LPFC_SLI3_BG_ENABLED 0x10 +#endif uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -474,12 +484,18 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; uint32_t cfg_sg_seg_cnt; +#ifdef ENABLE_BG + uint32_t cfg_prot_sg_seg_cnt; +#endif uint32_t cfg_sg_dma_buf_size; uint64_t cfg_soft_wwnn; uint64_t cfg_soft_wwpn; uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; +#ifdef ENABLE_BG + uint32_t cfg_enable_bg; +#endif lpfc_vpd_t vpd; /* vital product data */ @@ -543,6 +559,12 @@ struct lpfc_hba { uint64_t fc4InputRequests; uint64_t fc4OutputRequests; uint64_t fc4ControlRequests; +#ifdef ENABLE_BG + uint64_t bg_guard_err_cnt; + uint64_t bg_apptag_err_cnt; + uint64_t bg_reftag_err_cnt; +#endif + struct lpfc_sysfs_mbox sysfs_mbox; @@ -590,6 +612,21 @@ struct lpfc_hba { struct dentry *debug_hbqinfo; struct dentry *debug_dumpHostSlim; struct dentry *debug_dumpHBASlim; +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + struct dentry *debug_dumpData; /* BlockGuard BPL*/ + struct dentry *debug_dumpDif; /* BlockGuard BPL*/ +#endif +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT + struct dentry *debug_writeGuard; /* inject write guard_tag errors*/ + struct dentry *debug_writeApp; /* inject write app_tag errors*/ + struct dentry *debug_writeRef; /* inject write ref_tag errors*/ + struct dentry *debug_readApp; /* inject read app_tag errors*/ + struct dentry *debug_readRef; /* inject read ref_tag errors*/ +#endif +#endif +#endif struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; atomic_t slow_ring_trc_cnt; diff --git a/drivers/scsi/lpfc/lpfc_attr.c 
b/drivers/scsi/lpfc/lpfc_attr.c --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -23,6 +23,10 @@ #include #include #include +#ifdef ENABLE_STOP_RESUME +#include +#include +#endif #include #include @@ -49,6 +53,74 @@ #define LPFC_LINK_SPEED_BITMAP 0x00000117 #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" +#ifdef ENABLE_STOP_RESUME +extern wait_queue_head_t jtag_wait; +extern int jtag_continue_test; + + +static struct proc_dir_entry *proc_scsi; +static ssize_t proc_scsi_write(struct file *file, const char __user *buf, + size_t length, loff_t *ppos) +{ + printk(KERN_ERR "BLKGRD dbg: About to wake-up probe\n"); + jtag_continue_test = 1; + wake_up(&jtag_wait); + return 1; +} + +static int proc_scsi_show(struct seq_file *s, void *p) +{ + seq_printf(s, "Write 1 to this file when JTAG is ready\n"); + return 0; +} + +static int proc_scsi_open(struct inode *inode, struct file *file) +{ + /* + * We don't really needs this for the write case but it doesn't + * harm either. 
+ */ + return single_open(file, proc_scsi_show, NULL); +} + +static const struct file_operations proc_scsi_operations = { + .open = proc_scsi_open, + .read = seq_read, + .write = proc_scsi_write, + .llseek = seq_lseek, + .release = single_release, +}; + +int +lpfc_create_proc(void) +{ + struct proc_dir_entry *pde; + + proc_scsi = proc_mkdir("lpfc", NULL); + if (!proc_scsi) + goto err1; + + pde = create_proc_entry("lpfc/jtag_ready", 0, NULL); + if (!pde) + goto err2; + pde->proc_fops = &proc_scsi_operations; + + return 0; + +err2: + remove_proc_entry("lpfc", NULL); +err1: + return -ENOMEM; +} + +void +lpfc_remove_proc(void) +{ + remove_proc_entry("lpfc/jtag_ready", NULL); + remove_proc_entry("lpfc", NULL); +} +#endif + static void lpfc_jedec_to_ascii(int incr, char hdw[]) { @@ -71,6 +143,62 @@ lpfc_drvr_version_show(struct device *de { return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); } + +#ifdef ENABLE_BG +static ssize_t +lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->cfg_enable_bg) { + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n"); + } else { + return snprintf(buf, PAGE_SIZE, + "BlockGuard Not Supported\n"); + } + } else { + return snprintf(buf, PAGE_SIZE, + "BlockGuard Disabled\n"); + } +} + +static ssize_t +lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%llu\n",phba->bg_guard_err_cnt); +} + +static ssize_t +lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct 
lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%llu\n",phba->bg_apptag_err_cnt); +} + +static ssize_t +lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%llu\n",phba->bg_reftag_err_cnt); +} +#endif static ssize_t lpfc_info_show(struct device *dev, struct device_attribute *attr, @@ -928,6 +1056,12 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) +#ifdef ENABLE_BG +static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); +static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); +static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); +static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); +#endif static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); @@ -1647,6 +1781,64 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, " */ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); +#ifdef ENABLE_BG +/* +# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) +# 0 = BlockGuard disabled (default) +# 1 = BlockGuard enabled +# Value range is [0,1]. Default value is 0. +*/ +LPFC_ATTR_R(enable_bg, 1, 0, 1, "Enable BlockGuard Support"); +#endif + +#ifdef ENABLE_DISABLE_PORT +/* +# lpfc_enable_port: Bit mask of lpfc ports that are turned on +# A port number is a seq number that starts at 1 for +# the first port the .probe function is called for, incrementing +# for every PCI function that is probed. 
+# +# The default is 0xffff, which is to allow all PCI functions/ports +# +*/ +unsigned int lpfc_enable_port = 0xffffffff; +module_param(lpfc_enable_port, uint, 0); +MODULE_PARM_DESC(lpfc_enable_port, "enable port bit-mask"); +#endif + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_SELECT_MODE_OF_OP +/* +# lpfc_prot_mask: i +# - Bit mask of host protection capabilities used to register with the +# SCSI mid-layer +# - Only meaningful if BG is turned on (lpfc_enable_bg=1). +# - Allows you to ultimately specify which profiles to use +# - Default will result in registering capabilities for all profiles. +# +*/ +unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION; + +module_param(lpfc_prot_mask, uint, 0); +MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); + +/* +# lpfc_prot_guard: i +# - Bit mask of protection guard types to register with the SCSI mid-layer +# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC +# - Allows you to ultimately specify which profiles to use +# - Default will result in registering capabilities for all guard types +# +*/ +unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; +module_param(lpfc_prot_guard, byte, 0); +MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); + +#endif +#endif + /* * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count * This value can be set to values between 64 and 256. 
The default value is @@ -1656,7 +1848,19 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); +#ifdef ENABLE_BG +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, + LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); +#endif + struct device_attribute *lpfc_hba_attrs[] = { +#ifdef ENABLE_BG + &dev_attr_bg_info, + &dev_attr_bg_guard_err, + &dev_attr_bg_apptag_err, + &dev_attr_bg_reftag_err, +#endif &dev_attr_info, &dev_attr_serialnum, &dev_attr_modeldesc, @@ -1703,12 +1907,18 @@ struct device_attribute *lpfc_hba_attrs[ &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, +#ifdef ENABLE_BG + &dev_attr_lpfc_enable_bg, +#endif &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, &dev_attr_lpfc_sg_seg_cnt, +#ifdef ENABLE_BG + &dev_attr_lpfc_prot_sg_seg_cnt, +#endif NULL, }; @@ -2618,10 +2828,16 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); +#ifdef ENABLE_BG + lpfc_enable_bg_init(phba, lpfc_enable_bg); +#endif phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); +#ifdef ENABLE_BG + lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); +#endif /* Also reinitialize the host templates with new values. 
*/ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; @@ -2633,6 +2849,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); +#ifdef ENABLE_BG + phba->cfg_sg_dma_buf_size += + phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); +#endif lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); return; } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,27 @@ #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +extern char * _dump_buf_data; +extern unsigned long _dump_buf_data_order; +extern char * _dump_buf_dif; +extern unsigned long _dump_buf_dif_order; +extern spinlock_t _dump_buf_lock; +extern int _dump_buf_done; +#endif + +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT +extern unsigned int injerr_wgrd_cnt; +extern unsigned int injerr_wapp_cnt; +extern unsigned int injerr_wref_cnt; +extern unsigned int injerr_rapp_cnt; +extern unsigned int injerr_rref_cnt; +#endif +#endif +#endif #ifdef CONFIG_LPFC_DEBUG_FS /* debugfs interface @@ -719,6 +741,172 @@ out: return rc; } +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +static int +lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + int rc = -ENOMEM; + + if (!_dump_buf_data) + return -EBUSY; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundry */ + printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", + __FUNCTION__,_dump_buf_data); + debug->buffer = _dump_buf_data; + if (!debug->buffer) { + kfree(debug); + goto out; + 
} + + debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static int +lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + int rc = -ENOMEM; + + if (!_dump_buf_dif) + return -EBUSY; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundry */ + printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", + __FUNCTION__,_dump_buf_dif,file->f_dentry->d_name.name); + debug->buffer = _dump_buf_dif; + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + /* + * The Data/DIF buffers only save one failing IO + * The write op is used as a reset mechanism after an IO has + * already been saved to the next one can be saved + */ + spin_lock_irq(&_dump_buf_lock); + + memset((void *)_dump_buf_data, 0, + ((1 << PAGE_SHIFT) << _dump_buf_data_order)); + memset((void *)_dump_buf_dif, 0, + ((1 << PAGE_SHIFT) << _dump_buf_dif_order)); + + _dump_buf_done = 0; + + spin_unlock_irq(&_dump_buf_lock); + + return nbytes; +} +#endif + +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT +static int +lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct dentry *dent = file->f_dentry; + struct lpfc_hba *phba = file->private_data; + char cbuf[16]; + int cnt = 0; + + if (dent == phba->debug_writeGuard) { + cnt = snprintf(cbuf, 16, "%u\n",injerr_wgrd_cnt); + } else if (dent == phba->debug_writeApp) { + cnt = snprintf(cbuf, 16, "%u\n",injerr_wapp_cnt); + } else if (dent == phba->debug_writeRef) { + cnt = 
snprintf(cbuf, 16, "%u\n",injerr_wref_cnt); + } else if (dent == phba->debug_readApp) { + cnt = snprintf(cbuf, 16, "%u\n",injerr_rapp_cnt); + } else if (dent == phba->debug_readRef) { + cnt = snprintf(cbuf, 16, "%u\n",injerr_rref_cnt); + } else { + BUG(); + } + + return simple_read_from_buffer(buf, nbytes, ppos, + cbuf, cnt); +} + +static ssize_t +lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct dentry *dent = file->f_dentry; + struct lpfc_hba *phba = file->private_data; + char dstbuf[32]; + char *end; + uint32_t tmp; + int size; + + memset(dstbuf, 0, 32); + size = (nbytes < 31) ? nbytes : 31; + if(copy_from_user(dstbuf, buf, size)) + return -EFAULT; + + tmp = simple_strtoul(dstbuf, &end, 10); + + if (dent == phba->debug_writeGuard) { + injerr_wgrd_cnt = tmp; + } else if (dent == phba->debug_writeApp) { + injerr_wapp_cnt = tmp; + } else if (dent == phba->debug_writeRef) { + injerr_wref_cnt = tmp; + } else if (dent == phba->debug_readApp) { + injerr_rapp_cnt = tmp; + } else if (dent == phba->debug_readRef) { + injerr_rref_cnt = tmp; + } else { + BUG(); + } + + return nbytes; +} + +static int +lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file) +{ + return 0; +} +#endif +#endif +#endif + static int lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) { @@ -787,6 +975,24 @@ lpfc_debugfs_release(struct inode *inode return 0; } + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +static int +lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + debug->buffer = NULL; + kfree(debug); + + return 0; +} +#endif +#endif +#endif + #undef lpfc_debugfs_op_disc_trc static struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -831,6 +1037,44 @@ static struct file_operations lpfc_debug .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, 
}; + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +#undef lpfc_debugfs_op_dumpData +static struct file_operations lpfc_debugfs_op_dumpData = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dumpData_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_dumpDataDif_write, + .release = lpfc_debugfs_dumpDataDif_release, +}; + +#undef lpfc_debugfs_op_dumpDif +static struct file_operations lpfc_debugfs_op_dumpDif = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dumpDif_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_dumpDataDif_write, + .release = lpfc_debugfs_dumpDataDif_release, +}; +#endif + +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT +#undef lpfc_debugfs_op_dif_err +static struct file_operations lpfc_debugfs_op_dif_err = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dif_err_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_dif_err_read, + .write = lpfc_debugfs_dif_err_write, + .release = lpfc_debugfs_dif_err_release, +}; +#endif +#endif +#endif #undef lpfc_debugfs_op_slow_ring_trc static struct file_operations lpfc_debugfs_op_slow_ring_trc = { @@ -917,6 +1161,94 @@ lpfc_debugfs_initialize(struct lpfc_vpor "0409 Cannot create debugfs dumpHostSlim\n"); goto debug_failed; } + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + /* Setup dumpData */ + snprintf(name, sizeof(name), "dumpData"); + phba->debug_dumpData = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpData); + if (!phba->debug_dumpData) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs dumpData\n"); + goto debug_failed; + } + + /* Setup dumpDif */ + snprintf(name, sizeof(name), "dumpDif"); + phba->debug_dumpDif = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpDif); + if (!phba->debug_dumpDif) { + 
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs dumpDif\n"); + goto debug_failed; + } + +#endif +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT + /* Setup DIF Error Injections */ + snprintf(name, sizeof(name), "writeGuardInjErr"); + phba->debug_writeGuard = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeGuard) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs writeGuard\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "writeAppInjErr"); + phba->debug_writeApp = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeApp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs writeApp\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "writeRefInjErr"); + phba->debug_writeRef = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeRef) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs writeRef\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "readAppInjErr"); + phba->debug_readApp = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readApp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs readApp\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "readRefInjErr"); + phba->debug_readRef = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readRef) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs readRef\n"); + goto debug_failed; + } +#endif +#endif +#endif /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { @@ -1072,6 +1404,49 @@ 
lpfc_debugfs_terminate(struct lpfc_vport debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ phba->debug_dumpHostSlim = NULL; } +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + if (phba->debug_dumpData) { + debugfs_remove(phba->debug_dumpData); /* dumpData */ + phba->debug_dumpData = NULL; + } + + if (phba->debug_dumpDif) { + debugfs_remove(phba->debug_dumpDif); /* dumpDif */ + phba->debug_dumpDif = NULL; + } +#endif + +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT + if (phba->debug_writeGuard) { + debugfs_remove(phba->debug_writeGuard); /* dumpDif */ + phba->debug_writeGuard= NULL; + } + + if (phba->debug_writeApp) { + debugfs_remove(phba->debug_writeApp); /* dumpDif */ + phba->debug_writeApp= NULL; + } + + if (phba->debug_writeRef) { + debugfs_remove(phba->debug_writeRef); /* dumpDif */ + phba->debug_writeRef= NULL; + } + + if (phba->debug_readApp) { + debugfs_remove(phba->debug_readApp); /* dumpDif */ + phba->debug_readApp= NULL; + } + + if (phba->debug_readRef) { + debugfs_remove(phba->debug_readRef); /* dumpDif */ + phba->debug_readRef= NULL; + } +#endif +#endif +#endif + if (phba->slow_ring_trc) { kfree(phba->slow_ring_trc); phba->slow_ring_trc = NULL; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -17,6 +17,8 @@ * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ +#ifndef _LPFC_HW_H +#define _LPFC_HW_H #define FDMI_DID 0xfffffaU #define NameServer_DID 0xfffffcU @@ -1507,6 +1509,112 @@ typedef struct ULP_BDL { /* SLI-2 */ uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */ } ULP_BDL; +#ifdef ENABLE_BG +/* * * * * * * * * * * * * * * * * * * * + * + * B l o c k G u a r d D e f i n e s + * + * * * * * * * * * * * * * * * * * * * */ + +enum lpfc_protgrp_type { + LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */ + LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */ + LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */ + LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */ +}; + +/* PDE Descriptors */ +#define LPFC_PDE1_DESCRIPTOR 0x81 +#define LPFC_PDE2_DESCRIPTOR 0x82 +#define LPFC_PDE3_DESCRIPTOR 0x83 + +/* BlockGuard Profiles */ +enum lpfc_bg_prof_codes { + LPFC_PROF_INVALID, + LPFC_PROF_A1 = 128, /* Full Protection */ + LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */ + LPFC_PROF_A3, + LPFC_PROF_A4, + LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */ + LPFC_PROF_B2, + LPFC_PROF_B3, + LPFC_PROF_C1, /* Separate DIFs: C1~C3 */ + LPFC_PROF_C2, + LPFC_PROF_C3, + LPFC_PROF_D1, /* Full Protection */ + LPFC_PROF_D2, /* Partial Protection & Check Disabling */ + LPFC_PROF_D3, + LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */ + LPFC_PROF_E2, + LPFC_PROF_E3, + LPFC_PROF_E4, + LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */ + /* F1 Translation BDE */ + LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */ + LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */ + LPFC_PROF_ANT2, + LPFC_PROF_AST2 +}; + +/* BlockGuard error-control defines */ +#define BG_EC_STOP_ERR 0x00 +#define BG_EC_CONT_ERR 0x01 +#define BG_EC_IGN_UNINIT_STOP_ERR 0x10 +#define BG_EC_IGN_UNINIT_CONT_ERR 0x11 + +/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */ +#define PDE_DESC_TYPE_MASK 0xff000000 
+#define PDE_DESC_TYPE_SHIFT 24 +#define PDE_BG_PROFILE_MASK 0x00ff0000 +#define PDE_BG_PROFILE_SHIFT 16 +#define PDE_BLOCK_LEN_MASK 0x0000fffc +#define PDE_BLOCK_LEN_SHIFT 2 +#define PDE_ERR_CTRL_MASK 0x00000003 +#define PDE_ERR_CTRL_SHIFT 0 +/* PDE word 1 bit masks and shifts */ +#define PDE_APPTAG_MASK_MASK 0xffff0000 +#define PDE_APPTAG_MASK_SHIFT 16 +#define PDE_APPTAG_VAL_MASK 0x0000ffff +#define PDE_APPTAG_VAL_SHIFT 0 +struct lpfc_pde { + uint32_t parms; /* bitfields of descriptor, prof, len, and ec */ + uint32_t apptag; /* bitfields of app tag maskand app tag value */ + uint32_t reftag; /* reference tag occupying all 32 bits */ +}; + +/* inline function to set fields in parms of PDE */ +static inline void +lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec) +{ + uint32_t *wp = &p->parms; + + /* spec indicates that adapter appends two 0's to length field */ + len = len >> 2; + + *wp &= 0; + *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK); + *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK); + *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK); + *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK); + *wp = le32_to_cpu(*wp); +} + +/* inline function to set apptag and reftag fields of PDE */ +static inline void +lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval, + u32 reftag) +{ + uint32_t *wp = &p->apptag; + *wp &= 0; + *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK); + *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK); + *wp = le32_to_cpu(*wp); + wp = &p->reftag; + *wp = le32_to_cpu(reftag); +} + +#endif /* ENABLE_BG */ + /* Structure for MB Command LOAD_SM and DOWN_LOAD */ typedef struct { @@ -2490,7 +2598,12 @@ typedef struct { uint32_t hbainit[6]; #ifdef __BIG_ENDIAN_BITFIELD +#ifdef ENABLE_BG + uint32_t rsvd : 23; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ +#else uint32_t rsvd : 24; /* Reserved */ +#endif uint32_t 
cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t csah : 1; /* Configure Synchronous Abort Handling */ @@ -2508,10 +2621,20 @@ typedef struct { uint32_t csah : 1; /* Configure Synchronous Abort Handling */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ +#ifdef ENABLE_BG + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd : 23; /* Reserved */ +#else uint32_t rsvd : 24; /* Reserved */ #endif +#endif #ifdef __BIG_ENDIAN_BITFIELD +#ifdef ENABLE_BG + uint32_t rsvd2 : 23; /* Reserved */ + uint32_t gbg : 1; /* Grant BlockGuard */ +#else uint32_t rsvd2 : 24; /* Reserved */ +#endif uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gcrp : 1; /* Grant Command Ring Polling */ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ @@ -2529,7 +2652,12 @@ typedef struct { uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ uint32_t gcrp : 1; /* Grant Command Ring Polling */ uint32_t gmv : 1; /* Grant Max VPIs */ - uint32_t rsvd2 : 24; /* Reserved */ +#ifdef ENABLE_BG + uint32_t gbg : 1; /* Grant BlockGuard */ + uint32_t rsvd2 : 23; /* Reserved */ +#else + uint32_t rsvd2 : 24; /* Reserved */ +#endif #endif #ifdef __BIG_ENDIAN_BITFIELD @@ -3105,6 +3233,96 @@ struct que_xri64cx_ext_fields { struct lpfc_hbq_entry buff[5]; }; +#ifdef ENABLE_BG +struct sli3_bg_fields { + uint32_t filler[6]; /* word 8-13 in IOCB */ + uint32_t bghm; /* word 14 - BlockGuard High Water Mark */ +/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */ +#define BGS_BIDIR_BG_PROF_MASK 0xff000000 +#define BGS_BIDIR_BG_PROF_SHIFT 24 +#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000 +#define BGS_BIDIR_ERR_COND_SHIFT 16 +#define BGS_BG_PROFILE_MASK 0x0000ff00 +#define BGS_BG_PROFILE_SHIFT 8 +#define BGS_INVALID_PROF_MASK 0x00000020 +#define BGS_INVALID_PROF_SHIFT 5 +#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010 +#define BGS_UNINIT_DIF_BLOCK_SHIFT 4 +#define BGS_HI_WATER_MARK_PRESENT_MASK 
0x00000008 +#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3 +#define BGS_REFTAG_ERR_MASK 0x00000004 +#define BGS_REFTAG_ERR_SHIFT 2 +#define BGS_APPTAG_ERR_MASK 0x00000002 +#define BGS_APPTAG_ERR_SHIFT 1 +#define BGS_GUARD_ERR_MASK 0x00000001 +#define BGS_GUARD_ERR_SHIFT 0 + uint32_t bgstat; /* word 15 - BlockGuard Status */ +}; + +static inline uint32_t +lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >> + BGS_BIDIR_BG_PROF_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_bidir_err_cond(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >> + BGS_BIDIR_ERR_COND_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_bg_prof(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >> + BGS_BG_PROFILE_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_invalid_prof(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >> + BGS_INVALID_PROF_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_uninit_dif_block(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >> + BGS_UNINIT_DIF_BLOCK_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >> + BGS_HI_WATER_MARK_PRESENT_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_reftag_err(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >> + BGS_REFTAG_ERR_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_apptag_err(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >> + BGS_APPTAG_ERR_SHIFT); +} + +static inline uint32_t +lpfc_bgs_get_guard_err(uint32_t bgstat) +{ + return ((le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >> + BGS_GUARD_ERR_SHIFT); +} +#endif + typedef struct _IOCB { /* IOCB structure */ union { GENERIC_RSP grsp; /* Generic response */ @@ -3192,6 +3410,11 @@ typedef struct _IOCB { /* IOCB structure struct 
que_xri64cx_ext_fields que_xri64cx_ext_words; uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ + +#ifdef ENABLE_BG + /* words 8-15 for block guard */ + struct sli3_bg_fields sli3_bg; +#endif } unsli3; #define ulpCt_h ulpXS @@ -3292,3 +3515,5 @@ lpfc_error_lost_link(IOCB_t *iocbp) iocbp->un.ulpWord[4] == IOERR_LINK_DOWN || iocbp->un.ulpWord[4] == IOERR_SLI_DOWN)); } + +#endif /* _LPFC_HW_H */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -43,6 +43,35 @@ #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_SELECT_MODE_OF_OP +extern unsigned int lpfc_prot_mask; +extern unsigned char lpfc_prot_guard; +#endif +#endif + +#ifdef ENABLE_DISABLE_PORT +extern unsigned int lpfc_enable_port; +#endif +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +char *_dump_buf_data = NULL; +unsigned long _dump_buf_data_order = 0; +char * _dump_buf_dif = NULL; +unsigned long _dump_buf_dif_order = 0; +spinlock_t _dump_buf_lock; +#endif +#endif +#endif + +#ifdef ENABLE_STOP_RESUME +DECLARE_WAIT_QUEUE_HEAD(jtag_wait); +int jtag_continue_test = 0; +int lpfc_create_proc(void); +void lpfc_remove_proc(void); +#endif static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); @@ -1732,12 +1761,17 @@ lpfc_create_port(struct lpfc_hba *phba, shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; + /* * Set initial can_queue value since 0 is no longer supported and * scsi_add_host will fail. This will be adjusted later based on the * max xri value determined in hba setup. 
*/ +#ifdef ENABLE_BG_DBG_ONE_CMD + shost->can_queue = 1; +#else shost->can_queue = phba->cfg_hba_queue_depth - 10; +#endif if (dev != &phba->pcidev->dev) { shost->transportt = lpfc_vport_transport_template; vport->port_type = LPFC_NPIV_PORT; @@ -1942,6 +1976,14 @@ lpfc_disable_msix(struct lpfc_hba *phba) pci_disable_msix(phba->pcidev); } +#ifdef ENABLE_STOP_RESUME +/* + * Temporary code to help saturn fw team debug issues. + * The driver stalls right after CONFIG_PORT is issued, allowing + * the fw team to attach their JTAG debugger. Once they're ready + * the driver can be instructed to resume its work. + * FIXME: Remove all code under ENABLE_STOP_RESUME + */ static int __devinit lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { @@ -1956,6 +1998,26 @@ lpfc_pci_probe_one(struct pci_dev *pdev, int i, hbq_count; uint16_t iotag; int bars = pci_select_bars(pdev, IORESOURCE_MEM); +#ifdef ENABLE_DISABLE_PORT + static unsigned int probecnt = 1; + + if (lpfc_enable_port != 0xffffffff) { + if (!(lpfc_enable_port & probecnt)) { + printk(KERN_ERR "lpfc: skipping PCI func " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + pdev->vendor, pdev->device, + pdev->bus->number, + pdev->bus->primary, + pdev->bus->secondary, + pdev->devfn); + probecnt <<= 1; + goto out_disable_device; + } + + probecnt <<= 1; + } + +#endif if (pci_enable_device_mem(pdev)) goto out; @@ -2083,7 +2145,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, if (iocbq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d iocbs of " "expected %d count. Unloading driver.\n", - __func__, i, LPFC_IOCB_LIST_CNT); + __FUNCTION__, i, LPFC_IOCB_LIST_CNT); error = -ENOMEM; goto out_free_iocbq; } @@ -2093,7 +2155,438 @@ lpfc_pci_probe_one(struct pci_dev *pdev, kfree (iocbq_entry); printk(KERN_ERR "%s: failed to allocate IOTAG. 
" "Unloading driver.\n", - __func__); + __FUNCTION__); + error = -ENOMEM; + goto out_free_iocbq; + } + + spin_lock_irq(&phba->hbalock); + list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); + phba->total_iocbq_bufs++; + spin_unlock_irq(&phba->hbalock); + } + + /* Initialize HBA structure */ + phba->fc_edtov = FF_DEF_EDTOV; + phba->fc_ratov = FF_DEF_RATOV; + phba->fc_altov = FF_DEF_ALTOV; + phba->fc_arbtov = FF_DEF_ARBTOV; + + INIT_LIST_HEAD(&phba->work_list); + phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); + phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); + + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + + /* FIXME: worker thread used to star here */ + + /* Initialize the list of scsi buffers used by driver for scsi IO. */ + spin_lock_init(&phba->scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + + /* Initialize list of fabric iocbs */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + + vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); + if (!vport) + goto out_kthread_stop; + + shost = lpfc_shost_from_vport(vport); + phba->pport = vport; + lpfc_debugfs_initialize(vport); + + pci_set_drvdata(pdev, shost); + phba->intr_type = NONE; + + if (phba->cfg_use_msi == 2) { + error = lpfc_enable_msix(phba); + if (!error) + phba->intr_type = MSIX; + } + + /* Fallback to MSI if MSI-X initialization failed */ + if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { + retval = pci_enable_msi(phba->pcidev); + if (!retval) + phba->intr_type = MSI; + else + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0452 Enable MSI failed, continuing " + "with IRQ\n"); + } + + /* MSI-X is the only case the doesn't need to call request_irq */ + if (phba->intr_type != MSIX) { + retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (retval) { + lpfc_printf_log(phba, 
KERN_ERR, LOG_INIT, "0451 Enable " + "interrupt handler failed\n"); + error = retval; + goto out_disable_msi; + } else if (phba->intr_type != MSI) + phba->intr_type = INTx; + } + + phba->MBslimaddr = phba->slim_memmap_p; + phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; + phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; + phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; + phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + + if (lpfc_alloc_sysfs_attr(vport)) { + error = -ENOMEM; + goto out_free_irq; + } + + if (lpfc_sli_hba_setup(phba)) { + error = -ENODEV; + goto out_remove_device; + } + + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue. + */ +#ifdef ENABLE_BG_DBG_ONE_CMD + shost->can_queue = 1; +#else + shost->can_queue = phba->cfg_hba_queue_depth - 10; +#endif + +#ifdef ENABLE_BG + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { +#ifdef ENABLE_BG_SELECT_MODE_OF_OP + if (lpfc_prot_mask && lpfc_prot_guard) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "Registering BlockGuard with the SCSI " + "layer\n"); + + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } +#else + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "Registering BlockGuard with the SCSI layer\n"); + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIX_TYPE0_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); +#endif + } +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + if (!_dump_buf_data) { + int pagecnt = 10; + while (pagecnt) { + spin_lock_init(&_dump_buf_lock); + _dump_buf_data = + (char *)__get_free_pages(GFP_KERNEL,pagecnt); + if (_dump_buf_data) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_data at 0x%p\n", + (1 << pagecnt),_dump_buf_data); + _dump_buf_data_order = pagecnt; + memset((void *)_dump_buf_data, 0, ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else { 
+ --pagecnt; + } + + } + + if (!_dump_buf_data_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + + } else { + printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p\n" + ,_dump_buf_data); + } + + + if (!_dump_buf_dif) { + int pagecnt = 10; + while (pagecnt) { + _dump_buf_dif = + (char *)__get_free_pages(GFP_KERNEL,pagecnt); + if (_dump_buf_dif) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_dif at 0x%p\n", + (1 << pagecnt),_dump_buf_dif); + _dump_buf_dif_order = pagecnt; + memset((void *)_dump_buf_dif, 0, ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else { + --pagecnt; + } + + } + + if (!_dump_buf_dif_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + + } else { + printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n" + ,_dump_buf_dif); + } +#endif +#endif +#endif + + printk(KERN_ERR "BLKGRD dbg: Connect JTAG -- going to sleep\n"); + lpfc_create_proc(); + wait_event(jtag_wait, jtag_continue_test == 1); + printk(KERN_ERR "BLKGRD dbg: Woke-up - about to scan\n"); + + /* Startup the kernel thread for this host adapter. 
*/ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + goto out_free_iocbq; + } + lpfc_host_attrib_init(shost); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + spin_lock_irq(shost->host_lock); + lpfc_poll_start_timer(phba); + spin_unlock_irq(shost->host_lock); + } + + scsi_scan_host(shost); + + return 0; + +out_remove_device: + lpfc_free_sysfs_attr(vport); + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); +out_free_irq: + lpfc_stop_phba_timers(phba); + phba->pport->work_port_events = 0; + + if (phba->intr_type == MSIX) + lpfc_disable_msix(phba); + else + free_irq(phba->pcidev->irq, phba); + +out_disable_msi: + if (phba->intr_type == MSI) + pci_disable_msi(phba->pcidev); + destroy_port(vport); +out_kthread_stop: + kthread_stop(phba->worker_thread); +out_free_iocbq: + list_for_each_entry_safe(iocbq_entry, iocbq_next, + &phba->lpfc_iocb_list, list) { + kfree(iocbq_entry); + phba->total_iocbq_bufs--; + } + lpfc_mem_free(phba); +out_free_hbqslimp: + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, + phba->hbqslimp.phys); +out_free_slim: + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, + phba->slim2p_mapping); +out_iounmap: + iounmap(phba->ctrl_regs_memmap_p); +out_iounmap_slim: + iounmap(phba->slim_memmap_p); +out_idr_remove: + idr_remove(&lpfc_hba_index, phba->brd_no); +out_free_phba: + kfree(phba); +out_release_regions: + pci_release_selected_regions(pdev, bars); +out_disable_device: + pci_disable_device(pdev); +out: + pci_set_drvdata(pdev, NULL); + if (shost) + scsi_host_put(shost); + return error; +} +#else +static int __devinit +lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_vport *vport = NULL; + struct lpfc_hba *phba; + struct lpfc_sli *psli; + struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; + 
struct Scsi_Host *shost = NULL; + void *ptr; + unsigned long bar0map_len, bar2map_len; + int error = -ENODEV, retval; + int i, hbq_count; + uint16_t iotag; + int bars = pci_select_bars(pdev, IORESOURCE_MEM); +#ifdef ENABLE_DISABLE_PORT + static unsigned int probecnt = 1; + + if (lpfc_enable_port != 0xffffffff) { + if (!(lpfc_enable_port & probecnt)) { + printk(KERN_ERR "lpfc: skipping PCI func " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + pdev->vendor, pdev->device, + pdev->bus->number, + pdev->bus->primary, + pdev->bus->secondary, + pdev->devfn); + probecnt <<= 1; + goto out_disable_device; + } + + probecnt <<= 1; + } + +#endif + + if (pci_enable_device_mem(pdev)) + goto out; + if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) + goto out_disable_device; + + phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); + if (!phba) + goto out_release_regions; + + spin_lock_init(&phba->hbalock); + + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + + phba->pcidev = pdev; + + /* Assign an unused board number */ + if ((phba->brd_no = lpfc_get_instance()) < 0) + goto out_free_phba; + + INIT_LIST_HEAD(&phba->port_list); + /* + * Get all the module params for configuring this host and then + * establish the host. 
+ */ + lpfc_get_cfgparam(phba); + phba->max_vpi = LPFC_MAX_VPI; + + /* Initialize timers used by driver */ + init_timer(&phba->hb_tmofunc); + phba->hb_tmofunc.function = lpfc_hb_timeout; + phba->hb_tmofunc.data = (unsigned long)phba; + + psli = &phba->sli; + init_timer(&psli->mbox_tmo); + psli->mbox_tmo.function = lpfc_mbox_timeout; + psli->mbox_tmo.data = (unsigned long) phba; + init_timer(&phba->fcp_poll_timer); + phba->fcp_poll_timer.function = lpfc_poll_timeout; + phba->fcp_poll_timer.data = (unsigned long) phba; + init_timer(&phba->fabric_block_timer); + phba->fabric_block_timer.function = lpfc_fabric_block_timeout; + phba->fabric_block_timer.data = (unsigned long) phba; + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) + if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) + goto out_idr_remove; + + /* + * Get the bus address of Bar0 and Bar2 and the number of bytes + * required by each mapping. + */ + phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); + bar0map_len = pci_resource_len(phba->pcidev, 0); + + phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); + bar2map_len = pci_resource_len(phba->pcidev, 2); + + /* Map HBA SLIM to a kernel virtual address. */ + phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->slim_memmap_p) { + error = -ENODEV; + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLIM memory.\n"); + goto out_idr_remove; + } + + /* Map HBA Control Registers to a kernel virtual address. 
*/ + phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->ctrl_regs_memmap_p) { + error = -ENODEV; + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for HBA control registers.\n"); + goto out_iounmap_slim; + } + + /* Allocate memory for SLI-2 structures */ + phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, + &phba->slim2p_mapping, GFP_KERNEL); + if (!phba->slim2p) + goto out_iounmap; + + memset(phba->slim2p, 0, SLI2_SLIM_SIZE); + + phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, + lpfc_sli_hbq_size(), + &phba->hbqslimp.phys, + GFP_KERNEL); + if (!phba->hbqslimp.virt) + goto out_free_slim; + + hbq_count = lpfc_sli_hbq_count(); + ptr = phba->hbqslimp.virt; + for (i = 0; i < hbq_count; ++i) { + phba->hbqs[i].hbq_virt = ptr; + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + ptr += (lpfc_hbq_defs[i]->entry_count * + sizeof(struct lpfc_hbq_entry)); + } + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; + + memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + + INIT_LIST_HEAD(&phba->hbqbuf_in_list); + + /* Initialize the SLI Layer to run with lpfc HBAs. */ + lpfc_sli_setup(phba); + lpfc_sli_queue_setup(phba); + + retval = lpfc_mem_alloc(phba); + if (retval) { + error = retval; + goto out_free_hbqslimp; + } + + /* Initialize and populate the iocb list per host. */ + INIT_LIST_HEAD(&phba->lpfc_iocb_list); + for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { + iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); + if (iocbq_entry == NULL) { + printk(KERN_ERR "%s: only allocated %d iocbs of " + "expected %d count. Unloading driver.\n", + __FUNCTION__, i, LPFC_IOCB_LIST_CNT); + error = -ENOMEM; + goto out_free_iocbq; + } + + iotag = lpfc_sli_next_iotag(phba, iocbq_entry); + if (iotag == 0) { + kfree (iocbq_entry); + printk(KERN_ERR "%s: failed to allocate IOTAG. 
" + "Unloading driver.\n", + __FUNCTION__); error = -ENOMEM; goto out_free_iocbq; } @@ -2196,7 +2689,93 @@ lpfc_pci_probe_one(struct pci_dev *pdev, * hba setup may have changed the hba_queue_depth so we need to adjust * the value of can_queue. */ +#ifdef ENABLE_BG_DBG_ONE_CMD + shost->can_queue = 1; +#else shost->can_queue = phba->cfg_hba_queue_depth - 10; +#endif +#ifdef ENABLE_BG + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + +#ifdef ENABLE_BG_SELECT_MODE_OF_OP + if (lpfc_prot_mask && lpfc_prot_guard) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "Registering BlockGuard with the SCSI " + "layer\n"); + + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } +#else + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "Registering BlockGuard with the SCSI layer\n"); + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIX_TYPE0_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); +#endif + } + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + if (!_dump_buf_data) { + int pagecnt = 10; + while (pagecnt) { + spin_lock_init(&_dump_buf_lock); + _dump_buf_data = + (char *)__get_free_pages(GFP_KERNEL,pagecnt); + if (_dump_buf_data) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_data at 0x%p\n", + (1 << pagecnt),_dump_buf_data); + _dump_buf_data_order = pagecnt; + memset((void *)_dump_buf_data, 0, ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else { + --pagecnt; + } + + } + + if (!_dump_buf_data_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + + } else { + printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p\n" + ,_dump_buf_data); + } + + + if (!_dump_buf_dif) { + int pagecnt = 10; + while (pagecnt) { + _dump_buf_dif = + (char *)__get_free_pages(GFP_KERNEL,pagecnt); + if (_dump_buf_dif) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_dif at 0x%p\n", + (1 << 
pagecnt),_dump_buf_dif); + _dump_buf_dif_order = pagecnt; + memset((void *)_dump_buf_dif, 0, ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else { + --pagecnt; + } + + } + + if (!_dump_buf_dif_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + + } else { + printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n" + ,_dump_buf_dif); + } +#endif +#endif +#endif lpfc_host_attrib_init(shost); @@ -2261,6 +2840,7 @@ out: scsi_host_put(shost); return error; } +#endif static void __devexit lpfc_pci_remove_one(struct pci_dev *pdev) @@ -2580,6 +3160,29 @@ lpfc_exit(void) fc_release_transport(lpfc_transport_template); if (lpfc_enable_npiv) fc_release_transport(lpfc_vport_transport_template); +#ifdef ENABLE_STOP_RESUME + printk(KERN_ERR "BLKGRD dbg: removing proc entries\n"); + lpfc_remove_proc(); +#endif +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + if (_dump_buf_data) { + printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " + "at 0x%p\n", + (1L << _dump_buf_data_order),_dump_buf_data); + free_pages((unsigned long)_dump_buf_data,_dump_buf_data_order); + } + + if (_dump_buf_dif) { + printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " + "at 0x%p\n", + (1L << _dump_buf_dif_order),_dump_buf_dif); + free_pages((unsigned long)_dump_buf_dif,_dump_buf_dif_order); + } +#endif +#endif +#endif } module_init(lpfc_init); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -27,6 +27,9 @@ #define LOG_FCP 0x40 /* FCP traffic history */ #define LOG_NODE 0x80 /* Node table events */ #define LOG_TEMP 0x100 /* Temperature sensor events */ +#ifdef ENABLE_BG +#define LOG_BG 0x200 /* BlockGuard events */ +#endif #define LOG_MISC 0x400 /* Miscellaneous events */ #define LOG_SLI 0x800 /* SLI events */ #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ diff --git
a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -710,6 +710,10 @@ lpfc_config_port(struct lpfc_hba *phba, /* If HBA supports SLI=3 ask for it */ if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { +#ifdef ENABLE_BG + if (phba->cfg_enable_bg) + mb->un.varCfgPort.cbg = 1; /* configure BlockGuard*/ +#endif mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); if (phba->max_vpi && phba->cfg_enable_npiv && diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -47,6 +47,12 @@ lpfc_mem_alloc(struct lpfc_hba * phba) int longs; int i; +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG + printk(KERN_ERR "BLKGRD: Attempting to create pci pool with %d bytes\n", + phba->cfg_sg_dma_buf_size); +#endif +#endif phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); if (!phba->lpfc_scsi_dma_buf_pool) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -18,13 +18,13 @@ * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ - #include #include #include #include #include +#include #include #include #include @@ -41,6 +41,153 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +extern char *_dump_buf_data; +extern unsigned long _dump_buf_data_order; +extern char * _dump_buf_dif; +extern unsigned long _dump_buf_dif_order; +extern spinlock_t _dump_buf_lock; +int _dump_buf_done = 0; +#endif + +static char *dif_op_str[] = { + "SCSI_PROT_NORMAL", + "SCSI_PROT_READ_INSERT", + "SCSI_PROT_WRITE_STRIP", + "SCSI_PROT_READ_STRIP", + "SCSI_PROT_WRITE_INSERT", + "SCSI_PROT_READ_PASS", + "SCSI_PROT_WRITE_PASS", + "SCSI_PROT_READ_CONVERT", + "SCSI_PROT_WRITE_CONVERT" +}; + +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT +unsigned int injerr_wgrd_cnt = 0; +unsigned int injerr_wapp_cnt = 0; +unsigned int injerr_wref_cnt = 0; +unsigned int injerr_rapp_cnt = 0; +unsigned int injerr_rref_cnt = 0; +#endif +#endif + +void +lpfc_hexdump(unsigned char* uchar_ptr, uint32_t size) +{ + int32_t *i_ptr; + int32_t i; + + if (uchar_ptr == NULL) + return; + + i_ptr = (int32_t *) uchar_ptr; + + for (i = 0; i < size; i += sizeof (int32_t)) { + printk(KERN_ALERT "(%.2d:%.4x:) 0x%8.8x\n", + (int32_t)(i/sizeof (int32_t)), i, *i_ptr++); + } +} + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_NODEF +void +lpfc_debug_hexdump(unsigned char* uchar_ptr, uint32_t size) +{ + int32_t *i_ptr; + int32_t i; + char *dst; + unsigned int cnt; + + if (!_dump_buf_data) { + printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", + __FUNCTION__); + return; + } + + if ((size + 32) >= ((1 << _dump_buf_data_order) << PAGE_SHIFT) ) { + printk(KERN_ERR "BLKGRD WARNING %s adjusting size to " + "max buffer size\n", + __FUNCTION__); + size = ((1 << _dump_buf_data_order) << PAGE_SHIFT); + } + + if (uchar_ptr == NULL) + return; + + i_ptr = (int32_t *) uchar_ptr; + + dst = _dump_buf_data; + printk(KERN_ERR 
"BLKGRD %s: _dump_buf_data=0x%p\n",__FUNCTION__, dst); + for (i = 0; i < size; i += sizeof (int32_t)) { + cnt = sprintf(dst, "(%.2d:%.4x:) 0x%8.8x\n", + (int32_t)(i/sizeof (int32_t)), i, *i_ptr++); + dst += cnt; + + } + + sprintf(dst, "\n*** END OF BUFFER***\n"); +} +#endif + +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR +void +lpfc_debug_save_data(struct scsi_cmnd *cmnd) +{ + void *src, *dst; + struct scatterlist *sgde = scsi_sglist(cmnd); + + if (!_dump_buf_data) { + printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", + __FUNCTION__); + return; + } + + + if(!sgde) { + printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); + return; + } + + dst = (void *) _dump_buf_data; + while(sgde) { + src = sg_virt(sgde); + memcpy(dst, src, sgde->length); + dst += sgde->length; + sgde = sg_next(sgde); + } +} + +void +lpfc_debug_save_dif(struct scsi_cmnd *cmnd) +{ + void *src, *dst; + struct scatterlist *sgde = scsi_prot_sglist(cmnd); + + if (!_dump_buf_dif) { + printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", + __FUNCTION__); + return; + } + + if(!sgde) { + printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); + return; + } + + dst = (void *) _dump_buf_dif; + while(sgde) { + src = sg_virt(sgde); + memcpy(dst, src, sgde->length); + dst += sgde->length; + sgde = sg_next(sgde); + } +} +#endif +#endif +#endif /* * This function is called with no lock held when there is a resource @@ -111,6 +258,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor return; } +#ifndef ENABLE_BG_DBG_ONE_CMD void lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) { @@ -151,7 +299,14 @@ lpfc_ramp_down_queue_handler(struct lpfc atomic_set(&phba->num_rsrc_err, 0); atomic_set(&phba->num_cmd_success, 0); } +#else +void +lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) +{ +} +#endif +#ifndef ENABLE_BG_DBG_ONE_CMD void lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) { @@ -182,6 +337,12 @@ lpfc_ramp_up_queue_handler(struct lpfc_h atomic_set(&phba->num_rsrc_err, 0); 
atomic_set(&phba->num_cmd_success, 0); } +#else +void +lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) +{ +} +#endif /* * This routine allocates a scsi buffer, which contains all the necessary @@ -289,6 +450,9 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba if (lpfc_cmd) { lpfc_cmd->seg_cnt = 0; lpfc_cmd->nonsg_phys = 0; +#ifdef ENABLE_BG + lpfc_cmd->prot_seg_cnt = 0; +#endif } spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); return lpfc_cmd; @@ -341,7 +505,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { printk(KERN_ERR "%s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d", - __func__, phba->cfg_sg_seg_cnt, + __FUNCTION__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); scsi_dma_unmap(scsi_cmnd); return 1; @@ -380,8 +544,851 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd)); return 0; } + +#ifdef ENABLE_BG +/* + * Given a scsi cmnd, determine the BlockGuard profile to be used + * with the cmd + */ +static int +lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) +{ + uint8_t guard_type = scsi_host_get_guard(sc->device->host); + uint8_t ret_prof = LPFC_PROF_INVALID; + + if (guard_type == SHOST_DIX_GUARD_IP) { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + ret_prof = LPFC_PROF_AST2; + break; + + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + ret_prof = LPFC_PROF_A1; + break; + + case SCSI_PROT_READ_CONVERT: + case SCSI_PROT_WRITE_CONVERT: + ret_prof = LPFC_PROF_AST1; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + case SCSI_PROT_NORMAL: + default: + printk(KERN_ERR "Bad op/guard:%d/%d combination\n", + scsi_get_prot_op(sc), guard_type); + break; + + } + } 
else if (guard_type == SHOST_DIX_GUARD_CRC) { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + ret_prof = LPFC_PROF_A1; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + ret_prof = LPFC_PROF_C1; + break; + + case SCSI_PROT_READ_CONVERT: + case SCSI_PROT_WRITE_CONVERT: + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_NORMAL: + default: + printk(KERN_ERR "Bad op/guard:%d/%d combination\n", + scsi_get_prot_op(sc), guard_type); + break; + } + } else { + /* unsupported format */ + BUG(); + } + + return ret_prof; +} + +/* returns LBA from commands with 32 bit LBA */ +static inline uint32_t +lpfc_scsi_10_lba(const uint8_t *cdb_p) +{ + uint32_t lba = 0; + + lba = ((((uint64_t)cdb_p[2]) << 24) | (((uint64_t)cdb_p[3]) << 16) | + (((uint64_t)cdb_p[4]) << 8) | (((uint64_t)cdb_p[5]))); + + return (lba); +} + +/* returns low-order 32 bits of LBA from commands with 64 bit LBA */ +static inline uint32_t +lpfc_scsi_16_lba(const uint8_t *cdb_p) +{ + uint64_t lba = 0; + uint32_t lower_lba; + + lba = ((((uint64_t)cdb_p[2]) << 56) | (((uint64_t)cdb_p[3]) << 48) | + (((uint64_t)cdb_p[4]) << 40) | (((uint64_t)cdb_p[5]) << 32) | + (((uint64_t)cdb_p[6]) << 24) | (((uint64_t)cdb_p[7]) << 16) | + (((uint64_t)cdb_p[8]) << 8) | (((uint64_t)cdb_p[9]))); + + lower_lba = ((uint32_t) (0xffffffff & (lba))); + + return (lower_lba); +} + +/* returns low-order 32 bits of LBA from command with arbitrary LBA size */ +static inline uint32_t +lpfc_get_lba_lower_bytes(struct scsi_cmnd *sc) +{ + uint32_t lba; + + switch (sc->cmnd[0]){ + case READ_10: + case READ_12: + case WRITE_10: + case WRITE_12: + lba = lpfc_scsi_10_lba(&sc->cmnd[0]); + break; + case READ_16: + case WRITE_16: + lba = lpfc_scsi_16_lba(&sc->cmnd[0]); + break; + default: + printk(KERN_ERR "Unable to decode cmd=0x%x\n", sc->cmnd[0]); + BUG(); + } + + return lba; +} + +struct scsi_dif_tuple { + __be16 guard_tag; /* Checksum */ + __be16 app_tag; /* 
Opaque storage */ + __be32 ref_tag; /* Target LBA or indirect LBA */ +}; + +static inline unsigned +lpfc_cmd_blksize(struct scsi_cmnd *sc) +{ + return sc->device->sector_size; +} + +/** + * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command + * @sc: in: SCSI command + * @apptagmask out: app tag mask + * @apptagval out: app tag value + * @reftag out: ref tag (reference tag) + * + * Description: + * Extract DIF paramters from the command if possible. Otherwise, + * use default paratmers. + * + **/ +static inline void +lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask, + uint16_t *apptagval, uint32_t *reftag) +{ + struct scsi_dif_tuple *spt; + unsigned char op = scsi_get_prot_op(sc); + unsigned int protcnt = scsi_prot_sg_count(sc); + static int cnt = 0; + + + if (protcnt && (op == SCSI_PROT_WRITE_STRIP || + op == SCSI_PROT_WRITE_PASS || + op == SCSI_PROT_WRITE_CONVERT)) { + + cnt++; + spt = page_address(sg_page(scsi_prot_sglist(sc))) + + scsi_prot_sglist(sc)[0].offset; + *apptagmask = 0xffff; + *apptagval = cpu_to_be32(spt->app_tag); + *reftag = cpu_to_be32(spt->ref_tag); + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT + if (injerr_wgrd_cnt) { + int count=0; + int ppage_offset = 0; + struct scatterlist *curr = scsi_prot_sglist(sc); + spt = page_address(sg_page(curr)) + curr->offset; + + while (1) { + if (ppage_offset >= curr->length) { + curr = sg_next(curr); + if (!curr) + break; + + spt = page_address(sg_page(curr)) + + curr->offset; + + ppage_offset = 0; + } + + spt += ppage_offset; + ppage_offset += sizeof(struct scsi_dif_tuple); + count++; + } + printk(KERN_ERR "BLKGRD dbg: injecting error in write " + "checksum at block %d\n",count); + spt->guard_tag = 0xDEAD; + injerr_wgrd_cnt--; + } + + if (injerr_wapp_cnt) { + printk(KERN_ERR "BLKGRD dbg: injecting error in write apptag\n"); + *apptagval = 0xDEAD; + injerr_wapp_cnt--; + } + + if (injerr_wref_cnt) { + printk(KERN_ERR "BLKGRD dbg: injecting error in write reftag\n"); 
+ *reftag = 0xDEADBEEF; + injerr_wref_cnt--; + } +#endif +#ifdef ENABLE_BG_DBG_WRITE_DIF_HEXDUMP + { + struct scatterlist *sgel = NULL; + uint32_t i, j; + unsigned int len; + struct scsi_dif_tuple *sdt; + int cnt; + + printk(KERN_ERR "BLKGRD dbg: WRITE DIF hexdump\n"); + scsi_for_each_prot_sg(sc, sgel, protcnt, i) { + len = sgel->length; + BUG_ON(len % 8); + sdt = sg_virt(sgel); + cnt = len / sizeof(*sdt); + for (j = 0; j < cnt; j++) { + lpfc_hexdump((unsigned char *)sdt, + sizeof(*sdt)); + sdt++; + } + } + } +#endif +#endif + } else { + *reftag = lpfc_get_lba_lower_bytes(sc); + *apptagmask = 0xffff; + *apptagval = 0; +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DIF_ERR_INJECT + if (injerr_rapp_cnt) { + printk(KERN_ERR "BLKGRD dbg: injecting error in read apptag\n"); + *apptagval = 0xDEAD; + injerr_rapp_cnt--; + } + + if (injerr_rref_cnt) { + printk(KERN_ERR "BLKGRD dbg: injecting error in read reftag\n"); + *reftag = 0xDEADBEEF; + injerr_rref_cnt--; + } +#endif +#endif + } +} + +/* + * This function sets up buffer list for protection groups of + * type LPFC_PG_TYPE_NO_DIF + * + * This is usually used when the HBA is instructed to generate + * DIFs and insert them into data stream (or strip DIF from + * incoming data stream) + * + * The buffer list consists of just one protection group described + * below: + * +-------------------------+ + * start of prot group --> | PDE_1 | + * +-------------------------+ + * | Data BDE | + * +-------------------------+ + * |more Data BDE's ... 
(opt)| + * +-------------------------+ + * + * @sc: pointer to scsi command we're working on + * @bpl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * + * Note: Data s/g buffers have been dma mapped + */ +static int +lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct ulp_bde64 *bpl, int datasegcnt) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct lpfc_pde *pde1 = NULL; + dma_addr_t physaddr; + int i = 0, num_bde = 0; + int datadir = sc->sc_data_direction; + int prof = LPFC_PROF_INVALID; + unsigned blksize; + uint32_t reftag; + uint16_t apptagmask, apptagval; + + pde1 = (struct lpfc_pde *) bpl; + prof = lpfc_sc_to_sli_prof(sc); + + if (prof == LPFC_PROF_INVALID) + goto out; + + /* extract some info from the scsi command for PDE1*/ + blksize = lpfc_cmd_blksize(sc); + lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); + + /* setup PDE1 with what we have */ + lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, + BG_EC_STOP_ERR); + lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); + + num_bde++; + bpl++; + + /* assumption: caller has already run dma_map_sg on command data */ + scsi_for_each_sg(sc, sgde, datasegcnt, i) { + physaddr = sg_dma_address(sgde); + bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); + bpl->tus.f.bdeSize = sg_dma_len(sgde); + if (datadir == DMA_TO_DEVICE) + bpl->tus.f.bdeFlags = 0; + else + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + num_bde++; + } + +out: + return num_bde; +} + +/* + * This function sets up buffer list for protection groups of + * type LPFC_PG_TYPE_DIF_BUF + * + * This is usually used when DIFs are in their own buffers, + * separate from the data. The HBA can then by instructed + * to place the DIFs in the outgoing stream. 
For read operations, + * The HBA could extract the DIFs and place it in DIF buffers. + * + * The buffer list for this type consists of one or more of the + * protection groups described below: + * +-------------------------+ + * start of first prot group --> | PDE_1 | + * +-------------------------+ + * | PDE_3 (Prot BDE) | + * +-------------------------+ + * | Data BDE | + * +-------------------------+ + * |more Data BDE's ... (opt)| + * +-------------------------+ + * start of new prot group --> | PDE_1 | + * +-------------------------+ + * | ... | + * +-------------------------+ + * + * @sc: pointer to scsi command we're working on + * @bpl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * @protcnt: number of segment of protection data that have been dma mapped + * + * Note: It is assumed that both data and protection s/g buffers have been + * mapped for DMA + */ +static int +lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct ulp_bde64 *bpl, int datacnt, int protcnt) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct scatterlist *sgpe = NULL; /* s/g prot entry */ + struct lpfc_pde *pde1 = NULL; + struct ulp_bde64 *prot_bde= NULL; + dma_addr_t dataphysaddr, protphysaddr; + unsigned short curr_data = 0, curr_prot = 0; + unsigned int split_offset, protgroup_len; + unsigned int protgrp_blks, protgrp_bytes; + unsigned int remainder, subtotal; + int prof = LPFC_PROF_INVALID; + int datadir = sc->sc_data_direction; + unsigned char pgdone = 0, alldone = 0; + unsigned blksize; + uint32_t reftag; + uint16_t apptagmask, apptagval; + int num_bde = 0; +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_BPL_HEXDUMP + struct ulp_bde64 *orig_bpl = bpl; + int nrpg = 0; +#endif +#ifdef ENABLE_BG_DBG_NODEF + struct ulp_bde64 *orig_bpl2 = bpl; + unsigned int dlen = 0, plen = 0; +#endif +#endif + + sgpe = scsi_prot_sglist(sc); + sgde = scsi_sglist(sc); + + if(!sgpe || !sgde){ + 
lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "Invalid s/g entry: data=0x%p prot=0x%p\n", + sgpe, sgde); + return 0; + } + + prof = lpfc_sc_to_sli_prof(sc); + if (prof == LPFC_PROF_INVALID) + goto out; + + /* extract some info from the scsi command for PDE1*/ + blksize = lpfc_cmd_blksize(sc); + lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); + + split_offset = 0; + do { + /* setup the first PDE_1 */ + pde1 = (struct lpfc_pde *) bpl; + + lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, + BG_EC_STOP_ERR); + lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); + + num_bde++; + bpl++; +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_BPL_HEXDUMP + nrpg++; /* Up protection groups */ +#endif +#endif + + /* setup the first BDE that points to protection buffer */ + prot_bde = (struct ulp_bde64*) bpl; + protphysaddr = sg_dma_address(sgpe); + prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); + prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); + protgroup_len = sg_dma_len(sgpe); + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_NODEF + plen += protgroup_len; +#endif +#endif + + /* must be integer multiple of the DIF block length */ + BUG_ON(protgroup_len % 8); + + protgrp_blks = protgroup_len / 8; + protgrp_bytes = protgrp_blks * blksize; + + prot_bde->tus.f.bdeSize = protgroup_len; + if (datadir == DMA_TO_DEVICE) + prot_bde->tus.f.bdeFlags = 0; + else + prot_bde->tus.f.bdeFlags = BUFF_USE_RCV; + prot_bde->tus.w = le32_to_cpu(bpl->tus.w); + + curr_prot++; + num_bde++; + + /* setup BDE's for data blocks associated with DIF data */ + pgdone = 0; + subtotal = 0; /* total bytes processed for current prot grp */ + while (!pgdone){ + bpl++; + dataphysaddr = sg_dma_address(sgde) + split_offset; + bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); + + remainder = sg_dma_len(sgde) - split_offset; + + if ((subtotal + remainder) <= protgrp_bytes){ + /* we can use this whole 
buffer */ + bpl->tus.f.bdeSize = remainder; + split_offset = 0; + + if ((subtotal + remainder) == protgrp_bytes) + pgdone = 1; + } else { + /* must split this buffer with next prot grp */ + printk(KERN_ERR "BLKGRD dbg: *** splitting " + "data buf with next " + "protection group\n"); + bpl->tus.f.bdeSize = protgrp_bytes - subtotal; + split_offset += bpl->tus.f.bdeSize; + } + + subtotal += bpl->tus.f.bdeSize; + + if (datadir == DMA_TO_DEVICE) + bpl->tus.f.bdeFlags = 0; + else + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + num_bde++; + curr_data++; + + if (split_offset) + break; + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_NODEF + dlen += sg_dma_len(sgde); +#endif +#endif + /* Move to the next s/g segment if possible */ + if (curr_data < datacnt) { + sgde = sg_next(sgde); + } + + } + + /* are we done ? */ + if (curr_prot == protcnt) { + alldone = 1; + } else if (curr_prot < protcnt) { + /* advance to next prot buffer */ + sgpe = sg_next(sgpe); + bpl++; + + /* update the reference tag */ + reftag += protgrp_blks; + } else { + /* if we're here, we have a bug */ +#ifdef ENABLE_BG_DBG + printk(KERN_ERR "BLKGRD dbg: bug in %s\n",__FUNCTION__); +#else + BUG(); +#endif + } + + } while (!alldone); + +out: +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_NODEF + printk(KERN_ERR "BLKGRD dbg: data_len=%u prot_len=%u\n", dlen, plen); + lpfc_debug_hexdump ((unsigned char *) orig_bpl2, + sizeof (*orig_bpl2) * num_bde); + +#endif +#ifdef ENABLE_BG_DBG_BPL_HEXDUMP + printk(KERN_ERR "BLKGRD dbg: %s datacnt=%d protcnt=%d num_bde=%d\n", + __FUNCTION__,datacnt, protcnt, num_bde); + printk(KERN_ERR "BLKGRD dbg: from helpers " + "datacnt=%d protcnt=%d no pg=%d\n", + scsi_sg_count(sc), scsi_prot_sg_count(sc), nrpg); + lpfc_hexdump ((unsigned char *) orig_bpl, sizeof (*orig_bpl) * num_bde); +#endif +#endif + + return num_bde; +} +/* + * Given a SCSI command that supports DIF, determine composition of protection + * groups involved in setting up buffer lists + * 
+ * Returns: + * for DIF (for both read and write) + * */ +static int +lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) +{ + int ret = LPFC_PG_TYPE_INVALID; + unsigned char op = scsi_get_prot_op(sc); + + switch (op){ + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + ret = LPFC_PG_TYPE_NO_DIF; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + case SCSI_PROT_WRITE_CONVERT: + case SCSI_PROT_READ_CONVERT: + ret = LPFC_PG_TYPE_DIF_BUF; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "Unsupported protection op:%d\n", op); + break; + } + + return ret; +} + +/* + * This is the protection/DIF aware version of + * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the + * two functions eventually, but for now, it's here + */ +static int +lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + uint32_t num_bde = 0; + int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; + int prot_group_type = 0; + int diflen, fcpdl; + unsigned blksize; + + /* + * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd + * fcp_rsp regions to the first data bde entry + */ + bpl += 2; + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from pci_map_sg + * because this a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. 
+ */ + datasegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_sglist(scsi_cmnd), + scsi_sg_count(scsi_cmnd), datadir); + if (unlikely(!datasegcnt)) + return 1; + + lpfc_cmd->seg_cnt = datasegcnt; + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + printk(KERN_ERR "%s: Too many sg segments from " + "dma_map_sg. Config %d, seg_cnt %d", + __FUNCTION__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); + + switch (prot_group_type){ + case LPFC_PG_TYPE_NO_DIF: + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, + datasegcnt); + /* we should have 2 or more entries in buffer list */ + if (num_bde < 2) + goto err; + break; + case LPFC_PG_TYPE_DIF_BUF:{ + /* + * This type indicates that protection buffers are + * passed to the driver, so that needs to be prepared + * for DMA + */ + protsegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), datadir); + if (unlikely(!protsegcnt)) { + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + lpfc_cmd->prot_seg_cnt = protsegcnt; + if (lpfc_cmd->prot_seg_cnt > phba->cfg_prot_sg_seg_cnt){ + printk(KERN_ERR "%s: Too many prot sg segments " + "from dma_map_sg. Config %d," + "prot_seg_cnt %d", + __FUNCTION__, + phba->cfg_prot_sg_seg_cnt, + lpfc_cmd->prot_seg_cnt); + dma_unmap_sg(&phba->pcidev->dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + datadir); + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, + datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ + if (num_bde < 3) + goto err; + break; + } + case LPFC_PG_TYPE_INVALID: + default: + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "Unexpected protection group %i\n", + prot_group_type); + return 1; + } + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. 
Note that the bdeSize is explicitly + reinitialized since all iocb memory resources are used many times + for transmit, receive, and continuation bpl's. + */ + iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); + iocb_cmd->un.fcpi64.bdl.bdeSize += + (num_bde * sizeof (struct ulp_bde64)); + iocb_cmd->ulpBdeCount = 1; + iocb_cmd->ulpLe = 1; + + fcpdl = scsi_bufflen(scsi_cmnd); + + if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1){ + /* + * We are in DIF Type 1 mode + * Every data block has a 8 byte DIF (trailer) + * attached to it. Must adjust FCP data length + */ + blksize = lpfc_cmd_blksize(scsi_cmnd); + diflen = (fcpdl / blksize) * 8; + fcpdl += diflen; + } +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG_CMD_FCPDL + printk(KERN_ERR "BLKGRD dbg: fcpdl=%u\n", fcpdl); +#endif +#endif + fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = fcpdl; + + return 0; +err: + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "Could not setup all needed BDE's" + "prot_group_type=%d, num_bde=%d\n", + prot_group_type, num_bde); + return 1; +} + +/* + * This function checks for BlockGuard errors detected by + * the HBA. In case of errors, the ASC/ASCQ fields in the + * sense buffer will be set correctly, paired with + * ILLEGAL_REQUEST to signal to the kernel that the HBA + * detected corruption. 
+ * + * Returns: + * 0 - No error found + * 1 - BlockGuard error found + * -1 - Internal error (bad profile, ...etc) + */ +static int +lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_iocbq *pIocbOut) +{ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; + int ret = 0; + + printk(KERN_ERR "%s: ERROR in DIF\n",__FUNCTION__); + printk(KERN_ERR "Remember: I/O @ sector %u, count %u cmd=%p\n", + (cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 | + cmd->cmnd[4] << 8 | cmd->cmnd[5]), + (cmd->cmnd[7] << 8 | cmd->cmnd[8]), + cmd); + printk(KERN_ERR "BLKGRD dbg: CDB: %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x \n", + cmd->cmnd[0],cmd->cmnd[1],cmd->cmnd[2], + cmd->cmnd[3],cmd->cmnd[4],cmd->cmnd[5], + cmd->cmnd[6],cmd->cmnd[7],cmd->cmnd[8], + cmd->cmnd[9]); + + lpfc_hexdump((unsigned char *)bgf, sizeof(*bgf)); + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_DUMP_DIF_DATA_ON_ERR + spin_lock_irq(&_dump_buf_lock); + if (!_dump_buf_done) { + printk(KERN_ERR "Saving Data/DIF for %u blocks to debugfs\n", + (cmd->cmnd[7] << 8 | cmd->cmnd[8])); + lpfc_debug_save_data(cmd); + lpfc_debug_save_dif(cmd); + _dump_buf_done = 1; + } + spin_unlock_irq(&_dump_buf_lock); +#endif +#endif + + if(lpfc_bgs_get_invalid_prof(bgf->bgstat)) { + printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", + bgf->bgstat); + ret = (-1); + goto out; + } + + if (lpfc_bgs_get_uninit_dif_block(bgf->bgstat)) { + printk(KERN_ERR "Invalid BlockGuard DIF Block. 
bgstat:0x%x\n", + bgf->bgstat); + ret = (-1); + goto out; + } + + if (lpfc_bgs_get_guard_err(bgf->bgstat)) { + ret = 1; + + scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + printk(KERN_ERR "BLKGRD dbg: guard_tag error\n"); + } + + if (lpfc_bgs_get_reftag_err(bgf->bgstat)) { + ret = 1; + + scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + printk(KERN_ERR "BLKGRD dbg: ref_tag error\n"); + } + + if (lpfc_bgs_get_apptag_err(bgf->bgstat)) { + ret = 1; + + scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + printk(KERN_ERR "BLKGRD dbg: app_tag error\n"); + } + +out: + return ret; +} +#endif static void lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) @@ -394,6 +1401,12 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba */ if (psb->seg_cnt > 0) scsi_dma_unmap(psb->pCmd); +#ifdef ENABLE_BG + if (psb->prot_seg_cnt > 0) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), + scsi_prot_sg_count(psb->pCmd), + psb->pCmd->sc_data_direction); +#endif } static void @@ -573,12 +1586,35 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba cmd->result = ScsiResult(DID_BUS_BUSY, 0); break; case IOSTAT_LOCAL_REJECT: + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG + /* dump IOCB response*/ + lpfc_hexdump((unsigned char *) &pIocbOut->iocb, + sizeof(pIocbOut->iocb)); +#endif +#endif + + if (lpfc_cmd->result == RJT_UNAVAIL_PERM || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == RJT_LOGIN_REQUIRED) { cmd->result = ScsiResult(DID_REQUEUE, 0); break; - } /* else: fall through */ + } + 
+#ifdef ENABLE_BG + if (pIocbOut->iocb.unsli3.sli3_bg.bgstat) { + /* + * This is a response for a blockguard-enabled + * operation. Check for BlockGuard error + */ + lpfc_parse_bg_err(phba, lpfc_cmd, pIocbOut); + break; + } +#endif + + /* else: fall through */ default: cmd->result = ScsiResult(DID_ERROR, 0); break; @@ -605,6 +1641,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba result = cmd->result; sdev = cmd->device; lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_CMD_DONE + printk(KERN_ERR "BLKGRD calling done() for cmd %02x iotag=0x%x\n", + cmd->cmnd[0], pIocbOut->iocb.ulpIoTag); +#endif +#endif cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { @@ -625,6 +1667,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba if (!result) lpfc_rampup_queue_depth(vport, sdev); +#ifndef ENABLE_BG_DBG_ONE_CMD if (!result && pnode && NLP_CHK_NODE_ACT(pnode) && ((jiffies - pnode->last_ramp_up_time) > LPFC_Q_RAMP_UP_INTERVAL * HZ) && @@ -648,6 +1691,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba } } } +#endif /* * Check for queue full. If the lun is reporting queue full, then @@ -747,7 +1791,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *v } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; - iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); fcp_cmnd->fcpCntl3 = READ_DATA; phba->fc4InputRequests++; } @@ -962,6 +2005,19 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd goto out_fail_command; } +#ifdef ENABLE_BG + if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) + { + printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " + "str=%s without registering for BlockGuard - " + "Rejecting command\n", + cmnd->cmnd[0],scsi_get_prot_op(cmnd), + dif_op_str[scsi_get_prot_op(cmnd)]); + goto out_fail_command; + } +#endif + /* * Catch race where our node has transitioned, but the * transport is still transitioning. 
@@ -990,7 +2046,69 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd cmnd->host_scribble = (unsigned char *)lpfc_cmd; cmnd->scsi_done = done; +#ifdef ENABLE_BG + if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { + +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_TRACE_CMD + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: rcvd protected cmd:%02x op:%02x " + "str=%s\n", + cmnd->cmnd[0],scsi_get_prot_op(cmnd), + dif_op_str[scsi_get_prot_op(cmnd)]); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: CDB: %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x \n", + cmnd->cmnd[0],cmnd->cmnd[1],cmnd->cmnd[2], + cmnd->cmnd[3],cmnd->cmnd[4],cmnd->cmnd[5], + cmnd->cmnd[6],cmnd->cmnd[7],cmnd->cmnd[8], + cmnd->cmnd[9]); + if (cmnd->cmnd[0] == READ_10) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: READ @ sector %u, " + "count %u\n", + (cmnd->cmnd[2] << 24 | + cmnd->cmnd[3] << 16 | + cmnd->cmnd[4] << 8 | cmnd->cmnd[5]), + (cmnd->cmnd[7] << 8 | cmnd->cmnd[8])); + else if (cmnd->cmnd[0] == WRITE_10) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: WRITE @ sector %u, " + "count %u cmd=%p\n", + (cmnd->cmnd[2] << 24 | + cmnd->cmnd[3] << 16 | + cmnd->cmnd[4] << 8 | cmnd->cmnd[5]), + (cmnd->cmnd[7] << 8 | cmnd->cmnd[8]), + cmnd); + else + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: parser not implemented\n"); +#endif +#endif + err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); + } else { +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_TRACE_CMD + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: rcvd unprotected cmd:%02x op:%02x " + "str=%s\n", + cmnd->cmnd[0],scsi_get_prot_op(cmnd), + dif_op_str[scsi_get_prot_op(cmnd)]); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "BLKGRD dbg: CDB: %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x \n", + cmnd->cmnd[0],cmnd->cmnd[1],cmnd->cmnd[2], + cmnd->cmnd[3],cmnd->cmnd[4],cmnd->cmnd[5], + cmnd->cmnd[6],cmnd->cmnd[7],cmnd->cmnd[8], + cmnd->cmnd[9]); +#endif +#endif + err 
= lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); + } +#else err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); +#endif + if (err) goto out_host_busy_free_buf; @@ -1423,8 +2541,13 @@ struct scsi_host_template lpfc_template .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, +#ifdef ENABLE_BG + .cmd_per_lun = 1, + .use_clustering = DISABLE_CLUSTERING, +#else .cmd_per_lun = LPFC_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, +#endif .shost_attrs = lpfc_hba_attrs, .max_sectors = 0xFFFF, }; @@ -1444,7 +2567,11 @@ struct scsi_host_template lpfc_vport_tem .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, +#ifdef ENABLE_BG + .use_clustering = DISABLE_CLUSTERING, +#else .use_clustering = ENABLE_CLUSTERING, +#endif .shost_attrs = lpfc_vport_attrs, .max_sectors = 0xFFFF, }; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -120,6 +120,10 @@ struct lpfc_scsi_buf { uint32_t seg_cnt; /* Number of scatter-gather segments returned by * dma_map_sg. The driver needs this for calls * to dma_unmap_sg. */ +#ifdef ENABLE_BG + uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */ +#endif + dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ /* diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd case CMD_IOCB_LOGENTRY_CN: case CMD_IOCB_LOGENTRY_ASYNC_CN: printk("%s - Unhandled SLI-3 Command x%x\n", - __func__, iocb_cmnd); + __FUNCTION__, iocb_cmnd); type = LPFC_UNKNOWN_IOCB; break; default: @@ -407,6 +407,14 @@ lpfc_sli_submit_iocb(struct lpfc_hba *ph * Set up an iotag */ nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? 
nextiocb->iotag : 0; + +#ifdef ENABLE_BG +#ifdef ENABLE_BG_DBG +#ifdef ENABLE_BG_DBG_IOTAG + printk(KERN_ERR "BLKGRD dbg: iotag=0x%x\n",nextiocb->iocb.ulpIoTag); +#endif +#endif +#endif if (pring->ringno == LPFC_ELS_RING) { lpfc_debugfs_slow_ring_trc(phba, @@ -1715,7 +1723,7 @@ lpfc_sli_handle_slow_ring_event(struct l rspiocbp = __lpfc_sli_get_iocbq(phba); if (rspiocbp == NULL) { printk(KERN_ERR "%s: out of buffers! Failing " - "completion.\n", __func__); + "completion.\n", __FUNCTION__); break; } @@ -2482,6 +2490,21 @@ lpfc_do_config_port(struct lpfc_hba *phb pmb->mb.un.varCfgPort.gmv) != 0 ? pmb->mb.un.varCfgPort.max_vpi : 0; + +#ifdef ENABLE_BG + if (phba->cfg_enable_bg) { + if (pmb->mb.un.varCfgPort.gbg) { + phba->sli3_options |= + LPFC_SLI3_BG_ENABLED; + } else { + lpfc_printf_log(phba, KERN_ERR, + LOG_INIT, + "Adapter did not grant " + "BlockGuard\n"); + } + } +#endif + } } @@ -3793,7 +3816,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_i break; default: printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", - __func__, ctx_cmd); + __FUNCTION__, ctx_cmd); break; }