Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/viodasd.c  8
-rw-r--r--  drivers/cdrom/viocd.c  6
-rw-r--r--  drivers/char/nvram.c  110
-rw-r--r--  drivers/char/viocons.c  10
-rw-r--r--  drivers/char/viotape.c  10
-rw-r--r--  drivers/infiniband/Kconfig  2
-rw-r--r--  drivers/infiniband/Makefile  1
-rw-r--r--  drivers/infiniband/core/agent.c  3
-rw-r--r--  drivers/infiniband/core/cm.c  6
-rw-r--r--  drivers/infiniband/core/device.c  10
-rw-r--r--  drivers/infiniband/core/mad.c  31
-rw-r--r--  drivers/infiniband/core/sysfs.c  6
-rw-r--r--  drivers/infiniband/core/ucm.c  9
-rw-r--r--  drivers/infiniband/core/user_mad.c  80
-rw-r--r--  drivers/infiniband/core/uverbs.h  1
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  1
-rw-r--r--  drivers/infiniband/core/uverbs_main.c  13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c  31
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c  13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h  3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  24
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c  4
-rw-r--r--  drivers/infiniband/ulp/srp/Kbuild  1
-rw-r--r--  drivers/infiniband/ulp/srp/Kconfig  11
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  1700
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h  150
-rw-r--r--  drivers/macintosh/via-pmu.c  29
-rw-r--r--  drivers/net/Kconfig  2
-rw-r--r--  drivers/net/cs89x0.c  14
-rw-r--r--  drivers/net/cs89x0.h  2
-rw-r--r--  drivers/net/iseries_veth.c  8
-rw-r--r--  drivers/scsi/ahci.c  31
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c  8
-rw-r--r--  drivers/scsi/libata-core.c  129
-rw-r--r--  drivers/scsi/libata-scsi.c  14
-rw-r--r--  drivers/scsi/pdc_adma.c  8
-rw-r--r--  drivers/scsi/sata_mv.c  48
-rw-r--r--  drivers/scsi/sata_qstor.c  8
-rw-r--r--  drivers/scsi/sata_sil24.c  48
-rw-r--r--  drivers/scsi/sata_sx4.c  13
-rw-r--r--  drivers/serial/8250_early.c  2
-rw-r--r--  drivers/serial/clps711x.c  9
-rw-r--r--  drivers/usb/host/pci-quirks.c  2
-rw-r--r--  drivers/video/nvidia/nvidia.c  8
53 files changed, 2310 insertions, 365 deletions
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 709f809f79f1..2d518aa2720a 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -45,10 +45,10 @@
#include <asm/uaccess.h>
#include <asm/vio.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvLpConfig.h>
-#include <asm/iSeries/vio.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/vio.h>
MODULE_DESCRIPTION("iSeries Virtual DASD");
MODULE_AUTHOR("Dave Boutcher");
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 36f31d202223..b5191780ecca 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -46,9 +46,9 @@
#include <asm/vio.h>
#include <asm/scatterlist.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/vio.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iseries/vio.h>
#define VIOCD_DEVICE "iseries/vcd"
#define VIOCD_DEVICE_DEVFS "iseries/vcd"
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 9e24bbd4090c..1af733d07321 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -32,11 +32,9 @@
* added changelog
* 1.2 Erik Gilling: Cobalt Networks support
* Tim Hockin: general cleanup, Cobalt support
- * 1.3 Jon Ringle: Comdial MP1000 support
- *
*/
-#define NVRAM_VERSION "1.3"
+#define NVRAM_VERSION "1.2"
#include <linux/module.h>
#include <linux/config.h>
@@ -47,7 +45,6 @@
#define PC 1
#define ATARI 2
#define COBALT 3
-#define MP1000 4
/* select machine configuration */
#if defined(CONFIG_ATARI)
@@ -57,9 +54,6 @@
# if defined(CONFIG_COBALT)
# include <linux/cobalt-nvram.h>
# define MACH COBALT
-# elif defined(CONFIG_MACH_MP1000)
-# undef MACH
-# define MACH MP1000
# else
# define MACH PC
# endif
@@ -118,23 +112,6 @@
#endif
-#if MACH == MP1000
-
-/* RTC in a MP1000 */
-#define CHECK_DRIVER_INIT() 1
-
-#define MP1000_CKS_RANGE_START 0
-#define MP1000_CKS_RANGE_END 111
-#define MP1000_CKS_LOC 112
-
-#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
-
-#define mach_check_checksum mp1000_check_checksum
-#define mach_set_checksum mp1000_set_checksum
-#define mach_proc_infos mp1000_proc_infos
-
-#endif
-
/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
* rtc_lock held. Due to the index-port/data-port design of the RTC, we
* don't want two different things trying to get to it at once. (e.g. the
@@ -938,91 +915,6 @@ atari_proc_infos(unsigned char *nvram, char *buffer, int *len,
#endif /* MACH == ATARI */
-#if MACH == MP1000
-
-static int
-mp1000_check_checksum(void)
-{
- int i;
- unsigned short sum = 0;
- unsigned short expect;
-
- for (i = MP1000_CKS_RANGE_START; i <= MP1000_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
-
- expect = __nvram_read_byte(MP1000_CKS_LOC+1)<<8 |
- __nvram_read_byte(MP1000_CKS_LOC);
- return ((sum & 0xffff) == expect);
-}
-
-static void
-mp1000_set_checksum(void)
-{
- int i;
- unsigned short sum = 0;
-
- for (i = MP1000_CKS_RANGE_START; i <= MP1000_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(sum >> 8, MP1000_CKS_LOC + 1);
- __nvram_write_byte(sum & 0xff, MP1000_CKS_LOC);
-}
-
-#ifdef CONFIG_PROC_FS
-
-#define SERVER_N_LEN 32
-#define PATH_N_LEN 32
-#define FILE_N_LEN 32
-#define NVRAM_MAGIC_SIG 0xdead
-
-typedef struct NvRamImage
-{
- unsigned short int magic;
- unsigned short int mode;
- char fname[FILE_N_LEN];
- char path[PATH_N_LEN];
- char server[SERVER_N_LEN];
- char pad[12];
-} NvRam;
-
-static int
-mp1000_proc_infos(unsigned char *nvram, char *buffer, int *len,
- off_t *begin, off_t offset, int size)
-{
- int checksum;
- NvRam* nv = (NvRam*)nvram;
-
- spin_lock_irq(&rtc_lock);
- checksum = __nvram_check_checksum();
- spin_unlock_irq(&rtc_lock);
-
- PRINT_PROC("Checksum status: %svalid\n", checksum ? "" : "not ");
-
- switch( nv->mode )
- {
- case 0 :
- PRINT_PROC( "\tMode 0, tftp prompt\n" );
- break;
- case 1 :
- PRINT_PROC( "\tMode 1, booting from disk\n" );
- break;
- case 2 :
- PRINT_PROC( "\tMode 2, Alternate boot from disk /boot/%s\n", nv->fname );
- break;
- case 3 :
- PRINT_PROC( "\tMode 3, Booting from net:\n" );
- PRINT_PROC( "\t\t%s:%s%s\n",nv->server, nv->path, nv->fname );
- break;
- default:
- PRINT_PROC( "\tInconsistant nvram?\n" );
- break;
- }
-
- return 1;
-}
-#endif
-
-#endif /* MACH == MP1000 */
-
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(__nvram_read_byte);
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 44f5fb4a46ef..98601c7d04a9 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -44,12 +44,12 @@
#include <linux/tty_flip.h>
#include <linux/sysrq.h>
-#include <asm/iSeries/vio.h>
+#include <asm/iseries/vio.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvCallEvent.h>
-#include <asm/iSeries/HvLpConfig.h>
-#include <asm/iSeries/HvCall.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iseries/hv_call_event.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_call.h>
#ifdef CONFIG_VT
#error You must turn off CONFIG_VT to use CONFIG_VIOCONS
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 51abd3defc1c..867cc4e418c7 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -29,7 +29,7 @@
*
* All tape operations are performed by sending messages back and forth to
* the OS/400 partition. The format of the messages is defined in
- * iSeries/vio.h
+ * iseries/vio.h
*/
#include <linux/config.h>
#include <linux/version.h>
@@ -54,10 +54,10 @@
#include <asm/ioctls.h>
#include <asm/vio.h>
-#include <asm/iSeries/vio.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvCallEvent.h>
-#include <asm/iSeries/HvLpConfig.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iseries/hv_call_event.h>
+#include <asm/iseries/hv_lp_config.h>
#define VIOTAPE_VERSION "1.2"
#define VIOTAPE_MAXREQ 1
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 325d502e25cd..bdf0891a92dd 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -33,4 +33,6 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
+source "drivers/infiniband/ulp/srp/Kconfig"
+
endmenu
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index d256cf798218..a43fb34cca94 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
+obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0c3c6952faae..7545775d38ef 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -155,13 +155,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
int ret;
/* Create new device info */
- port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
ret = -ENOMEM;
goto error1;
}
- memset(port_priv, 0, sizeof *port_priv);
/* Obtain send only MAD agent for SMI QP */
port_priv->agent[0] = ib_register_mad_agent(device, port_num,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 580c3a2bb102..02110e00d145 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -544,11 +544,10 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
struct cm_id_private *cm_id_priv;
int ret;
- cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
+ cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
if (!cm_id_priv)
return ERR_PTR(-ENOMEM);
- memset(cm_id_priv, 0, sizeof *cm_id_priv);
cm_id_priv->id.state = IB_CM_IDLE;
cm_id_priv->id.device = device;
cm_id_priv->id.cm_handler = cm_handler;
@@ -621,10 +620,9 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
struct cm_timewait_info *timewait_info;
- timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
+ timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
if (!timewait_info)
return ERR_PTR(-ENOMEM);
- memset(timewait_info, 0, sizeof *timewait_info);
timewait_info->work.local_id = local_id;
INIT_WORK(&timewait_info->work.work, cm_work_handler,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5a6e44976405..e169e798354b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -161,17 +161,9 @@ static int alloc_name(char *name)
*/
struct ib_device *ib_alloc_device(size_t size)
{
- void *dev;
-
BUG_ON(size < sizeof (struct ib_device));
- dev = kmalloc(size, GFP_KERNEL);
- if (!dev)
- return NULL;
-
- memset(dev, 0, size);
-
- return dev;
+ return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 88f9f8c9eacc..3d8175e5f054 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -255,12 +255,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
}
/* Allocate structures */
- mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
+ mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
if (!mad_agent_priv) {
ret = ERR_PTR(-ENOMEM);
goto error1;
}
- memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
IB_ACCESS_LOCAL_WRITE);
@@ -448,14 +447,13 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
goto error1;
}
/* Allocate structures */
- mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
+ mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
if (!mad_snoop_priv) {
ret = ERR_PTR(-ENOMEM);
goto error1;
}
/* Now, fill in the various structures */
- memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
mad_snoop_priv->agent.device = device;
mad_snoop_priv->agent.recv_handler = recv_handler;
@@ -794,10 +792,9 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
(!rmpp_active && buf_size > sizeof(struct ib_mad)))
return ERR_PTR(-EINVAL);
- buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
+ buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
if (!buf)
return ERR_PTR(-ENOMEM);
- memset(buf, 0, sizeof *mad_send_wr + buf_size);
mad_send_wr = buf + buf_size;
mad_send_wr->send_buf.mad = buf;
@@ -1039,14 +1036,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
/* Allocate management method table */
- *method = kmalloc(sizeof **method, GFP_ATOMIC);
+ *method = kzalloc(sizeof **method, GFP_ATOMIC);
if (!*method) {
printk(KERN_ERR PFX "No memory for "
"ib_mad_mgmt_method_table\n");
return -ENOMEM;
}
- /* Clear management method table */
- memset(*method, 0, sizeof **method);
return 0;
}
@@ -1137,15 +1132,14 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
if (!*class) {
/* Allocate management class table for "new" class version */
- *class = kmalloc(sizeof **class, GFP_ATOMIC);
+ *class = kzalloc(sizeof **class, GFP_ATOMIC);
if (!*class) {
printk(KERN_ERR PFX "No memory for "
"ib_mad_mgmt_class_table\n");
ret = -ENOMEM;
goto error1;
}
- /* Clear management class table */
- memset(*class, 0, sizeof(**class));
+
/* Allocate method table for this management class */
method = &(*class)->method_table[mgmt_class];
if ((ret = allocate_method_table(method)))
@@ -1209,25 +1203,24 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
mad_reg_req->mgmt_class_version].vendor;
if (!*vendor_table) {
/* Allocate mgmt vendor class table for "new" class version */
- vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
+ vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
if (!vendor) {
printk(KERN_ERR PFX "No memory for "
"ib_mad_mgmt_vendor_class_table\n");
goto error1;
}
- /* Clear management vendor class table */
- memset(vendor, 0, sizeof(*vendor));
+
*vendor_table = vendor;
}
if (!(*vendor_table)->vendor_class[vclass]) {
/* Allocate table for this management vendor class */
- vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
+ vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
if (!vendor_class) {
printk(KERN_ERR PFX "No memory for "
"ib_mad_mgmt_vendor_class\n");
goto error2;
}
- memset(vendor_class, 0, sizeof(*vendor_class));
+
(*vendor_table)->vendor_class[vclass] = vendor_class;
}
for (i = 0; i < MAX_MGMT_OUI; i++) {
@@ -2524,12 +2517,12 @@ static int ib_mad_port_open(struct ib_device *device,
char name[sizeof "ib_mad123"];
/* Create new device info */
- port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
return -ENOMEM;
}
- memset(port_priv, 0, sizeof *port_priv);
+
port_priv->device = device;
port_priv->port_num = port_num;
spin_lock_init(&port_priv->reg_lock);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7ce7a6c782fa..b8120650e711 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -307,14 +307,13 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
if (!p->ibdev->process_mad)
return sprintf(buf, "N/A (no PMA)\n");
- in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
}
- memset(in_mad, 0, sizeof *in_mad);
in_mad->mad_hdr.base_version = 1;
in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
in_mad->mad_hdr.class_version = 1;
@@ -508,10 +507,9 @@ static int add_port(struct ib_device *device, int port_num)
if (ret)
return ret;
- p = kmalloc(sizeof *p, GFP_KERNEL);
+ p = kzalloc(sizeof *p, GFP_KERNEL);
if (!p)
return -ENOMEM;
- memset(p, 0, sizeof *p);
p->ibdev = device;
p->port_num = port_num;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 28477565ecba..6e15787d1de1 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -172,11 +172,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
struct ib_ucm_context *ctx;
int result;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
return NULL;
- memset(ctx, 0, sizeof *ctx);
atomic_set(&ctx->ref, 1);
init_waitqueue_head(&ctx->wait);
ctx->file = file;
@@ -386,11 +385,10 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
ctx = cm_id->context;
- uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
+ uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
if (!uevent)
goto err1;
- memset(uevent, 0, sizeof(*uevent));
uevent->ctx = ctx;
uevent->cm_id = cm_id;
uevent->resp.uid = ctx->uid;
@@ -1345,11 +1343,10 @@ static void ib_ucm_add_one(struct ib_device *device)
if (!device->alloc_ucontext)
return;
- ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
+ ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
if (!ucm_dev)
return;
- memset(ucm_dev, 0, sizeof *ucm_dev);
ucm_dev->ib_dev = device;
ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 97128e25f78b..aed5ca23fb22 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -94,6 +94,9 @@ struct ib_umad_port {
struct class_device *sm_class_dev;
struct semaphore sm_sem;
+ struct rw_semaphore mutex;
+ struct list_head file_list;
+
struct ib_device *ib_dev;
struct ib_umad_device *umad_dev;
int dev_num;
@@ -108,10 +111,10 @@ struct ib_umad_device {
struct ib_umad_file {
struct ib_umad_port *port;
- spinlock_t recv_lock;
struct list_head recv_list;
+ struct list_head port_list;
+ spinlock_t recv_lock;
wait_queue_head_t recv_wait;
- struct rw_semaphore agent_mutex;
struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
struct ib_mr *mr[IB_UMAD_MAX_AGENTS];
};
@@ -148,7 +151,7 @@ static int queue_packet(struct ib_umad_file *file,
{
int ret = 1;
- down_read(&file->agent_mutex);
+ down_read(&file->port->mutex);
for (packet->mad.hdr.id = 0;
packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
packet->mad.hdr.id++)
@@ -161,7 +164,7 @@ static int queue_packet(struct ib_umad_file *file,
break;
}
- up_read(&file->agent_mutex);
+ up_read(&file->port->mutex);
return ret;
}
@@ -322,7 +325,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
goto err;
}
- down_read(&file->agent_mutex);
+ down_read(&file->port->mutex);
agent = file->agent[packet->mad.hdr.id];
if (!agent) {
@@ -419,7 +422,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
if (ret)
goto err_msg;
- up_read(&file->agent_mutex);
+ up_read(&file->port->mutex);
return count;
@@ -430,7 +433,7 @@ err_ah:
ib_destroy_ah(ah);
err_up:
- up_read(&file->agent_mutex);
+ up_read(&file->port->mutex);
err:
kfree(packet);
@@ -460,7 +463,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
int agent_id;
int ret;
- down_write(&file->agent_mutex);
+ down_write(&file->port->mutex);
+
+ if (!file->port->ib_dev) {
+ ret = -EPIPE;
+ goto out;
+ }
if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
ret = -EFAULT;
@@ -522,7 +530,7 @@ err:
ib_unregister_mad_agent(agent);
out:
- up_write(&file->agent_mutex);
+ up_write(&file->port->mutex);
return ret;
}
@@ -531,7 +539,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
u32 id;
int ret = 0;
- down_write(&file->agent_mutex);
+ down_write(&file->port->mutex);
if (get_user(id, (u32 __user *) arg)) {
ret = -EFAULT;
@@ -548,7 +556,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
file->agent[id] = NULL;
out:
- up_write(&file->agent_mutex);
+ up_write(&file->port->mutex);
return ret;
}
@@ -569,6 +577,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
+ int ret = 0;
spin_lock(&port_lock);
port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
@@ -579,21 +588,32 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
if (!port)
return -ENXIO;
+ down_write(&port->mutex);
+
+ if (!port->ib_dev) {
+ ret = -ENXIO;
+ goto out;
+ }
+
file = kzalloc(sizeof *file, GFP_KERNEL);
if (!file) {
kref_put(&port->umad_dev->ref, ib_umad_release_dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
spin_lock_init(&file->recv_lock);
- init_rwsem(&file->agent_mutex);
INIT_LIST_HEAD(&file->recv_list);
init_waitqueue_head(&file->recv_wait);
file->port = port;
filp->private_data = file;
- return 0;
+ list_add_tail(&file->port_list, &port->file_list);
+
+out:
+ up_write(&port->mutex);
+ return ret;
}
static int ib_umad_close(struct inode *inode, struct file *filp)
@@ -603,6 +623,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
struct ib_umad_packet *packet, *tmp;
int i;
+ down_write(&file->port->mutex);
for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
if (file->agent[i]) {
ib_dereg_mr(file->mr[i]);
@@ -612,6 +633,9 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
kfree(packet);
+ list_del(&file->port_list);
+ up_write(&file->port->mutex);
+
kfree(file);
kref_put(&dev->ref, ib_umad_release_dev);
@@ -680,9 +704,13 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
struct ib_port_modify props = {
.clr_port_cap_mask = IB_PORT_SM
};
- int ret;
+ int ret = 0;
+
+ down_write(&port->mutex);
+ if (port->ib_dev)
+ ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+ up_write(&port->mutex);
- ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
up(&port->sm_sem);
kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -745,6 +773,8 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->ib_dev = device;
port->port_num = port_num;
init_MUTEX(&port->sm_sem);
+ init_rwsem(&port->mutex);
+ INIT_LIST_HEAD(&port->file_list);
port->dev = cdev_alloc();
if (!port->dev)
@@ -813,6 +843,9 @@ err_cdev:
static void ib_umad_kill_port(struct ib_umad_port *port)
{
+ struct ib_umad_file *file;
+ int id;
+
class_set_devdata(port->class_dev, NULL);
class_set_devdata(port->sm_class_dev, NULL);
@@ -826,6 +859,21 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
umad_port[port->dev_num] = NULL;
spin_unlock(&port_lock);
+ down_write(&port->mutex);
+
+ port->ib_dev = NULL;
+
+ list_for_each_entry(file, &port->file_list, port_list)
+ for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) {
+ if (!file->agent[id])
+ continue;
+ ib_dereg_mr(file->mr[id]);
+ ib_unregister_mad_agent(file->agent[id]);
+ file->agent[id] = NULL;
+ }
+
+ up_write(&port->mutex);
+
clear_bit(port->dev_num, dev_map);
}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 031cdf3c066d..ecb830127865 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -113,6 +113,7 @@ struct ib_uevent_object {
struct ib_ucq_object {
struct ib_uobject uobject;
+ struct ib_uverbs_file *uverbs_file;
struct list_head comp_list;
struct list_head async_list;
u32 comp_events_reported;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8c89abc8c764..63a74151c60b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -602,6 +602,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
uobj->uobject.user_handle = cmd.user_handle;
uobj->uobject.context = file->ucontext;
+ uobj->uverbs_file = file;
uobj->comp_events_reported = 0;
uobj->async_events_reported = 0;
INIT_LIST_HEAD(&uobj->comp_list);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0eb38f479b39..de6581d7cb8d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -442,13 +442,10 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
- struct ib_uverbs_event_file *ev_file = context_ptr;
- struct ib_ucq_object *uobj;
+ struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
+ struct ib_ucq_object, uobject);
- uobj = container_of(event->element.cq->uobject,
- struct ib_ucq_object, uobject);
-
- ib_uverbs_async_handler(ev_file->uverbs_file, uobj->uobject.user_handle,
+ ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
event->event, &uobj->async_list,
&uobj->async_events_reported);
@@ -728,12 +725,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (!device->alloc_ucontext)
return;
- uverbs_dev = kmalloc(sizeof *uverbs_dev, GFP_KERNEL);
+ uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
if (!uverbs_dev)
return;
- memset(uverbs_dev, 0, sizeof *uverbs_dev);
-
kref_init(&uverbs_dev->ref);
spin_lock(&map_lock);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 8600b6c3e0c2..f98e23555826 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -208,7 +208,7 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
}
}
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
struct mthca_cq *cq;
@@ -224,6 +224,35 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+ enum ib_event_type event_type)
+{
+ struct mthca_cq *cq;
+ struct ib_event event;
+
+ spin_lock(&dev->cq_table.lock);
+
+ cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+
+ if (cq)
+ atomic_inc(&cq->refcount);
+ spin_unlock(&dev->cq_table.lock);
+
+ if (!cq) {
+ mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ event.device = &dev->ib_dev;
+ event.event = event_type;
+ event.element.cq = &cq->ibcq;
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ wake_up(&cq->wait);
+}
+
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
struct mthca_srq *srq)
{
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7e68bd4a3780..e7e5d3b4f004 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -460,7 +460,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
struct mthca_cq *cq);
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq);
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+ enum ib_event_type event_type);
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
struct mthca_srq *srq);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e5a047a6dbeb..34d68e5a72d8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -292,7 +292,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
case MTHCA_EVENT_TYPE_COMP:
disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
disarm_cq(dev, eq->eqn, disarm_cqn);
- mthca_cq_event(dev, disarm_cqn);
+ mthca_cq_completion(dev, disarm_cqn);
break;
case MTHCA_EVENT_TYPE_PATH_MIG:
@@ -364,6 +364,8 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+ mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ IB_EVENT_CQ_ERR);
break;
case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 883d1e5a79bc..45c6328e780c 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1057,7 +1057,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
goto err_cmd;
if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
- mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
+ mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
(int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
(int) (mdev->fw_ver & 0xffff),
(int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 1f97a44477f5..e995e2aa016d 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -140,13 +140,11 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+ buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL);
if (!buddy->bits)
goto err_out;
- memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));
-
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index bd1338682074..08a909371b0a 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -82,12 +82,10 @@ u64 mthca_make_profile(struct mthca_dev *dev,
struct mthca_resource tmp;
int i, j;
- profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+ profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
if (!profile)
return -ENOMEM;
- memset(profile, 0, MTHCA_RES_NUM * sizeof *profile);
-
profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz;
profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz;
profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1b9477edbd7b..6b0166668269 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1028,7 +1028,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
- return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
+ return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
(int) (dev->fw_ver >> 16) & 0xffff,
(int) dev->fw_ver & 0xffff);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 7c9afde5ace5..8852ea477c21 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -584,6 +584,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
return -EINVAL;
}
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= dev->limits.pkey_table_len) {
+ mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
+ attr->pkey_index,dev->limits.pkey_table_len-1);
+ return -EINVAL;
+ }
+
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 64f70aa1b3c0..292f55be8cbd 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -75,15 +75,16 @@ static void *get_wqe(struct mthca_srq *srq, int n)
/*
* Return a pointer to the location within a WQE that we're using as a
- * link when the WQE is in the free list. We use an offset of 4
- * because in the Tavor case, posting a WQE may overwrite the first
- * four bytes of the previous WQE. The offset avoids corrupting our
- * free list if the WQE has already completed and been put on the free
- * list when we post the next WQE.
+ * link when the WQE is in the free list. We use the imm field
+ * because in the Tavor case, posting a WQE may overwrite the next
+ * segment of the previous WQE, but a receive WQE will never touch the
+ * imm field. This avoids corrupting our free list if the previous
+ * WQE has already completed and been put on the free list when we
+ * post the next WQE.
*/
static inline int *wqe_to_link(void *wqe)
{
- return (int *) (wqe + 4);
+ return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index c994a916a58a..0095acc0fbbe 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -235,6 +235,7 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
kref_put(&ah->ref, ipoib_free_ah);
}
+int ipoib_open(struct net_device *dev);
int ipoib_add_pkey_attr(struct net_device *dev);
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -267,6 +268,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
@@ -276,6 +278,7 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
unsigned int *queuelen,
unsigned int *complete,
unsigned int *send_only);
+#endif
int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
union ib_gid *mgid);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 192fef884e21..54ef2fea530f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -486,15 +486,16 @@ int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
- int attr_mask;
unsigned long begin;
struct ipoib_tx_buf *tx_req;
int i;
- /* Kill the existing QP and allocate a new one */
+ /*
+ * Move our QP to the error state and then reinitialize it
+ * once all work requests have completed or have been flushed.
+ */
qp_attr.qp_state = IB_QPS_ERR;
- attr_mask = IB_QP_STATE;
- if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
/* Wait for all sends and receives to complete */
@@ -541,8 +542,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
timeout:
qp_attr.qp_state = IB_QPS_RESET;
- attr_mask = IB_QP_STATE;
- if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
/* Wait for all AHs to be reaped */
@@ -636,7 +636,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
* Bug #2507. This implementation will probably be removed when the P_Key
* change async notification is available.
*/
-int ipoib_open(struct net_device *dev);
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cd4f42328dbe..ce0296273e76 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -356,18 +356,15 @@ static struct ipoib_path *path_rec_create(struct net_device *dev,
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
- path = kmalloc(sizeof *path, GFP_ATOMIC);
+ path = kzalloc(sizeof *path, GFP_ATOMIC);
if (!path)
return NULL;
- path->dev = dev;
- path->pathrec.dlid = 0;
- path->ah = NULL;
+ path->dev = dev;
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
- path->query = NULL;
init_completion(&path->done);
memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
@@ -551,11 +548,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipoib_neigh *neigh;
unsigned long flags;
- local_irq_save(flags);
- if (!spin_trylock(&priv->tx_lock)) {
- local_irq_restore(flags);
+ if (!spin_trylock_irqsave(&priv->tx_lock, flags))
return NETDEV_TX_LOCKED;
- }
/*
* Check if our queue is stopped. Since we have the LLTX bit
@@ -732,25 +726,21 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
/* Allocate RX/TX "rings" to hold queued skbs */
- priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
+ priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, IPOIB_RX_RING_SIZE);
goto out;
}
- memset(priv->rx_ring, 0,
- IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf));
- priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
+ priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
GFP_KERNEL);
if (!priv->tx_ring) {
printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
ca->name, IPOIB_TX_RING_SIZE);
goto out_rx_ring_cleanup;
}
- memset(priv->tx_ring, 0,
- IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf));
/* priv->tx_head & tx_tail are already 0 */
@@ -807,10 +797,6 @@ static void ipoib_setup(struct net_device *dev)
dev->watchdog_timeo = HZ;
- dev->rebuild_header = NULL;
- dev->set_mac_address = NULL;
- dev->header_cache_update = NULL;
-
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 36ce29836bf2..3ecf78a9493a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,10 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
{
struct ipoib_mcast *mcast;
- mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mcast)
return NULL;
- memset(mcast, 0, sizeof (*mcast));
-
init_completion(&mcast->done);
mcast->dev = dev;
@@ -919,6 +917,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
ipoib_mcast_start_thread(dev);
}
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
struct ipoib_mcast_iter *iter;
@@ -991,3 +991,5 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
*complete = iter->complete;
*send_only = iter->send_only;
}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b5902a7ec240..e829e10400e3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -41,7 +41,6 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr *qp_attr;
- int attr_mask;
int ret;
u16 pkey_index;
@@ -59,8 +58,7 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
/* set correct QKey for QP */
qp_attr->qkey = priv->qkey;
- attr_mask = IB_QP_QKEY;
- ret = ib_modify_qp(priv->qp, qp_attr, attr_mask);
+ ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
if (ret) {
ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
goto out;
diff --git a/drivers/infiniband/ulp/srp/Kbuild b/drivers/infiniband/ulp/srp/Kbuild
new file mode 100644
index 000000000000..a16c73c667cb
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/Kbuild
@@ -0,0 +1 @@
+obj-$(CONFIG_INFINIBAND_SRP) += ib_srp.o
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
new file mode 100644
index 000000000000..8fe3be4e9910
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -0,0 +1,11 @@
+config INFINIBAND_SRP
+ tristate "InfiniBand SCSI RDMA Protocol"
+ depends on INFINIBAND && SCSI
+ ---help---
+ Support for the SCSI RDMA Protocol over InfiniBand. This
+ allows you to access storage devices that speak SRP over
+ InfiniBand.
+
+ The SRP protocol is defined by the INCITS T10 technical
+ committee. See <http://www.t10.org/>.
+
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
new file mode 100644
index 000000000000..2687e34aa5bc
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -0,0 +1,1700 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+
+#include <asm/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/srp.h>
+
+#include <rdma/ib_cache.h>
+
+#include "ib_srp.h"
+
+#define DRV_NAME "ib_srp"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "0.2"
+#define DRV_RELDATE "November 1, 2005"
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
+ "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int topspin_workarounds = 1;
+
+module_param(topspin_workarounds, int, 0444);
+MODULE_PARM_DESC(topspin_workarounds,
+ "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
+
+static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
+
+static void srp_add_one(struct ib_device *device);
+static void srp_remove_one(struct ib_device *device);
+static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+
+static struct ib_client srp_client = {
+ .name = "srp",
+ .add = srp_add_one,
+ .remove = srp_remove_one
+};
+
+static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
+{
+ return (struct srp_target_port *) host->hostdata;
+}
+
+static const char *srp_target_info(struct Scsi_Host *host)
+{
+ return host_to_target(host)->target_name;
+}
+
+static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
+ gfp_t gfp_mask,
+ enum dma_data_direction direction)
+{
+ struct srp_iu *iu;
+
+ iu = kmalloc(sizeof *iu, gfp_mask);
+ if (!iu)
+ goto out;
+
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto out_free_iu;
+
+ iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
+ if (dma_mapping_error(iu->dma))
+ goto out_free_buf;
+
+ iu->size = size;
+ iu->direction = direction;
+
+ return iu;
+
+out_free_buf:
+ kfree(iu->buf);
+out_free_iu:
+ kfree(iu);
+out:
+ return NULL;
+}
+
+static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
+{
+ if (!iu)
+ return;
+
+ dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
+ kfree(iu->buf);
+ kfree(iu);
+}
+
+static void srp_qp_event(struct ib_event *event, void *context)
+{
+ printk(KERN_ERR PFX "QP event %d\n", event->event);
+}
+
+static int srp_init_qp(struct srp_target_port *target,
+ struct ib_qp *qp)
+{
+ struct ib_qp_attr *attr;
+ int ret;
+
+ attr = kmalloc(sizeof *attr, GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ ret = ib_find_cached_pkey(target->srp_host->dev,
+ target->srp_host->port,
+ be16_to_cpu(target->path.pkey),
+ &attr->pkey_index);
+ if (ret)
+ goto out;
+
+ attr->qp_state = IB_QPS_INIT;
+ attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ attr->port_num = target->srp_host->port;
+
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE |
+ IB_QP_PKEY_INDEX |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT);
+
+out:
+ kfree(attr);
+ return ret;
+}
+
+static int srp_create_target_ib(struct srp_target_port *target)
+{
+ struct ib_qp_init_attr *init_attr;
+ int ret;
+
+ init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
+ if (!init_attr)
+ return -ENOMEM;
+
+ target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
+ NULL, target, SRP_CQ_SIZE);
+ if (IS_ERR(target->cq)) {
+ ret = PTR_ERR(target->cq);
+ goto out;
+ }
+
+ ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+
+ init_attr->event_handler = srp_qp_event;
+ init_attr->cap.max_send_wr = SRP_SQ_SIZE;
+ init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
+ init_attr->cap.max_recv_sge = 1;
+ init_attr->cap.max_send_sge = 1;
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = IB_QPT_RC;
+ init_attr->send_cq = target->cq;
+ init_attr->recv_cq = target->cq;
+
+ target->qp = ib_create_qp(target->srp_host->pd, init_attr);
+ if (IS_ERR(target->qp)) {
+ ret = PTR_ERR(target->qp);
+ ib_destroy_cq(target->cq);
+ goto out;
+ }
+
+ ret = srp_init_qp(target, target->qp);
+ if (ret) {
+ ib_destroy_qp(target->qp);
+ ib_destroy_cq(target->cq);
+ goto out;
+ }
+
+out:
+ kfree(init_attr);
+ return ret;
+}
+
+static void srp_free_target_ib(struct srp_target_port *target)
+{
+ int i;
+
+ ib_destroy_qp(target->qp);
+ ib_destroy_cq(target->cq);
+
+ for (i = 0; i < SRP_RQ_SIZE; ++i)
+ srp_free_iu(target->srp_host, target->rx_ring[i]);
+ for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+ srp_free_iu(target->srp_host, target->tx_ring[i]);
+}
+
+static void srp_path_rec_completion(int status,
+ struct ib_sa_path_rec *pathrec,
+ void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+
+ target->status = status;
+ if (status)
+ printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
+ else
+ target->path = *pathrec;
+ complete(&target->done);
+}
+
+static int srp_lookup_path(struct srp_target_port *target)
+{
+ target->path.numb_path = 1;
+
+ init_completion(&target->done);
+
+ target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
+ target->srp_host->port,
+ &target->path,
+ IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_NUMB_PATH |
+ IB_SA_PATH_REC_PKEY,
+ SRP_PATH_REC_TIMEOUT_MS,
+ GFP_KERNEL,
+ srp_path_rec_completion,
+ target, &target->path_query);
+ if (target->path_query_id < 0)
+ return target->path_query_id;
+
+ wait_for_completion(&target->done);
+
+ if (target->status < 0)
+ printk(KERN_WARNING PFX "Path record query failed\n");
+
+ return target->status;
+}
+
+static int srp_send_req(struct srp_target_port *target)
+{
+ struct {
+ struct ib_cm_req_param param;
+ struct srp_login_req priv;
+ } *req = NULL;
+ int status;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->param.primary_path = &target->path;
+ req->param.alternate_path = NULL;
+ req->param.service_id = target->service_id;
+ req->param.qp_num = target->qp->qp_num;
+ req->param.qp_type = target->qp->qp_type;
+ req->param.private_data = &req->priv;
+ req->param.private_data_len = sizeof req->priv;
+ req->param.flow_control = 1;
+
+ get_random_bytes(&req->param.starting_psn, 4);
+ req->param.starting_psn &= 0xffffff;
+
+ /*
+ * Pick some arbitrary defaults here; we could make these
+ * module parameters if anyone cared about setting them.
+ */
+ req->param.responder_resources = 4;
+ req->param.remote_cm_response_timeout = 20;
+ req->param.local_cm_response_timeout = 20;
+ req->param.retry_count = 7;
+ req->param.rnr_retry_count = 7;
+ req->param.max_cm_retries = 15;
+
+ req->priv.opcode = SRP_LOGIN_REQ;
+ req->priv.tag = 0;
+ req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
+ req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
+ memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
+ /*
+ * Topspin/Cisco SRP targets will reject our login unless we
+ * zero out the first 8 bytes of our initiator port ID. The
+ * second 8 bytes must be our local node GUID, but we always
+ * use that anyway.
+ */
+ if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
+ printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
+ "activated for target GUID %016llx\n",
+ (unsigned long long) be64_to_cpu(target->ioc_guid));
+ memset(req->priv.initiator_port_id, 0, 8);
+ }
+ memcpy(req->priv.target_port_id, &target->id_ext, 8);
+ memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
+
+ status = ib_send_cm_req(target->cm_id, &req->param);
+
+ kfree(req);
+
+ return status;
+}
+
+static void srp_disconnect_target(struct srp_target_port *target)
+{
+ /* XXX should send SRP_I_LOGOUT request */
+
+ init_completion(&target->done);
+ ib_send_cm_dreq(target->cm_id, NULL, 0);
+ wait_for_completion(&target->done);
+}
+
+static void srp_remove_work(void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+ if (target->state != SRP_TARGET_DEAD) {
+ spin_unlock_irq(target->scsi_host->host_lock);
+ scsi_host_put(target->scsi_host);
+ return;
+ }
+ target->state = SRP_TARGET_REMOVED;
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ down(&target->srp_host->target_mutex);
+ list_del(&target->list);
+ up(&target->srp_host->target_mutex);
+
+ scsi_remove_host(target->scsi_host);
+ ib_destroy_cm_id(target->cm_id);
+ srp_free_target_ib(target);
+ scsi_host_put(target->scsi_host);
+ /* And another put to really free the target port... */
+ scsi_host_put(target->scsi_host);
+}
+
+static int srp_connect_target(struct srp_target_port *target)
+{
+ int ret;
+
+ ret = srp_lookup_path(target);
+ if (ret)
+ return ret;
+
+ while (1) {
+ init_completion(&target->done);
+ ret = srp_send_req(target);
+ if (ret)
+ return ret;
+ wait_for_completion(&target->done);
+
+ /*
+ * The CM event handling code will set status to
+ * SRP_PORT_REDIRECT if we get a port redirect REJ
+ * back, or SRP_DLID_REDIRECT if we get a lid/qp
+ * redirect REJ back.
+ */
+ switch (target->status) {
+ case 0:
+ return 0;
+
+ case SRP_PORT_REDIRECT:
+ ret = srp_lookup_path(target);
+ if (ret)
+ return ret;
+ break;
+
+ case SRP_DLID_REDIRECT:
+ break;
+
+ default:
+ return target->status;
+ }
+ }
+}
+
+static int srp_reconnect_target(struct srp_target_port *target)
+{
+ struct ib_cm_id *new_cm_id;
+ struct ib_qp_attr qp_attr;
+ struct srp_request *req;
+ struct ib_wc wc;
+ int ret;
+ int i;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+ if (target->state != SRP_TARGET_LIVE) {
+ spin_unlock_irq(target->scsi_host->host_lock);
+ return -EAGAIN;
+ }
+ target->state = SRP_TARGET_CONNECTING;
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ srp_disconnect_target(target);
+ /*
+ * Now get a new local CM ID so that we avoid confusing the
+ * target in case things are really fouled up.
+ */
+ new_cm_id = ib_create_cm_id(target->srp_host->dev,
+ srp_cm_handler, target);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ goto err;
+ }
+ ib_destroy_cm_id(target->cm_id);
+ target->cm_id = new_cm_id;
+
+ qp_attr.qp_state = IB_QPS_RESET;
+ ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
+ if (ret)
+ goto err;
+
+ ret = srp_init_qp(target, target->qp);
+ if (ret)
+ goto err;
+
+ while (ib_poll_cq(target->cq, 1, &wc) > 0)
+ ; /* nothing */
+
+ list_for_each_entry(req, &target->req_queue, list) {
+ req->scmnd->result = DID_RESET << 16;
+ req->scmnd->scsi_done(req->scmnd);
+ }
+
+ target->rx_head = 0;
+ target->tx_head = 0;
+ target->tx_tail = 0;
+ target->req_head = 0;
+ for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
+ target->req_ring[i].next = i + 1;
+ target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+ INIT_LIST_HEAD(&target->req_queue);
+
+ ret = srp_connect_target(target);
+ if (ret)
+ goto err;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+ if (target->state == SRP_TARGET_CONNECTING) {
+ ret = 0;
+ target->state = SRP_TARGET_LIVE;
+ } else
+ ret = -EAGAIN;
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ return ret;
+
+err:
+ printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);
+
+ /*
+ * We couldn't reconnect, so kill our target port off.
+ * However, we have to defer the real removal because we might
+ * be in the context of the SCSI error handler now, which
+ * would deadlock if we call scsi_remove_host().
+ */
+ spin_lock_irq(target->scsi_host->host_lock);
+ if (target->state == SRP_TARGET_CONNECTING) {
+ target->state = SRP_TARGET_DEAD;
+ INIT_WORK(&target->work, srp_remove_work, target);
+ schedule_work(&target->work);
+ }
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ return ret;
+}
+
+static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
+ struct srp_request *req)
+{
+ struct srp_cmd *cmd = req->cmd->buf;
+ int len;
+ u8 fmt;
+
+ if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
+ return sizeof (struct srp_cmd);
+
+ if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
+ scmnd->sc_data_direction != DMA_TO_DEVICE) {
+ printk(KERN_WARNING PFX "Unhandled data direction %d\n",
+ scmnd->sc_data_direction);
+ return -EINVAL;
+ }
+
+ if (scmnd->use_sg) {
+ struct scatterlist *scat = scmnd->request_buffer;
+ int n;
+ int i;
+
+ n = dma_map_sg(target->srp_host->dev->dma_device,
+ scat, scmnd->use_sg, scmnd->sc_data_direction);
+
+ if (n == 1) {
+ struct srp_direct_buf *buf = (void *) cmd->add_data;
+
+ fmt = SRP_DATA_DESC_DIRECT;
+
+ buf->va = cpu_to_be64(sg_dma_address(scat));
+ buf->key = cpu_to_be32(target->srp_host->mr->rkey);
+ buf->len = cpu_to_be32(sg_dma_len(scat));
+
+ len = sizeof (struct srp_cmd) +
+ sizeof (struct srp_direct_buf);
+ } else {
+ struct srp_indirect_buf *buf = (void *) cmd->add_data;
+ u32 datalen = 0;
+
+ fmt = SRP_DATA_DESC_INDIRECT;
+
+ if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+ cmd->data_out_desc_cnt = n;
+ else
+ cmd->data_in_desc_cnt = n;
+
+ buf->table_desc.va = cpu_to_be64(req->cmd->dma +
+ sizeof *cmd +
+ sizeof *buf);
+ buf->table_desc.key =
+ cpu_to_be32(target->srp_host->mr->rkey);
+ buf->table_desc.len =
+ cpu_to_be32(n * sizeof (struct srp_direct_buf));
+
+ for (i = 0; i < n; ++i) {
+ buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i]));
+ buf->desc_list[i].key =
+ cpu_to_be32(target->srp_host->mr->rkey);
+ buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
+
+ datalen += sg_dma_len(&scat[i]);
+ }
+
+ buf->len = cpu_to_be32(datalen);
+
+ len = sizeof (struct srp_cmd) +
+ sizeof (struct srp_indirect_buf) +
+ n * sizeof (struct srp_direct_buf);
+ }
+ } else {
+ struct srp_direct_buf *buf = (void *) cmd->add_data;
+ dma_addr_t dma;
+
+ dma = dma_map_single(target->srp_host->dev->dma_device,
+ scmnd->request_buffer, scmnd->request_bufflen,
+ scmnd->sc_data_direction);
+ if (dma_mapping_error(dma)) {
+ printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
+ scmnd->request_buffer, (int) scmnd->request_bufflen,
+ scmnd->sc_data_direction);
+ return -EINVAL;
+ }
+
+ pci_unmap_addr_set(req, direct_mapping, dma);
+
+ buf->va = cpu_to_be64(dma);
+ buf->key = cpu_to_be32(target->srp_host->mr->rkey);
+ buf->len = cpu_to_be32(scmnd->request_bufflen);
+
+ fmt = SRP_DATA_DESC_DIRECT;
+
+ len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
+ }
+
+ if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+ cmd->buf_fmt = fmt << 4;
+ else
+ cmd->buf_fmt = fmt;
+
+
+ return len;
+}
+
+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ struct srp_target_port *target,
+ struct srp_request *req)
+{
+ if (!scmnd->request_buffer ||
+ (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+ scmnd->sc_data_direction != DMA_FROM_DEVICE))
+ return;
+
+ if (scmnd->use_sg)
+ dma_unmap_sg(target->srp_host->dev->dma_device,
+ (struct scatterlist *) scmnd->request_buffer,
+ scmnd->use_sg, scmnd->sc_data_direction);
+ else
+ dma_unmap_single(target->srp_host->dev->dma_device,
+ pci_unmap_addr(req, direct_mapping),
+ scmnd->request_bufflen,
+ scmnd->sc_data_direction);
+}
+
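+/*
+ * Handle an SRP_RSP IU: update the request-limit credit from
+ * req_lim_delta, then either complete the task management request the
+ * tag refers to, or copy status/sense/residual back into the SCSI
+ * command, unmap its data and return the request slot to the free list.
+ */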
+static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+{
+ struct srp_request *req;
+ struct scsi_cmnd *scmnd;
+ unsigned long flags;
+ s32 delta;
+
+ delta = (s32) be32_to_cpu(rsp->req_lim_delta);
+
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+
+ target->req_lim += delta;
+
+ req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
+
+ if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
+ if (be32_to_cpu(rsp->resp_data_len) < 4)
+ req->tsk_status = -1;
+ else
+ req->tsk_status = rsp->data[3];
+ complete(&req->done);
+ } else {
+ scmnd = req->scmnd;
+ if (!scmnd) {
+ printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
+ (unsigned long long) rsp->tag);
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+ return;
+ }
+
+ scmnd->result = rsp->status;
+
+ if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
+ memcpy(scmnd->sense_buffer, rsp->data +
+ be32_to_cpu(rsp->resp_data_len),
+ min_t(int, be32_to_cpu(rsp->sense_data_len),
+ SCSI_SENSE_BUFFERSIZE));
+ }
+
+ if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
+ scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
+ else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
+ scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
+
+ srp_unmap_data(scmnd, target, req);
+
+ if (!req->tsk_mgmt) {
+ req->scmnd = NULL;
+ scmnd->host_scribble = (void *) -1L;
+ scmnd->scsi_done(scmnd);
+
+ list_del(&req->list);
+ req->next = target->req_head;
+ target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
+ } else
+ req->cmd_done = 1;
+ }
+
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+}
+
+static void srp_reconnect_work(void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+
+ srp_reconnect_target(target);
+}
+
+static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
+{
+ struct srp_iu *iu;
+ u8 opcode;
+
+ iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+
+ dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
+ target->max_ti_iu_len, DMA_FROM_DEVICE);
+
+ opcode = *(u8 *) iu->buf;
+
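+ /* Compile-time-disabled hex dump of the received IU, for debugging */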
+ if (0) {
+ int i;
+
+ printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);
+
+ for (i = 0; i < wc->byte_len; ++i) {
+ if (i % 8 == 0)
+ printk(KERN_ERR " [%02x] ", i);
+ printk(" %02x", ((u8 *) iu->buf)[i]);
+ if ((i + 1) % 8 == 0)
+ printk("\n");
+ }
+
+ if (wc->byte_len % 8)
+ printk("\n");
+ }
+
+ switch (opcode) {
+ case SRP_RSP:
+ srp_process_rsp(target, iu->buf);
+ break;
+
+ case SRP_T_LOGOUT:
+ /* XXX Handle target logout */
+ printk(KERN_WARNING PFX "Got target logout request\n");
+ break;
+
+ default:
+ printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
+ break;
+ }
+
+ dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
+ target->max_ti_iu_len, DMA_FROM_DEVICE);
+}
+
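+/*
+ * Completion handler for the target's CQ: receive completions are
+ * handed to srp_handle_recv(), send completions just advance tx_tail.
+ * On a completion error, reconnect work is scheduled if the target is
+ * still live.
+ */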
+static void srp_completion(struct ib_cq *cq, void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+ struct ib_wc wc;
+ unsigned long flags;
+
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ if (wc.status) {
+ printk(KERN_ERR PFX "failed %s status %d\n",
+ wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+ wc.status);
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ if (target->state == SRP_TARGET_LIVE)
+ schedule_work(&target->work);
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+ break;
+ }
+
+ if (wc.wr_id & SRP_OP_RECV)
+ srp_handle_recv(target, &wc);
+ else
+ ++target->tx_tail;
+ }
+}
+
+static int __srp_post_recv(struct srp_target_port *target)
+{
+ struct srp_iu *iu;
+ struct ib_sge list;
+ struct ib_recv_wr wr, *bad_wr;
+ unsigned int next;
+ int ret;
+
+ next = target->rx_head & (SRP_RQ_SIZE - 1);
+ wr.wr_id = next | SRP_OP_RECV;
+ iu = target->rx_ring[next];
+
+ list.addr = iu->dma;
+ list.length = iu->size;
+ list.lkey = target->srp_host->mr->lkey;
+
+ wr.next = NULL;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ ret = ib_post_recv(target->qp, &wr, &bad_wr);
+ if (!ret)
+ ++target->rx_head;
+
+ return ret;
+}
+
+static int srp_post_recv(struct srp_target_port *target)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ ret = __srp_post_recv(target);
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
+{
+ if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+ return NULL;
+
+ return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+ struct srp_iu *iu, int len)
+{
+ struct ib_sge list;
+ struct ib_send_wr wr, *bad_wr;
+ int ret = 0;
+
+ if (target->req_lim < 1) {
+ printk(KERN_ERR PFX "Target has req_lim %d\n", target->req_lim);
+ return -EAGAIN;
+ }
+
+ list.addr = iu->dma;
+ list.length = len;
+ list.lkey = target->srp_host->mr->lkey;
+
+ wr.next = NULL;
+ wr.wr_id = target->tx_head & SRP_SQ_SIZE;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+ if (!ret) {
+ ++target->tx_head;
+ --target->req_lim;
+ }
+
+ return ret;
+}
+
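+/*
+ * queuecommand: grab a free TX IU and request slot, build the SRP_CMD
+ * IU (its tag is the request ring index), map the data buffer, post a
+ * receive for the response and then post the send.  Returns
+ * SCSI_MLQUEUE_HOST_BUSY when the command cannot be posted right now
+ * (still connecting, no free IU, or a post failure).
+ */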
+static int srp_queuecommand(struct scsi_cmnd *scmnd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req;
+ struct srp_iu *iu;
+ struct srp_cmd *cmd;
+ long req_index;
+ int len;
+
+ if (target->state == SRP_TARGET_CONNECTING)
+ goto err;
+
+ if (target->state == SRP_TARGET_DEAD ||
+ target->state == SRP_TARGET_REMOVED) {
+ scmnd->result = DID_BAD_TARGET << 16;
+ done(scmnd);
+ return 0;
+ }
+
+ iu = __srp_get_tx_iu(target);
+ if (!iu)
+ goto err;
+
+ dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
+ SRP_MAX_IU_LEN, DMA_TO_DEVICE);
+
+ req_index = target->req_head;
+
+ scmnd->scsi_done = done;
+ scmnd->result = 0;
+ scmnd->host_scribble = (void *) req_index;
+
+ cmd = iu->buf;
+ memset(cmd, 0, sizeof *cmd);
+
+ cmd->opcode = SRP_CMD;
+ cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
+ cmd->tag = req_index;
+ memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
+
+ req = &target->req_ring[req_index];
+
+ req->scmnd = scmnd;
+ req->cmd = iu;
+ req->cmd_done = 0;
+ req->tsk_mgmt = NULL;
+
+ len = srp_map_data(scmnd, target, req);
+ if (len < 0) {
+ printk(KERN_ERR PFX "Failed to map data\n");
+ goto err;
+ }
+
+ if (__srp_post_recv(target)) {
+ printk(KERN_ERR PFX "Recv failed\n");
+ goto err_unmap;
+ }
+
+ dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
+ SRP_MAX_IU_LEN, DMA_TO_DEVICE);
+
+ if (__srp_post_send(target, iu, len)) {
+ printk(KERN_ERR PFX "Send failed\n");
+ goto err_unmap;
+ }
+
+ target->req_head = req->next;
+ list_add_tail(&req->list, &target->req_queue);
+
+ return 0;
+
+err_unmap:
+ srp_unmap_data(scmnd, target, req);
+
+err:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static int srp_alloc_iu_bufs(struct srp_target_port *target)
+{
+ int i;
+
+ for (i = 0; i < SRP_RQ_SIZE; ++i) {
+ target->rx_ring[i] = srp_alloc_iu(target->srp_host,
+ target->max_ti_iu_len,
+ GFP_KERNEL, DMA_FROM_DEVICE);
+ if (!target->rx_ring[i])
+ goto err;
+ }
+
+ for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+ target->tx_ring[i] = srp_alloc_iu(target->srp_host,
+ SRP_MAX_IU_LEN,
+ GFP_KERNEL, DMA_TO_DEVICE);
+ if (!target->tx_ring[i])
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < SRP_RQ_SIZE; ++i) {
+ srp_free_iu(target->srp_host, target->rx_ring[i]);
+ target->rx_ring[i] = NULL;
+ }
+
+ for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+ srp_free_iu(target->srp_host, target->tx_ring[i]);
+ target->tx_ring[i] = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event,
+ struct srp_target_port *target)
+{
+ struct ib_class_port_info *cpi;
+ int opcode;
+
+ switch (event->param.rej_rcvd.reason) {
+ case IB_CM_REJ_PORT_CM_REDIRECT:
+ cpi = event->param.rej_rcvd.ari;
+ target->path.dlid = cpi->redirect_lid;
+ target->path.pkey = cpi->redirect_pkey;
+ cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
+ memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
+
+ target->status = target->path.dlid ?
+ SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
+ break;
+
+ case IB_CM_REJ_PORT_REDIRECT:
+ if (topspin_workarounds &&
+ !memcmp(&target->ioc_guid, topspin_oui, 3)) {
+ /*
+ * Topspin/Cisco SRP gateways incorrectly send
+ * reject reason code 25 when they mean 24
+ * (port redirect).
+ */
+ memcpy(target->path.dgid.raw,
+ event->param.rej_rcvd.ari, 16);
+
+ printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
+ (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
+ (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
+
+ target->status = SRP_PORT_REDIRECT;
+ } else {
+ printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
+ target->status = -ECONNRESET;
+ }
+ break;
+
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ target->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej = event->private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ printk(KERN_WARNING PFX
+ "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ printk(KERN_WARNING PFX
+ "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ } else
+ printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
+ " opcode 0x%02x\n", opcode);
+ target->status = -ECONNRESET;
+ break;
+
+ default:
+ printk(KERN_WARNING " REJ reason 0x%x\n",
+ event->param.rej_rcvd.reason);
+ target->status = -ECONNRESET;
+ }
+}
+
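+/*
+ * Connection manager callback: on a REP, parse the SRP_LOGIN_RSP,
+ * allocate the IU rings and move the QP through RTR/RTS before sending
+ * the RTU; REJs are decoded by srp_cm_rej_handler().  Events that end
+ * a connection attempt complete target->done so the connect path can
+ * pick up target->status.
+ */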
+static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
+ struct srp_target_port *target = cm_id->context;
+ struct ib_qp_attr *qp_attr = NULL;
+ int attr_mask = 0;
+ int comp = 0;
+ int opcode = 0;
+
+ switch (event->event) {
+ case IB_CM_REQ_ERROR:
+ printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
+ comp = 1;
+ target->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REP_RECEIVED:
+ comp = 1;
+ opcode = *(u8 *) event->private_data;
+
+ if (opcode == SRP_LOGIN_RSP) {
+ struct srp_login_rsp *rsp = event->private_data;
+
+ target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
+ target->req_lim = be32_to_cpu(rsp->req_lim_delta);
+
+ target->scsi_host->can_queue = min(target->req_lim,
+ target->scsi_host->can_queue);
+ } else {
+ printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
+ target->status = -ECONNRESET;
+ break;
+ }
+
+ target->status = srp_alloc_iu_bufs(target);
+ if (target->status)
+ break;
+
+ qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+ if (!qp_attr) {
+ target->status = -ENOMEM;
+ break;
+ }
+
+ qp_attr->qp_state = IB_QPS_RTR;
+ target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (target->status)
+ break;
+
+ target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
+ if (target->status)
+ break;
+
+ target->status = srp_post_recv(target);
+ if (target->status)
+ break;
+
+ qp_attr->qp_state = IB_QPS_RTS;
+ target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (target->status)
+ break;
+
+ target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
+ if (target->status)
+ break;
+
+ target->status = ib_send_cm_rtu(cm_id, NULL, 0);
+ if (target->status)
+ break;
+
+ break;
+
+ case IB_CM_REJ_RECEIVED:
+ printk(KERN_DEBUG PFX "REJ received\n");
+ comp = 1;
+
+ srp_cm_rej_handler(cm_id, event, target);
+ break;
+
+ case IB_CM_MRA_RECEIVED:
+ printk(KERN_ERR PFX "MRA received\n");
+ break;
+
+ case IB_CM_DREP_RECEIVED:
+ break;
+
+ case IB_CM_TIMEWAIT_EXIT:
+ printk(KERN_ERR PFX "connection closed\n");
+
+ comp = 1;
+ target->status = 0;
+ break;
+
+ default:
+ printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&target->done);
+
+ kfree(qp_attr);
+
+ return 0;
+}
+
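+/*
+ * Send an SRP task management IU (abort task or LUN reset) for the
+ * request owning this SCSI command and wait up to SRP_ABORT_TIMEOUT_MS
+ * for the response before deciding between SUCCESS and FAILED.
+ */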
+static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
+{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req;
+ struct srp_iu *iu;
+ struct srp_tsk_mgmt *tsk_mgmt;
+ int req_index;
+ int ret = FAILED;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+
+ if (scmnd->host_scribble == (void *) -1L)
+ goto out;
+
+ req_index = (long) scmnd->host_scribble;
+ printk(KERN_ERR "Abort for req_index %d\n", req_index);
+
+ req = &target->req_ring[req_index];
+ init_completion(&req->done);
+
+ iu = __srp_get_tx_iu(target);
+ if (!iu)
+ goto out;
+
+ tsk_mgmt = iu->buf;
+ memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
+
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
+ tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->tsk_mgmt_func = func;
+ tsk_mgmt->task_tag = req_index;
+
+ if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
+ goto out;
+
+ req->tsk_mgmt = iu;
+
+ spin_unlock_irq(target->scsi_host->host_lock);
+ if (!wait_for_completion_timeout(&req->done,
+ msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+ return FAILED;
+ spin_lock_irq(target->scsi_host->host_lock);
+
+ if (req->cmd_done) {
+ list_del(&req->list);
+ req->next = target->req_head;
+ target->req_head = req_index;
+
+ scmnd->scsi_done(scmnd);
+ } else if (!req->tsk_status) {
+ scmnd->result = DID_ABORT << 16;
+ ret = SUCCESS;
+ }
+
+out:
+ spin_unlock_irq(target->scsi_host->host_lock);
+ return ret;
+}
+
+static int srp_abort(struct scsi_cmnd *scmnd)
+{
+ printk(KERN_ERR "SRP abort called\n");
+
+ return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
+}
+
+static int srp_reset_device(struct scsi_cmnd *scmnd)
+{
+ printk(KERN_ERR "SRP reset_device called\n");
+
+ return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
+}
+
+static int srp_reset_host(struct scsi_cmnd *scmnd)
+{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ int ret = FAILED;
+
+ printk(KERN_ERR PFX "SRP reset_host called\n");
+
+ if (!srp_reconnect_target(target))
+ ret = SUCCESS;
+
+ return ret;
+}
+
+static struct scsi_host_template srp_template = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .info = srp_target_info,
+ .queuecommand = srp_queuecommand,
+ .eh_abort_handler = srp_abort,
+ .eh_device_reset_handler = srp_reset_device,
+ .eh_host_reset_handler = srp_reset_host,
+ .can_queue = SRP_SQ_SIZE,
+ .this_id = -1,
+ .sg_tablesize = SRP_MAX_INDIRECT,
+ .cmd_per_lun = SRP_SQ_SIZE,
+ .use_clustering = ENABLE_CLUSTERING
+};
+
+static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+{
+ sprintf(target->target_name, "SRP.T10:%016llX",
+ (unsigned long long) be64_to_cpu(target->id_ext));
+
+ if (scsi_add_host(target->scsi_host, host->dev->dma_device))
+ return -ENODEV;
+
+ down(&host->target_mutex);
+ list_add_tail(&target->list, &host->target_list);
+ up(&host->target_mutex);
+
+ target->state = SRP_TARGET_LIVE;
+
+ /* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */
+ scsi_scan_target(&target->scsi_host->shost_gendev,
+ 0, target->scsi_id, ~0, 0);
+
+ return 0;
+}
+
+static void srp_release_class_dev(struct class_device *class_dev)
+{
+ struct srp_host *host =
+ container_of(class_dev, struct srp_host, class_dev);
+
+ complete(&host->released);
+}
+
+static struct class srp_class = {
+ .name = "infiniband_srp",
+ .release = srp_release_class_dev
+};
+
+/*
+ * Target ports are added by writing
+ *
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
+ * pkey=<P_Key>,service_id=<service ID>
+ *
+ * to the add_target sysfs attribute.
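+ *
+ * For example (hypothetical identifiers; the whole string must be
+ * passed as a single line with no embedded whitespace):
+ *
+ *   echo id_ext=200500a0b8110001,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 \
+ *       > /sys/class/infiniband_srp/srp-mthca0-1/add_target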
+ */
+enum {
+ SRP_OPT_ERR = 0,
+ SRP_OPT_ID_EXT = 1 << 0,
+ SRP_OPT_IOC_GUID = 1 << 1,
+ SRP_OPT_DGID = 1 << 2,
+ SRP_OPT_PKEY = 1 << 3,
+ SRP_OPT_SERVICE_ID = 1 << 4,
+ SRP_OPT_MAX_SECT = 1 << 5,
+ SRP_OPT_ALL = (SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_DGID |
+ SRP_OPT_PKEY |
+ SRP_OPT_SERVICE_ID),
+};
+
+static match_table_t srp_opt_tokens = {
+ { SRP_OPT_ID_EXT, "id_ext=%s" },
+ { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
+ { SRP_OPT_DGID, "dgid=%s" },
+ { SRP_OPT_PKEY, "pkey=%x" },
+ { SRP_OPT_SERVICE_ID, "service_id=%s" },
+ { SRP_OPT_MAX_SECT, "max_sect=%d" },
+ { SRP_OPT_ERR, NULL }
+};
+
+static int srp_parse_options(const char *buf, struct srp_target_port *target)
+{
+ char *options, *sep_opt;
+ char *p;
+ char dgid[3];
+ substring_t args[MAX_OPT_ARGS];
+ int opt_mask = 0;
+ int token;
+ int ret = -EINVAL;
+ int i;
+
+ options = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ sep_opt = options;
+ while ((p = strsep(&sep_opt, ",")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, srp_opt_tokens, args);
+ opt_mask |= token;
+
+ switch (token) {
+ case SRP_OPT_ID_EXT:
+ p = match_strdup(args);
+ target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ kfree(p);
+ break;
+
+ case SRP_OPT_IOC_GUID:
+ p = match_strdup(args);
+ target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ kfree(p);
+ break;
+
+ case SRP_OPT_DGID:
+ p = match_strdup(args);
+ if (strlen(p) != 32) {
+ printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+
+ for (i = 0; i < 16; ++i) {
+ strlcpy(dgid, p + i * 2, 3);
+ target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
+ }
+ kfree(p);
+ break;
+
+ case SRP_OPT_PKEY:
+ if (match_hex(args, &token)) {
+ printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
+ goto out;
+ }
+ target->path.pkey = cpu_to_be16(token);
+ break;
+
+ case SRP_OPT_SERVICE_ID:
+ p = match_strdup(args);
+ target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ kfree(p);
+ break;
+
+ case SRP_OPT_MAX_SECT:
+ if (match_int(args, &token)) {
+ printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
+ goto out;
+ }
+ target->scsi_host->max_sectors = token;
+ break;
+
+ default:
+ printk(KERN_WARNING PFX "unknown parameter or missing value "
+ "'%s' in target creation request\n", p);
+ goto out;
+ }
+ }
+
+ if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
+ ret = 0;
+ else
+ for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
+ if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
+ !(srp_opt_tokens[i].token & opt_mask))
+ printk(KERN_WARNING PFX "target creation request is "
+ "missing parameter '%s'\n",
+ srp_opt_tokens[i].pattern);
+
+out:
+ kfree(options);
+ return ret;
+}
+
+static ssize_t srp_create_target(struct class_device *class_dev,
+ const char *buf, size_t count)
+{
+ struct srp_host *host =
+ container_of(class_dev, struct srp_host, class_dev);
+ struct Scsi_Host *target_host;
+ struct srp_target_port *target;
+ int ret;
+ int i;
+
+ target_host = scsi_host_alloc(&srp_template,
+ sizeof (struct srp_target_port));
+ if (!target_host)
+ return -ENOMEM;
+
+ target = host_to_target(target_host);
+ memset(target, 0, sizeof *target);
+
+ target->scsi_host = target_host;
+ target->srp_host = host;
+
+ INIT_WORK(&target->work, srp_reconnect_work, target);
+
+ for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
+ target->req_ring[i].next = i + 1;
+ target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+ INIT_LIST_HEAD(&target->req_queue);
+
+ ret = srp_parse_options(buf, target);
+ if (ret)
+ goto err;
+
+ ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);
+
+ printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
+ "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ (unsigned long long) be64_to_cpu(target->id_ext),
+ (unsigned long long) be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->path.pkey),
+ (unsigned long long) be64_to_cpu(target->service_id),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));
+
+ ret = srp_create_target_ib(target);
+ if (ret)
+ goto err;
+
+ target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);
+ if (IS_ERR(target->cm_id)) {
+ ret = PTR_ERR(target->cm_id);
+ goto err_free;
+ }
+
+ ret = srp_connect_target(target);
+ if (ret) {
+ printk(KERN_ERR PFX "Connection failed\n");
+ goto err_cm_id;
+ }
+
+ ret = srp_add_target(host, target);
+ if (ret)
+ goto err_disconnect;
+
+ return count;
+
+err_disconnect:
+ srp_disconnect_target(target);
+
+err_cm_id:
+ ib_destroy_cm_id(target->cm_id);
+
+err_free:
+ srp_free_target_ib(target);
+
+err:
+ scsi_host_put(target_host);
+
+ return ret;
+}
+
+static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
+
+static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+{
+ struct srp_host *host =
+ container_of(class_dev, struct srp_host, class_dev);
+
+ return sprintf(buf, "%s\n", host->dev->name);
+}
+
+static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+
+static ssize_t show_port(struct class_device *class_dev, char *buf)
+{
+ struct srp_host *host =
+ container_of(class_dev, struct srp_host, class_dev);
+
+ return sprintf(buf, "%d\n", host->port);
+}
+
+static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
+
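+/*
+ * Create one srp_host per (HCA, port): allocate a PD and a DMA MR,
+ * register a class device under infiniband_srp and create its
+ * add_target, ibdev and port attributes.
+ */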
+static struct srp_host *srp_add_port(struct ib_device *device,
+ __be64 node_guid, u8 port)
+{
+ struct srp_host *host;
+
+ host = kzalloc(sizeof *host, GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ INIT_LIST_HEAD(&host->target_list);
+ init_MUTEX(&host->target_mutex);
+ init_completion(&host->released);
+ host->dev = device;
+ host->port = port;
+
+ host->initiator_port_id[7] = port;
+ memcpy(host->initiator_port_id + 8, &node_guid, 8);
+
+ host->pd = ib_alloc_pd(device);
+ if (IS_ERR(host->pd))
+ goto err_free;
+
+ host->mr = ib_get_dma_mr(host->pd,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ if (IS_ERR(host->mr))
+ goto err_pd;
+
+ host->class_dev.class = &srp_class;
+ host->class_dev.dev = device->dma_device;
+ snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
+ device->name, port);
+
+ if (class_device_register(&host->class_dev))
+ goto err_mr;
+ if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
+ goto err_class;
+ if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
+ goto err_class;
+ if (class_device_create_file(&host->class_dev, &class_device_attr_port))
+ goto err_class;
+
+ return host;
+
+err_class:
+ class_device_unregister(&host->class_dev);
+
+err_mr:
+ ib_dereg_mr(host->mr);
+
+err_pd:
+ ib_dealloc_pd(host->pd);
+
+err_free:
+ kfree(host);
+
+ return NULL;
+}
+
+static void srp_add_one(struct ib_device *device)
+{
+ struct list_head *dev_list;
+ struct srp_host *host;
+ struct ib_device_attr *dev_attr;
+ int s, e, p;
+
+ dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
+ if (!dev_attr)
+ return;
+
+ if (ib_query_device(device, dev_attr)) {
+ printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
+ device->name);
+ goto out;
+ }
+
+ dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
+ if (!dev_list)
+ goto out;
+
+ INIT_LIST_HEAD(dev_list);
+
+ if (device->node_type == IB_NODE_SWITCH) {
+ s = 0;
+ e = 0;
+ } else {
+ s = 1;
+ e = device->phys_port_cnt;
+ }
+
+ for (p = s; p <= e; ++p) {
+ host = srp_add_port(device, dev_attr->node_guid, p);
+ if (host)
+ list_add_tail(&host->list, dev_list);
+ }
+
+ ib_set_client_data(device, &srp_client, dev_list);
+
+out:
+ kfree(dev_attr);
+}
+
+static void srp_remove_one(struct ib_device *device)
+{
+ struct list_head *dev_list;
+ struct srp_host *host, *tmp_host;
+ LIST_HEAD(target_list);
+ struct srp_target_port *target, *tmp_target;
+ unsigned long flags;
+
+ dev_list = ib_get_client_data(device, &srp_client);
+
+ list_for_each_entry_safe(host, tmp_host, dev_list, list) {
+ class_device_unregister(&host->class_dev);
+ /*
+ * Wait for the sysfs entry to go away, so that no new
+ * target ports can be created.
+ */
+ wait_for_completion(&host->released);
+
+ /*
+ * Mark all target ports as removed, so we stop queueing
+ * commands and don't try to reconnect.
+ */
+ down(&host->target_mutex);
+ list_for_each_entry_safe(target, tmp_target,
+ &host->target_list, list) {
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ if (target->state != SRP_TARGET_REMOVED)
+ target->state = SRP_TARGET_REMOVED;
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+ }
+ up(&host->target_mutex);
+
+ /*
+ * Wait for any reconnection tasks that may have
+ * started before we marked our target ports as
+ * removed, and any target port removal tasks.
+ */
+ flush_scheduled_work();
+
+ list_for_each_entry_safe(target, tmp_target,
+ &host->target_list, list) {
+ scsi_remove_host(target->scsi_host);
+ srp_disconnect_target(target);
+ ib_destroy_cm_id(target->cm_id);
+ srp_free_target_ib(target);
+ scsi_host_put(target->scsi_host);
+ }
+
+ ib_dereg_mr(host->mr);
+ ib_dealloc_pd(host->pd);
+ kfree(host);
+ }
+
+ kfree(dev_list);
+}
+
+static int __init srp_init_module(void)
+{
+ int ret;
+
+ ret = class_register(&srp_class);
+ if (ret) {
+ printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
+ return ret;
+ }
+
+ ret = ib_register_client(&srp_client);
+ if (ret) {
+ printk(KERN_ERR PFX "couldn't register IB client\n");
+ class_unregister(&srp_class);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit srp_cleanup_module(void)
+{
+ ib_unregister_client(&srp_client);
+ class_unregister(&srp_class);
+}
+
+module_init(srp_init_module);
+module_exit(srp_cleanup_module);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
new file mode 100644
index 000000000000..4fec28a71367
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_srp.h 3932 2005-11-01 17:19:29Z roland $
+ */
+
+#ifndef IB_SRP_H
+#define IB_SRP_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <asm/semaphore.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+
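+/*
+ * Ring sizes and protocol constants: SRP_RQ_SIZE receive IUs,
+ * SRP_SQ_SIZE request slots (each request's tag is its ring index),
+ * and SRP_TAG_TSK_MGMT OR'd into the tag to mark task management IUs.
+ */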
+enum {
+ SRP_PATH_REC_TIMEOUT_MS = 1000,
+ SRP_ABORT_TIMEOUT_MS = 5000,
+
+ SRP_PORT_REDIRECT = 1,
+ SRP_DLID_REDIRECT = 2,
+
+ SRP_MAX_IU_LEN = 256,
+
+ SRP_RQ_SHIFT = 6,
+ SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
+ SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
+ SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
+
+ SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1)
+};
+
+#define SRP_OP_RECV (1 << 31)
+#define SRP_MAX_INDIRECT ((SRP_MAX_IU_LEN - \
+ sizeof (struct srp_cmd) - \
+ sizeof (struct srp_indirect_buf)) / 16)
+
+enum srp_target_state {
+ SRP_TARGET_LIVE,
+ SRP_TARGET_CONNECTING,
+ SRP_TARGET_DEAD,
+ SRP_TARGET_REMOVED
+};
+
+struct srp_host {
+ u8 initiator_port_id[16];
+ struct ib_device *dev;
+ u8 port;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct class_device class_dev;
+ struct list_head target_list;
+ struct semaphore target_mutex;
+ struct completion released;
+ struct list_head list;
+};
+
+struct srp_request {
+ struct list_head list;
+ struct scsi_cmnd *scmnd;
+ struct srp_iu *cmd;
+ struct srp_iu *tsk_mgmt;
+ DECLARE_PCI_UNMAP_ADDR(direct_mapping)
+ struct completion done;
+ short next;
+ u8 cmd_done;
+ u8 tsk_status;
+};
+
+struct srp_target_port {
+ __be64 id_ext;
+ __be64 ioc_guid;
+ __be64 service_id;
+ struct srp_host *srp_host;
+ struct Scsi_Host *scsi_host;
+ char target_name[32];
+ unsigned int scsi_id;
+
+ struct ib_sa_path_rec path;
+ struct ib_sa_query *path_query;
+ int path_query_id;
+
+ struct ib_cm_id *cm_id;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+
+ int max_ti_iu_len;
+ s32 req_lim;
+
+ unsigned rx_head;
+ struct srp_iu *rx_ring[SRP_RQ_SIZE];
+
+ unsigned tx_head;
+ unsigned tx_tail;
+ struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
+
+ int req_head;
+ struct list_head req_queue;
+ struct srp_request req_ring[SRP_SQ_SIZE];
+
+ struct work_struct work;
+
+ struct list_head list;
+ struct completion done;
+ int status;
+ enum srp_target_state state;
+};
+
+struct srp_iu {
+ dma_addr_t dma;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+#endif /* IB_SRP_H */
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 91920a1140fa..9bc6cc6e3845 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -155,10 +155,10 @@ static spinlock_t pmu_lock;
static u8 pmu_intr_mask;
static int pmu_version;
static int drop_interrupts;
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
static int option_lid_wakeup = 1;
static int sleep_in_progress;
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM && CONFIG_PPC32 */
static unsigned long async_req_locks;
static unsigned int pmu_irq_stats[11];
@@ -865,7 +865,7 @@ proc_read_options(char *page, char **start, off_t off,
{
char *p = page;
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
if (pmu_kind == PMU_KEYLARGO_BASED &&
pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup);
@@ -906,7 +906,7 @@ proc_write_options(struct file *file, const char __user *buffer,
*(val++) = 0;
while(*val == ' ')
val++;
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
if (pmu_kind == PMU_KEYLARGO_BASED &&
pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
if (!strcmp(label, "lid_wakeup"))
@@ -2063,6 +2063,9 @@ pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n)
n->list.next = NULL;
return 0;
}
+#endif /* CONFIG_PM */
+
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/* Sleep is broadcast last-to-first */
static int
@@ -2687,7 +2690,7 @@ powerbook_sleep_3400(void)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM && CONFIG_PPC32 */
/*
* Support for /dev/pmu device
@@ -2871,7 +2874,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
int error = -EINVAL;
switch (cmd) {
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
case PMU_IOC_SLEEP:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -2899,7 +2902,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
return put_user(0, argp);
else
return put_user(1, argp);
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM && CONFIG_PPC32 */
#ifdef CONFIG_PMAC_BACKLIGHT
/* Backlight should have its own device or go via
@@ -3047,7 +3050,7 @@ pmu_polled_request(struct adb_request *req)
* to do suspend-to-disk.
*/
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
static int pmu_sys_suspended = 0;
@@ -3082,7 +3085,7 @@ static int pmu_sys_resume(struct sys_device *sysdev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM && CONFIG_PPC32 */
static struct sysdev_class pmu_sysclass = {
set_kset_name("pmu"),
@@ -3094,10 +3097,10 @@ static struct sys_device device_pmu = {
};
static struct sysdev_driver driver_pmu = {
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
.suspend = &pmu_sys_suspend,
.resume = &pmu_sys_resume,
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM && CONFIG_PPC32 */
};
static int __init init_pmu_sysfs(void)
@@ -3135,12 +3138,12 @@ EXPORT_SYMBOL(pmu_i2c_combined_read);
EXPORT_SYMBOL(pmu_i2c_stdsub_write);
EXPORT_SYMBOL(pmu_i2c_simple_read);
EXPORT_SYMBOL(pmu_i2c_simple_write);
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(pmu_register_sleep_notifier);
EXPORT_SYMBOL(pmu_unregister_sleep_notifier);
EXPORT_SYMBOL(pmu_enable_irled);
EXPORT_SYMBOL(pmu_battery_count);
EXPORT_SYMBOL(pmu_batteries);
EXPORT_SYMBOL(pmu_power_flags);
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM && CONFIG_PPC32 */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6d4f9ceb0a32..57edae4790e8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1374,7 +1374,7 @@ config FORCEDETH
config CS89x0
tristate "CS89x0 support"
- depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105 || MACH_MP1000
+ depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index bfdae10036ed..a6078ad9b654 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -182,10 +182,6 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0};
static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
-#elif defined(CONFIG_MACH_MP1000)
-#include <asm/arch/mp1000-seprom.h>
-static unsigned int netcard_portlist[] __initdata = {MP1000_EIO_BASE+0x300, 0};
-static unsigned int cs8900_irq_map[] = {IRQ_EINT3,0,0,0};
#else
static unsigned int netcard_portlist[] __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
@@ -594,10 +590,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
cnt -= j;
}
} else
-#elif defined(CONFIG_MACH_MP1000)
- if (1) {
- memcpy(dev->dev_addr, get_eeprom_mac_address(), ETH_ALEN);
- } else
#endif
if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
@@ -657,10 +649,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
if (1) {
printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n");
} else
-#elif defined(CONFIG_MACH_MP1000)
- if (1) {
- lp->force |= FORCE_RJ45;
- } else
#endif
if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
@@ -1243,7 +1231,7 @@ net_open(struct net_device *dev)
else
#endif
{
-#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105) && !defined(CONFIG_MACH_MP1000)
+#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105)
if (((1 << dev->irq) & lp->irq_map) == 0) {
printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
dev->name, dev->irq, lp->irq_map);
diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h
index f19d1ebe0183..decea264f121 100644
--- a/drivers/net/cs89x0.h
+++ b/drivers/net/cs89x0.h
@@ -16,7 +16,7 @@
#include <linux/config.h>
-#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105) || defined (CONFIG_MACH_MP1000)
+#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105)
/* IXDP2401/IXDP2801 uses dword-aligned register addressing */
#define CS89x0_PORT(reg) ((reg) * 2)
#else
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index f5ea39ff1017..d86d8f055a6c 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -72,12 +72,12 @@
#include <linux/ethtool.h>
#include <asm/abs_addr.h>
-#include <asm/iSeries/mf.h>
+#include <asm/iseries/mf.h>
#include <asm/uaccess.h>
-#include <asm/iSeries/HvLpConfig.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
#include <asm/iommu.h>
#include <asm/vio.h>
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index e2a5657d5fdb..4612312c0c2d 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -307,14 +307,22 @@ static int ahci_port_start(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
void *mem;
dma_addr_t mem_dma;
+ int rc;
pp = kmalloc(sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
memset(pp, 0, sizeof(*pp));
+ rc = ata_pad_alloc(ap, dev);
+ if (rc) {
+ kfree(pp);
+ return rc;
+ }
+
mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
if (!mem) {
+ ata_pad_free(ap, dev);
kfree(pp);
return -ENOMEM;
}
@@ -390,6 +398,7 @@ static void ahci_port_stop(struct ata_port *ap)
ap->private_data = NULL;
dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
pp->cmd_slot, pp->cmd_slot_dma);
+ ata_pad_free(ap, dev);
kfree(pp);
}
@@ -468,23 +477,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static void ahci_fill_sg(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
- unsigned int i;
+ struct scatterlist *sg;
+ struct ahci_sg *ahci_sg;
VPRINTK("ENTER\n");
/*
* Next, the S/G list.
*/
- for (i = 0; i < qc->n_elem; i++) {
- u32 sg_len;
- dma_addr_t addr;
-
- addr = sg_dma_address(&qc->sg[i]);
- sg_len = sg_dma_len(&qc->sg[i]);
-
- pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff);
- pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1);
+ ahci_sg = pp->cmd_tbl_sg;
+ ata_for_each_sg(sg, qc) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
+ ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
+ ahci_sg++;
}
}
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index e9202f2a8276..1045872b0175 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -28,10 +28,10 @@
* hypervisor system or a converged hypervisor system.
*/
-#include <asm/iSeries/vio.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvLpConfig.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_config.h>
#include <asm/vio.h>
#include <linux/device.h>
#include "ibmvscsi.h"
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index ff18fa7044c5..e1346cddd37f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2390,8 +2390,9 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg = qc->__sg;
int dir = qc->dma_dir;
+ void *pad_buf = NULL;
assert(qc->flags & ATA_QCFLAG_DMAMAP);
assert(sg != NULL);
@@ -2401,14 +2402,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
DPRINTK("unmapping %u sg elements\n", qc->n_elem);
- if (qc->flags & ATA_QCFLAG_SG)
+ /* if we padded the buffer out to 32-bit bound, and data
+ * xfer direction is from-device, we must copy from the
+ * pad buffer back into the supplied buffer
+ */
+ if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+ pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+ if (qc->flags & ATA_QCFLAG_SG) {
dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
- else
+ /* restore last sg */
+ sg[qc->orig_n_elem - 1].length += qc->pad_len;
+ if (pad_buf) {
+ struct scatterlist *psg = &qc->pad_sgent;
+ void *addr = kmap_atomic(psg->page, KM_IRQ0);
+ memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+ kunmap_atomic(psg->page, KM_IRQ0);
+ }
+ } else {
dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
sg_dma_len(&sg[0]), dir);
+ /* restore sg */
+ sg->length += qc->pad_len;
+ if (pad_buf)
+ memcpy(qc->buf_virt + sg->length - qc->pad_len,
+ pad_buf, qc->pad_len);
+ }
qc->flags &= ~ATA_QCFLAG_DMAMAP;
- qc->sg = NULL;
+ qc->__sg = NULL;
}
/**
@@ -2424,15 +2446,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
*/
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
- struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
- unsigned int idx, nelem;
+ struct scatterlist *sg;
+ unsigned int idx;
- assert(sg != NULL);
+ assert(qc->__sg != NULL);
assert(qc->n_elem > 0);
idx = 0;
- for (nelem = qc->n_elem; nelem; nelem--,sg++) {
+ ata_for_each_sg(sg, qc) {
u32 addr, offset;
u32 sg_len, len;
@@ -2518,12 +2540,18 @@ void ata_qc_prep(struct ata_queued_cmd *qc)
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
+ struct scatterlist *sg;
+
qc->flags |= ATA_QCFLAG_SINGLE;
- qc->sg = &qc->sgent;
+ memset(&qc->sgent, 0, sizeof(qc->sgent));
+ qc->__sg = &qc->sgent;
qc->n_elem = 1;
+ qc->orig_n_elem = 1;
qc->buf_virt = buf;
- sg_init_one(qc->sg, buf, buflen);
+
+ sg = qc->__sg;
+ sg_init_one(sg, buf, buflen);
}
/**
@@ -2544,8 +2572,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem)
{
qc->flags |= ATA_QCFLAG_SG;
- qc->sg = sg;
+ qc->__sg = sg;
qc->n_elem = n_elem;
+ qc->orig_n_elem = n_elem;
}
/**
@@ -2565,9 +2594,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
int dir = qc->dma_dir;
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg = qc->__sg;
dma_addr_t dma_address;
+ /* we must lengthen transfers to end on a 32-bit boundary */
+ qc->pad_len = sg->length & 3;
+ if (qc->pad_len) {
+ void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+ struct scatterlist *psg = &qc->pad_sgent;
+
+ assert(qc->dev->class == ATA_DEV_ATAPI);
+
+ memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+ qc->pad_len);
+
+ sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+ sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+ /* trim sg */
+ sg->length -= qc->pad_len;
+
+ DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+ sg->length, qc->pad_len);
+ }
+
dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
sg->length, dir);
if (dma_mapping_error(dma_address))
@@ -2599,12 +2651,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg = qc->__sg;
+ struct scatterlist *lsg = &sg[qc->n_elem - 1];
int n_elem, dir;
VPRINTK("ENTER, ata%u\n", ap->id);
assert(qc->flags & ATA_QCFLAG_SG);
+ /* we must lengthen transfers to end on a 32-bit boundary */
+ qc->pad_len = lsg->length & 3;
+ if (qc->pad_len) {
+ void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+ struct scatterlist *psg = &qc->pad_sgent;
+ unsigned int offset;
+
+ assert(qc->dev->class == ATA_DEV_ATAPI);
+
+ memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+ /*
+ * psg->page/offset are used to copy to-be-written
+ * data in this function or read data in ata_sg_clean.
+ */
+ offset = lsg->offset + lsg->length - qc->pad_len;
+ psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+ psg->offset = offset_in_page(offset);
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ void *addr = kmap_atomic(psg->page, KM_IRQ0);
+ memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+ kunmap_atomic(psg->page, KM_IRQ0);
+ }
+
+ sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+ sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+ /* trim last sg */
+ lsg->length -= qc->pad_len;
+
+ DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+ qc->n_elem - 1, lsg->length, qc->pad_len);
+ }
+
dir = qc->dma_dir;
n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
if (n_elem < 1)
@@ -2880,7 +2967,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg = qc->__sg;
struct ata_port *ap = qc->ap;
struct page *page;
unsigned int offset;
@@ -2930,7 +3017,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg = qc->__sg;
struct ata_port *ap = qc->ap;
struct page *page;
unsigned char *buf;
@@ -2963,7 +3050,7 @@ next_sg:
return;
}
- sg = &qc->sg[qc->cursg];
+ sg = &qc->__sg[qc->cursg];
page = sg->page;
offset = sg->offset + qc->cursg_ofs;
@@ -3320,7 +3407,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
qc = ata_qc_new(ap);
if (qc) {
- qc->sg = NULL;
+ qc->__sg = NULL;
qc->flags = 0;
qc->scsicmd = NULL;
qc->ap = ap;
@@ -4004,11 +4091,18 @@ err_out:
int ata_port_start (struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
+ int rc;
ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;
+ rc = ata_pad_alloc(ap, dev);
+ if (rc) {
+ dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+ return rc;
+ }
+
DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
return 0;
@@ -4032,6 +4126,7 @@ void ata_port_stop (struct ata_port *ap)
struct device *dev = ap->host_set->dev;
dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+ ata_pad_free(ap, dev);
}
void ata_host_stop (struct ata_host_set *host_set)
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 248baae96486..eb604b0a8990 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -355,10 +355,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
qc->scsidone = done;
if (cmd->use_sg) {
- qc->sg = (struct scatterlist *) cmd->request_buffer;
+ qc->__sg = (struct scatterlist *) cmd->request_buffer;
qc->n_elem = cmd->use_sg;
} else {
- qc->sg = &qc->sgent;
+ qc->__sg = &qc->sgent;
qc->n_elem = 1;
}
} else {
@@ -702,6 +702,16 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
*/
blk_queue_max_sectors(sdev->request_queue, 2048);
}
+
+ /*
+ * SATA DMA transfers must be multiples of 4 bytes, so
+ * we need to pad ATAPI transfers using an extra sg.
+ * Decrement max hw segments accordingly.
+ */
+ if (dev->class == ATA_DEV_ATAPI) {
+ request_queue_t *q = sdev->request_queue;
+ blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+ }
}
return 0; /* scsi layer doesn't check return value, sigh */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 665017eda8a6..a50588c60fab 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -293,14 +293,14 @@ static void adma_eng_timeout(struct ata_port *ap)
static int adma_fill_sg(struct ata_queued_cmd *qc)
{
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct adma_port_priv *pp = ap->private_data;
u8 *buf = pp->pkt;
- int nelem, i = (2 + buf[3]) * 8;
+ int i = (2 + buf[3]) * 8;
u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
- for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
+ ata_for_each_sg(sg, qc) {
u32 addr;
u32 len;
@@ -312,7 +312,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
*(__le32 *)(buf + i) = cpu_to_le32(len);
i += 4;
- if ((nelem + 1) == qc->n_elem)
+ if (ata_sg_is_last(sg, qc))
pFLAGS |= pEND;
buf[i++] = pFLAGS;
buf[i++] = qc->dev->dma_mode & 0xf;
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 46dbdee79f77..0f469e3dabe2 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -670,6 +670,11 @@ static void mv_host_stop(struct ata_host_set *host_set)
ata_host_stop(host_set);
}
+static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
+{
+ dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
+}
+
/**
* mv_port_start - Port specific init/start routine.
* @ap: ATA channel to manipulate
@@ -687,21 +692,23 @@ static int mv_port_start(struct ata_port *ap)
void __iomem *port_mmio = mv_ap_base(ap);
void *mem;
dma_addr_t mem_dma;
+ int rc = -ENOMEM;
pp = kmalloc(sizeof(*pp), GFP_KERNEL);
- if (!pp) {
- return -ENOMEM;
- }
+ if (!pp)
+ goto err_out;
memset(pp, 0, sizeof(*pp));
mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
GFP_KERNEL);
- if (!mem) {
- kfree(pp);
- return -ENOMEM;
- }
+ if (!mem)
+ goto err_out_pp;
memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
+ rc = ata_pad_alloc(ap, dev);
+ if (rc)
+ goto err_out_priv;
+
/* First item in chunk of DMA memory:
* 32-slot command request table (CRQB), 32 bytes each in size
*/
@@ -746,6 +753,13 @@ static int mv_port_start(struct ata_port *ap)
*/
ap->private_data = pp;
return 0;
+
+err_out_priv:
+ mv_priv_free(pp, dev);
+err_out_pp:
+ kfree(pp);
+err_out:
+ return rc;
}
/**
@@ -768,7 +782,8 @@ static void mv_port_stop(struct ata_port *ap)
spin_unlock_irqrestore(&ap->host_set->lock, flags);
ap->private_data = NULL;
- dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
+ ata_pad_free(ap, dev);
+ mv_priv_free(pp, dev);
kfree(pp);
}
@@ -784,23 +799,24 @@ static void mv_port_stop(struct ata_port *ap)
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
struct mv_port_priv *pp = qc->ap->private_data;
- unsigned int i;
+ unsigned int i = 0;
+ struct scatterlist *sg;
- for (i = 0; i < qc->n_elem; i++) {
+ ata_for_each_sg(sg, qc) {
u32 sg_len;
dma_addr_t addr;
- addr = sg_dma_address(&qc->sg[i]);
- sg_len = sg_dma_len(&qc->sg[i]);
+ addr = sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
- }
- if (0 < qc->n_elem) {
- pp->sg_tbl[qc->n_elem - 1].flags_size |=
- cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+ if (ata_sg_is_last(sg, qc))
+ pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+
+ i++;
}
}
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 9938dae782b6..65502c157a54 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -271,16 +271,17 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
static void qs_fill_sg(struct ata_queued_cmd *qc)
{
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct qs_port_priv *pp = ap->private_data;
unsigned int nelem;
u8 *prd = pp->pkt + QS_CPB_BYTES;
- assert(sg != NULL);
+ assert(qc->__sg != NULL);
assert(qc->n_elem > 0);
- for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
+ nelem = 0;
+ ata_for_each_sg(sg, qc) {
u64 addr;
u32 len;
@@ -294,6 +295,7 @@ static void qs_fill_sg(struct ata_queued_cmd *qc)
VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
(unsigned long long)addr, len);
+ nelem++;
}
}
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index c66548025657..e6c8e89c226f 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -411,15 +411,20 @@ static void sil24_phy_reset(struct ata_port *ap)
static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
struct sil24_cmd_block *cb)
{
- struct scatterlist *sg = qc->sg;
struct sil24_sge *sge = cb->sge;
- unsigned i;
+ struct scatterlist *sg;
+ unsigned int idx = 0;
- for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
+ ata_for_each_sg(sg, qc) {
sge->addr = cpu_to_le64(sg_dma_address(sg));
sge->cnt = cpu_to_le32(sg_dma_len(sg));
- sge->flags = 0;
- sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
+ if (ata_sg_is_last(sg, qc))
+ sge->flags = cpu_to_le32(SGE_TRM);
+ else
+ sge->flags = 0;
+
+ sge++;
+ idx++;
}
}
@@ -630,6 +635,13 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
return IRQ_RETVAL(handled);
}
+static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
+{
+ const size_t cb_size = sizeof(*pp->cmd_block);
+
+ dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
+}
+
static int sil24_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
@@ -637,36 +649,44 @@ static int sil24_port_start(struct ata_port *ap)
struct sil24_cmd_block *cb;
size_t cb_size = sizeof(*cb);
dma_addr_t cb_dma;
+ int rc = -ENOMEM;
- pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+ pp = kzalloc(sizeof(*pp), GFP_KERNEL);
if (!pp)
- return -ENOMEM;
- memset(pp, 0, sizeof(*pp));
+ goto err_out;
pp->tf.command = ATA_DRDY;
cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
- if (!cb) {
- kfree(pp);
- return -ENOMEM;
- }
+ if (!cb)
+ goto err_out_pp;
memset(cb, 0, cb_size);
+ rc = ata_pad_alloc(ap, dev);
+ if (rc)
+ goto err_out_pad;
+
pp->cmd_block = cb;
pp->cmd_block_dma = cb_dma;
ap->private_data = pp;
return 0;
+
+err_out_pad:
+ sil24_cblk_free(pp, dev);
+err_out_pp:
+ kfree(pp);
+err_out:
+ return rc;
}
static void sil24_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct sil24_port_priv *pp = ap->private_data;
- size_t cb_size = sizeof(*pp->cmd_block);
- dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
+ sil24_cblk_free(pp, dev);
kfree(pp);
}
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 0ec21e09f5d8..f859bbd681ed 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -450,14 +450,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
- struct scatterlist *sg = qc->sg;
+ struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
unsigned int portno = ap->port_no;
- unsigned int i, last, idx, total_len = 0, sgt_len;
+ unsigned int i, idx, total_len = 0, sgt_len;
u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
assert(qc->flags & ATA_QCFLAG_DMAMAP);
@@ -470,12 +470,11 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
/*
* Build S/G table
*/
- last = qc->n_elem;
idx = 0;
- for (i = 0; i < last; i++) {
- buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i]));
- buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i]));
- total_len += sg_dma_len(&sg[i]);
+ ata_for_each_sg(sg, qc) {
+ buf[idx++] = cpu_to_le32(sg_dma_address(sg));
+ buf[idx++] = cpu_to_le32(sg_dma_len(sg));
+ total_len += sg_dma_len(sg);
}
buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
sgt_len = idx * 4;
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index b7a5dd710228..59ba5d993b4b 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -164,7 +164,7 @@ static int __init parse_options(struct early_uart_device *device, char *options)
if ((options = strchr(options, ','))) {
options++;
- device->baud = simple_strtoul(options, 0, 0);
+ device->baud = simple_strtoul(options, NULL, 0);
length = min(strcspn(options, " "), sizeof(device->options));
strncpy(device->options, options, length);
} else {
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index 6a67e8f585b3..87ef368384fb 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -408,11 +408,7 @@ static struct uart_port clps711x_ports[UART_NR] = {
{
.iobase = SYSCON1,
.irq = IRQ_UTXINT1, /* IRQ_URXINT1, IRQ_UMSINT */
-#ifdef CONFIG_MP1000_90MHZ
- .uartclk = 4515840,
-#else
.uartclk = 3686400,
-#endif
.fifosize = 16,
.ops = &clps711x_pops,
.line = 0,
@@ -421,11 +417,7 @@ static struct uart_port clps711x_ports[UART_NR] = {
{
.iobase = SYSCON2,
.irq = IRQ_UTXINT2, /* IRQ_URXINT2 */
-#ifdef CONFIG_MP1000_90MHZ
- .uartclk = 4515840,
-#else
.uartclk = 3686400,
-#endif
.fifosize = 16,
.ops = &clps711x_pops,
.line = 1,
@@ -559,7 +551,6 @@ console_initcall(clps711xuart_console_init);
static struct uart_driver clps711x_reg = {
.driver_name = "ttyCL",
.dev_name = "ttyCL",
- .devfs_name = "ttyCL",
.major = SERIAL_CLPS711X_MAJOR,
.minor = SERIAL_CLPS711X_MINOR,
.nr = UART_NR,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index b1aa350fd32f..e46528c825bf 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -316,4 +316,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
quirk_usb_disable_ehci(pdev);
}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index a7f020ada630..308defc389a2 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -384,6 +384,14 @@ static struct pci_device_id nvidiafb_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_NVIDIA, 0x021d,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_NVIDIA, 0x021e,