author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2015-07-27 11:10:05 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2015-07-27 11:10:05 -0700
commit     6e64e22449023dc89dfb92c6f19d4c5b03f46889 (patch)
tree       c0099a75238a2cdcedf005956963329eee0aac75 /drivers
parent     4ad2adc98f113d6c21d7bd365cd45ba88d4f7470 (diff)
parent     cbfe8fa6cd672011c755c3cd85c9ffd4e2d10a6f (diff)
Merge 4.2-rc4 into staging-next
We want the iio and other fixes in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/ata/libata-core.c | 21
-rw-r--r-- drivers/ata/libata-pmp.c | 7
-rw-r--r-- drivers/ata/libata-scsi.c | 3
-rw-r--r-- drivers/ata/libata-transport.c | 2
-rw-r--r-- drivers/block/null_blk.c | 18
-rw-r--r-- drivers/bluetooth/btbcm.c | 11
-rw-r--r-- drivers/firmware/efi/cper.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 70
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 48
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 12
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 26
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r-- drivers/hid/hid-cp2112.c | 2
-rw-r--r-- drivers/hid/hid-multitouch.c | 7
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r-- drivers/hid/wacom_sys.c | 6
-rw-r--r-- drivers/hid/wacom_wac.c | 3
-rw-r--r-- drivers/iio/accel/mma8452.c | 8
-rw-r--r-- drivers/iio/adc/mcp320x.c | 2
-rw-r--r-- drivers/iio/adc/vf610_adc.c | 2
-rw-r--r-- drivers/iio/light/stk3310.c | 26
-rw-r--r-- drivers/iio/magnetometer/Kconfig | 1
-rw-r--r-- drivers/iio/magnetometer/bmc150_magn.c | 4
-rw-r--r-- drivers/iio/magnetometer/mmc35240.c | 12
-rw-r--r-- drivers/iio/temperature/mlx90614.c | 2
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_driver.c | 6
-rw-r--r-- drivers/input/input-leds.c | 16
-rw-r--r-- drivers/input/mouse/elantech.c | 13
-rw-r--r-- drivers/input/touchscreen/goodix.c | 36
-rw-r--r-- drivers/input/touchscreen/usbtouchscreen.c | 3
-rw-r--r-- drivers/input/touchscreen/zforce_ts.c | 2
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 60
-rw-r--r-- drivers/iommu/intel-iommu.c | 9
-rw-r--r-- drivers/isdn/gigaset/ser-gigaset.c | 35
-rw-r--r-- drivers/md/bitmap.c | 28
-rw-r--r-- drivers/md/md-cluster.c | 12
-rw-r--r-- drivers/md/md-cluster.h | 2
-rw-r--r-- drivers/md/md.c | 4
-rw-r--r-- drivers/md/raid1.c | 9
-rw-r--r-- drivers/md/raid10.c | 5
-rw-r--r-- drivers/md/raid5.c | 35
-rw-r--r-- drivers/md/raid5.h | 3
-rw-r--r-- drivers/media/pci/ivtv/ivtvfb.c | 15
-rw-r--r-- drivers/misc/mei/main.c | 2
-rw-r--r-- drivers/misc/mic/scif/scif_nodeqp.c | 15
-rw-r--r-- drivers/mmc/card/block.c | 2
-rw-r--r-- drivers/mmc/host/Kconfig | 1
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 11
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 210
-rw-r--r-- drivers/mmc/host/sdhci-esdhc.h | 2
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 1
-rw-r--r-- drivers/mmc/host/sdhci.c | 16
-rw-r--r-- drivers/net/bonding/bond_main.c | 34
-rw-r--r-- drivers/net/can/at91_can.c | 8
-rw-r--r-- drivers/net/can/bfin_can.c | 6
-rw-r--r-- drivers/net/can/cc770/cc770.c | 4
-rw-r--r-- drivers/net/can/flexcan.c | 7
-rw-r--r-- drivers/net/can/grcan.c | 3
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 6
-rw-r--r-- drivers/net/can/slcan.c | 2
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 17
-rw-r--r-- drivers/net/can/ti_hecc.c | 2
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 6
-rw-r--r-- drivers/net/can/usb/esd_usb2.c | 6
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb.c | 7
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 4
-rw-r--r-- drivers/net/can/usb/usb_8dev.c | 6
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 15
-rw-r--r-- drivers/net/dsa/mv88e6xxx.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 88
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 22
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 74
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan.h | 9
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 42
-rw-r--r-- drivers/net/phy/dp83867.c | 2
-rw-r--r-- drivers/net/phy/mdio_bus.c | 19
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 1
-rw-r--r-- drivers/net/virtio_net.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fh.h | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sta.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/time-event.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tx.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/drv.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/internal.h | 51
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/rx.c | 414
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 52
-rw-r--r-- drivers/net/xen-netback/netback.c | 6
-rw-r--r-- drivers/nvdimm/region_devs.c | 5
-rw-r--r-- drivers/parport/share.c | 11
-rw-r--r-- drivers/phy/Kconfig | 2
-rw-r--r-- drivers/phy/phy-berlin-usb.c | 4
-rw-r--r-- drivers/phy/phy-ti-pipe3.c | 172
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm2835.c | 2
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx1-core.c | 3
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-abx500.c | 1
-rw-r--r-- drivers/pinctrl/pinctrl-lpc18xx.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-single.c | 3
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.c | 5
-rw-r--r-- drivers/pinctrl/sh-pfc/sh_pfc.h | 2
-rw-r--r-- drivers/regulator/88pm800.c | 2
-rw-r--r-- drivers/regulator/core.c | 19
-rw-r--r-- drivers/regulator/max8973-regulator.c | 2
-rw-r--r-- drivers/regulator/s2mps11.c | 14
-rw-r--r-- drivers/s390/Makefile | 2
-rw-r--r-- drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile) | 0
-rw-r--r-- drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c) | 0
-rw-r--r-- drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c) | 0
-rw-r--r-- drivers/scsi/virtio_scsi.c | 4
-rw-r--r-- drivers/spi/Kconfig | 2
-rw-r--r-- drivers/spi/spi-img-spfi.c | 2
-rw-r--r-- drivers/spi/spi-imx.c | 5
-rw-r--r-- drivers/spi/spi-zynqmp-gqspi.c | 1
-rw-r--r-- drivers/spi/spidev.c | 1
-rw-r--r-- drivers/tty/n_tty.c | 16
-rw-r--r-- drivers/tty/serial/Kconfig | 2
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 4
-rw-r--r-- drivers/tty/serial/etraxfs-uart.c | 2
-rw-r--r-- drivers/tty/serial/imx.c | 15
-rw-r--r-- drivers/tty/serial/sc16is7xx.c | 30
-rw-r--r-- drivers/tty/serial/serial_core.c | 3
-rw-r--r-- drivers/tty/vt/selection.c | 1
-rw-r--r-- drivers/tty/vt/vt.c | 2
-rw-r--r-- drivers/usb/class/cdc-acm.c | 1
-rw-r--r-- drivers/usb/common/ulpi.c | 2
-rw-r--r-- drivers/usb/core/hcd.c | 7
-rw-r--r-- drivers/usb/core/hub.c | 2
-rw-r--r-- drivers/usb/core/usb.h | 1
-rw-r--r-- drivers/usb/dwc3/ep0.c | 4
-rw-r--r-- drivers/usb/gadget/udc/mv_udc_core.c | 2
-rw-r--r-- drivers/usb/gadget/udc/udc-core.c | 14
-rw-r--r-- drivers/usb/host/ohci-q.c | 7
-rw-r--r-- drivers/usb/host/ohci-tmio.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 22
-rw-r--r-- drivers/usb/host/xhci-mem.c | 2
-rw-r--r-- drivers/usb/host/xhci-pci.c | 57
-rw-r--r-- drivers/usb/host/xhci-ring.c | 3
-rw-r--r-- drivers/usb/host/xhci.c | 3
-rw-r--r-- drivers/usb/host/xhci.h | 1
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 23
-rw-r--r-- drivers/vhost/vhost.c | 67
154 files changed, 1263 insertions, 1160 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..db5d9f79a247 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
+ if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+ dev->max_sectors);
+
if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
{ "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ /*
+ * Causes silent data corruption with higher max sects.
+ * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+ */
+ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ /* devices that don't properly handle TRIM commands */
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
* (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
else /* In the ancient relic department - skip all of this */
return 0;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ /* On some disks, this command causes spin-up, so we need longer timeout */
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
}
+ } else if (vendor == 0x11ab && devid == 0x4140) {
+ /* Marvell 4140 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* port 4 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 4)
+ link->flags |= ATA_LFLAG_DISABLED;
+ }
}
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..641a61a59e89 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(args->id) &&
+ !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
rbuf[14] |= 0x80; /* LBPME */
if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
+ else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+ mode = "forced_unsupported";
else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
while ((entry = llist_del_all(&cq->list)) != NULL) {
entry = llist_reverse_order(entry);
do {
+ struct request_queue *q = NULL;
+
cmd = container_of(entry, struct nullb_cmd, ll_list);
entry = entry->next;
+ if (cmd->rq)
+ q = cmd->rq->q;
end_cmd(cmd);
- if (cmd->rq) {
- struct request_queue *q = cmd->rq->q;
-
- if (!q->mq_ops && blk_queue_stopped(q)) {
- spin_lock(q->queue_lock);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock(q->queue_lock);
- }
+ if (q && !q->mq_ops && blk_queue_stopped(q)) {
+ spin_lock(q->queue_lock);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+ spin_unlock(q->queue_lock);
}
} while (entry);
}
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
/* Read Verbose Config Version Info */
skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
- get_unaligned_le16(skb->data + 5));
- kfree_skb(skb);
+ if (!IS_ERR(skb)) {
+ BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+ get_unaligned_le16(skb->data + 5));
+ kfree_skb(skb);
+ }
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
return ret;
}
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+ int len)
{
struct cper_mem_err_compact cmem;
+ /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+ if (len == sizeof(struct cper_sec_mem_err_old) &&
+ (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+ pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+ return;
+ }
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
printk("%s""section_type: memory error\n", newpfx);
- if (gdata->error_data_length >= sizeof(*mem_err))
- cper_print_mem(newpfx, mem_err);
+ if (gdata->error_data_length >=
+ sizeof(struct cper_sec_mem_err_old))
+ cper_print_mem(newpfx, mem_err,
+ gdata->error_data_length);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..e9fde72cf038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;
@@ -1626,6 +1629,7 @@ struct amdgpu_vce {
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
+ unsigned harvest_config;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..31ad444c6386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
+ dev_info.vce_harvest_config = adev->vce.harvest_config;
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 1a2d419cbf16..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m)
{
+ struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 current_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- u32 sclk, tmp;
- u16 vddc;
-
- if (current_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
+ struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+ u32 sclk, vclk, dclk, ecclk, tmp;
+ u16 vddnb, vddgfx;
+
+ if (sclk_index >= NUM_SCLK_LEVELS) {
+ seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
} else {
- sclk = table->entries[current_index].clk;
- tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
- vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "power level %d sclk: %u vddc: %u\n",
- current_index, sclk, vddc);
+ sclk = table->entries[sclk_index].clk;
+ seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+ }
+
+ tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+ if (!pi->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ dclk = uvd_table->entries[uvd_index].dclk;
+ seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+ }
+ }
+
+ seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
+ if (!pi->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6e77964f1b64..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7f7abb0e0be5..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v11_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
+
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
if(idx == 0)
WREG32_P(mmGRBM_GFX_INDEX, 0,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
return 0;
}
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
+#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ unsigned ret;
+
+ if (adev->flags & AMDGPU_IS_APU)
+ tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+ VCE_HARVEST_FUSE_MACRO__MASK) >>
+ VCE_HARVEST_FUSE_MACRO__SHIFT;
+ else
+ tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+ CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+ CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+ switch (tmp) {
+ case 1:
+ ret = AMDGPU_VCE_HARVEST_VCE0;
+ break;
+ case 2:
+ ret = AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ case 3:
+ ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int vce_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
+ if ((adev->vce.harvest_config &
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+ return -ENOENT;
+
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..5ae5c6923128 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+ drm_crtc_vblank_reset(&crtc->base);
dc->crtc = &crtc->base;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..6fad1f9648f3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- ret = atmel_hlcdc_dc_modeset_init(dev);
+ ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
- drm_mode_config_reset(dev);
-
- ret = drm_vblank_init(dev, 1);
+ ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ dev_err(dev->dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
+ drm_mode_config_reset(dev);
+
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 357bd04a173b..fed748311b92 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- connector->status = connector_status_unknown;
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->funcs->reset)
connector->funcs->reset(connector);
- }
}
EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..260389acfb77 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
+ unsigned size;
+ u64 offset;
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
+ if (entry->offset == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
+ /* We use the low bits to encode extra flags as the register should
+ * be naturally aligned (and those that are not so aligned merely
+ * limit the available flags for that register).
+ */
+ offset = entry->offset;
+ size = entry->size;
+ size |= reg->offset ^ offset;
+
intel_runtime_pm_get(dev_priv);
- switch (entry->size) {
+ switch (size) {
+ case 8 | 1:
+ reg->val = I915_READ64_2x32(offset, offset+4);
+ break;
case 8:
- reg->val = I915_READ64(reg->offset);
+ reg->val = I915_READ64(offset);
break;
case 4:
- reg->val = I915_READ(reg->offset);
+ reg->val = I915_READ(offset);
break;
case 2:
- reg->val = I915_READ16(reg->offset);
+ reg->val = I915_READ16(offset);
break;
case 1:
- reg->val = I915_READ8(reg->offset);
+ reg->val = I915_READ8(offset);
break;
default:
- MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..a2dbbbe0d8d7 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
struct cp2112_force_read_report report;
int ret;
+ if (size > sizeof(dev->read_data))
+ size = sizeof(dev->read_data);
report.report = CP2112_DATA_READ_FORCE_SEND;
report.length = cpu_to_be16(size);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..7c811252c1ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/*
* some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
* for the stylus.
+ * The check for mt_report_id ensures we don't process
+ * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
+ * collection, but within the report ID.
*/
if (field->physical == HID_DG_STYLUS)
return 0;
+ else if ((field->physical == 0) &&
+ (field->report->id != td->mt_report_id) &&
+ (td->mt_report_id != -1))
+ return 0;
if (field->application == HID_DG_TOUCHSCREEN ||
field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..20f9a653444c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..44958d79d598 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1271,11 +1271,13 @@ fail_leds:
pad_input_dev = NULL;
wacom_wac->pad_registered = false;
fail_register_pad_input:
- input_unregister_device(touch_input_dev);
+ if (touch_input_dev)
+ input_unregister_device(touch_input_dev);
wacom_wac->touch_input = NULL;
wacom_wac->touch_registered = false;
fail_register_touch_input:
- input_unregister_device(pen_input_dev);
+ if (pen_input_dev)
+ input_unregister_device(pen_input_dev);
wacom_wac->pen_input = NULL;
wacom_wac->pen_registered = false;
fail_register_pen_input:
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0d244239e55d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
features->x_max = 4096;
features->y_max = 4096;
}
+ else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ }
}
/*
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..13ea1ea23328 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
}
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
static const struct iio_event_spec mma8452_transient_event[] = {
{
- .type = IIO_EV_TYPE_THRESH,
+ .type = IIO_EV_TYPE_MAG,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_ENABLE),
.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..d819823f7257 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
+ adc->chip_info = chip_info;
+
adc->transfer[0].tx_buf = &adc->tx_buf;
adc->transfer[0].len = sizeof(adc->tx_buf);
adc->transfer[1].rx_buf = adc->rx_buf;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..819632bf1fda 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
struct vf610_adc *info = iio_priv(indio_dev);
if ((readval == NULL) ||
- (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+ ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
return -EINVAL;
*readval = readl(info->regs + reg);
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index c1a218236be5..11a027adc204 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
int *val, int *val2)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
dev_err(&data->client->dev, "register read failed\n");
return ret;
}
- *val = swab16(buf);
+ *val = be16_to_cpu(buf);
return IIO_VAL_INT;
}
@@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
int val, int val2)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
else
return -EINVAL;
- buf = swab16(val);
+ buf = cpu_to_be16(val);
ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
if (ret < 0)
dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return ret;
}
- *val = swab16(buf);
+ *val = be16_to_cpu(buf);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
@@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&client->dev, "device_register failed\n");
- stk3310_set_state(data, STK3310_STATE_STANDBY);
- }
-
- if (client->irq <= 0)
+ if (client->irq < 0)
client->irq = stk3310_gpio_probe(client);
if (client->irq >= 0) {
@@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
client->irq);
}
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ stk3310_set_state(data, STK3310_STATE_STANDBY);
+ }
+
return ret;
}
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
config BMC150_MAGN
tristate "Bosch BMC150 Magnetometer Driver"
depends on I2C
+ select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index cd002710dd02..7da4ce9409e8 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -694,11 +694,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
goto err_poweroff;
}
if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
- dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret);
+ dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
ret = -ENODEV;
goto err_poweroff;
}
- dev_dbg(&data->client->dev, "Chip id %x\n", ret);
+ dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
ret = bmc150_magn_set_odr(data, preset.odr);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index d927397a6ef7..706ebfd6297f 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
coil_bit = MMC35240_CTRL0_RESET_BIT;
return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
- MMC35240_CTRL0_REFILL_BIT,
- coil_bit);
+ coil_bit, coil_bit);
+
}
static int mmc35240_init(struct mmc35240_data *data)
@@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
/*
* make sure we restore sensor characteristics, by doing
- * a RESET/SET sequence
+ * a SET/RESET sequence, the axis polarity being naturally
+ * aligned after RESET
*/
- ret = mmc35240_hw_set(data, false);
+ ret = mmc35240_hw_set(data, true);
if (ret < 0)
return ret;
usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
- ret = mmc35240_hw_set(data, true);
+ ret = mmc35240_hw_set(data, false);
if (ret < 0)
return ret;
@@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
}
data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
data->client = client;
data->regmap = regmap;
data->res = MMC35240_16_BITS_SLOW;
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 90f70b461567..e4ec7af4c1bf 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = 13657;
+ *val = -13657;
*val2 = 500000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 bar0 = 0, bar1 = 0;
#ifdef CONFIG_X86_64
- if (WARN(pat_enabled(),
- "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+ if (pat_enabled()) {
+ pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
ret = -ENODEV;
goto bail;
}
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
{
}
+static int input_leds_get_count(struct input_dev *dev)
+{
+ unsigned int led_code;
+ int count = 0;
+
+ for_each_set_bit(led_code, dev->ledbit, LED_CNT)
+ if (input_led_info[led_code].name)
+ count++;
+
+ return count;
+}
+
static int input_leds_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
int led_no;
int error;
- num_leds = bitmap_weight(dev->ledbit, LED_CNT);
+ num_leds = input_leds_get_count(dev);
if (!num_leds)
return -ENXIO;
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
led->handle = &leds->handle;
led->code = led_code;
- if (WARN_ON(!input_led_info[led_code].name))
+ if (!input_led_info[led_code].name)
continue;
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..22b9ca901f4e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
- unsigned int x_res = 0, y_res = 0;
+ unsigned int x_res = 31, y_res = 31;
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
@@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
- input_abs_set_res(dev, ABS_X, x_res);
- input_abs_set_res(dev, ABS_Y, y_res);
/*
* range of pressure and width is the same as v2,
* report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
- input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
- input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
/*
@@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
break;
}
+ input_abs_set_res(dev, ABS_X, x_res);
+ input_abs_set_res(dev, ABS_Y, y_res);
+ if (etd->hw_version > 1) {
+ input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
+ }
+
etd->y_max = y_max;
etd->width = width;
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..e36162b28c2a 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
int abs_y_max;
unsigned int max_touch_num;
unsigned int int_trigger_type;
+ bool rotated_screen;
};
#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_LEVEL_HIGH,
};
+/*
+ * Those tablets have their coordinates origin at the bottom right
+ * of the tablet, as if rotated 180 degrees
+ */
+static const struct dmi_system_id rotated_screen[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "WinBook TW100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+ }
+ },
+ {
+ .ident = "WinBook TW700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+ },
+ },
+#endif
+ {}
+};
+
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
int input_y = get_unaligned_le16(&coor_data[3]);
int input_w = get_unaligned_le16(&coor_data[5]);
+ if (ts->rotated_screen) {
+ input_x = ts->abs_x_max - input_x;
+ input_y = ts->abs_y_max - input_y;
+ }
+
input_mt_slot(ts->input_dev, id);
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
ts->abs_y_max = GOODIX_MAX_HEIGHT;
ts->max_touch_num = GOODIX_MAX_CONTACTS;
}
+
+ ts->rotated_screen = dmi_check_system(rotated_screen);
+ if (ts->rotated_screen)
+ dev_dbg(&ts->client->dev,
+ "Applying '180 degrees rotated screen' quirk\n");
}
/**
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
goto err_out;
}
+ /* TSC-25 data sheet specifies a delay after the RESET command */
+ msleep(150);
+
/* set coordinate output rate */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index f58a196521a9..80285c71786e 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -429,7 +429,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
goto unlock;
}
- if (buf[PAYLOAD_LENGTH] == 0) {
+ if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) {
dev_err(&client->dev, "invalid payload length: %d\n",
buf[PAYLOAD_LENGTH]);
ret = -EIO;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
* Stream table.
*
* Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ * 256 lazy entries per table (each table covers a PCI bus)
*/
-#define STRTAB_L1_SZ_SHIFT 16
+#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8
#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
#define ARM64_TCR_TG0_SHIFT 14
#define ARM64_TCR_TG0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
-#define ARM64_TCR_IRGN0_SHIFT 24
+#define ARM64_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
-#define ARM64_TCR_ORGN0_SHIFT 26
+#define ARM64_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP (1 << 12)
u32 features;
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+ u32 options;
+
struct arm_smmu_cmdq cmdq;
struct arm_smmu_evtq evtq;
struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+ { 0, NULL},
+};
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
}
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
- strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+ strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
desc->span = STRTAB_SPLIT + 1;
desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
void *strtab;
u64 reg;
- u32 size;
+ u32 size, l1size;
int ret;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
/* Calculate the L1 size, capped to the SIDSIZE */
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- if (size + STRTAB_SPLIT < smmu->sid_bits)
+ cfg->num_l1_ents = 1 << size;
+
+ size += STRTAB_SPLIT;
+ if (size < smmu->sid_bits)
dev_warn(smmu->dev,
"2-level strtab only covers %u/%u bits of SID\n",
- size + STRTAB_SPLIT, smmu->sid_bits);
+ size, smmu->sid_bits);
- cfg->num_l1_ents = 1 << size;
- size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
- strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+ l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+ strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
GFP_KERNEL);
if (!strtab) {
dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
ret = arm_smmu_init_l1_strtab(smmu);
if (ret)
dma_free_coherent(smmu->dev,
- cfg->num_l1_ents *
- (STRTAB_L1_DESC_DWORDS << 3),
+ l1size,
strtab,
cfg->strtab_dma);
return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
+ parse_driver_options(smmu);
+
/* Probe the h/w */
ret = arm_smmu_device_probe(smmu);
if (ret)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
struct page *freelist = NULL;
- int i;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
- iommu_detach_domain(domain, g_iommus[i]);
+ for_each_active_iommu(iommu, drhd)
+ if (domain_type_is_vm(domain) ||
+ test_bit(iommu->seq_id, domain->iommu_bmp))
+ iommu_detach_domain(domain, iommu);
rcu_read_unlock();
dma_free_pagelist(freelist);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
cs->hw.ser->tty = tty;
atomic_set(&cs->hw.ser->refcnt, 1);
init_completion(&cs->hw.ser->dead_cmp);
-
tty->disc_data = cs;
+ /* Set the amount of data we're willing to receive per call
+ * from the hardware driver to half of the input buffer size
+ * to leave some reserve.
+ * Note: We don't do flow control towards the hardware driver.
+ * If more data is received than will fit into the input buffer,
+ * it will be dropped and an error will be logged. This should
+ * never happen as the device is slow and the buffer size ample.
+ */
+ tty->receive_room = RBUFSIZE/2;
+
/* OK.. Initialization of the datastructures and the HW is done.. Now
* startup system and notify the LL that we are ready to run
*/
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
}
/*
- * Read on the tty.
- * Unused, received data goes only to the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t count)
-{
- return -EAGAIN;
-}
-
-/*
- * Write on the tty.
- * Unused, transmit data comes only from the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t count)
-{
- return -EAGAIN;
-}
-
-/*
* Ioctl on the tty.
* Called in process context only.
* May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
.open = gigaset_tty_open,
.close = gigaset_tty_close,
.hangup = gigaset_tty_hangup,
- .read = gigaset_tty_read,
- .write = gigaset_tty_write,
.ioctl = gigaset_tty_ioctl,
.receive_buf = gigaset_tty_receive,
.write_wakeup = gigaset_tty_wakeup,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
- bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+ bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (bitmap->storage.sb_page == NULL)
return -ENOMEM;
bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
sb->state = cpu_to_le32(bitmap->flags);
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+ bitmap->mddev->bitmap_info.nodes = 0;
kunmap_atomic(sb);
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
+ loff_t offset = bitmap->mddev->bitmap_info.offset;
if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
- bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
- bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ bitmap->cluster_slot, offset);
}
if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
bitmap, bytes, sb_page);
} else {
err = read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
+ offset,
sb_page,
0, sizeof(bitmap_super_t));
}
@@ -611,8 +613,16 @@ re_read:
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- nodes = le32_to_cpu(sb->nodes);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+ /* XXX: This is a hack to ensure that we don't use clustering
+ * in case:
+ * - dm-raid is in use and
+ * - the nodes written in bitmap_sb is erroneous.
+ */
+ if (!bitmap->mddev->sync_super) {
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+ sb->cluster_name, 64);
+ }
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
kunmap_atomic(sb);
/* Assiging chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
- if (nodes && (bitmap->cluster_slot < 0)) {
+ if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- rv = bitmap_read_sb(bitmap);
- if (rv)
- goto err;
-
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
goto err;
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..0072190515e0 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
/* md_cluster_info flags */
#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
static void recover_prep(void *arg)
{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
cinfo->slot_number = our_slot;
complete(&cinfo->completion);
+ clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
resync_send(mddev, RESYNCING, 0, 0);
}
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+ sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
struct suspend_info *s;
+ if ((direction == READ) &&
+ test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+ return 1;
+
spin_lock_irq(&cinfo->suspend_lock);
if (list_empty(&cinfo->suspend_list))
goto out;
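The md-cluster.c change gives area_resyncing() a direction argument and a per-cluster flag that is set in recover_prep() and cleared in recover_done(); while the flag is set, every READ is reported as falling in a resyncing area so raid1's read_balance() sticks to the first disk. A compact standalone sketch of that check with stand-in types; the real code additionally walks cinfo->suspend_list under suspend_lock:

#include <stdbool.h>
#include <stdio.h>

enum io_dir { DIR_READ, DIR_WRITE };

struct cluster_state {
	bool suspend_read_balancing;  /* set in recover_prep, cleared in recover_done */
};

static int area_resyncing_sketch(const struct cluster_state *st, enum io_dir dir)
{
	if (dir == DIR_READ && st->suspend_read_balancing)
		return 1;   /* treat every read as "in a resyncing area" */
	/* ... otherwise the suspended-range list would be consulted ... */
	return 0;
}

int main(void)
{
	struct cluster_state st = { .suspend_read_balancing = true };

	printf("read:  %d\n", area_resyncing_sketch(&st, DIR_READ));   /* 1 */
	printf("write: %d\n", area_resyncing_sketch(&st, DIR_WRITE));  /* 0 */
	return 0;
}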
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
int (*metadata_update_cancel)(struct mddev *mddev);
- int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
int (*add_new_disk_finish)(struct mddev *mddev);
int (*new_disk_ack)(struct mddev *mddev, bool ack);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..0c2a4e8b873c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
mddev_detach(mddev);
+ /* Ensure ->event_work is done */
+ flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->ready = 0;
mddev->pers = NULL;
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
err = request_module("md-cluster");
if (err) {
pr_err("md-cluster module not found.\n");
- return err;
+ return -ENOENT;
}
spin_lock(&pers_lock);
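The md.c hunk adds a flush_workqueue(md_misc_wq) so any queued ->event_work has finished before the personality data it references is torn down. A userspace analogue of that ordering rule, with a plain thread join standing in for the workqueue flush; names and structure here are illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	int events;
};

static void *event_worker(void *arg)
{
	struct dev_state *st = arg;

	st->events++;               /* models the deferred ->event_work */
	return NULL;
}

int main(void)
{
	struct dev_state *st = calloc(1, sizeof(*st));
	pthread_t worker;

	pthread_create(&worker, NULL, event_worker, st);

	/* "flush the workqueue": wait until the worker can no longer touch st */
	pthread_join(worker, NULL);

	printf("events handled: %d\n", st->events);
	free(st);                   /* only now is it safe to free */
	return 0;
}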
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..94f5b55069e0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
spin_lock_irqsave(&conf->device_lock, flags);
if (r1_bio->mddev->degraded == conf->raid_disks ||
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+ test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
uptodate = 1;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
this_sector + sectors)))
choose_first = 1;
else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+ md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_end_sector(bio) <= mddev->suspend_lo ||
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev,
+ !md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..38c58e19cfce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
/* far_copies must be 1 */
conf->prev.stride = conf->dev_sectors;
}
+ conf->reshape_safe = conf->reshape_progress;
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
}
conf->offset_diff = min_offset_diff;
- conf->reshape_safe = conf->reshape_progress;
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
conf->reshape_progress = size;
} else
conf->reshape_progress = 0;
+ conf->reshape_safe = conf->reshape_progress;
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->reshape_safe = MaxSector;
mddev->reshape_position = MaxSector;
spin_unlock_irq(&conf->device_lock);
return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
/* read-ahead size must cover two whole stripes, which is
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..643d217bfa13 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (!sc)
return -ENOMEM;
+ /* Need to ensure auto-resizing doesn't interfere */
+ mutex_lock(&conf->cache_size_mutex);
+
for (i = conf->max_nr_stripes; i; i--) {
nsh = alloc_stripe(sc, GFP_KERNEL);
if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
kmem_cache_free(sc, nsh);
}
kmem_cache_destroy(sc);
+ mutex_unlock(&conf->cache_size_mutex);
return -ENOMEM;
}
/* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
} else
err = -ENOMEM;
+ mutex_unlock(&conf->cache_size_mutex);
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
&first_bad, &bad_sectors))
set_bit(R5_ReadRepl, &dev->flags);
else {
- if (rdev)
+ if (rdev && !test_bit(Faulty, &rdev->flags))
set_bit(R5_NeedReplace, &dev->flags);
+ else
+ clear_bit(R5_NeedReplace, &dev->flags);
rdev = rcu_dereference(conf->disks[i].rdev);
clear_bit(R5_ReadRepl, &dev->flags);
}
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
- if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+ mutex_trylock(&conf->cache_size_mutex)) {
grow_one_stripe(conf, __GFP_NOWARN);
/* Set flag even if allocation failed. This helps
* slow down allocation requests when mem is short
*/
set_bit(R5_DID_ALLOC, &conf->cache_state);
+ mutex_unlock(&conf->cache_size_mutex);
}
async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
return -EINVAL;
conf->min_nr_stripes = size;
+ mutex_lock(&conf->cache_size_mutex);
while (size < conf->max_nr_stripes &&
drop_one_stripe(conf))
;
+ mutex_unlock(&conf->cache_size_mutex);
err = md_allow_write(mddev);
if (err)
return err;
+ mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL))
break;
+ mutex_unlock(&conf->cache_size_mutex);
return 0;
}
@@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
- int ret = 0;
- while (ret < sc->nr_to_scan) {
- if (drop_one_stripe(conf) == 0)
- return SHRINK_STOP;
- ret++;
+ unsigned long ret = SHRINK_STOP;
+
+ if (mutex_trylock(&conf->cache_size_mutex)) {
+ ret = 0;
+ while (ret < sc->nr_to_scan) {
+ if (drop_one_stripe(conf) == 0) {
+ ret = SHRINK_STOP;
+ break;
+ }
+ ret++;
+ }
+ mutex_unlock(&conf->cache_size_mutex);
}
return ret;
}
@@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
+ mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
init_waitqueue_head(&conf->wait_for_stripe[i]);
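The raid5.c hunks introduce conf->cache_size_mutex so explicit cache resizes, the background allocator in raid5d(), and the memory shrinker cannot race while growing or dropping stripes; the shrinker only trylocks and reports SHRINK_STOP when the mutex is busy or nothing can be reclaimed. A standalone model of that scan loop, with a pthread mutex standing in for the kernel mutex and SCAN_STOP for SHRINK_STOP:

#include <pthread.h>
#include <stdio.h>

#define SCAN_STOP (~0UL)

struct cache {
	pthread_mutex_t resize_lock;
	unsigned long nr_items;
};

static int drop_one(struct cache *c)
{
	if (!c->nr_items)
		return 0;
	c->nr_items--;
	return 1;
}

static unsigned long cache_scan(struct cache *c, unsigned long nr_to_scan)
{
	unsigned long freed = SCAN_STOP;

	if (pthread_mutex_trylock(&c->resize_lock) == 0) {
		freed = 0;
		while (freed < nr_to_scan) {
			if (!drop_one(c)) {
				freed = SCAN_STOP;   /* nothing left to reclaim */
				break;
			}
			freed++;
		}
		pthread_mutex_unlock(&c->resize_lock);
	}
	return freed;   /* SCAN_STOP if a resize was in progress */
}

int main(void)
{
	struct cache c = { PTHREAD_MUTEX_INITIALIZER, 16 };

	printf("freed: %lu\n", cache_scan(&c, 8));
	return 0;
}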
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..d05144278690 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
*/
int active_name;
char cache_name[2][32];
- struct kmem_cache *slab_cache; /* for allocating stripes */
+ struct kmem_cache *slab_cache; /* for allocating stripes */
+ struct mutex cache_size_mutex; /* Protect changes to cache size */
int seq_flush, seq_write;
int quiesce;
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
+#ifdef CONFIG_X86_64
+ if (pat_enabled()) {
+ pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+ return -ENODEV;
+ }
+#endif
+
if (itv->osd_info) {
IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
int registered = 0;
int err;
-#ifdef CONFIG_X86_64
- if (WARN(pat_enabled(),
- "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
- return -ENODEV;
- }
-#endif
if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..e9513d651cd3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
/* Fill in the data structures */
devno = MKDEV(MAJOR(mei_devt), dev->minor);
cdev_init(&dev->cdev, &mei_fops);
- dev->cdev.owner = mei_fops.owner;
+ dev->cdev.owner = parent->driver->owner;
/* Add the device */
ret = cdev_add(&dev->cdev, devno, 1);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
}
static struct scatterlist *
-scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
{
struct scatterlist *sg;
struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
return NULL;
sg_init_table(sg, page_cnt);
for (i = 0; i < page_cnt; i++) {
- page = vmalloc_to_page((void __force *)va);
- if (!page)
- goto p2p_sg_err;
+ page = pfn_to_page(pa >> PAGE_SHIFT);
sg_set_page(&sg[i], page, page_size, 0);
- va += page_size;
+ pa += page_size;
}
return sg;
-p2p_sg_err:
- kfree(sg);
- return NULL;
}
/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
if (!p2p)
return NULL;
- p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+ p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
PAGE_SIZE, num_mmio_pages);
if (!p2p->ppi_sg[SCIF_PPI_MMIO])
goto free_p2p;
p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
- p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+ p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
1 << sg_page_shift,
num_aper_chunks);
p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..a1b820fcb2a6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ mmc_blk_put(md);
+
return ret;
}
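The one-line mmc/card/block.c fix restores the get/put balance in the sysfs show routine: the reference taken at the top of power_ro_lock_show() is now dropped before returning. A small userspace sketch of that rule; blk_get()/blk_put() below are stand-ins that model mmc_blk_get()/mmc_blk_put(), not the real API:

#include <stdio.h>

struct blk_data {
	int refs;
};

static struct blk_data *blk_get(struct blk_data *md) { md->refs++; return md; }
static void blk_put(struct blk_data *md)             { md->refs--; }

static int ro_lock_show(struct blk_data *dev, char *buf, size_t len)
{
	struct blk_data *md = blk_get(dev);
	int ret = snprintf(buf, len, "%d\n", 0);

	blk_put(md);          /* the release the patch adds before returning */
	return ret;
}

int main(void)
{
	struct blk_data d = { 0 };
	char buf[8];

	ro_lock_show(&d, buf, sizeof(buf));
	printf("refs after show: %d (expect 0)\n", d.refs);
	return 0;
}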
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..6a0f9c79be26 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
config MMC_MTK
tristate "MediaTek SD/MMC Card Interface support"
+ depends on HAS_DMA
help
This selects the MediaTek(R) Secure digital and Multimedia card Interface.
If you have a machine with an integrated SD/MMC card reader, say Y or M here.
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..4d1203236890 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
if (status & (CTO_EN | CCRC_EN))
end_cmd = 1;
+ if (host->data || host->response_busy) {
+ end_trans = !end_cmd;
+ host->response_busy = 0;
+ }
if (status & (CTO_EN | DTO_EN))
hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
- else if (status & (CCRC_EN | DCRC_EN))
+ else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
+ BADA_EN))
hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
}
- if (host->data || host->response_busy) {
- end_trans = !end_cmd;
- host->response_busy = 0;
- }
}
OMAP_HSMMC_WRITE(host->base, STAT, status);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..c6b9f6492e1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
- return boarddata->f_max;
- else
- return pltfm_host->clock;
+ return pltfm_host->clock;
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
struct device_node *np = pdev->dev.of_node;
-
- if (!np)
- return -ENODEV;
-
- if (of_get_property(np, "non-removable", NULL))
- boarddata->cd_type = ESDHC_CD_PERMANENT;
-
- if (of_get_property(np, "fsl,cd-controller", NULL))
- boarddata->cd_type = ESDHC_CD_CONTROLLER;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int ret;
if (of_get_property(np, "fsl,wp-controller", NULL))
boarddata->wp_type = ESDHC_WP_CONTROLLER;
- boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- if (gpio_is_valid(boarddata->cd_gpio))
- boarddata->cd_type = ESDHC_CD_GPIO;
-
boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
if (gpio_is_valid(boarddata->wp_gpio))
boarddata->wp_type = ESDHC_WP_GPIO;
- of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
-
- of_property_read_u32(np, "max-frequency", &boarddata->f_max);
-
if (of_find_property(np, "no-1-8-v", NULL))
boarddata->support_vsel = false;
else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
+ /* sdr50 and sdr104 need work on 1.8v signal voltage */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+ !IS_ERR(imx_data->pins_default)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+ "could not get ultra high speed state, work on normal mode\n");
+ /*
+ * fall back to not supporting uhs by specifying the no 1.8v quirk
+ */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
/* call to generic mmc_of_parse to support additional capabilities */
- return mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ return 0;
}
#else
static inline int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
return -ENODEV;
}
#endif
+static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+ struct sdhci_host *host,
+ struct pltfm_imx_data *imx_data)
+{
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int err;
+
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ return -EINVAL;
+ }
+
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
+ err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ return err;
+ }
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* card_detect */
+ switch (boarddata->cd_type) {
+ case ESDHC_CD_GPIO:
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request card-detect gpio!\n");
+ return err;
+ }
+ /* fall through */
+
+ case ESDHC_CD_CONTROLLER:
+ /* we have a working card_detect back */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ break;
+
+ case ESDHC_CD_NONE:
+ break;
+ }
+
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
+ return 0;
+}
+
static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
- struct esdhc_platform_data *boarddata;
int err;
struct pltfm_imx_data *imx_data;
- bool dt = true;
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- boarddata = &imx_data->boarddata;
- if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
- if (!host->mmc->parent->platform_data) {
- dev_err(mmc_dev(host->mmc), "no board data!\n");
- err = -EINVAL;
- goto disable_clk;
- }
- imx_data->boarddata = *((struct esdhc_platform_data *)
- host->mmc->parent->platform_data);
- dt = false;
- }
- /* write_protect */
- if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
- err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request write-protect gpio!\n");
- goto disable_clk;
- }
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- }
-
- /* card_detect */
- switch (boarddata->cd_type) {
- case ESDHC_CD_GPIO:
- if (dt)
- break;
- err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request card-detect gpio!\n");
- goto disable_clk;
- }
- /* fall through */
-
- case ESDHC_CD_CONTROLLER:
- /* we have a working card_detect back */
- host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- break;
-
- case ESDHC_CD_PERMANENT:
- host->mmc->caps |= MMC_CAP_NONREMOVABLE;
- break;
-
- case ESDHC_CD_NONE:
- break;
- }
-
- switch (boarddata->max_bus_width) {
- case 8:
- host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
- break;
- case 4:
- host->mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 1:
- default:
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
- break;
- }
-
- /* sdr50 and sdr104 needs work on 1.8v signal voltage */
- if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
- !IS_ERR(imx_data->pins_default)) {
- imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_100MHZ);
- imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_200MHZ);
- if (IS_ERR(imx_data->pins_100mhz) ||
- IS_ERR(imx_data->pins_200mhz)) {
- dev_warn(mmc_dev(host->mmc),
- "could not get ultra high speed state, work on normal mode\n");
- /* fall back to not support uhs by specify no 1.8v quirk */
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
- } else {
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
+ if (of_id)
+ err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+ else
+ err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
+ if (err)
+ goto disable_clk;
err = sdhci_add_host(host);
if (err)
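The sdhci-esdhc-imx.c rework splits probing into a DT helper and a platform-data helper and dispatches on whether the device matched the OF id table, instead of trying DT first and falling back when it errors. A trivial standalone sketch of that dispatch shape; the function names are placeholders, not driver symbols:

#include <stdbool.h>
#include <stdio.h>

static int probe_dt(void)    { puts("parse device tree properties"); return 0; }
static int probe_nondt(void) { puts("copy legacy platform data");    return 0; }

static int probe(bool matched_of_id)
{
	int err = matched_of_id ? probe_dt() : probe_nondt();

	if (err)
		return err;   /* caller unwinds clocks etc. on failure */
	return 0;
}

int main(void)
{
	probe(true);
	probe(false);
	return 0;
}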
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..a870c42731d7 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -45,6 +45,6 @@
#define ESDHC_DMA_SYSCTL 0x40c
#define ESDHC_DMA_SNOOP 0x00000040
-#define ESDHC_HOST_CONTROL_RES 0x05
+#define ESDHC_HOST_CONTROL_RES 0x01
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
goto err_of_parse;
sdhci_get_of_property(pdev);
pdata = pxav3_get_mmc_pdata(dev);
+ pdev->dev.platform_data = pdata;
} else if (pdata) {
/* on-chip device */
if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..1dbe93232030 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
+ u32 max_clk;
int ret;
WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
GFP_KERNEL);
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
+ if (host->adma_table)
+ dma_free_coherent(mmc_dev(mmc),
+ host->adma_table_sz,
+ host->adma_table,
+ host->adma_addr);
kfree(host->align_buffer);
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
* Set host parameters.
*/
mmc->ops = &sdhci_ops;
- mmc->f_max = host->max_clk;
+ max_clk = host->max_clk;
+
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
if (host->clk_mul) {
mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
- mmc->f_max = host->max_clk * host->clk_mul;
+ max_clk = host->max_clk * host->clk_mul;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+ mmc->f_max = max_clk;
+
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
SDHCI_TIMEOUT_CLK_SHIFT;
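In sdhci.c, mmc->f_max is now only overwritten with the controller-derived max_clk when it is unset or larger than that limit, so a lower frequency cap coming from firmware (for example a DT "max-frequency") survives. A standalone model of the clamp with example values:

#include <stdio.h>

static unsigned int clamp_f_max(unsigned int f_max, unsigned int max_clk)
{
	if (!f_max || f_max > max_clk)
		f_max = max_clk;
	return f_max;
}

int main(void)
{
	printf("%u\n", clamp_f_max(0, 200000000));          /* unset: controller max */
	printf("%u\n", clamp_f_max(50000000, 200000000));   /* firmware limit kept */
	printf("%u\n", clamp_f_max(400000000, 200000000));  /* capped to controller */
	return 0;
}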
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 317a49480475..e1ccefce9a9d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}
+static struct slave *bond_get_old_active(struct bonding *bond,
+ struct slave *new_active)
+{
+ struct slave *slave;
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave == new_active)
+ continue;
+
+ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+ return slave;
+ }
+
+ return NULL;
+}
+
/* bond_do_fail_over_mac
*
* Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!new_active)
return;
+ if (!old_active)
+ old_active = bond_get_old_active(bond, new_active);
+
if (old_active) {
ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
ether_addr_copy(saddr.sa_data,
@@ -1725,9 +1745,16 @@ err_free:
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
- if (!bond_has_slaves(bond) &&
- ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
- eth_hw_addr_random(bond_dev);
+ if (!bond_has_slaves(bond)) {
+ if (ether_addr_equal_64bits(bond_dev->dev_addr,
+ slave_dev->dev_addr))
+ eth_hw_addr_random(bond_dev);
+ if (bond_dev->type != ARPHRD_ETHER) {
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
+ }
return res;
}
@@ -1916,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond %s\n",
bond_dev->name);
+ bond_remove_proc_entry(bond);
unregister_netdevice(bond_dev);
}
return ret;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
}
/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
}
at91_read_mb(dev, mb, cf);
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
return 0;
at91_poll_err_frame(dev, cf, reg_sr);
- netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
return;
at91_irq_err_state(dev, cf, new_state);
- netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_rx(skb);
priv->can.state = new_state;
}
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
priv->can.state = state;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
for (i = 0; i < cf->can_dlc; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
}
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
return 0;
do_bus_err(dev, cf, reg_esr);
- netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- netif_receive_skb(skb);
-
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
}
flexcan_read_fifo(dev, cf);
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
cf->data[i] = (u8)(slot[j] >> shift);
}
}
- netif_receive_skb(skb);
/* Update statistics and read pointer */
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a23a7af8eb9a..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl)
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
- netif_rx_ni(skb);
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
+ netif_rx_ni(skb);
}
/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- priv->power = devm_regulator_get(&spi->dev, "vdd");
- priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+ priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+ if (priv->after_suspend & AFTER_SUSPEND_POWER)
mcp251x_power_enable(priv->power, 1);
+
+ if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ mcp251x_power_enable(priv->transceiver, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
- if (priv->after_suspend & AFTER_SUSPEND_UP) {
- mcp251x_power_enable(priv->transceiver, 1);
- queue_work(priv->wq, &priv->restart_work);
- } else {
- priv->after_suspend = 0;
- }
+ priv->after_suspend = 0;
}
+
priv->force_quit = 0;
enable_irq(spi->irq);
return 0;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
cf->data[i] = msg->msg.can_msg.msg[i];
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
stats->rx_errors++;
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
cf->data[7] = rxerr;
}
- netif_rx(skb);
-
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
cf->data[i] = msg->msg.rx.data[i];
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
hwts->hwtstamp = timeval_to_ktime(tv);
}
- netif_rx(skb);
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- /* push the skb */
- netif_rx(skb);
-
/* update statistics */
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->can_dlc;
+ /* push the skb */
+ netif_rx(skb);
return 0;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
else
memcpy(cf->data, msg->data, cf->can_dlc);
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
} else {
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
}
/* Include the pseudo-PHY address and the broadcast PHY address to
- * divert reads towards our workaround
+ * divert reads towards our workaround. This is only required for
+ * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
+ * that we can use the regular SWITCH_MDIO master controller instead.
+ *
+ * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
+ * to have a 1:1 mapping between Port address and PHY address in order
+ * to utilize the slave_mii_bus instance to read from Port PHYs. This is
+ * not what we want here, so we initialize phys_mii_mask to 0 to always
+ * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
*/
- ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ if (of_machine_is_compatible("brcm,bcm7445d0"))
+ ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ else
+ ds->phys_mii_mask = 0;
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
newfid = __ffs(ps->fid_mask);
ps->fid[port] = newfid;
- ps->fid_mask &= (1 << newfid);
+ ps->fid_mask &= ~(1 << newfid);
ps->bridge_mask[fid] &= ~(1 << port);
ps->bridge_mask[newfid] = 1 << port;
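The mv88e6xxx.c fix is the classic free-bitmap idiom: reserving id n must AND the mask with ~(1 << n); AND-ing with (1 << n), as the old code did, wipes every other free id instead. A standalone illustration of the corrected allocation step:

#include <stdio.h>

static int alloc_fid(unsigned int *free_mask)
{
	int fid = __builtin_ffs(*free_mask) - 1;   /* lowest free id, -1 if none */

	if (fid >= 0)
		*free_mask &= ~(1u << fid);        /* clear only that bit */
	return fid;
}

int main(void)
{
	unsigned int mask = 0x0f;

	printf("got %d, mask now 0x%x\n", alloc_fid(&mask), mask);
	printf("got %d, mask now 0x%x\n", alloc_fid(&mask), mask);
	return 0;
}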
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42e20e5385ac..1f89c59b4353 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -78,7 +77,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
-#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
static struct platform_device_id fec_devtype[] = {
{
@@ -1769,13 +1767,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
- struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
-
- ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret))
- return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
@@ -1791,30 +1783,18 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
- ret = -ETIMEDOUT;
- goto out;
+ return -ETIMEDOUT;
}
- ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
-
-out:
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
-
- return ret;
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fec_enet_private *fep = bus->priv;
- struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
-
- ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret))
- return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
@@ -1831,13 +1811,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
- ret = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
-
- return ret;
+ return 0;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1849,6 +1826,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ahb);
if (ret)
return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
if (fep->clk_enet_out) {
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
@@ -1872,6 +1852,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
} else {
clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1893,6 +1874,8 @@ failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
clk_disable_unprepare(fep->clk_ahb);
return ret;
@@ -2864,14 +2847,10 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
- ret = pm_runtime_get_sync(&fep->pdev->dev);
- if (IS_ERR_VALUE(ret))
- return ret;
-
pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret)
- goto clk_enable;
+ return ret;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
@@ -2902,9 +2881,6 @@ err_enet_mii_probe:
fec_enet_free_buffers(ndev);
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
-clk_enable:
- pm_runtime_mark_last_busy(&fep->pdev->dev);
- pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
}
@@ -2927,9 +2903,6 @@ fec_enet_close(struct net_device *ndev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
- pm_runtime_mark_last_busy(&fep->pdev->dev);
- pm_runtime_put_autosuspend(&fep->pdev->dev);
-
fec_enet_free_buffers(ndev);
return 0;
@@ -3415,10 +3388,6 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_clk;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -3465,8 +3434,6 @@ fec_probe(struct platform_device *pdev)
netif_carrier_off(ndev);
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
ret = register_netdev(ndev);
if (ret)
@@ -3480,12 +3447,6 @@ fec_probe(struct platform_device *pdev)
fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
-
- pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
-
return 0;
failed_register:
@@ -3496,8 +3457,6 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
@@ -3609,28 +3568,7 @@ failed_clk:
return ret;
}
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- clk_disable_unprepare(fep->clk_ipg);
-
- return 0;
-}
-
-static int __maybe_unused fec_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- return clk_prepare_enable(fep->clk_ipg);
-}
-
-static const struct dev_pm_ops fec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
-};
+static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
static struct platform_driver fec_driver = {
.driver = {
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
struct net_device *dev = pp->dev;
- int rx_done, rx_filled;
+ int rx_done;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_todo = rx_done;
rx_done = 0;
- rx_filled = 0;
/* Fairness NAPI loop */
while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
int rx_bytes, err;
rx_done++;
- rx_filled++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
continue;
}
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ goto err_drop_frame;
+ }
+
skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
if (!skb)
goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
-
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- rx_filled--;
- }
}
if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
return rx_done;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
struct ravb_desc *desc = NULL;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
- struct sk_buff *skb;
dma_addr_t dma_addr;
- void *buffer;
int i;
priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
memset(priv->rx_ring[q], 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- priv->rx_skb[q][i] = NULL;
- skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
- if (!skb)
- break;
- ravb_set_buffer_align(skb);
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
/* The size of the buffer should be on 16-byte boundary. */
rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
- dev_kfree_skb(skb);
- break;
- }
- priv->rx_skb[q][i] = skb;
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
rx_desc = &priv->rx_ring[q][i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
- priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
for (i = 0; i < priv->num_tx_ring[q]; i++) {
- priv->tx_skb[q][i] = NULL;
- priv->tx_buffers[q][i] = NULL;
- buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
- if (!buffer)
- break;
- /* Aligned TX buffer */
- priv->tx_buffers[q][i] = buffer;
tx_desc = &priv->tx_ring[q][i];
tx_desc->die_dt = DT_EEMPTY;
}
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
int ring_size;
+ void *buffer;
+ int i;
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
if (!priv->rx_skb[q] || !priv->tx_skb[q])
goto error;
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ goto error;
+ ravb_set_buffer_align(skb);
+ priv->rx_skb[q][i] = skb;
+ }
+
/* Allocate rings for the aligned buffers */
priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
if (!priv->tx_buffers[q])
goto error;
+ for (i = 0; i < priv->num_tx_ring[q]; i++) {
+ buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+ if (!buffer)
+ goto error;
+ /* Aligned TX buffer */
+ priv->tx_buffers[q][i] = buffer;
+ }
+
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (--boguscnt < 0)
break;
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
if (desc_status & MSC_MC)
stats->multicast++;
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
- dma_sync_single_for_cpu(&ndev->dev,
- le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
- DMA_FROM_DEVICE);
dma_addr = dma_map_single(&ndev->dev, skb->data,
le16_to_cpu(desc->ds_cc),
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
- dev_kfree_skb_any(skb);
- break;
- }
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
}
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 dma_addr;
void *buffer;
u32 entry;
- u32 tccr;
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_wmb();
desc->die_dt = DT_FSINGLE;
- tccr = ravb_read(ndev, TCCR);
- if (!(tccr & (TCCR_TSRQ0 << q)))
- ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+ ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
priv->cur_tx[q]++;
if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
if (res->mac)
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
- dev_set_drvdata(device, priv);
+ dev_set_drvdata(device, priv->dev);
/* Verify driver arguments */
stmmac_verify_args();
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f335bf119ab5..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
- int num_tx, num_rx;
-
- num_tx = cpdma_chan_process(priv->txch, 128);
+ int num_rx;
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
}
}
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_rx)
+ cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
return num_rx;
}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..ec8ed30196f3 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev)
}
mutex_unlock(&netcp_modules_lock);
- netcp_rxpool_refill(netcp);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
knav_queue_enable_notify(netcp->tx_compl_q);
knav_queue_enable_notify(netcp->rx_queue);
+ netcp_rxpool_refill(netcp);
netif_tx_wake_all_queues(ndev);
dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
return 0;
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
struct ipvl_port *port;
struct net_device *phy_dev;
struct list_head addrs;
- int ipv4cnt;
- int ipv6cnt;
struct ipvl_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
return rcu_dereference(d->rx_handler_data);
}
+static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
+{
+ return rcu_dereference_bh(d->rx_handler_data);
+}
+
static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
{
return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6);
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
hlist_del_init_rcu(&addr->hlnode);
- if (sync)
- synchronize_rcu();
}
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+ struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
if (!port)
goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry(addr, &ipvlan->addrs, anode)
- ipvlan_ht_addr_add(ipvlan, addr);
- }
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_add(ipvlan, addr);
+
return dev_uc_add(phy_dev, phy_dev->dev_addr);
}
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_del(phy_dev, phy_dev->dev_addr);
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry(addr, &ipvlan->addrs, anode)
- ipvlan_ht_addr_del(addr, !dev->dismantle);
- }
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_del(addr);
+
return 0;
}
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
INIT_LIST_HEAD(&ipvlan->addrs);
- ipvlan->ipv4cnt = 0;
- ipvlan->ipv6cnt = 0;
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
- ipvlan_ht_addr_del(addr, !dev->dismantle);
- list_del(&addr->anode);
- }
+ list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+ ipvlan_ht_addr_del(addr);
+ list_del(&addr->anode);
+ kfree_rcu(addr, rcu);
}
+
list_del_rcu(&ipvlan->pnode);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
list_add_tail(&addr->anode, &ipvlan->addrs);
- ipvlan->ipv6cnt++;
+
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
if (!addr)
return;
- ipvlan_ht_addr_del(addr, true);
+ ipvlan_ht_addr_del(addr);
list_del(&addr->anode);
- ipvlan->ipv6cnt--;
- WARN_ON(ipvlan->ipv6cnt < 0);
kfree_rcu(addr, rcu);
return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ /* FIXME IPv6 autoconf calls us from bh without RTNL */
+ if (in_softirq())
+ return NOTIFY_DONE;
+
if (!netif_is_ipvlan(dev))
return NOTIFY_DONE;
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
list_add_tail(&addr->anode, &ipvlan->addrs);
- ipvlan->ipv4cnt++;
+
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
if (!addr)
return;
- ipvlan_ht_addr_del(addr, true);
+ ipvlan_ht_addr_del(addr);
list_del(&addr->anode);
- ipvlan->ipv4cnt--;
- WARN_ON(ipvlan->ipv4cnt < 0);
kfree_rcu(addr, rcu);
return;
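
The ipvlan changes above drop the per-family counters and the synchronous synchronize_rcu() in favour of unconditional list walks and deferred freeing. A minimal sketch of that deferred-free pattern, with illustrative names (example_addr stands in for struct ipvl_addr):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_addr {
	struct hlist_node hlnode;	/* RCU-protected hash linkage */
	struct list_head anode;		/* per-device address list */
	struct rcu_head rcu;		/* used by kfree_rcu() */
};

static void example_del_addr(struct example_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);	/* readers may still see it */
	list_del(&addr->anode);			/* writer-side list */
	kfree_rcu(addr, rcu);			/* freed after a grace period */
}

Deferring the free with kfree_rcu() keeps the caller from blocking in synchronize_rcu() while still guaranteeing no reader holds a stale pointer when the memory is released.
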
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
- if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) ||
+ if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
(phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
DP83867_DEVADDR, phydev->addr);
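
The operator fix above is easy to gloss over: since PHY_INTERFACE_MODE_RGMII_ID is not greater than PHY_INTERFACE_MODE_RGMII_RXID, every interface value satisfies at least one side of the old "||" test, so the RGMII delay configuration ran unconditionally. A hedged sketch of the intended range check (helper name is illustrative):

#include <linux/phy.h>

static bool dp83867_is_rgmii_id_mode(struct phy_device *phydev)
{
	/* only the RGMII internal-delay variants in this window */
	return phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID &&
	       phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID;
}
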
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
struct phy_device *phydev = to_phy_device(dev);
struct phy_driver *phydrv = to_phy_driver(drv);
+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+ int i;
if (of_driver_match_device(dev, drv))
return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
if (phydrv->match_phy_device)
return phydrv->match_phy_device(phydev);
- return (phydrv->phy_id & phydrv->phy_id_mask) ==
- (phydev->phy_id & phydrv->phy_id_mask);
+ if (phydev->is_c45) {
+ for (i = 1; i < num_ids; i++) {
+ if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+ continue;
+
+ if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->c45_ids.device_ids[i] &
+ phydrv->phy_id_mask))
+ return 1;
+ }
+ return 0;
+ } else {
+ return (phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->phy_id & phydrv->phy_id_mask);
+ }
}
#ifdef CONFIG_PM
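
The new clause-45 branch walks every MMD reported in devices_in_package and applies the same masked comparison the clause-22 path already used. A small worked example of that comparison, with made-up IDs:

#include <stdbool.h>
#include <stdint.h>

static bool masked_id_match(void)
{
	uint32_t drv_id   = 0x01410cc0;	/* phy_id a driver declares (hypothetical) */
	uint32_t drv_mask = 0xfffffff0;	/* phy_id_mask: ignore revision bits */
	uint32_t mmd_id   = 0x01410cc2;	/* device_ids[i] read from one MMD */

	/* both sides are masked, so a newer silicon revision still matches */
	return (drv_id & drv_mask) == (mmd_id & drv_mask);	/* true */
}
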
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..9d43460ce3c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
{QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..7fbca37a1adf 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
- if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
return;
case AR9300_DEVID_QCA956X:
ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
hw_addr = (const u8 *)(mac_override +
MAC_ADDRESS_OVERRIDE_FAMILY_8000);
- /* The byte order is little endian 16 bit, meaning 214365 */
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
+ /*
+ * Store the MAC address from the MAO section.
+ * No byte swapping is required in the MAO section
+ */
+ memcpy(data->hw_addr, hw_addr, ETH_ALEN);
/*
* Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
- * The low priority scan is aborted.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
*@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
* when scan starts.
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5000bfcded61 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
+ if (type == IWL_MVM_SCAN_SCHED)
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
int ret;
+ static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
lockdep_assert_held(&mvm->mutex);
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta->addr, ret);
+ sta ? sta->addr : zero_addr, ret);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
{
lockdep_assert_held(&mvm->time_event_lock);
- if (te_data->id == TE_MAX)
+ if (!te_data->vif)
return;
list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
if (info->band == IEEE80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
- rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+ rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
rate_flags =
BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool:
+ * @queue:
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
* @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
- u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- * the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- * of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
- struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
- atomic_t req_pending;
- atomic_t req_ready;
- struct list_head rbd_allocated;
- struct list_head rbd_empty;
- spinlock_t lock;
- struct workqueue_struct *alloc_wq;
- struct work_struct rx_alloc;
};
struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
- struct iwl_rb_allocator rba;
+ struct work_struct rx_replenish;
struct iwl_trans *trans;
struct iwl_drv *drv;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- * When the interrupt handler is called, the request is processed.
- * The page is either stolen - transferred to the upper layer
- * or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- * count, detaches the RBD and transfers it to the queue used list.
- * When there are two used RBDs - they are transferred to the allocator empty
- * list. Work is then scheduled for the allocator to start allocating
- * eight buffers.
- * When there are another 6 used RBDs - they are transferred to the allocator
- * empty list and the driver tries to claim the pre-allocated buffers and
- * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- * until ready.
- * When there are 8+ buffers in the free list - either from allocation or from
- * 8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- * the allocator has initial pool in the size of num_queues*(8-2) - the
- * maximum missing RBDs per allocation request (request posted with 2
- * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- * The queues supplies the recycle of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
@@ -105,32 +92,18 @@
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock.
- * Used only during initialization.
+ * iwl_pcie_rxq_restock
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
- * the WRITE index.
- * iwl_pcie_rx_allocator() Background work for allocating pages.
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_pcie_rx_replenish
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
- * Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
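
The comment above describes the simpler scheme this revert restores: buffers are recycled or refilled directly, and a work item is only scheduled when the free pool runs low. The shape of that low-watermark check, simplified and with an illustrative function name:

static void example_after_restock(struct iwl_trans_pcie *trans_pcie,
				  struct iwl_rxq *rxq)
{
	/* restock has just moved buffers from rxq->rx_free to the HW queue;
	 * if the pool is nearly empty, refill it later in process context
	 * where GFP_KERNEL allocations are allowed */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);
}
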
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->free_count--;
}
spin_unlock(&rxq->lock);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
}
/*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct page *page;
- gfp_t gfp_mask = GFP_KERNEL;
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (trans_pcie->rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
- trans_pcie->rx_page_order);
- /* Issue an error if the hardware has consumed more than half
- * of its free buffer list and we don't have enough
- * pre-allocated buffers.
- */
- if (rxq->free_count <= RX_LOW_WATERMARK &&
- iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
- net_ratelimit())
- IWL_CRIT(trans,
- "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
- rxq->free_count);
- return NULL;
- }
- return page;
-}
-
-/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
* A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
+ gfp_t gfp_mask = priority;
while (1) {
spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
}
spin_unlock(&rxq->lock);
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans);
- if (!page)
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+ "order: %d\n",
+ trans_pcie->rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(trans, "Failed to alloc_pages with %s."
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
return;
+ }
spin_lock(&rxq->lock);
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
lockdep_assert_held(&rxq->lock);
- for (i = 0; i < RX_QUEUE_SIZE; i++) {
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* When moving to rx_free a page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except for during initialization)
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- iwl_pcie_rxq_alloc_rbs(trans);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates for each received request 8 pages
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
- while (atomic_read(&rba->req_pending)) {
- int i;
- struct list_head local_empty;
- struct list_head local_allocated;
-
- INIT_LIST_HEAD(&local_allocated);
- spin_lock(&rba->lock);
- /* swap out the entire rba->rbd_empty to a local list */
- list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
-
- for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
-
- /* List should never be empty - each reused RBD is
- * returned to the list, and initial pool covers any
- * possible gap between the time the page is allocated
- * to the time the RBD is added.
- */
- BUG_ON(list_empty(&local_empty));
- /* Get the first rxb from the rbd list */
- rxb = list_first_entry(&local_empty,
- struct iwl_rx_mem_buffer, list);
- BUG_ON(rxb->page);
-
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans);
- if (!page)
- continue;
- rxb->page = page;
-
- /* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, rxb->page_dma)) {
- rxb->page = NULL;
- __free_pages(page, trans_pcie->rx_page_order);
- continue;
- }
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- /* move the allocated entry to the out list */
- list_move(&rxb->list, &local_allocated);
- i++;
- }
-
- spin_lock(&rba->lock);
- /* add the allocated rbds to the allocator allocated list */
- list_splice_tail(&local_allocated, &rba->rbd_allocated);
- /* add the unused rbds back to the allocator empty list */
- list_splice_tail(&local_empty, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-
- atomic_dec(&rba->req_pending);
- atomic_inc(&rba->req_ready);
- }
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
-.*
-.* Called by queue when the queue posted allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer
- *out[RX_CLAIM_REQ_ALLOC])
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
-
- if (atomic_dec_return(&rba->req_ready) < 0) {
- atomic_inc(&rba->req_ready);
- IWL_DEBUG_RX(trans,
- "Allocation request not ready, pending requests = %d\n",
- atomic_read(&rba->req_pending));
- return -ENOMEM;
- }
-
- spin_lock(&rba->lock);
- for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
- /* Get next free Rx buffer, remove it from free list */
- out[i] = list_first_entry(&rba->rbd_allocated,
- struct iwl_rx_mem_buffer, list);
- list_del(&out[i]->list);
- }
- spin_unlock(&rba->lock);
-
- return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
- struct iwl_rb_allocator *rba_p =
- container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
- container_of(rba_p, struct iwl_trans_pcie, rba);
+ container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_allocator(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
- spin_lock_init(&rba->lock);
if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
- rxq->used_count = 0;
- for (i = 0; i < RX_QUEUE_SIZE; i++)
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
- int i;
-
- lockdep_assert_held(&rba->lock);
-
- INIT_LIST_HEAD(&rba->rbd_allocated);
- INIT_LIST_HEAD(&rba->rbd_empty);
-
- for (i = 0; i < RX_POOL_SIZE; i++)
- list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
-
- lockdep_assert_held(&rba->lock);
-
- for (i = 0; i < RX_POOL_SIZE; i++) {
- if (!rba->pool[i].page)
- continue;
- dma_unmap_page(trans->dev, rba->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
- rba->pool[i].page = NULL;
- }
-}
-
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;
if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (err)
return err;
}
- if (!rba->alloc_wq)
- rba->alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
- spin_lock(&rba->lock);
- atomic_set(&rba->req_pending, 0);
- atomic_set(&rba->req_ready, 0);
- /* free all first - we might be reconfigured for a different size */
- iwl_pcie_rx_free_rba(trans);
- iwl_pcie_rx_init_rba(rba);
- spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
- cancel_work_sync(&rba->rx_alloc);
- if (rba->alloc_wq) {
- destroy_workqueue(rba->alloc_wq);
- rba->alloc_wq = NULL;
- }
-
- spin_lock(&rba->lock);
- iwl_pcie_rx_free_rba(trans);
- spin_unlock(&rba->lock);
+ cancel_work_sync(&trans_pcie->rx_replenish);
spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts = NULL;
}
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb,
- struct iwl_rxq *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
- /* Count the used RBDs */
- rxq->used_count++;
-
- /* Move the RBD to the used list, will be moved to allocator in batches
- * before claiming or posting a request*/
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- /* If we have RX_POST_REQ_ALLOC new released rx buffers -
- * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
- * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
- * after but we still need to post another request.
- */
- if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
- /* Move the 2 RBDs to the allocator ownership.
- Allocator has another 6 from pool for the request completion*/
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-
- atomic_inc(&rba->req_pending);
- queue_work(rba->alloc_wq, &rba->rx_alloc);
- }
-}
-
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+ list_add_tail(&rxb->list, &rxq->rx_used);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+ list_add_tail(&rxb->list, &rxq->rx_used);
}
/*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i, j;
+ u32 r, i;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
restart:
spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
@@ -968,48 +739,29 @@ restart:
iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
-
- /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
- * try to claim the pre-allocated buffers from the allocator */
- if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
- /* Add the remaining 6 empty RBDs for allocator use */
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-
- /* If not ready - continue, will try to reclaim later.
- * No need to reschedule work - allocator exits only on
- * success */
- if (!iwl_pcie_rx_allocator_get(trans, out)) {
- /* If success - then RX_CLAIM_REQ_ALLOC
- * buffers were retrieved and should be added
- * to free list */
- rxq->used_count -= RX_CLAIM_REQ_ALLOC;
- for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
- list_add_tail(&out[j]->list,
- &rxq->rx_free);
- rxq->free_count++;
- }
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ count = 0;
+ goto restart;
}
}
- /* handle restock for two cases:
- * - we just pulled buffers from the allocator
- * - we have 8+ unstolen pages accumulated */
- if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rxq_restock(trans);
- goto restart;
- }
}
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
+ if (fill_rx)
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ else
+ iwl_pcie_rxq_restock(trans);
+
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..6203c4ad9bba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
- if (!trans->cfg->apmg_not_supported)
+ if (trans->cfg->apmg_not_supported)
return;
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
u16 pci_cmd;
- int err;
+ int ret;
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->ref_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- err = pci_enable_device(pdev);
- if (err)
+ ret = pci_enable_device(pdev);
+ if (ret)
goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
- if (err) {
+ if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto out_pci_disable_device;
}
}
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
goto out_pci_disable_device;
}
@@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
- err = -ENODEV;
+ ret = -ENODEV;
goto out_pci_release_regions;
}
@@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
- err = pci_enable_msi(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2547,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
*/
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
unsigned long flags;
- int ret;
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+ ret = iwl_pcie_prepare_card_hw(trans);
+ if (ret) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ goto out_pci_disable_msi;
+ }
+
/*
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
@@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- if (iwl_pcie_alloc_ict(trans))
+ ret = iwl_pcie_alloc_ict(trans);
+ if (ret)
goto out_pci_disable_msi;
- err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
- if (err) {
+ if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
@@ -2617,5 +2623,5 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
iwl_trans_free(trans);
- return ERR_PTR(err);
+ return ERR_PTR(ret);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..7d50711476fe 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
smp_rmb();
while (dc != dp) {
- BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
pending_idx =
queue->dealloc_ring[pending_index(dc++)];
- pending_idx_release[gop-queue->tx_unmap_ops] =
+ pending_idx_release[gop - queue->tx_unmap_ops] =
pending_idx;
- queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
idx_to_kaddr(queue, pending_idx),
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..7384455792bf 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
nvdimm_bus_unlock(dev);
}
if (is_nd_btt(dev) && probe) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
+ if (nd_region->ns_seed == &nd_btt->ndns->dev &&
+ is_nd_blk(dev->parent))
+ nd_region_create_blk_seed(nd_region);
nvdimm_bus_unlock(dev);
}
}
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
par_dev->dev.release = free_pardevice;
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
- if (ret)
- goto err_put_dev;
+ if (ret) {
+ put_device(&par_dev->dev);
+ goto err_put_port;
+ }
/* Chain this onto the list */
par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
- goto err_put_dev;
+ device_unregister(&par_dev->dev);
+ goto err_put_port;
}
port->flags |= PARPORT_FLAG_EXCL;
}
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
return par_dev;
-err_put_dev:
- put_device(&par_dev->dev);
err_free_devname:
kfree(devname);
err_free_par_dev:
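
The reordered error paths above follow the driver-model ownership rule: once device_register() has been called, the device's release callback owns the memory, so a registration failure is unwound with put_device() and a post-registration failure with device_unregister(), never with a plain kfree(). A sketch of that rule (function name illustrative):

#include <linux/device.h>
#include <linux/parport.h>

static int example_register_pardev(struct pardevice *par_dev)
{
	int ret = device_register(&par_dev->dev);

	if (ret) {
		put_device(&par_dev->dev);	/* release() frees par_dev */
		return ret;
	}

	/* from here on, failures must use device_unregister(&par_dev->dev) */
	return 0;
}
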
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..6b8dd162f644 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
config PHY_PXA_28NM_HSIC
tristate "Marvell USB HSIC 28nm PHY Driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
help
Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
config PHY_PXA_28NM_USB2
tristate "Marvell USB 2.0 28nm PHY Driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
help
Enable this to support Marvell USB 2.0 PHY driver for Marvell
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..335e06d66ed9 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
static const u32 phy_berlin_pll_dividers[] = {
/* Berlin 2 */
- CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
- /* Berlin 2CD */
CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+ /* Berlin 2CD/Q */
+ CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
};
struct phy_berlin_usb_priv {
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..3510b81db3fa 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,6 @@
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
@@ -83,10 +82,6 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
- bool enabled;
- spinlock_t lock; /* serialize clock enable/disable */
- /* the below flag is needed specifically for SATA */
- bool refclk_enabled;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
return NULL;
}
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
+
static int ti_pipe3_power_off(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x)
u32 val;
int ret = 0;
+ ti_pipe3_enable_clocks(phy);
/*
* Set pcie_pcs register to 0x96 for proper functioning of phy
* as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x)
u32 val;
unsigned long timeout;
- /* SATA DPLL can't be powered down due to Errata i783 and PCIe
- * does not have internal DPLL
- */
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") ||
- of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie"))
+ /* SATA DPLL can't be powered down due to Errata i783 */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
return 0;
- /* Put DPLL in IDLE mode */
- val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val |= PLL_IDLE;
- ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- /* wait for LDO and Oscillator to power down */
- timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
- do {
- cpu_relax();
- val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
- break;
- } while (!time_after(jiffies, timeout));
+ /* PCIe doesn't have internal DPLL */
+ if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+ /* Put DPLL in IDLE mode */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val |= PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
- if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
- dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
- val);
- return -EBUSY;
+ /* wait for LDO and Oscillator to power down */
+ timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+ dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+ val);
+ return -EBUSY;
+ }
}
+ ti_pipe3_disable_clocks(phy);
+
return 0;
}
static struct phy_ops ops = {
@@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
return -ENOMEM;
phy->dev = &pdev->dev;
- spin_lock_init(&phy->lock);
if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
+ /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
+ if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
+ if (!IS_ERR(phy->refclk))
+ clk_prepare_enable(phy->refclk);
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy))
@@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
- pm_runtime_get(&pdev->dev);
-
return 0;
}
static int ti_pipe3_remove(struct platform_device *pdev)
{
- if (!pm_runtime_suspended(&pdev->dev))
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
{
- if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
- int ret;
+ int ret = 0;
+ if (!IS_ERR(phy->refclk)) {
ret = clk_prepare_enable(phy->refclk);
if (ret) {
dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
return ret;
}
- phy->refclk_enabled = true;
}
- return 0;
-}
-
-static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
-{
- if (!IS_ERR(phy->refclk))
- clk_disable_unprepare(phy->refclk);
-
- phy->refclk_enabled = false;
-}
-
-static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->enabled)
- goto err1;
-
- ret = ti_pipe3_enable_refclk(phy);
- if (ret)
- goto err1;
-
if (!IS_ERR(phy->wkupclk)) {
ret = clk_prepare_enable(phy->wkupclk);
if (ret) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
- goto err2;
+ goto disable_refclk;
}
}
@@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
ret = clk_prepare_enable(phy->div_clk);
if (ret) {
dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
- goto err3;
+ goto disable_wkupclk;
}
}
- phy->enabled = true;
- spin_unlock_irqrestore(&phy->lock, flags);
return 0;
-err3:
+disable_wkupclk:
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
-err2:
+disable_refclk:
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- ti_pipe3_disable_refclk(phy);
-err1:
- spin_unlock_irqrestore(&phy->lock, flags);
return ret;
}
static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
- unsigned long flags;
-
- spin_lock_irqsave(&phy->lock, flags);
- if (!phy->enabled) {
- spin_unlock_irqrestore(&phy->lock, flags);
- return;
- }
-
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- /* Don't disable refclk for SATA PHY due to Errata i783 */
- if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
- ti_pipe3_disable_refclk(phy);
+ if (!IS_ERR(phy->refclk))
+ clk_disable_unprepare(phy->refclk);
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
- phy->enabled = false;
- spin_unlock_irqrestore(&phy->lock, flags);
-}
-
-static int ti_pipe3_runtime_suspend(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
- ti_pipe3_disable_clocks(phy);
- return 0;
}
-static int ti_pipe3_runtime_resume(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = ti_pipe3_enable_clocks(phy);
- return ret;
-}
-
-static int ti_pipe3_suspend(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
- ti_pipe3_disable_clocks(phy);
- return 0;
-}
-
-static int ti_pipe3_resume(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
- int ret;
-
- ret = ti_pipe3_enable_clocks(phy);
- if (ret)
- return ret;
-
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops ti_pipe3_pm_ops = {
- SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
- ti_pipe3_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
-};
-
static const struct of_device_id ti_pipe3_id_table[] = {
{
.compatible = "ti,phy-usb3",
@@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = {
.remove = ti_pipe3_remove,
.driver = {
.name = "ti-pipe3",
- .pm = &ti_pipe3_pm_ops,
.of_match_table = ti_pipe3_id_table,
},
};
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..6177315ab74e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
spin_lock_irqsave(&pc->irq_lock[bank], flags);
bcm2835_gpio_irq_config(pc, gpio, false);
+ /* Clear events that were latched prior to clearing event sources */
+ bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
unsigned num_configs)
{
struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx1_pinctrl_soc_info *info = ipctl->info;
int i;
for (i = 0; i != num_configs; ++i) {
imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
- info->pins[pin_id].name);
+ pin_desc_get(pctldev, pin_id)->name);
}
return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
.set_mux = abx500_pmx_set,
.gpio_request_enable = abx500_gpio_request_enable,
.gpio_disable_free = abx500_gpio_disable_free,
- .strict = true,
};
static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..347c763a6a78 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (param)
+ if (param_val)
*reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
else
*reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (param)
+ if (param_val)
*reg &= ~LPC18XX_SCU_PIN_ZIF;
else
*reg |= LPC18XX_SCU_PIN_ZIF;
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..0b8d480171a3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
int res;
res = request_irq(pcs_soc->irq, pcs_irq_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND |
+ IRQF_NO_THREAD,
name, pcs_soc);
if (res) {
pcs_soc->irq = -1;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
#include "../core.h"
#include "pinctrl-samsung.h"
-#define GROUP_SUFFIX "-grp"
-#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
-#define FUNCTION_SUFFIX "-mux"
-#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
-
/* list of all possible config options supported */
static struct pin_config {
const char *property;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..0874cfee6889 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
#define _GP_GPIO(bank, _pin, _name, sfx) \
- [(bank * 32) + _pin] = { \
+ { \
.pin = (bank * 32) + _pin, \
.name = __stringify(_name), \
.enum_id = _name##_DATA, \
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..7fd4f511d78f 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -130,7 +130,7 @@ struct pm800_regulators {
.owner = THIS_MODULE, \
.n_voltages = ARRAY_SIZE(ldo_volt_table), \
.vsel_reg = PM800_##vreg##_VOUT, \
- .vsel_mask = 0x1f, \
+ .vsel_mask = 0xf, \
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
.volt_table = ldo_volt_table, \
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..78387a6cbae5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
+static void _regulator_put(struct regulator *regulator);
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev,
rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+ if (!try_module_get(supply_rdev->owner))
+ return -ENODEV;
+
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
if (rdev->supply == NULL) {
err = -ENOMEM;
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
if (!r) {
- dev_err(dev, "Failed to resolve %s-supply for %s\n",
- rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ if (have_full_constraints()) {
+ r = dummy_regulator_rdev;
+ } else {
+ dev_err(dev, "Failed to resolve %s-supply for %s\n",
+ rdev->supply_name, rdev->desc->name);
+ return -EPROBE_DEFER;
+ }
}
/* Recursively resolve the supply of the supply */
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Cascade always-on state to supply */
if (_regulator_is_enabled(rdev)) {
ret = regulator_enable(rdev->supply);
- if (ret < 0)
+ if (ret < 0) {
+ if (rdev->supply)
+ _regulator_put(rdev->supply);
return ret;
+ }
}
return 0;
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..e94ddcf97722 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
if (of_property_read_bool(np, "maxim,enable-bias-control"))
- pdata->control_flags |= MAX8973_BIAS_ENABLE;
+ pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
return pdata;
}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
+/* The highest number of possible regulators for supported devices. */
+#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
struct s2mps11_info {
unsigned int rdev_num;
int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
* One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
* the suspend mode was enabled.
*/
- unsigned long long s2mps14_suspend_state:50;
+ DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
/* Array of size rdev_num with GPIO-s for external sleep control */
int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
switch (s2mps11->dev_type) {
case S2MPS13X:
case S2MPS14X:
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPS14_ENABLE_SUSPEND;
else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
val = rdev->desc->enable_mask;
break;
case S2MPU02:
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPU02_ENABLE_SUSPEND;
else
val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+ set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
/*
* Don't enable suspend mode if regulator is already disabled because
* this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
case S2MPS11X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
regulators = s2mps11_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPS13X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
regulators = s2mps13_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPS14X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
regulators = s2mps14_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPU02:
s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
regulators = s2mpu02_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
default:
dev_err(&pdev->dev, "Invalid device type: %u\n",
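
The s2mps11 changes above replace a hand-rolled 50-bit bitfield with a bitmap sized by S2MPS_REGULATOR_MAX, plus a BUILD_BUG_ON per device type to keep that bound honest. A minimal sketch of the DECLARE_BITMAP/set_bit/test_bit pattern being adopted (the constant and helper names here are illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_REGULATOR_MAX	64	/* illustrative upper bound */

static DECLARE_BITMAP(suspend_state, MY_REGULATOR_MAX);

static void mark_suspend_enabled(int id)
{
	set_bit(id, suspend_state);		/* set bit 'id' */
}

static bool suspend_enabled(int id)
{
	return test_bit(id, suspend_state);	/* read bit 'id' */
}
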
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 specific device drivers
#
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
- int err, host_prot;
+ int err;
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ int host_prot;
+
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on SPI_MASTER
+ depends on SPI_MASTER && HAS_DMA
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
#define SPFI_CONTROL_SOFT_RESET BIT(11)
#define SPFI_CONTROL_SEND_DMA BIT(10)
#define SPFI_CONTROL_GET_DMA BIT(9)
+#define SPFI_CONTROL_SE BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT 5
#define SPFI_CONTROL_TMODE_MASK 0x7
#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
xfer->rx_nbits == SPI_NBITS_QUAD)
val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+ val |= SPFI_CONTROL_SE;
spfi_writel(spfi, val, SPFI_CONTROL);
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
- && (transfer->len > spi_imx->tx_wml))
+ if (spi_imx->dma_is_inited
+ && transfer->len > spi_imx->rx_wml * sizeof(u32)
+ && transfer->len > spi_imx->tx_wml * sizeof(u32))
return true;
return false;
}
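
For illustration (the watermark value below is hypothetical): the *_wml watermarks count 32-bit FIFO words while transfer->len is in bytes, so with rx_wml = 32 the corrected check only takes the DMA path when

	transfer->len > 32 * sizeof(u32)	/* i.e. more than 128 bytes */

whereas the old comparison switched to DMA for anything above 32 bytes.
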
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
case GQSPI_SELECT_FLASH_CS_BOTH:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
GQSPI_GENFIFO_CS_UPPER;
+ break;
case GQSPI_SELECT_FLASH_CS_UPPER:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
break;
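
The one-line fix above adds the break missing from the "both chip selects" case, so that selection is no longer silently overwritten by the upper-only case below it. A generic illustration of that class of bug (names and values are not the driver's):

enum cs_sel { SEL_LOWER, SEL_UPPER, SEL_BOTH };

static unsigned int cs_mask(enum cs_sel sel)
{
	unsigned int mask = 0;

	switch (sel) {
	case SEL_BOTH:
		mask = 0x1 | 0x2;
		break;		/* the missing break: without it, control falls through */
	case SEL_UPPER:
		mask = 0x2;	/* ... and the "both" mask is silently overwritten */
		break;
	case SEL_LOWER:
		mask = 0x1;
		break;
	}
	return mask;
}
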
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -693,6 +693,7 @@ static struct class *spidev_class;
#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "rohm,dh2228fv" },
+ { .compatible = "lineartechnology,ltc2488" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..ee8bfacf2071 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* Locking: ctrl_lock
*/
-static void isig(int sig, struct tty_struct *tty)
+static void __isig(int sig, struct tty_struct *tty)
{
- struct n_tty_data *ldata = tty->disc_data;
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, sig, 1);
put_pid(tty_pgrp);
}
+}
- if (!L_NOFLSH(tty)) {
+static void isig(int sig, struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ if (L_NOFLSH(tty)) {
+ /* signal only */
+ __isig(sig, tty);
+
+ } else { /* signal and flush */
up_read(&tty->termios_rwsem);
down_write(&tty->termios_rwsem);
+ __isig(sig, tty);
+
/* clear echo buffer */
mutex_lock(&ldata->output_lock);
ldata->echo_head = ldata->echo_tail = 0;
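
The refactor above splits the signalling into __isig() so that, in the flush case, the signal is sent only after termios_rwsem has been re-taken for write. rw_semaphores cannot be upgraded in place, hence the release-and-reacquire dance; a generic sketch of that pattern (the helper is illustrative):

#include <linux/rwsem.h>

/* Illustrative: a caller holding 'sem' for read briefly needs exclusive access. */
static void do_exclusive_work(struct rw_semaphore *sem)
{
	up_read(sem);		/* rwsems cannot be upgraded, so drop the read lock */
	down_write(sem);	/* ... and take it again for write */

	/* ... exclusive section, e.g. flushing buffers ... */

	downgrade_write(sem);	/* hand back to the caller in shared (read) mode */
}
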
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..15b4079a335e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
config SERIAL_SC16IS7XX
tristate "SC16IS7xx serial support"
select SERIAL_CORE
- depends on I2C || SPI_MASTER
+ depends on (SPI_MASTER && !I2C) || I2C
help
This selects support for SC16IS7xx serial ports.
Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
void __iomem *base;
base = devm_ioremap_resource(dev, mmiobase);
- if (!base)
- return -ENOMEM;
+ if (IS_ERR(base))
+ return PTR_ERR(base);
index = pl011_probe_dt_alias(index, dev);
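
The pl011 fix above corrects a common error-checking mistake: devm_ioremap_resource() never returns NULL, it returns an ERR_PTR-encoded errno on failure. A minimal sketch of the idiomatic check (the helper name is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

static int my_probe_mmio(struct device *dev, struct resource *res,
			 void __iomem **basep)
{
	void __iomem *base = devm_ioremap_resource(dev, res);

	if (IS_ERR(base))		/* never NULL: errors come back as ERR_PTR */
		return PTR_ERR(base);

	*basep = base;
	return 0;
}
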
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..679709f51fd4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
port = platform_get_drvdata(pdev);
uart_remove_one_port(&etraxfs_uart_driver, port);
- etraxfs_uart_ports[pdev->id] = NULL;
+ etraxfs_uart_ports[port->line] = NULL;
return 0;
}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..54fdc7866ea1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
- /* Can we enable the DMA support? */
- if (is_imx6q_uart(sport) && !uart_console(port) &&
- !sport->dma_is_inited)
- imx_uart_dma_init(sport);
-
spin_lock_irqsave(&sport->port.lock, flags);
/* Reset fifo's and state machines */
i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
writel(USR1_RTSD, sport->port.membase + USR1);
writel(USR2_ORE, sport->port.membase + USR2);
- if (sport->dma_is_inited && !sport->dma_is_enabled)
- imx_enable_dma(sport);
-
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
} else {
ucr2 |= UCR2_CTSC;
}
+
+ /* Can we enable the DMA support? */
+ if (is_imx6q_uart(sport) && !uart_console(port)
+ && !sport->dma_is_inited)
+ imx_uart_dma_init(sport);
} else {
termios->c_cflag &= ~CRTSCTS;
}
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_enable_ms(&sport->port);
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..5ccc698cbbfa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
(reg << SC16IS7XX_REG_SHIFT) | port->line, val);
}
+static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+ regcache_cache_bypass(s->regmap, true);
+ regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+ regcache_cache_bypass(s->regmap, false);
+}
+
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+ regcache_cache_bypass(s->regmap, true);
+ regmap_raw_write(s->regmap, addr, s->buf, to_send);
+ regcache_cache_bypass(s->regmap, false);
+}
+
static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
u8 mask, u8 val)
{
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
bytes_read = 1;
} else {
- regcache_cache_bypass(s->regmap, true);
- regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
- s->buf, rxlen);
- regcache_cache_bypass(s->regmap, false);
+ sc16is7xx_fifo_read(port, rxlen);
bytes_read = rxlen;
}
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
s->buf[i] = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
}
- regcache_cache_bypass(s->regmap, true);
- regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
- regcache_cache_bypass(s->regmap, false);
+
+ sc16is7xx_fifo_write(port, to_send);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
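
The new sc16is7xx_fifo_read()/sc16is7xx_fifo_write() helpers fold the per-port channel into the register address and bypass the regmap cache, since FIFO data is volatile and must always go to the hardware. A stripped-down sketch of that bypass pattern (names are illustrative):

#include <linux/regmap.h>

/* Illustrative bulk FIFO read: bypass the register cache for volatile data. */
static int fifo_read(struct regmap *map, unsigned int addr, u8 *buf, size_t len)
{
	int ret;

	regcache_cache_bypass(map, true);	/* talk to the hardware directly */
	ret = regmap_raw_read(map, addr, buf, len);
	regcache_cache_bypass(map, false);
	return ret;
}
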
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..f36852067f20 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
mutex_lock(&port->mutex);
uart_shutdown(tty, state);
tty_port_tty_set(port, NULL);
- tty->closing = 0;
+
spin_lock_irqsave(&port->lock, flags);
if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&port->mutex);
tty_ldisc_flush(tty);
+ tty->closing = 0;
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
schedule();
continue;
}
+ __set_current_state(TASK_RUNNING);
count = sel_buffer_lth - pasted;
count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
count);
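
The added __set_current_state(TASK_RUNNING) returns the task to the running state once the sleep loop above it is done; calling code that may block while still in TASK_INTERRUPTIBLE trips the "do not call blocking ops when !TASK_RUNNING" warning. The canonical open-coded wait loop, for comparison (the helper is illustrative):

#include <linux/sched.h>
#include <linux/wait.h>

/* Canonical open-coded wait loop (illustrative): always end in TASK_RUNNING. */
static void wait_until(wait_queue_head_t *wq, bool (*done)(void))
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (done())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);	/* the line the fix above adds */
	remove_wait_queue(wq, &wait);
}
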
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
+ if (vc->vc_uni_pagedir_loc)
+ con_free_unimap(vc);
vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
vc->vc_uni_pagedir = NULL;
vc->vc_hi_font_mask = 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
usb_deregister(&acm_driver);
tty_unregister_driver(acm_tty_driver);
put_tty_driver(acm_tty_driver);
+ idr_destroy(&acm_minors);
}
module_init(acm_init);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
{
return bus_register(&ulpi_bus);
}
-module_init(ulpi_init);
+subsys_initcall(ulpi_init);
static void __exit ulpi_exit(void)
{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..cbcd0920fb51 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return (retval < 0) ? retval : -EMSGSIZE;
}
- if (usb_dev->speed == USB_SPEED_SUPER) {
+
+ if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(usb_dev);
- if (retval < 0) {
+ if (!retval) {
+ usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+ } else if (usb_dev->speed == USB_SPEED_SUPER) {
mutex_unlock(&usb_bus_list_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
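
For reference, bcdUSB is binary-coded decimal, so the new threshold reads naturally as a spec revision (the encodings below are standard, listed for illustration):

	0x0200  ->  USB 2.00
	0x0201  ->  USB 2.01 and later, which may expose a BOS descriptor
	0x0300  ->  USB 3.00

i.e. the BOS descriptor is now fetched for any device reporting at least 2.01 and LPM capability is derived from it, instead of probing only SuperSpeed devices.
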
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..73dfa194160b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
+extern int usb_device_supports_lpm(struct usb_device *udev);
#ifdef CONFIG_PM
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..69e769c35cf5 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
+ case USB_REQ_SET_INTERFACE:
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+ dwc->start_config_issued = false;
+ /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..5da37c957b53 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
return -ENODEV;
}
- udc->phy_regs = ioremap(r->start, resource_size(r));
+ udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (udc->phy_regs == NULL) {
dev_err(&pdev->dev, "failed to map phy I/O memory\n");
return -EBUSY;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..362ee8af5fce 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
int usb_gadget_map_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in)
{
+ struct device *dev = gadget->dev.parent;
+
if (req->length == 0)
return 0;
if (req->num_sgs) {
int mapped;
- mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs,
+ mapped = dma_map_sg(dev, req->sg, req->num_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (mapped == 0) {
dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
req->num_mapped_sgs = mapped;
} else {
- req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
+ req->dma = dma_map_single(dev, req->buf, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (dma_mapping_error(&gadget->dev, req->dma)) {
- dev_err(&gadget->dev, "failed to map buffer\n");
+ if (dma_mapping_error(dev, req->dma)) {
+ dev_err(dev, "failed to map buffer\n");
return -EFAULT;
}
}
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
return;
if (req->num_mapped_sgs) {
- dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs,
+ dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
} else {
- dma_unmap_single(&gadget->dev, req->dma, req->length,
+ dma_unmap_single(gadget->dev.parent, req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
int completed, modified;
__hc32 *prev;
- /* Is this ED already invisible to the hardware? */
- if (ed->state == ED_IDLE)
- goto ed_idle;
-
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
@@ -1012,12 +1008,10 @@ skip_ed:
}
/* ED's now officially unlinked, hc doesn't see */
- ed->state = ED_IDLE;
ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
ed->hwNextED = 0;
wmb();
ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
-ed_idle:
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
if (list_empty(&ed->td_list)) {
*last = ed->ed_next;
ed->ed_next = NULL;
+ ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
*last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
#define CCR_PM_CKRNEN 0x0002
#define CCR_PM_USBPW1 0x0004
#define CCR_PM_USBPW2 0x0008
-#define CCR_PM_USBPW3 0x0008
+#define CCR_PM_USBPW3 0x0010
#define CCR_PM_PMEE 0x0100
#define CCR_PM_PMES 0x8000
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
u32 pls = status_reg & PORT_PLS_MASK;
/* resume state is a xHCI internal state.
- * Do not report it to usb core.
+ * Do not report it to usb core, instead, pretend to be U3,
+ * thus usb core knows it's not ready for transfer
*/
- if (pls == XDEV_RESUME)
+ if (pls == XDEV_RESUME) {
+ *status |= USB_SS_PORT_LS_U3;
return;
+ }
/* When the CAS bit is set then warm reset
* should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_RESET << 16;
/* USB3.0 only */
if (hcd->speed == HCD_USB3) {
- if ((raw_port_status & PORT_PLC))
+ /* Port link change with port in resume state should not be
+ * reported to usbcore, as this is an internal state to be
+ * handled by xhci driver. Reporting PLC to usbcore may
+ * cause usbcore clearing PLC first and port change event
+ * cause usbcore to clear PLC first, so the port change event
+ * irq won't be generated.
+ */
+ if ((raw_port_status & PORT_PLC) &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
status |= USB_PORT_STAT_C_LINK_STATE << 16;
if ((raw_port_status & PORT_WRC))
status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
spin_lock_irqsave(&xhci->lock, flags);
if (hcd->self.root_hub->do_remote_wakeup) {
- if (bus_state->resuming_ports) {
+ if (bus_state->resuming_ports || /* USB2 */
+ bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "suspend failed because "
- "a port is resuming\n");
+ xhci_dbg(xhci, "suspend failed because a port is resuming\n");
return -EBUSY;
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..3e442f77a2b9 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
return -ENOMEM;
+ virt_dev->num_rings_cached--;
virt_dev->eps[ep_index].new_ring =
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
- virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1, type);
}
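
The reordering above makes the ring-cache pop take the last valid entry: num_rings_cached is decremented first and then used as the index, so the slot that is read is the same one that gets cleared. A generic pop-from-cache sketch of the corrected ordering (illustrative):

#include <stddef.h>

/* Illustrative pop from a small pointer cache: decrement, then index. */
static void *cache_pop(void **cache, unsigned int *count)
{
	void *item;

	if (*count == 0)
		return NULL;
	(*count)--;			/* move to the last valid slot first */
	item = cache[*count];
	cache[*count] = NULL;		/* the slot is free again */
	return item;
}
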
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/acpi.h>
#include "xhci.h"
#include "xhci-trace.h"
+#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define PROG_DONE (1 << 30)
+#define SSIC_PORT_UNUSED (1 << 31)
+
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
/*
+ * In some Intel xHCI controllers, in order to get D3 working,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c,
+ * the SSIC port needs to be marked as "unused" before putting xHCI
+ * into D3. After D3 exit, the SSIC port needs to be marked as "used".
+ * Without this change, xHCI might not enter D3 state.
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear
* the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
*/
-static void xhci_pme_quirk(struct xhci_hcd *xhci)
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 val;
void __iomem *reg;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+ reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+ /* Notify SSIC that SSIC profile programming is not done */
+ val = readl(reg) & ~PROG_DONE;
+ writel(val, reg);
+
+ /* Mark SSIC port as unused(suspend) or used(resume) */
+ val = readl(reg);
+ if (suspend)
+ val |= SSIC_PORT_UNUSED;
+ else
+ val &= ~SSIC_PORT_UNUSED;
+ writel(val, reg);
+
+ /* Notify SSIC that SSIC profile programming is done */
+ val = readl(reg) | PROG_DONE;
+ writel(val, reg);
+ readl(reg);
+ }
+
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
val = readl(reg);
writel(val | BIT(28), reg);
readl(reg);
}
+#ifdef CONFIG_ACPI
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+{
+ static const u8 intel_dsm_uuid[] = {
+ 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
+ 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
+ };
+ acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+}
+#else
+ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+#endif /* CONFIG_ACPI */
+
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+ xhci_pme_acpi_rtd3_enable(dev);
+
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
pdev->no_d3cold = true;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(xhci);
+ xhci_pme_quirk(hcd, true);
return xhci_suspend(xhci, do_wakeup);
}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
usb_enable_intel_xhci_ports(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(xhci);
+ xhci_pme_quirk(hcd, false);
retval = xhci_resume(xhci, hibernated);
return retval;
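
The Cherryview SSIC quirk above is a sequence of read-modify-write accesses to a vendor-specific register, each completed by a dummy readl() so the posted MMIO write is flushed before moving on. A generic helper showing that pattern (illustrative, not the quirk itself):

#include <linux/io.h>

/* Illustrative MMIO read-modify-write with a posted-write flush. */
static void rmw_flag(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;
	writel(val, reg);
	readl(reg);		/* read back to flush the posted write */
}
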
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..6a8fc52aed58 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
usb_hcd_resume_root_hub(hcd);
}
+ if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+ bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..526ebc0c7e72 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
return -EINVAL;
}
+ if (virt_dev->tt_info)
+ old_active_eps = virt_dev->tt_info->active_eps;
+
if (virt_dev->udev != udev) {
/* If the virt_dev and the udev does not match, this virt_dev
* may belong to another udev.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..ed2ebf647c38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,6 +285,7 @@ struct xhci_op_regs {
#define XDEV_U0 (0x0 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_READ_DISC_INFO ),
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Thus writable media would be dirty
+ * if the initial instance is used. So the device is limited to its
+ * virtual CD.
+ * And yes, the concept that BCD goes up to 9 is not heeded */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+ "ZTE,Incorporated",
+ "ZTE WCDMA Technologies MSM",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN ),
+
/* Reported by Sven Geggus <sven-usbst@geggus.net>
* This encrypted pen drive returns bogus data for the initial READ(10).
*/
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These are mini projectors using USB for both power and video data transport
+ * The usb-storage interface is a virtual windows driver CD, which the gm12u320
+ * driver automatically converts into framebuffer & kms dri device nodes.
+ */
+UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
+ "Grain-media Technology Corp.",
+ "USB3.0 Device GM12U320",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE ),
+
/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..a9fe859f43c8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
+#include <linux/sort.h>
#include "vhost.h"
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+ "Maximum number of memory regions in memory map. (default: 64)");
+
enum {
- VHOST_MEMORY_MAX_NREGIONS = 64,
VHOST_MEMORY_F_LOG = 0x1,
};
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(dev->memory);
+ kvfree(dev->memory);
dev->memory = NULL;
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
@@ -663,6 +669,28 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+ const struct vhost_memory_region *r1 = p1, *r2 = p2;
+ if (r1->guest_phys_addr < r2->guest_phys_addr)
+ return 1;
+ if (r1->guest_phys_addr > r2->guest_phys_addr)
+ return -1;
+ return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n) {
+ n = vzalloc(size);
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+ }
+ return n;
+}
+
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +701,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
- if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
mem.nregions * sizeof *m->regions)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
+ sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+ vhost_memory_reg_sort_cmp, NULL);
if (!memory_access_ok(d, newmem, 0)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
oldmem = d->memory;
@@ -699,7 +729,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
d->vqs[i]->memory = newmem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kfree(oldmem);
+ kvfree(oldmem);
return 0;
}
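
vhost_kvzalloc() above implements the usual large-allocation fallback: try a physically contiguous kzalloc() quietly first (__GFP_NOWARN keeps the expected failure out of the log) and fall back to vzalloc(); kvfree() frees either kind, which is why the kfree() calls in this function become kvfree(). A minimal version of the same idea (illustrative):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative: zeroed allocation that falls back to vmalloc for large sizes. */
static void *big_zalloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vzalloc(size);	/* either result is freed with kvfree() */
}
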
@@ -992,17 +1022,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
__u64 addr, __u32 len)
{
- struct vhost_memory_region *reg;
- int i;
+ const struct vhost_memory_region *reg;
+ int start = 0, end = mem->nregions;
- /* linear search is not brilliant, but we really have on the order of 6
- * regions in practice */
- for (i = 0; i < mem->nregions; ++i) {
- reg = mem->regions + i;
- if (reg->guest_phys_addr <= addr &&
- reg->guest_phys_addr + reg->memory_size - 1 >= addr)
- return reg;
+ while (start < end) {
+ int slot = start + (end - start) / 2;
+ reg = mem->regions + slot;
+ if (addr >= reg->guest_phys_addr)
+ end = slot;
+ else
+ start = slot + 1;
}
+
+ reg = mem->regions + start;
+ if (addr >= reg->guest_phys_addr &&
+ reg->guest_phys_addr + reg->memory_size > addr)
+ return reg;
return NULL;
}
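
The new find_region() relies on vhost_set_memory() having sorted the regions in descending guest_phys_addr order (note the comparator returns 1 when r1 < r2), then binary-searches for the first region whose start is at or below the address and checks that the address falls inside it. A small self-contained illustration of the same search over a hypothetical layout:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, size; };

/* Sorted by descending start address, as the new comparator arranges. */
static const struct region regions[] = {
	{ 0x100000, 0x10000 },
	{ 0x2000,   0x1000  },
	{ 0x0,      0x1000  },
};
#define NREGIONS (sizeof(regions) / sizeof(regions[0]))

static const struct region *find(uint64_t addr)
{
	unsigned int start = 0, end = NREGIONS;

	while (start < end) {
		unsigned int slot = start + (end - start) / 2;

		if (addr >= regions[slot].start)
			end = slot;		/* candidate; keep looking left */
		else
			start = slot + 1;	/* region starts above addr; go right */
	}
	if (start < NREGIONS &&
	    addr >= regions[start].start &&
	    regions[start].start + regions[start].size > addr)
		return &regions[start];
	return NULL;				/* address falls in a hole */
}

int main(void)
{
	const struct region *r = find(0x2345);

	printf("0x2345 -> %s\n", r ? "region at 0x2000" : "no region");
	return 0;
}
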