author:    David S. Miller <davem@davemloft.net>  2016-05-04 00:52:29 -0400
committer: David S. Miller <davem@davemloft.net>  2016-05-04 00:52:29 -0400
commit:    cba653210056cf47cc1969f831f05ddfb99ee2bd (patch)
tree:      92d93a3eee5b12d77af3696b9da8026e71df5752
parent:    26879da58711aa604a1b866cbeedd7e0f78f90ad (diff)
parent:    7391daf2ffc780679d6ab3fad1db2619e5dd2c2a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/ipv4/ip_gre.c

Minor conflicts between tunnel bug fixes in net and
ipv6 tunnel cleanups in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
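To see how the ip_gre.c conflict was actually resolved, the merge commit's combined diff can be displayed with git. The commands below are an illustrative sketch built around the commit id shown above (assuming a local kernel tree that already contains this merge):

	# Show the merge message plus the combined ("--cc") diff, which lists
	# only hunks where the merge result differs from both parents.
	git show --cc cba653210056cf47cc1969f831f05ddfb99ee2bd -- net/ipv4/ip_gre.c

	# List the two parent commits that were merged.
	git log -1 --format='%P' cba653210056cf47cc1969f831f05ddfb99ee2bd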
-rw-r--r--  .mailmap | 4
-rw-r--r--  Documentation/devicetree/bindings/arc/archs-pct.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arc/pct.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-rk3x.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/cpsw.txt | 6
-rw-r--r--  Documentation/networking/altera_tse.txt | 6
-rw-r--r--  Documentation/networking/ipvlan.txt | 6
-rw-r--r--  Documentation/networking/pktgen.txt | 6
-rw-r--r--  Documentation/networking/vrf.txt | 2
-rw-r--r--  Documentation/networking/xfrm_sync.txt | 6
-rw-r--r--  Documentation/sysctl/vm.txt | 19
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/arc/Kconfig | 2
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h | 36
-rw-r--r--  arch/arc/kernel/entry-arcv2.S | 10
-rw-r--r--  arch/arc/kernel/entry-compact.S | 3
-rw-r--r--  arch/arc/mm/init.c | 4
-rw-r--r--  arch/nios2/lib/memset.c | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 2
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/s390/include/asm/mmu.h | 2
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 28
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 4
-rw-r--r--  arch/s390/include/asm/processor.h | 2
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 9
-rw-r--r--  arch/s390/mm/init.c | 3
-rw-r--r--  arch/s390/mm/mmap.c | 6
-rw-r--r--  arch/s390/mm/pgalloc.c | 85
-rw-r--r--  arch/s390/pci/pci_dma.c | 16
-rw-r--r--  arch/sparc/configs/sparc32_defconfig | 1
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 1
-rw-r--r--  arch/sparc/include/asm/spitfire.h | 1
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 4
-rw-r--r--  arch/sparc/kernel/cherrs.S | 14
-rw-r--r--  arch/sparc/kernel/cpu.c | 6
-rw-r--r--  arch/sparc/kernel/cpumap.c | 1
-rw-r--r--  arch/sparc/kernel/fpu_traps.S | 11
-rw-r--r--  arch/sparc/kernel/head_64.S | 32
-rw-r--r--  arch/sparc/kernel/misctrap.S | 12
-rw-r--r--  arch/sparc/kernel/pci.c | 42
-rw-r--r--  arch/sparc/kernel/setup_64.c | 7
-rw-r--r--  arch/sparc/kernel/spiterrs.S | 18
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/sparc/kernel/utrap.S | 3
-rw-r--r--  arch/sparc/kernel/vio.c | 18
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/sparc/kernel/winfixup.S | 3
-rw-r--r--  arch/sparc/mm/init_64.c | 3
-rw-r--r--  arch/x86/events/amd/core.c | 2
-rw-r--r--  arch/x86/events/intel/core.c | 1
-rw-r--r--  arch/x86/events/intel/lbr.c | 6
-rw-r--r--  arch/x86/events/intel/pt.c | 75
-rw-r--r--  arch/x86/events/intel/pt.h | 3
-rw-r--r--  arch/x86/events/intel/rapl.c | 1
-rw-r--r--  arch/x86/include/asm/perf_event.h | 4
-rw-r--r--  arch/x86/kernel/apic/vector.c | 3
-rw-r--r--  arch/x86/kernel/head_32.S | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 4
-rw-r--r--  arch/x86/mm/setup_nx.c | 5
-rw-r--r--  arch/x86/xen/spinlock.c | 6
-rw-r--r--  drivers/block/rbd.c | 52
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 2
-rw-r--r--  drivers/firmware/efi/vars.c | 37
-rw-r--r--  drivers/gpio/gpio-rcar.c | 65
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 20
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 154
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 46
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 17
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 6
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 4
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c | 24
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 1
-rw-r--r--  drivers/infiniband/core/cache.c | 3
-rw-r--r--  drivers/infiniband/core/ucm.c | 4
-rw-r--r--  drivers/infiniband/core/ucma.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 5
-rw-r--r--  drivers/infiniband/core/verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 24
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 5
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 4
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/media/usb/usbvision/usbvision-video.c | 7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 20
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-v4l2.c | 20
-rw-r--r--  drivers/misc/cxl/context.c | 7
-rw-r--r--  drivers/misc/cxl/cxl.h | 2
-rw-r--r--  drivers/misc/cxl/irq.c | 1
-rw-r--r--  drivers/misc/cxl/native.c | 31
-rw-r--r--  drivers/mmc/host/Kconfig | 1
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 81
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 13
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 34
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 11
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 15
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 67
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 1
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 5
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 2
-rw-r--r--  drivers/net/phy/at803x.c | 40
-rw-r--r--  drivers/net/usb/lan78xx.c | 44
-rw-r--r--  drivers/net/usb/pegasus.c | 10
-rw-r--r--  drivers/net/usb/smsc75xx.c | 12
-rw-r--r--  drivers/net/usb/smsc95xx.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 10
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 2
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 4
-rw-r--r--  drivers/s390/char/sclp_ctl.c | 12
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 54
-rw-r--r--  drivers/staging/rdma/hfi1/TODO | 2
-rw-r--r--  drivers/staging/rdma/hfi1/file_ops.c | 91
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.c | 40
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.h | 3
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c | 2
-rw-r--r--  drivers/staging/rdma/hfi1/user_exp_rcv.c | 11
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.c | 33
-rw-r--r--  drivers/thermal/hisi_thermal.c | 4
-rw-r--r--  drivers/thermal/thermal_core.c | 2
-rw-r--r--  fs/ceph/mds_client.c | 6
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 33
-rw-r--r--  fs/udf/super.c | 4
-rw-r--r--  fs/udf/udfdecl.h | 2
-rw-r--r--  fs/udf/unicode.c | 16
-rw-r--r--  include/linux/bpf.h | 3
-rw-r--r--  include/linux/ceph/auth.h | 10
-rw-r--r--  include/linux/ceph/osd_client.h | 1
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cpuset.h | 6
-rw-r--r--  include/linux/hash.h | 20
-rw-r--r--  include/linux/huge_mm.h | 5
-rw-r--r--  include/linux/if_ether.h | 5
-rw-r--r--  include/linux/lockdep.h | 8
-rw-r--r--  include/linux/mlx5/device.h | 11
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/net.h | 10
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/media/videobuf2-core.h | 8
-rw-r--r--  include/net/vxlan.h | 4
-rw-r--r--  include/rdma/ib.h | 16
-rw-r--r--  include/sound/hda_i915.h | 5
-rw-r--r--  include/uapi/linux/v4l2-dv-timings.h | 30
-rw-r--r--  kernel/bpf/inode.c | 7
-rw-r--r--  kernel/bpf/syscall.c | 24
-rw-r--r--  kernel/bpf/verifier.c | 76
-rw-r--r--  kernel/cgroup.c | 7
-rw-r--r--  kernel/cpuset.c | 4
-rw-r--r--  kernel/events/core.c | 55
-rw-r--r--  kernel/kcov.c | 3
-rw-r--r--  kernel/kexec_core.c | 7
-rw-r--r--  kernel/locking/lockdep.c | 37
-rw-r--r--  kernel/locking/lockdep_proc.c | 2
-rw-r--r--  kernel/workqueue.c | 29
-rw-r--r--  lib/stackdepot.c | 4
-rw-r--r--  mm/huge_memory.c | 12
-rw-r--r--  mm/memcontrol.c | 37
-rw-r--r--  mm/memory-failure.c | 10
-rw-r--r--  mm/memory.c | 40
-rw-r--r--  mm/migrate.c | 8
-rw-r--r--  mm/page_io.c | 6
-rw-r--r--  mm/swap.c | 5
-rw-r--r--  mm/vmscan.c | 30
-rw-r--r--  net/batman-adv/bat_v.c | 12
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 17
-rw-r--r--  net/batman-adv/hard-interface.c | 6
-rw-r--r--  net/batman-adv/originator.c | 17
-rw-r--r--  net/batman-adv/routing.c | 9
-rw-r--r--  net/batman-adv/send.c | 6
-rw-r--r--  net/batman-adv/soft-interface.c | 8
-rw-r--r--  net/batman-adv/translation-table.c | 42
-rw-r--r--  net/batman-adv/types.h | 7
-rw-r--r--  net/ceph/auth.c | 8
-rw-r--r--  net/ceph/auth_none.c | 71
-rw-r--r--  net/ceph/auth_none.h | 3
-rw-r--r--  net/ceph/auth_x.c | 21
-rw-r--r--  net/ceph/auth_x.h | 1
-rw-r--r--  net/ceph/osd_client.c | 6
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/ipv4/inet_hashtables.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 19
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv6/ila/ila_lwt.c | 3
-rw-r--r--  net/l2tp/l2tp_core.c | 4
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/rds/tcp.c | 3
-rw-r--r--  net/rds/tcp.h | 4
-rw-r--r--  net/rds/tcp_connect.c | 8
-rw-r--r--  net/rds/tcp_listen.c | 54
-rw-r--r--  net/sched/sch_netem.c | 61
-rw-r--r--  net/tipc/node.c | 5
-rw-r--r--  samples/bpf/trace_output_kern.c | 1
-rw-r--r--  sound/hda/ext/hdac_ext_stream.c | 5
-rw-r--r--  sound/hda/hdac_i915.c | 62
-rw-r--r--  sound/pci/hda/hda_intel.c | 56
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/Kconfig | 1
-rw-r--r--  sound/soc/codecs/arizona.c | 12
-rw-r--r--  sound/soc/codecs/arizona.h | 2
-rw-r--r--  sound/soc/codecs/cs35l32.c | 17
-rw-r--r--  sound/soc/codecs/cs47l24.c | 3
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c | 94
-rw-r--r--  sound/soc/codecs/nau8825.c | 126
-rw-r--r--  sound/soc/codecs/rt5640.c | 2
-rw-r--r--  sound/soc/codecs/rt5640.h | 36
-rw-r--r--  sound/soc/codecs/wm5102.c | 5
-rw-r--r--  sound/soc/codecs/wm5110.c | 2
-rw-r--r--  sound/soc/codecs/wm8962.c | 2
-rw-r--r--  sound/soc/codecs/wm8997.c | 2
-rw-r--r--  sound/soc/codecs/wm8998.c | 2
-rw-r--r--  sound/soc/intel/Kconfig | 1
-rw-r--r--  sound/soc/intel/haswell/sst-haswell-ipc.c | 2
-rw-r--r--  sound/soc/intel/skylake/skl-sst-dsp.c | 5
-rw-r--r--  sound/soc/intel/skylake/skl-topology.c | 42
-rw-r--r--  sound/soc/intel/skylake/skl-topology.h | 8
-rw-r--r--  sound/soc/intel/skylake/skl.c | 32
-rw-r--r--  sound/soc/soc-dapm.c | 7
263 files changed, 2446 insertions, 1230 deletions
diff --git a/.mailmap b/.mailmap
index 90c0aefc276d..c156a8b4d845 100644
--- a/.mailmap
+++ b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
@@ -79,6 +82,7 @@ Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87c640..e4b9dcee6d41 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
The ARC HS can be configured with a pipeline performance monitor for counting
CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
It also supports overflow interrupts.
Required properties:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b9588444f20..4e874d9a38a6 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
The ARC700 can be configured with a pipeline performance monitor for counting
CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
Note that:
* The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc52e64..0b4a85fe2d86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@ RK3xxx SoCs.
Required properties :
- reg : Offset and length of the register set for the device
- - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
- "rockchip,rk3288-i2c".
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+ "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
- interrupts : interrupt number
- clocks : parent clock
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 28a4781ab6d7..0ae06491b430 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -45,13 +45,13 @@ Required properties:
Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
- mac-address : See ethernet.txt file in the same directory
-- phy_id : Specifies slave phy id
+- phy_id : Specifies slave phy id (deprecated, use phy-handle)
- phy-handle : See ethernet.txt file in the same directory
Slave sub-nodes:
- fixed-link : See fixed-link.txt file in the same directory
- Either the property phy_id, or the sub-node
- fixed-link can be specified
+
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
Note: "ti,hwmods" field is used to fetch the base address and irq
resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/networking/altera_tse.txt b/Documentation/networking/altera_tse.txt
index 3f24df8c6e65..50b8589d12fd 100644
--- a/Documentation/networking/altera_tse.txt
+++ b/Documentation/networking/altera_tse.txt
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
platform bus to obtain component resources. The designs used to test this
driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
-and tested with ARM and NIOS processor hosts seperately. The anticipated use
+and tested with ARM and NIOS processor hosts separately. The anticipated use
cases are simple communications between an embedded system and an external peer
for status and simple configuration of the embedded system.
@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
4.1) Transmit process
When the driver's transmit routine is called by the kernel, it sets up a
transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
interrupt is driven by the transmit DMA logic. The driver handles the transmit
completion in the context of the interrupt handling chain by recycling
resource required to send and track the requested transmit operation.
4.2) Receive process
The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
to queue receive buffers to the SGDMA receive logic). When a packet is
received, the DMA logic generates an interrupt. The driver handles a receive
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
index cf996394e466..14422f8fcdc4 100644
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -8,7 +8,7 @@ Initial Release:
This is conceptually very similar to the macvlan driver with one major
exception of using L3 for mux-ing /demux-ing among slaves. This property makes
the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
outside of it.
@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
as well.
4.2 L3 mode:
- In this mode TX processing upto L3 happens on the stack instance attached
+ In this mode TX processing up to L3 happens on the stack instance attached
to the slave device and packets are switched to the stack instance of the
master device for the L2 processing and routing from that instance will be
used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
(a) The Linux host that is connected to the external switch / router has
policy configured that allows only one mac per port.
(b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
(c) If the slave device is to be put into the hostile / untrusted network
namespace where L2 on the slave could be changed / misused.
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index f4be85e96005..2c4e3354e128 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -67,12 +67,12 @@ The two basic thread commands are:
* add_device DEVICE@NAME -- adds a single device
* rem_device_all -- remove all associated devices
-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
which is used for configuring this device. Thus, device names need to
be unique.
To support adding the same device to multiple threads, which is useful
-with multi queue NICs, a the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
device@something
The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@ Sample scripts
A collection of tutorial scripts and helpers for pktgen is in the
samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.
Usage example and help:
./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index d52aa10cfe91..5da679c573d2 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
the VRF device. Similarly on egress routing rules are used to send packets
to the VRF device driver before getting sent out the actual interface. This
allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
using the VRF device to specify rules that apply to the VRF domain as a whole.
[1] Packets in the forwarded state do not flow through the device, so those
diff --git a/Documentation/networking/xfrm_sync.txt b/Documentation/networking/xfrm_sync.txt
index d7aac9dedeb4..8d88e0f2ec49 100644
--- a/Documentation/networking/xfrm_sync.txt
+++ b/Documentation/networking/xfrm_sync.txt
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
from Jamal <hadi@cyberus.ca>.
The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
for HA purposes.
The idea is to synchronize the SA so that the takeover machine can do
the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
These patches add ability to sync and have accurate lifetime byte (to
ensure proper decay of SAs) and replay counters to avoid replay attacks
with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.
Because the above items change for every packet the SA receives,
it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
there is a period where the timer threshold expires with no packets
seen, then an odd behavior is seen as follows:
The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e we don't wait for a timeout period or a packet threshold
to be reached. This is done for simplicity and efficiency reasons.
-JHS
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb0368459da3..34a5fece3121 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
"Zone Order" orders the zonelists by zone type, then by node within each
zone. Specify "[Zz]one" for zone order.
-Specify "[Dd]efault" to request automatic configuration. Autoconfiguration
-will select "node" order in following case.
-(1) if the DMA zone does not exist or
-(2) if the DMA zone comprises greater than 50% of the available memory or
-(3) if any node's DMA zone comprises greater than 70% of its local memory and
- the amount of local memory is big enough.
-
-Otherwise, "zone" order will be selected. Default order is recommended unless
-this is causing problems for your system/application.
+Specify "[Dd]efault" to request automatic configuration.
+
+On 32-bit, the Normal zone needs to be preserved for allocations accessible
+by the kernel, so "zone" order will be selected.
+
+On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
+order will be selected.
+
+Default order is recommended unless this is causing problems for your
+system/application.
==============================================================
diff --git a/MAINTAINERS b/MAINTAINERS
index ab008013cfec..867d6be32cc8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4745,7 +4745,7 @@ F: drivers/platform/x86/fujitsu-tablet.c
FUSE: FILESYSTEM IN USERSPACE
M: Miklos Szeredi <miklos@szeredi.hu>
-L: fuse-devel@lists.sourceforge.net
+L: linux-fsdevel@vger.kernel.org
W: http://fuse.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
S: Maintained
@@ -4904,7 +4904,7 @@ F: net/ipv4/gre_offload.c
F: include/net/gre.h
GRETH 10/100/1G Ethernet MAC device driver
-M: Kristoffer Glembo <kristoffer@gaisler.com>
+M: Andreas Larsson <andreas@gaisler.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/aeroflex/
@@ -6028,7 +6028,7 @@ F: include/scsi/*iscsi*
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Or Gerlitz <ogerlitz@mellanox.com>
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
M: Roi Dayan <roid@mellanox.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -6038,7 +6038,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/ulp/iser/
ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
L: linux-rdma@vger.kernel.org
L: target-devel@vger.kernel.org
@@ -6401,7 +6401,7 @@ F: mm/kmemleak.c
F: mm/kmemleak-test.c
KPROBES
-M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -10015,7 +10015,8 @@ F: drivers/infiniband/hw/ocrdma/
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Shradha Shah <sshah@solarflare.com>
+M: Edward Cree <ecree@solarflare.com>
+M: Bert Kenward <bkenward@solarflare.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/sfc/
diff --git a/Makefile b/Makefile
index 9496df81b9a8..7466de60ddc7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Blurry Fish Butt
+EXTRAVERSION = -rc6
+NAME = Charred Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 12d0284a46e5..ec4791ea6911 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,8 +35,10 @@ config ARC
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_GENERIC_DMA_COHERENT
config MIGHT_HAVE_PCI
bool
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f751eebf..d1ec7f6b31e0 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT 4
+
+#define CLRI_STATUS_E_MASK 0xF
+#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
+
#define AUX_USER_SP 0x00D
#define AUX_IRQ_CTRL 0x00E
#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
:
: "memory");
+ /* To be compatible with irq_save()/irq_restore()
+ * encode the irq bits as expected by CLRI/SETI
+ * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+ */
+ temp = (1 << 5) |
+ ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+ (temp & CLRI_STATUS_E_MASK);
return temp;
}
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
*/
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return !(flags & (STATUS_IE_MASK));
+ return !(flags & CLRI_STATUS_IE_MASK);
}
static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
.macro IRQ_DISABLE scratch
clri
+ TRACE_ASM_IRQ_DISABLE
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
seti
.endm
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c1264607bbff..7a1c124ff021 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
clri ; To make status32.IE agree with CPU internal state
- lr r0, [ICAUSE]
+#ifdef CONFIG_TRACE_IRQFLAGS
+ TRACE_ASM_IRQ_DISABLE
+#endif
+ lr r0, [ICAUSE]
mov blink, ret_from_exception
b.d arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
.Lrestore_regs:
+ # Interrpts are actually disabled from this point on, but will get
+ # reenabled after we return from interrupt/exception.
+ # But irq tracer needs to be told now...
+ TRACE_ASM_IRQ_ENABLE
+
ld r0, [sp, PT_status32] ; U/K mode at time of entry
lr r10, [AUX_IRQ_ACT]
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 431433929189..0cb0abaa0479 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@ END(call_do_page_fault)
.Lrestore_regs:
+ # Interrpts are actually disabled from this point on, but will get
+ # reenabled after we return from interrupt/exception.
+ # But irq tracer needs to be told now...
TRACE_ASM_IRQ_ENABLE
lr r10, [status32]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fbf4f22..5487d0b97400 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
#endif
+#include <linux/of_fdt.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/highmem.h>
@@ -136,6 +137,9 @@ void __init setup_arch_memory(void)
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
memblock_dump_all();
/*----------------- node/zones setup --------------------------*/
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
index c2cfcb121e34..2fcefe720283 100644
--- a/arch/nios2/lib/memset.c
+++ b/arch/nios2/lib/memset.c
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
"=r" (charcnt), /* %1 Output */
"=r" (dwordcnt), /* %2 Output */
"=r" (fill8reg), /* %3 Output */
- "=r" (wrkrega) /* %4 Output */
+ "=&r" (wrkrega) /* %4 Output only */
: "r" (c), /* %5 Input */
"0" (s), /* %0 Input/Output */
"1" (count) /* %1 Input/Output */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df70aa20..2fc5d4db503c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(mlock2)
SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d45605..cf12c580f6b2 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 380
+#define NR_syscalls 382
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d45b08..e9f5f41aa55a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
#define __NR_membarrier 365
#define __NR_mlock2 378
#define __NR_copy_file_range 379
+#define __NR_preadv2 380
+#define __NR_pwritev2 381
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad9545b41..081b2ad99d73 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
spinlock_t list_lock;
struct list_head pgtable_list;
struct list_head gmap_list;
- unsigned long asce_bits;
+ unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d321469eeda7..c837b79b455d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- if (mm->context.asce_limit == 0) {
+ switch (mm->context.asce_limit) {
+ case 1UL << 42:
+ /*
+ * forked 3-level task, fall through to set new asce with new
+ * mm->pgd
+ */
+ case 0:
/* context created by exec, set asce limit to 4TB */
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
mm->context.asce_limit = STACK_TOP_MAX;
- } else if (mm->context.asce_limit == (1UL << 31)) {
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ break;
+ case 1UL << 53:
+ /* forked 4-level task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ break;
+ case 1UL << 31:
+ /* forked 2-level compat task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ /* pgd_alloc() did not increase mm->nr_pmds */
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void set_user_asce(struct mm_struct *mm)
{
- S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+ S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+ S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9b3d9b6099f2..da34cb6b1f3b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
return _REGION2_ENTRY_EMPTY;
}
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d6fd22ea270d..18cdede1aeda 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
- crst_table_downgrade(current->mm, 1UL << 31); \
+ crst_table_downgrade(current->mm); \
execve_tail(); \
} while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7c3eaa..a2e6ef32e054 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte(init_mm.context.asce);
else
__tlb_flush_global();
}
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte_local(init_mm.context.asce);
else
__tlb_flush_local();
}
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, (unsigned long) mm->pgd |
- mm->context.asce_bits);
+ __tlb_flush_asce(mm, mm->context.asce);
else
__tlb_flush_full(mm);
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7b0451397d6..2489b2e917c8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ S390_lowcore.kernel_asce = init_mm.context.asce;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 45c4daa49930..89cf09e5f168 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
if (!(flags & MAP_FIXED))
addr = 0;
if ((addr + len) >= TASK_SIZE)
- return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+ return crst_table_upgrade(current->mm);
return 0;
}
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f6c3de26cda8..e8b5962ac12a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
__tlb_flush_local();
}
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
{
unsigned long *table, *pgd;
- unsigned long entry;
- int flush;
- BUG_ON(limit > TASK_MAX_SIZE);
- flush = 0;
-repeat:
+ /* upgrade should only happen from 3 to 4 levels */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
+
spin_lock_bh(&mm->page_table_lock);
- if (mm->context.asce_limit < limit) {
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit <= (1UL << 31)) {
- entry = _REGION3_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- } else {
- entry = _REGION2_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION2;
- }
- crst_table_init(table, entry);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->task_size = mm->context.asce_limit;
- table = NULL;
- flush = 1;
- }
+ pgd = (unsigned long *) mm->pgd;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+ mm->pgd = (pgd_t *) table;
+ mm->context.asce_limit = 1UL << 53;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm->task_size = mm->context.asce_limit;
spin_unlock_bh(&mm->page_table_lock);
- if (table)
- crst_table_free(mm, table);
- if (mm->context.asce_limit < limit)
- goto repeat;
- if (flush)
- on_each_cpu(__crst_table_upgrade, mm, 0);
+
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
{
pgd_t *pgd;
+ /* downgrade should only happen from 3 to 2 levels (compat only) */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
- while (mm->context.asce_limit > limit) {
- pgd = mm->pgd;
- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
- case _REGION_ENTRY_TYPE_R2:
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- break;
- case _REGION_ENTRY_TYPE_R3:
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_SEGMENT;
- break;
- default:
- BUG();
- }
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
- }
+
+ pgd = mm->pgd;
+ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->context.asce_limit = 1UL << 31;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+
if (current->active_mm == mm)
set_user_asce(mm);
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e595e89eac65..1ea8c07eab84 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
- goto out_clean;
+ goto out;
}
/*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
- goto out_reg;
+ goto free_dma_table;
}
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
- goto out_reg;
- return 0;
+ goto free_bitmap;
-out_reg:
+ return 0;
+free_bitmap:
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+free_dma_table:
dma_free_cpu_table(zdev->dma_table);
-out_clean:
+ zdev->dma_table = NULL;
+out:
return rc;
}
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index fb23fd6b186a..c74d3701ad68 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -24,7 +24,6 @@ CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
# CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 04920ab8e292..3583d676a916 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -48,7 +48,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 56f933816144..1d8321c827a8 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -48,6 +48,7 @@
#define SUN4V_CHIP_SPARC_M6 0x06
#define SUN4V_CHIP_SPARC_M7 0x07
#define SUN4V_CHIP_SPARC64X 0x8a
+#define SUN4V_CHIP_SPARC_SN 0x8b
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b6de8b10a55b..36eee8132c22 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -423,8 +423,10 @@
#define __NR_setsockopt 355
#define __NR_mlock2 356
#define __NR_copy_file_range 357
+#define __NR_preadv2 358
+#define __NR_pwritev2 359
-#define NR_syscalls 358
+#define NR_syscalls 360
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
index 4ee1ad420862..655628def68e 100644
--- a/arch/sparc/kernel/cherrs.S
+++ b/arch/sparc/kernel/cherrs.S
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
subcc %g1, %g2, %g1 ! Next cacheline
bge,pt %icc, 1b
nop
- ba,pt %xcc, dcpe_icpe_tl1_common
- nop
+ ba,a,pt %xcc, dcpe_icpe_tl1_common
do_dcpe_tl1_fatal:
sethi %hi(1f), %g7
@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
mov 0x2, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_dcpe_tl1,.-do_dcpe_tl1
.globl do_icpe_tl1
@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
- ba,pt %xcc, dcpe_icpe_tl1_common
- nop
+ ba,a,pt %xcc, dcpe_icpe_tl1_common
do_icpe_tl1_fatal:
sethi %hi(1f), %g7
@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
mov 0x3, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_icpe_tl1,.-do_icpe_tl1
.type dcpe_icpe_tl1_common,#function
@@ -456,7 +452,7 @@ __cheetah_log_error:
cmp %g2, 0x63
be c_cee
nop
- ba,pt %xcc, c_deferred
+ ba,a,pt %xcc, c_deferred
.size __cheetah_log_error,.-__cheetah_log_error
/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index dfad8b1aea9f..493e023a468a 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "sparc-m7";
break;
+ case SUN4V_CHIP_SPARC_SN:
+ sparc_cpu_type = "SPARC-SN";
+ sparc_fpu_type = "SPARC-SN integrated FPU";
+ sparc_pmu_type = "sparc-sn";
+ break;
+
case SUN4V_CHIP_SPARC64X:
sparc_cpu_type = "SPARC64-X";
sparc_fpu_type = "SPARC64-X integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index e69ec0e3f155..45c820e1cba5 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA5:
case SUN4V_CHIP_SPARC_M6:
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
break;
diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
index a6864826a4bd..336d2750fe78 100644
--- a/arch/sparc/kernel/fpu_traps.S
+++ b/arch/sparc/kernel/fpu_traps.S
@@ -100,8 +100,8 @@ do_fpdis:
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
- b,pt %xcc, fpdis_exit
- nop
+ ba,a,pt %xcc, fpdis_exit
+
2: andcc %g5, FPRS_DU, %g0
bne,pt %icc, 3f
fzero %f32
@@ -144,8 +144,8 @@ do_fpdis:
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
- ba,pt %xcc, fpdis_exit
- nop
+ ba,a,pt %xcc, fpdis_exit
+
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
@@ -197,8 +197,7 @@ fpdis_exit2:
fp_other_bounce:
call do_fpother
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size fp_other_bounce,.-fp_other_bounce
.align 32
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index cd1f592cd347..a076b4249e62 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -414,6 +414,8 @@ sun4v_chip_type:
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
+ be,pt %xcc, 70f
+ cmp %g2, 'S'
bne,pn %xcc, 49f
nop
@@ -433,6 +435,9 @@ sun4v_chip_type:
cmp %g2, '7'
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M7, %g4
+ cmp %g2, 'N'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_SN, %g4
ba,pt %xcc, 49f
nop
@@ -461,9 +466,8 @@ sun4v_chip_type:
subcc %g3, 1, %g3
bne,pt %xcc, 41b
add %g1, 1, %g1
- mov SUN4V_CHIP_SPARC64X, %g4
ba,pt %xcc, 5f
- nop
+ mov SUN4V_CHIP_SPARC64X, %g4
49:
mov SUN4V_CHIP_UNKNOWN, %g4
@@ -548,8 +552,7 @@ sun4u_init:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- ba,pt %xcc, sun4u_continue
- nop
+ ba,a,pt %xcc, sun4u_continue
sun4v_init:
/* Set ctx 0 */
@@ -560,14 +563,12 @@ sun4v_init:
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
- ba,pt %xcc, niagara_tlb_fixup
- nop
+ ba,a,pt %xcc, niagara_tlb_fixup
sun4u_continue:
BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
- ba,pt %xcc, spitfire_tlb_fixup
- nop
+ ba,a,pt %xcc, spitfire_tlb_fixup
niagara_tlb_fixup:
mov 3, %g2 /* Set TLB type to hypervisor. */
@@ -597,6 +598,9 @@ niagara_tlb_fixup:
cmp %g1, SUN4V_CHIP_SPARC_M7
be,pt %xcc, niagara4_patch
nop
+ cmp %g1, SUN4V_CHIP_SPARC_SN
+ be,pt %xcc, niagara4_patch
+ nop
call generic_patch_copyops
nop
@@ -639,8 +643,7 @@ niagara_patch:
call hypervisor_patch_cachetlbops
nop
- ba,pt %xcc, tlb_fixup_done
- nop
+ ba,a,pt %xcc, tlb_fixup_done
cheetah_tlb_fixup:
mov 2, %g2 /* Set TLB type to cheetah+. */
@@ -659,8 +662,7 @@ cheetah_tlb_fixup:
call cheetah_patch_cachetlbops
nop
- ba,pt %xcc, tlb_fixup_done
- nop
+ ba,a,pt %xcc, tlb_fixup_done
spitfire_tlb_fixup:
/* Set TLB type to spitfire. */
@@ -774,8 +776,7 @@ setup_trap_table:
call %o1
add %sp, (2047 + 128), %o0
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
1: sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
@@ -814,8 +815,7 @@ setup_trap_table:
BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
/* Disable STICK_INT interrupts. */
1:
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 753b4f031bfb..34b4933900bf 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -18,8 +18,7 @@ __do_privact:
109: or %g7, %lo(109b), %g7
call do_privact
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __do_privact,.-__do_privact
.type do_mna,#function
@@ -46,8 +45,7 @@ do_mna:
mov %l5, %o2
call mem_address_unaligned
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_mna,.-do_mna
.type do_lddfmna,#function
@@ -65,8 +63,7 @@ do_lddfmna:
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_lddfmna,.-do_lddfmna
.type do_stdfmna,#function
@@ -84,8 +81,7 @@ do_stdfmna:
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index badf0951d73c..c2b202d763a1 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
}
}
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+ void *stc, void *host_controller,
+ struct platform_device *op,
+ int numa_node)
+{
+ sd->iommu = iommu;
+ sd->stc = stc;
+ sd->host_controller = host_controller;
+ sd->op = op;
+ sd->numa_node = numa_node;
+}
+
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
if (!dev)
return NULL;
+ op = of_find_device_by_node(node);
sd = &dev->dev.archdata;
- sd->iommu = pbm->iommu;
- sd->stc = &pbm->stc;
- sd->host_controller = pbm;
- sd->op = op = of_find_device_by_node(node);
- sd->numa_node = pbm->numa_node;
-
+ pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+ pbm->numa_node);
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
@@ -994,6 +1003,27 @@ void pcibios_set_master(struct pci_dev *dev)
/* No special bus mastering setup handling */
}
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+ struct pci_dev *pdev;
+
+ /* Add sriov arch specific initialization here.
+ * Copy dev_archdata from PF to VF
+ */
+ if (dev->is_virtfn) {
+ struct dev_archdata *psd;
+
+ pdev = dev->physfn;
+ psd = &pdev->dev.archdata;
+ pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+ psd->stc, psd->host_controller, NULL,
+ psd->numa_node);
+ }
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static int __init pcibios_init(void)
{
pci_dfl_cache_line_size = 64 >> 2;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 26db95b54ee9..599f1207eed2 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -285,7 +285,8 @@ static void __init sun4v_patch(void)
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
&__sun4v_2insn_patch_end);
- if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+ if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
&__sun_m7_2insn_patch_end);
@@ -524,6 +525,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
@@ -532,6 +534,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
}
@@ -561,6 +564,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
@@ -570,6 +574,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index c357e40ffd01..4a73009f66a5 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
ba,pt %xcc, etraptl1
rd %pc, %g7
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
mov %l5, %o2
call spitfire_access_error
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_access_error,.-__spitfire_access_error
/* This is the trap handler entry point for ECC correctable
@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
mov %l5, %o2
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
.type __spitfire_data_access_exception,#function
@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
mov %l5, %o2
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception,.-__spitfire_data_access_exception
.type __spitfire_insn_access_exception_tl1,#function
@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
mov %l5, %o2
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
.type __spitfire_insn_access_exception,#function
@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
mov %l5, %o2
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 6c3dd6c52f8b..eac7f0db5c8c 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -88,4 +88,4 @@ sys_call_table:
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range
+/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 12b524cfcfa0..b0f17ff2ddba 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -89,7 +89,7 @@ sys_call_table32:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
+ .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
#endif /* CONFIG_COMPAT */
@@ -170,4 +170,4 @@ sys_call_table:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word sys_setsockopt, sys_mlock2, sys_copy_file_range
+ .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
index b7f0f3f3a909..c731e8023d3e 100644
--- a/arch/sparc/kernel/utrap.S
+++ b/arch/sparc/kernel/utrap.S
@@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */
mov %l4, %o1
call bad_trap
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
invoke_utrap:
sllx %g3, 3, %g3
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index cb5789c9f961..f6bb857254fc 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -45,6 +45,14 @@ static const struct vio_device_id *vio_match_device(
return NULL;
}
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+ add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+ return 0;
+}
+
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -105,15 +113,25 @@ static ssize_t type_show(struct device *dev,
return sprintf(buf, "%s\n", vdev->type);
}
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct vio_dev *vdev = to_vio_dev(dev);
+
+ return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+
static struct device_attribute vio_dev_attrs[] = {
__ATTR_RO(devspec),
__ATTR_RO(type),
+ __ATTR_RO(modalias),
__ATTR_NULL
};
static struct bus_type vio_bus_type = {
.name = "vio",
.dev_attrs = vio_dev_attrs,
+ .uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_device_probe,
.remove = vio_device_remove,
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index aadd321aa05d..7d02b1fef025 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -33,6 +33,10 @@ ENTRY(_start)
jiffies = jiffies_64;
#endif
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
SECTIONS
{
#ifdef CONFIG_SPARC64
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 1e67ce958369..855019a8590e 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -32,8 +32,7 @@ fill_fixup:
rd %pc, %g7
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
/* Be very careful about usage of the trap globals here.
* You cannot touch %g5 as that has the fault information.
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1cfe6aab7a11..09e838801e39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1769,6 +1769,7 @@ static void __init setup_page_offset(void)
max_phys_bits = 47;
break;
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
default:
/* M7 and later support 52-bit virtual addresses. */
sparc64_va_hole_top = 0xfff8000000000000UL;
@@ -1986,6 +1987,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
pagecv_flag = 0x00;
break;
default:
@@ -2138,6 +2140,7 @@ void __init paging_init(void)
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
page_cache4v_flag = _PAGE_CP_4V;
break;
default:
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 86a9bec18dab..bd3e8421b57c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids
/*
* AMD Performance Monitor K7 and later.
*/
-static const u64 amd_perfmon_event_map[] =
+static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 68fa55b4d42e..aff79884e17d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3639,6 +3639,7 @@ __init int intel_pmu_init(void)
case 78: /* 14nm Skylake Mobile */
case 94: /* 14nm Skylake Desktop */
+ case 85: /* 14nm Skylake Server */
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6c3b7c1780c9..1ca5d1e7d4f2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -63,7 +63,7 @@ static enum {
#define LBR_PLM (LBR_KERNEL | LBR_USER)
-#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */
+#define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP -1 /* LBR filter not supported */
#define LBR_IGN 0 /* ignored */
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
* The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
* in suppress mode. So LBR_SELECT should be set to
* (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+ * But the 10th bit LBR_CALL_STACK does not operate
+ * in suppress mode.
*/
- reg->config = mask ^ x86_pmu.lbr_sel_mask;
+ reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
(br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
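[editor note: the hunk above widens LBR_SEL_MASK and excludes the new call-stack bit from the suppress-mode inversion. The toy C program below is only an illustration of that bit manipulation; the constants and function names are invented and are not part of the patch.]

#include <stdint.h>
#include <stdio.h>

#define SEL_MASK    0x3ffu         /* ten valid bits in the select register */
#define CALL_STACK  (1u << 9)      /* 10th bit: a plain enable, not suppress-mode */

/* invert only the suppress-mode bits of the requested filter mask */
static uint32_t lbr_select_from_mask(uint32_t mask)
{
	return mask ^ (SEL_MASK & ~CALL_STACK);
}

int main(void)
{
	/* made-up request: filter bit 0 plus the call-stack bit */
	uint32_t mask = 0x1u | CALL_STACK;

	printf("select=0x%x\n", lbr_select_from_mask(mask));
	return 0;
}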
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 6af7cf71d6b2..09a77dbc73c9 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void)
struct dev_ext_attribute *de_attrs;
struct attribute **attrs;
size_t size;
+ u64 reg;
int ret;
long i;
+ if (boot_cpu_has(X86_FEATURE_VMX)) {
+ /*
+ * Intel SDM, 36.5 "Tracing post-VMXON" says that
+ * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
+ * post-VMXON.
+ */
+ rdmsrl(MSR_IA32_VMX_MISC, reg);
+ if (reg & BIT(14))
+ pt_pmu.vmx = true;
+ }
+
attrs = NULL;
for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event)
reg |= (event->attr.config & PT_CONFIG_MASK);
+ event->hw.config = reg;
wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
-static void pt_config_start(bool start)
+static void pt_config_stop(struct perf_event *event)
{
- u64 ctl;
+ u64 ctl = READ_ONCE(event->hw.config);
+
+ /* may be already stopped by a PMI */
+ if (!(ctl & RTIT_CTL_TRACEEN))
+ return;
- rdmsrl(MSR_IA32_RTIT_CTL, ctl);
- if (start)
- ctl |= RTIT_CTL_TRACEEN;
- else
- ctl &= ~RTIT_CTL_TRACEEN;
+ ctl &= ~RTIT_CTL_TRACEEN;
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+ WRITE_ONCE(event->hw.config, ctl);
+
/*
* A wrmsr that disables trace generation serializes other PT
* registers and causes all data packets to be written to memory,
@@ -291,8 +306,7 @@ static void pt_config_start(bool start)
* The below WMB, separating data store and aux_head store matches
* the consumer's RMB that separates aux_head load and data load.
*/
- if (!start)
- wmb();
+ wmb();
}
static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void)
if (!ACCESS_ONCE(pt->handle_nmi))
return;
- pt_config_start(false);
+ /*
+ * If VMX is on and PT does not support it, don't touch anything.
+ */
+ if (READ_ONCE(pt->vmx_on))
+ return;
if (!event)
return;
+ pt_config_stop(event);
+
buf = perf_get_aux(&pt->handle);
if (!buf)
return;
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void)
}
}
+void intel_pt_handle_vmx(int on)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct perf_event *event;
+ unsigned long flags;
+
+ /* PT plays nice with VMX, do nothing */
+ if (pt_pmu.vmx)
+ return;
+
+ /*
+ * VMXON will clear RTIT_CTL.TraceEn; we need to make
+ * sure to not try to set it while VMX is on. Disable
+ * interrupts to avoid racing with pmu callbacks;
+ * concurrent PMI should be handled fine.
+ */
+ local_irq_save(flags);
+ WRITE_ONCE(pt->vmx_on, on);
+
+ if (on) {
+ /* prevent pt_config_stop() from writing RTIT_CTL */
+ event = pt->handle.event;
+ if (event)
+ event->hw.config = 0;
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
+
/*
* PMU callbacks
*/
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode)
struct pt *pt = this_cpu_ptr(&pt_ctx);
struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ if (READ_ONCE(pt->vmx_on))
+ return;
+
if (!buf || pt_buffer_is_full(buf, pt)) {
event->hw.state = PERF_HES_STOPPED;
return;
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode)
* see comment in intel_pt_interrupt().
*/
ACCESS_ONCE(pt->handle_nmi) = 0;
- pt_config_start(false);
+
+ pt_config_stop(event);
if (event->hw.state == PERF_HES_STOPPED)
return;
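[editor note: taken together, the pt.c hunks above replace the read-modify-write pt_config_start(false) path with a stop routine keyed off a cached copy of RTIT_CTL, so a PMI that already stopped tracing, or a CPU that has entered VMX, never writes the MSR again. The standalone sketch below models only that caching idea; the struct and function names are invented for illustration and are not the kernel code.]

#include <stdbool.h>
#include <stdio.h>

#define CTL_TRACEEN (1u << 0)

/* toy per-cpu state mirroring the cached hw.config / vmx_on pair */
struct toy_pt {
	unsigned int cached_ctl;   /* last value written to the control reg */
	bool vmx_on;               /* set while the CPU is inside VMX */
};

static unsigned int hw_ctl;        /* stands in for the real MSR */

static void toy_stop(struct toy_pt *pt)
{
	/* may already be stopped by an earlier interrupt: nothing to do */
	if (!(pt->cached_ctl & CTL_TRACEEN))
		return;
	pt->cached_ctl &= ~CTL_TRACEEN;
	hw_ctl = pt->cached_ctl;   /* single write, no read-modify-write */
}

static void toy_handle_vmx(struct toy_pt *pt, bool on)
{
	pt->vmx_on = on;
	if (on)
		pt->cached_ctl = 0;  /* later stops become no-ops */
}

int main(void)
{
	struct toy_pt pt = { .cached_ctl = CTL_TRACEEN };

	toy_handle_vmx(&pt, true); /* guest entry clears the cached config */
	toy_stop(&pt);             /* no-op instead of touching the register */
	printf("hw_ctl=%u vmx_on=%d\n", hw_ctl, pt.vmx_on);
	return 0;
}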
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 336878a5d205..3abb5f5cccc8 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -65,6 +65,7 @@ enum pt_capabilities {
struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+ bool vmx;
};
/**
@@ -107,10 +108,12 @@ struct pt_buffer {
* struct pt - per-cpu pt context
* @handle: perf output handle
* @handle_nmi: do handle PT PMI on this cpu, there's an active event
+ * @vmx_on: 1 if VMX is ON on this cpu
*/
struct pt {
struct perf_output_handle handle;
int handle_nmi;
+ int vmx_on;
};
#endif /* __INTEL_PT_H__ */
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 70c93f9b03ac..1705c9d75e44 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void)
break;
case 60: /* Haswell */
case 69: /* Haswell-Celeron */
+ case 70: /* Haswell GT3e */
case 61: /* Broadwell */
case 71: /* Broadwell-H */
rapl_cntr_mask = RAPL_IDX_HSW;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5a2ed3ed2f26..f353061bba1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+ extern void intel_pt_handle_vmx(int on);
+#endif
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ad59d70bcb1a..ef495511f019 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
struct irq_desc *desc;
int cpu, vector;
- BUG_ON(!data->cfg.vector);
+ if (!data->cfg.vector)
+ return;
vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 54cdbd2003fe..af1112980dd4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -389,12 +389,6 @@ default_entry:
/* Make changes effective */
wrmsr
- /*
- * And make sure that all the mappings we set up have NX set from
- * the beginning.
- */
- orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
-
enable_paging:
/*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee1c8a93871c..133679d520af 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void)
static void kvm_cpu_vmxon(u64 addr)
{
+ intel_pt_handle_vmx(1);
+
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&addr), "m"(addr)
: "memory", "cc");
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void)
static void kvm_cpu_vmxoff(void)
{
asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+
+ intel_pt_handle_vmx(0);
}
static void hardware_disable(void)
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 8bea84724a7d..f65a33f505b6 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -32,8 +32,9 @@ early_param("noexec", noexec_setup);
void x86_configure_nx(void)
{
- /* If disable_nx is set, clear NX on all new mappings going forward. */
- if (disable_nx)
+ if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
__supported_pte_mask &= ~_PAGE_NX;
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c6e1dd..f42e78de1e10 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
{
+ int irq = per_cpu(lock_kicker_irq, cpu);
+
+ /* Don't kick if the target's kicker interrupt is not initialized. */
+ if (irq == -1)
+ return;
+
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 94a1843b0426..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
struct rbd_device *rbd_dev = (struct rbd_device *)data;
int ret;
- if (!rbd_dev)
- return;
-
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
+
+ dout("%s flushing notifies\n", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
- bool removing;
/*
- * Don't hold the lock while doing disk operations,
- * or lock ordering will conflict with the bdev mutex via:
- * rbd_add() -> blkdev_get() -> rbd_open()
+ * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+ * try to update its size. If REMOVING is set, updating size
+ * is just useless work since the device can't be opened.
*/
- spin_lock_irq(&rbd_dev->lock);
- removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
- spin_unlock_irq(&rbd_dev->lock);
- /*
- * If the device is being removed, rbd_dev->disk has
- * been destroyed, so don't try to update its size
- */
- if (!removing) {
+ if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+ !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
- u64 incompat;
+ u64 unsup;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
if (ret < sizeof (features_buf))
return -ERANGE;
- incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_SUPPORTED)
+ unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+ if (unsup) {
+ rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+ unsup);
return -ENXIO;
+ }
*snap_features = le64_to_cpu(features_buf.features);
@@ -5187,6 +5182,10 @@ out_err:
return ret;
}
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
ret = rbd_dev_id_get(rbd_dev);
if (ret)
- return ret;
+ goto err_out_unlock;
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
/* Everything's ready. Announce the disk to the world. */
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- add_disk(rbd_dev->disk);
+ up_write(&rbd_dev->header_rwsem);
+ add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
@@ -5252,6 +5252,8 @@ err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0)
goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
return rc;
err_out_rbd_dev:
+ up_write(&rbd_dev->header_rwsem);
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
return ret;
rbd_dev_header_unwatch_sync(rbd_dev);
- /*
- * flush remaining watch callbacks - these must be complete
- * before the osd_client is shutdown
- */
- dout("%s: flushing notifies", __func__);
- ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
* Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 02e18182fcb5..2beb396fe652 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
} else {
clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
- clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6);
+ clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
j_cdbs->prev_cpu_wall = cur_wall_time;
- if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
- idle_time = 0;
- } else {
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
- j_cdbs->prev_cpu_idle = cur_idle_time;
- }
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 30fe323c4551..f502d5b90c25 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -813,6 +813,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
mci = i7_dev->mci;
pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 468447aff8eb..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3168,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
pvt = mci->pvt_info;
/*
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
{ NULL_GUID, "", NULL },
};
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
- char u = var_name[*match];
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
return true;
- /* Case sensitive match */
- if (!c && *match == len)
- return true;
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
- if (c != u)
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
return false;
-
- if (!c)
- return true;
+ }
}
- return true;
}
bool
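[editor note: the rewritten variable_matches() above walks @match_name one character at a time, treating a trailing '*' as "match whatever is left" and a NUL as "match only if @var_name is also exhausted". The self-contained sketch below re-implements that loop outside the kernel so the three cases are easy to exercise; it is an illustration under those assumptions, not the EFI code itself.]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* toy re-implementation of the matching rules used above */
static bool toy_matches(const char *var_name, size_t len, const char *pattern)
{
	size_t i;

	for (i = 0; ; i++) {
		char c = pattern[i];

		switch (c) {
		case '*':               /* trailing wildcard: everything matches */
			return true;
		case '\0':              /* pattern ended: var_name must have too */
			return i == len;
		default:                /* literal char must be present and equal */
			if (i < len && c == var_name[i])
				continue;
			return false;
		}
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_matches("Boot0001", 8, "Boot*"),     /* 1: wildcard */
	       toy_matches("Timeout", 7, "Timeout"),    /* 1: exact */
	       toy_matches("Timeout2", 8, "Timeout"));  /* 0: var_name longer */
	return 0;
}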
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d9ab0cd1d205..4d9a315cfd43 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- error = pinctrl_request_gpio(chip->base + offset);
- if (error)
- pm_runtime_put(&p->pdev->dev);
-
- return error;
+ return pinctrl_request_gpio(chip->base + offset);
}
static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
pinctrl_free_gpio(chip->base + offset);
- /* Set the GPIO as an input to ensure that the next GPIO request won't
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
*/
gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
- pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
- irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
- irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
- irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
- irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
err1:
gpiochip_remove(gpio_chip);
err0:
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
gpiochip_remove(&p->gpio_chip);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 682070d20f00..2dc52585e3f2 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
if (lookup) {
lookup->adev = adev;
- lookup->con_id = con_id;
+ lookup->con_id = kstrdup(con_id, GFP_KERNEL);
list_add_tail(&lookup->node, &acpi_crs_lookup_list);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
- return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+ if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 05b0353d3880..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v7_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 02deb3229405..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
#define mmMC_SEQ_MISC0_FIJI 0xA71
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e17fbdaf874b..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1796,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
@@ -1823,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -2128,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2142,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ /*
+ * Set the GPU linear window to be at the end of the DMA window, where
+ * the CMA area is likely to reside. This ensures that we are able to
+ * map the command buffers while having the linear window overlap as
+ * much RAM as possible, so we can optimize mappings for other buffers.
+ *
+ * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+ * to different views of the memory on the individual engines.
+ */
+ if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+ (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+ u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+ if (dma_mask < PHYS_OFFSET + SZ_2G)
+ gpu->memory_base = PHYS_OFFSET;
+ else
+ gpu->memory_base = dma_mask - SZ_2G + 1;
+ }
+
ret = etnaviv_hw_reset(gpu);
if (ret)
goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- u32 dma_mask;
int err = 0;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- /*
- * Set the GPU linear window to be at the end of the DMA window, where
- * the CMA area is likely to reside. This ensures that we are able to
- * map the command buffers while having the linear window overlap as
- * much RAM as possible, so we can optimize mappings for other buffers.
- */
- dma_mask = (u32)dma_get_required_mask(dev);
- if (dma_mask < PHYS_OFFSET + SZ_2G)
- gpu->memory_base = PHYS_OFFSET;
- else
- gpu->memory_base = dma_mask - SZ_2G + 1;
-
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
if (IS_ERR(gpu->mmio))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since it should really
+ * be the opposite direction. If the crtc is enabled, find the dig_fe which
+ * selects this crtc and ensure that it is enabled. If such a dig_fe is found,
+ * find the dig_be which selects that dig_fe and ensure that it is enabled
+ * and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1.enable is set, we should disconnect the timing
+ * from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+ /* is the found dig_fe selected by this dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+ /* and is the dig_be in SST mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+ /* dig_be enabled and tx running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
+
+/*
+ * Blank the dig when in DP SST mode.
+ * The dig ignores crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+ DRM_ERROR("dig %d , should be enable\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+ if (counter >= 32 )
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
-
/*
 * We should disable the dig if it drives a DP SST stream, but we are
 * in radeon_device_init and the topology is unknown; it only becomes
 * available after radeon_modeset_init. radeon_atom_encoder_dpms_dig
 * would do the job if we initialized it properly; for now do it
 * manually here.
 */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
/* the six lines below could probably be removed */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/*DIG block*/
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/*DCIO_UNIPHY block*/
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
+ int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- list_del_init(&bo->swap);
- list_del_init(&bo->lru);
-
- } else {
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
- man = &bdev->man[bo->mem.mem_type];
- list_move_tail(&bo->lru, &man->lru);
- }
+ put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.enable = virtio_gpu_crtc_enable,
.disable = virtio_gpu_crtc_disable,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
+ .atomic_flush = virtio_gpu_crtc_atomic_flush,
};
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
- &vmw_cmd_ok, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- (var->bits_per_pixel + 7) / 8,
- mode->vdisplay)) {
+ mode->hdisplay *
+ DIV_ROUND_UP(var->bits_per_pixel, 8),
+ mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c6eaff5f8845..0238f0169e48 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -259,6 +259,7 @@
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
#define USB_VENDOR_ID_CVTOUCH 0x1ff7
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ed2f68edc8f1..53fc856d6867 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 02c4efea241c..cf2ba43453fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
+ wacom->shared->stylus_in_proximity = true;
return 1;
}
@@ -3395,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+ { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3560,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33C) },
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
+ { USB_DEVICE_WACOM(0x343) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
config I2C_XLP9XX
tristate "XLP9XX I2C support"
- depends on CPU_XLP || COMPILE_TEST
+ depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS processors.
+ the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- u32 txdma[CPM_MAXBD];
- u32 rxdma[CPM_MAXBD];
+ dma_addr_t txdma[CPM_MAXBD];
+ dma_addr_t rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
return -EIO;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
out:
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
return ret;
}
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
+ clk_disable(i2c->clk);
+
+ return 0;
+
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
exynos5_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
static const struct of_device_id rk3x_i2c_match[] = {
{ .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
{ .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+ { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
{ .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
{},
};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
NULL);
/* Couldn't find default GID location */
- WARN_ON(ix < 0);
+ if (WARN_ON(ix < 0))
+ goto release;
zattr_type.gid_type = gid_type;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
+
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
void ib_drain_qp(struct ib_qp *qp)
{
ib_drain_sq(qp);
- ib_drain_rq(qp);
+ if (!qp->srq)
+ ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
+ if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void c4iw_drain_sq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
- wait_for_completion(&qp->sq_drained);
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_sq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
+
+ if (need_to_wait)
+ wait_for_completion(&qp->sq_drained);
}
void c4iw_drain_rq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
+
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_rq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
- wait_for_completion(&qp->rq_drained);
+ if (need_to_wait)
+ wait_for_completion(&qp->rq_drained);
}
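
The drain rework above replaces an unconditional wait_for_completion() with a wait that is only taken when the queue is still non-empty under the lock; if nothing is outstanding, no completion will ever be signalled and the caller would block forever. A hedged sketch of the pattern, using hypothetical example_* names:

static void example_drain(struct example_qp *qp)
{
        unsigned long flags;
        bool need_to_wait;

        /* Sample the queue state atomically with respect to the posting path. */
        spin_lock_irqsave(&qp->lock, flags);
        need_to_wait = !example_queue_empty(&qp->wq);   /* hypothetical helper */
        spin_unlock_irqrestore(&qp->lock, flags);

        /* Only sleep if there is outstanding work that will signal us. */
        if (need_to_wait)
                wait_for_completion(&qp->drained);
}
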
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3ff663c35bac..4cb81f68d850 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 99cef26e74b4..77630cad7f81 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -498,9 +498,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
- if (!netif_carrier_ok(netdev))
- return NETDEV_TX_OK;
-
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/export.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
if (call_send)
- rdi->driver_f.schedule_send_no_lock(qp);
- else
rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
}
return err;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 194580fba7fd..14d3b37944df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
* go away inside make_request
*/
sectors = bio_sectors(bio);
+ /* bio could be mergeable after passing to the underlying layer */

+ bio->bi_rw &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2ea12c6bf659..34783a3c8b3c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
(unsigned long long)zone_size>>1);
zone_start = conf->strip_zone[j].zone_end;
}
- printk(KERN_INFO "\n");
}
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
unsigned short blksize = 512;
+ *private_conf = ERR_PTR(-ENOMEM);
if (!conf)
return -ENOMEM;
rdev_for_each(rdev1, mddev) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ab8b65e1741..e48c262ce032 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3502,8 +3502,6 @@ returnbi:
dev = &sh->dev[i];
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
- WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
- WARN_ON(dev->page != dev->orig_page);
}
r5l_stripe_write_finished(sh);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
- /*
- * this is a security check.
- * an exploit using an incorrect bInterfaceNumber is known
- */
- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
- return -ENODEV;
-
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- int nonblocking)
+ void *pb, int nonblocking)
{
unsigned long flags;
int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
- * Verifying planes is NOT necessary since it already has been checked
- * before the buffer is queued/prepared. So it can never fail.
*/
- list_del(&(*vb)->done_entry);
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
if (ret < 0)
return ret;
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
return POLLERR;
/*
+ * If this quirk is set and QBUF hasn't been called yet then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ * This quirk is set by V4L2 for backwards compatibility reasons.
+ */
+ if (q->quirk_poll_must_check_waiting_for_buffers &&
+ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+ return POLLERR;
+
+ /*
* For output streams you can call write() as long as there are fewer
* buffers queued than there are buffers available.
*/
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start, nr, write, 1, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
return 0;
}
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
/**
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
}
static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ q->quirk_poll_must_check_waiting_for_buffers = true;
return vb2_core_queue_init(q);
}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
- /*
- * For compatibility with vb1: if QBUF hasn't been called yet, then
- * return POLLERR as well. This only affects capture queues, output
- * queues will always initialize waiting_for_buffers to false.
- */
- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
- return POLLERR;
-
return res | vb2_core_poll(q, file, wait);
}
EXPORT_SYMBOL_GPL(vb2_poll);
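
The changes above move a V4L2-only poll check into the videobuf2 core behind a quirk flag, and route plane verification through an optional per-queue callback that the core invokes via call_bufop(). The optional-callback side can be sketched as follows, with hypothetical example_* types standing in for the vb2 structures:

struct example_buffer;

struct example_buf_ops {
        int (*verify_planes_array)(struct example_buffer *vb, const void *pb);
};

struct example_queue {
        const struct example_buf_ops *buf_ops;
};

static int example_verify_planes(struct example_queue *q,
                                 struct example_buffer *vb, const void *pb)
{
        /* Call the optional backend hook; absence means "nothing to verify". */
        if (q->buf_ops && q->buf_ops->verify_planes_array)
                return q->buf_ops->verify_planes_array(vb, pb);
        return 0;
}
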
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ if (cxl_ops->irq_wait)
+ cxl_ops->irq_wait(ctx);
+
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
u64 dsisr, u64 errstat);
irqreturn_t (*psl_interrupt)(int irq, void *data);
int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+ void (*irq_wait)(struct cxl_context *ctx);
int (*attach_process)(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr);
int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
- irq_dispose_mapping(virq);
}
int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
return fail_psl_irq(afu, &irq_info);
}
+void native_irq_wait(struct cxl_context *ctx)
+{
+ u64 dsisr;
+ int timeout = 1000;
+ int ph;
+
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ while (timeout--) {
+ ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+ if (ph != ctx->pe)
+ return;
+ dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+ if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+ return;
+ /*
+ * We are waiting for the workqueue to process our
+ * irq, so we need to let that run here.
+ */
+ msleep(1);
+ }
+
+ dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+ " DSISR %016llx!\n", ph, dsisr);
+ return;
+}
+
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
.handle_psl_slice_error = native_handle_psl_slice_error,
.psl_interrupt = NULL,
.ack_irq = native_ack_irq,
+ .irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
.support_attributes = native_support_attributes,
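
native_irq_wait() above is a bounded polling loop: re-read the hardware status, return as soon as the pending bits clear, sleep briefly so the interrupt-handling workqueue can make progress, and warn if the timeout expires. A generic sketch of the same shape, with hypothetical example_* names for the context and register accessor:

static void example_wait_quiescent(struct example_ctx *ctx)
{
        int timeout = 1000;
        u64 status = 0;

        while (timeout--) {
                status = example_read_status(ctx);      /* hypothetical readback */
                if (!(status & EXAMPLE_PENDING_MASK))
                        return;                         /* nothing left pending */
                msleep(1);      /* let the handling workqueue run */
        }

        dev_warn(ctx->dev, "still pending after timeout, status %016llx\n",
                 status);
}
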
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
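
The Baytrail hook above follows the standard way to handle a dependency that is not ready at probe time: return -EPROBE_DEFER so the driver core re-probes the device once more drivers have loaded, and reapply the one-time setting on resume because the hardware may have lost it. A minimal sketch of the deferral half, with a hypothetical readiness check:

static int example_probe(struct platform_device *pdev)
{
        /* Not an error: the driver core will retry this probe later. */
        if (!example_dependency_available())
                return -EPROBE_DEFER;

        /* ... normal resource setup continues here ... */
        return 0;
}
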
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ /* TODO MMC DDR is not working on A80 */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun9i-a80-mmc"))
+ mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 61150af37bc7..470cfc783baa 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2202,7 +2202,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i, err;
+ int i, err = 0;
mutex_lock(&ps->smi_mutex);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4645c44e7c15..1199c2b4bf20 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -588,12 +588,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ }
- mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
@@ -608,6 +626,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
rx_agg_buf->page = page;
+ rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
rxbd->rx_bd_opaque = sw_prod;
@@ -649,6 +668,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
prod_rx_buf->page = page;
+ prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -716,7 +736,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ skb_fill_page_desc(skb, i, cons_rx_buf->page,
+ cons_rx_buf->offset, frag_len);
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -747,7 +768,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
return NULL;
}
- dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE);
skb->data_len += frag_len;
@@ -1635,13 +1656,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
dma_unmap_page(&pdev->dev,
dma_unmap_addr(rx_agg_buf, mapping),
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
__free_page(page);
}
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
+ }
}
}
@@ -2024,7 +2049,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
@@ -2215,7 +2240,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->rx_agg_nr_pages = 0;
if (bp->flags & BNXT_FLAG_TPA)
- agg_factor = 4;
+ agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
if (rx_space > PAGE_SIZE) {
@@ -3076,12 +3101,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
/* Number of segs are log2 units, and first packet is not
* included as part of this units.
*/
- if (mss <= PAGE_SIZE) {
- n = PAGE_SIZE / mss;
+ if (mss <= BNXT_RX_PAGE_SIZE) {
+ n = BNXT_RX_PAGE_SIZE / mss;
nsegs = (MAX_SKB_FRAGS - 1) * n;
} else {
- n = mss / PAGE_SIZE;
- if (mss & (PAGE_SIZE - 1))
+ n = mss / BNXT_RX_PAGE_SIZE;
+ if (mss & (BNXT_RX_PAGE_SIZE - 1))
n++;
nsegs = (MAX_SKB_FRAGS - n) / n;
}
@@ -4367,7 +4392,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_MSIX_CAP)
rc = bnxt_setup_msix(bp);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
rc = bnxt_setup_inta(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 26dac2f3c63c..62896352b0df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
#define BNXT_MIN_PKT_SIZE 45
#define BNXT_NUM_TESTS(bp) 0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
struct bnxt_sw_rx_agg_bd {
struct page *page;
+ unsigned int offset;
dma_addr_t mapping;
};
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
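
BNXT_RX_PAGE_SIZE caps the aggregation buffer at 32K because the RXBD length field is 16 bits, so on systems whose page size exceeds that limit one page is split into chunks. Every chunk except the last takes an extra page reference, letting each consumer drop its reference independently. A hedged sketch of that allocator, using hypothetical example_* names:

#define EXAMPLE_RX_CHUNK_SIZE   (1 << 15)       /* must fit a 16-bit BD length */

struct example_ring {
        struct page *cur_page;          /* partially handed-out page, if any */
        unsigned int cur_offset;
};

static struct page *example_alloc_rx_chunk(struct example_ring *ring,
                                            unsigned int *offset, gfp_t gfp)
{
        struct page *page = ring->cur_page;

        if (!page) {
                page = alloc_page(gfp);
                if (!page)
                        return NULL;
                ring->cur_page = page;
                ring->cur_offset = 0;
        }

        *offset = ring->cur_offset;
        ring->cur_offset += EXAMPLE_RX_CHUNK_SIZE;

        if (ring->cur_offset == PAGE_SIZE)
                ring->cur_page = NULL;          /* page fully handed out */
        else
                get_page(page);                 /* extra ref for this chunk */

        return page;
}
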
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index eec3200ade4a..cb07d95e3dd9 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -440,7 +440,7 @@ static int macb_mii_init(struct macb *bp)
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->dev->dev;
+ bp->mii_bus->parent = &bp->pdev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
struct phy_device *phydev;
phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev)) {
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
err = PTR_ERR(phydev);
break;
}
@@ -3005,29 +3006,36 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
+ err = macb_mii_init(bp);
+ if (err)
+ goto err_out_free_netdev;
+
+ phydev = bp->phy_dev;
+
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unregister_netdev;
+ goto err_out_unregister_mdio;
}
- err = macb_mii_init(bp);
- if (err)
- goto err_out_unregister_netdev;
-
- netif_carrier_off(dev);
+ phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
- phydev = bp->phy_dev;
- phy_attached_info(phydev);
-
return 0;
-err_out_unregister_netdev:
- unregister_netdev(dev);
+err_out_unregister_mdio:
+ phy_disconnect(bp->phy_dev);
+ mdiobus_unregister(bp->mii_bus);
+ mdiobus_free(bp->mii_bus);
+
+ /* Shutdown the PHY if there is a GPIO reset */
+ if (bp->reset_gpio)
+ gpiod_set_value(bp->reset_gpio, 0);
err_out_free_netdev:
free_netdev(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 60908eab3b3a..43da891fab97 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
u8 cpus[SGE_QSETS + 1];
- u16 rspq_map[RSS_TABLE_SIZE];
+ u16 rspq_map[RSS_TABLE_SIZE + 1];
for (i = 0; i < SGE_QSETS; ++i)
cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
rspq_map[i] = i % nq0;
rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
}
+ rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7fc490225da5..a6d26d351dfc 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
/* Enable per-CPU interrupts on the CPU that is
* brought up.
*/
- smp_call_function_single(cpu, mvneta_percpu_enable,
- pp, true);
+ mvneta_percpu_enable(pp);
/* Enable per-CPU interrupt on the one CPU we care
* about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
/* Disable per-CPU interrupts on the CPU that is
* brought down.
*/
- smp_call_function_single(cpu, mvneta_percpu_disable,
- pp, true);
+ mvneta_percpu_disable(pp);
break;
case CPU_DEAD:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7ace07dad6a3..c442f6ad15ff 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
return 0;
pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ if (IS_ERR(pep->phy))
+ return PTR_ERR(pep->phy);
if (!pep->phy)
return -ENODEV;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607..559d11a443bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
select PTP_1588_CLOCK
+ select VXLAN if MLX5_CORE=y
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 34523c48444e..bfa5daaaf5aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -525,6 +525,7 @@ struct mlx5e_priv {
struct mlx5e_vxlan_db vxlan;
struct mlx5e_params params;
+ struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct delayed_work update_stats_work;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4ccfc1ac62c5..7dfb73aa8e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -231,9 +231,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_update_stats(priv);
- schedule_delayed_work(dwork,
- msecs_to_jiffies(
- MLX5E_UPDATE_STATS_INTERVAL));
+ queue_delayed_work(priv->wq, dwork,
+ msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
}
mutex_unlock(&priv->state_lock);
}
@@ -249,7 +248,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
switch (event) {
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
- schedule_work(&priv->update_carrier_work);
+ queue_work(priv->wq, &priv->update_carrier_work);
break;
default:
@@ -1695,7 +1694,7 @@ int mlx5e_open_locked(struct net_device *netdev)
priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
- schedule_delayed_work(&priv->update_stats_work, 0);
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
return 0;
@@ -2202,7 +2201,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -2217,7 +2216,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
ether_addr_copy(netdev->dev_addr, saddr->sa_data);
netif_addr_unlock_bh(netdev);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
return 0;
}
@@ -2503,7 +2502,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+ mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2514,7 +2513,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+ mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2947,10 +2946,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
priv = netdev_priv(netdev);
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ goto err_free_netdev;
+
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
if (err) {
mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
- goto err_free_netdev;
+ goto err_destroy_wq;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -3034,7 +3037,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
}
mlx5e_enable_async_events(priv);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
return priv;
@@ -3072,6 +3075,9 @@ err_dealloc_pd:
err_unmap_free_uar:
mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+err_destroy_wq:
+ destroy_workqueue(priv->wq);
+
err_free_netdev:
free_netdev(netdev);
@@ -3085,9 +3091,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
mlx5e_disable_async_events(priv);
- flush_scheduled_work();
+ flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
mutex_lock(&priv->state_lock);
@@ -3111,6 +3117,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ cancel_delayed_work_sync(&priv->update_stats_work);
+ destroy_workqueue(priv->wq);
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
free_netdev(netdev);
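
The mlx5e conversion above replaces schedule_work()/flush_scheduled_work() with a driver-private workqueue, so teardown flushes and destroys exactly the work this driver owns instead of synchronizing against the global queue. A minimal sketch of the lifecycle, with hypothetical example_* names:

struct example_priv {
        struct workqueue_struct *wq;
        struct work_struct rx_mode_work;
};

static void example_rx_mode_work(struct work_struct *work)
{
        /* ... reprogram receive filters from process context ... */
}

static int example_init(struct example_priv *priv)
{
        priv->wq = create_singlethread_workqueue("example");
        if (!priv->wq)
                return -ENOMEM;

        INIT_WORK(&priv->rx_mode_work, example_rx_mode_work);
        queue_work(priv->wq, &priv->rx_mode_work);
        return 0;
}

static void example_teardown(struct example_priv *priv)
{
        flush_workqueue(priv->wq);      /* wait only for our own work */
        destroy_workqueue(priv->wq);
}
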
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8ba080e441a1..5ff8af472bf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
- iounmap(uar->map);
- iounmap(uar->bf_map);
+ if (uar->map)
+ iounmap(uar->map);
+ else
+ iounmap(uar->bf_map);
mlx5_cmd_free_uar(mdev, uar->index);
}
EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 9f10df25f3cd..f2fd1ef16da7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
return vxlan;
}
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ u16 port = vxlan_work->port;
struct mlx5e_vxlan *vxlan;
int err;
- err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
- if (err)
- return err;
+ if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+ goto free_work;
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
- if (!vxlan) {
- err = -ENOMEM;
+ if (!vxlan)
goto err_delete_port;
- }
vxlan->udp_port = port;
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
if (err)
goto err_free;
- return 0;
+ goto free_work;
err_free:
kfree(vxlan);
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- return err;
+free_work:
+ kfree(vxlan_work);
}
static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
kfree(vxlan);
}
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
{
- if (!mlx5e_vxlan_lookup_port(priv, port))
- return;
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
__mlx5e_vxlan_core_del_port(priv, port);
+
+ kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+ u16 port, int add)
+{
+ struct mlx5e_vxlan_work *vxlan_work;
+
+ vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+ if (!vxlan_work)
+ return;
+
+ if (add)
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+ else
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+ vxlan_work->priv = priv;
+ vxlan_work->port = port;
+ vxlan_work->sa_family = sa_family;
+ queue_work(priv->wq, &vxlan_work->work);
}
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
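
mlx5e_vxlan_queue_work() exists because the VXLAN port add/del callbacks may be invoked in atomic context (hence the GFP_ATOMIC allocation above), while the firmware commands that add or remove a UDP port must sleep. The request is therefore packaged into a small work item and handed to the driver's workqueue. A hedged sketch of that hand-off, reusing the hypothetical example_priv layout from the earlier sketch:

struct example_port_work {
        struct work_struct work;
        struct example_priv *priv;
        u16 port;
};

static void example_add_port_work(struct work_struct *work)
{
        /* ... sleeping firmware command to add the port ... */
        kfree(container_of(work, struct example_port_work, work));
}

static void example_del_port_work(struct work_struct *work)
{
        /* ... sleeping firmware command to delete the port ... */
        kfree(container_of(work, struct example_port_work, work));
}

static void example_queue_port_work(struct example_priv *priv, u16 port,
                                    bool add)
{
        struct example_port_work *pw = kmalloc(sizeof(*pw), GFP_ATOMIC);

        if (!pw)
                return;         /* best effort: drop the request */

        INIT_WORK(&pw->work, add ? example_add_port_work
                                 : example_del_port_work);
        pw->priv = priv;
        pw->port = port;
        queue_work(priv->wq, &pw->work);
}
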
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index a01685056ab1..129f3527aa14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
u16 udp_port;
};
+struct mlx5e_vxlan_work {
+ struct work_struct work;
+ struct mlx5e_priv *priv;
+ sa_family_t sa_family;
+ u16 port;
+};
+
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
}
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+ u16 port, int add);
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 270c9eeb7ab6..6d1a956e3f77 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
- local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
+ local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
/* Lock the slice to prevent the busy_poll handler from
* accessing it. Later when we bring the NIC up, myri10ge_open
* resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
pr_info("Slice %d locked\n", i);
mdelay(1);
}
+ local_bh_enable();
}
- local_bh_enable();
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 98d33d462c6c..1681084cc96f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
return 0;
}
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ return -EOPNOTSUPP;
+
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
nic_data->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
bool replacing)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 flags = spec->flags;
memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+ /* Remove RSS flag if we don't have an RSS context. */
+ if (flags & EFX_FILTER_FLAG_RX_RSS &&
+ spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+ nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
if (replacing) {
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
0 : spec->dmaq_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
- (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ (flags & EFX_FILTER_FLAG_RX_RSS) ?
MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ if (flags & EFX_FILTER_FLAG_RX_RSS)
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
spec->rss_context !=
EFX_FILTER_RSS_CONTEXT_DEFAULT ?
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 0fa75a86b1c1..68577ee2e64a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,7 +367,6 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct device_node *phy_node;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
struct device *dev;
@@ -1142,25 +1141,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
- if (priv->phy_node)
- slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+ if (slave->data->phy_node) {
+ slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
&cpsw_adjust_link, 0, slave->data->phy_if);
- else
+ if (!slave->phy) {
+ dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+ slave->data->phy_node->full_name,
+ slave->slave_num);
+ return;
+ }
+ } else {
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
- if (IS_ERR(slave->phy)) {
- dev_err(priv->dev, "phy %s not found on slave %d\n",
- slave->data->phy_id, slave->slave_num);
- slave->phy = NULL;
- } else {
- phy_attached_info(slave->phy);
+ if (IS_ERR(slave->phy)) {
+ dev_err(priv->dev,
+ "phy \"%s\" not found on slave %d, err %ld\n",
+ slave->data->phy_id, slave->slave_num,
+ PTR_ERR(slave->phy));
+ slave->phy = NULL;
+ return;
+ }
+ }
- phy_start(slave->phy);
+ phy_attached_info(slave->phy);
- /* Configure GMII_SEL register */
- cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
- slave->slave_num);
- }
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1932,12 +1940,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->port_vlan = data->dual_emac_res_vlan;
}
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
- struct cpsw_platform_data *data = &priv->data;
int i = 0, ret;
u32 prop;
@@ -2025,25 +2032,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
if (strcmp(slave_node->name, "slave"))
continue;
- priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+ slave_data->phy_node = of_parse_phandle(slave_node,
+ "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
- if (of_phy_is_fixed_link(slave_node)) {
- struct device_node *phy_node;
- struct phy_device *phy_dev;
-
+ if (slave_data->phy_node) {
+ dev_dbg(&pdev->dev,
+ "slave[%d] using phy-handle=\"%s\"\n",
+ i, slave_data->phy_node->full_name);
+ } else if (of_phy_is_fixed_link(slave_node)) {
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node);
if (ret)
return ret;
- phy_node = of_node_get(slave_node);
- phy_dev = of_phy_find_device(phy_node);
- if (!phy_dev)
- return -ENODEV;
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, phy_dev->mdio.bus->id,
- phy_dev->mdio.addr);
+ slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
u32 phyid;
struct device_node *mdio_node;
@@ -2064,7 +2067,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
} else {
- dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+ dev_err(&pdev->dev,
+ "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+ i);
goto no_phy_slave;
}
slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2266,7 +2271,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(priv, pdev)) {
+ if (cpsw_probe_dt(&priv->data, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 442a7038e660..e50afd1b2eda 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
#include <linux/phy.h>
struct cpsw_slave_data {
+ struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
int phy_if;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 58d58f002559..f56d66e6ec15 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
/* TODO: Add phy read and write and private statistics get feature */
- return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ if (priv->phydev)
+ return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ else
+ return -EOPNOTSUPP;
}
static int match_first_device(struct device *dev, void *data)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 13214a6492ac..743b18266a7c 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
continue;
/* copy hw scan info */
- memcpy(target->hwinfo, scan_info, scan_info->size);
+ memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
target->essid_len = strnlen(scan_info->essid,
sizeof(scan_info->essid));
target->rate_len = 0;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index b3ffaee30858..f279a897a5c7 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* in the FIFO. In such cases, the FIFO enters an error mode it
* cannot recover from by software.
*/
- if (phydev->drv->phy_id == ATH8030_PHY_ID) {
- if (phydev->state == PHY_NOLINK) {
- if (priv->gpiod_reset && !priv->phy_reset) {
- struct at803x_context context;
-
- at803x_context_save(phydev, &context);
-
- gpiod_set_value(priv->gpiod_reset, 1);
- msleep(1);
- gpiod_set_value(priv->gpiod_reset, 0);
- msleep(1);
-
- at803x_context_restore(phydev, &context);
-
- phydev_dbg(phydev, "%s(): phy was reset\n",
- __func__);
- priv->phy_reset = true;
- }
- } else {
- priv->phy_reset = false;
+ if (phydev->state == PHY_NOLINK) {
+ if (priv->gpiod_reset && !priv->phy_reset) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ gpiod_set_value(priv->gpiod_reset, 1);
+ msleep(1);
+ gpiod_set_value(priv->gpiod_reset, 0);
+ msleep(1);
+
+ at803x_context_restore(phydev, &context);
+
+ phydev_dbg(phydev, "%s(): phy was reset\n",
+ __func__);
+ priv->phy_reset = true;
}
+ } else {
+ priv->phy_reset = false;
}
}
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
.phy_id_mask = 0xffffffef,
.probe = at803x_probe,
.config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
.phy_id_mask = 0xffffffef,
.probe = at803x_probe,
.config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20890ee03f3..f64778ad9753 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -269,6 +269,7 @@ struct skb_data { /* skb->cb is one of these */
struct lan78xx_net *dev;
enum skb_state state;
size_t length;
+ int num_of_packet;
};
struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
- /* nothing to do */
+ struct phy_device *phydev = net->phydev;
+ int ret, temp;
+
+ /* In forced 100 F/H mode, the chip may fail to set the mode correctly
+ * when the cable is switched between a long (~50+ m) and a short one.
+ * As a workaround, set the speed to 10 before setting it to 100
+ * while in forced 100 F/H mode.
+ */
+ if (!phydev->autoneg && (phydev->speed == 100)) {
+ /* disable phy interrupt */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+ ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+ temp |= BMCR_SPEED100;
+ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+ /* clear any pending interrupt generated during the workaround */
+ temp = phy_read(phydev, LAN88XX_INT_STS);
+
+ /* enable phy interrupt back */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+ ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ }
}
static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
struct lan78xx_net *dev = entry->dev;
if (urb->status == 0) {
- dev->net->stats.tx_packets++;
+ dev->net->stats.tx_packets += entry->num_of_packet;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
return;
}
- skb->protocol = eth_type_trans(skb, dev->net);
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev->net);
+
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
skb_totallen = 0;
pkt_cnt = 0;
+ count = 0;
+ length = 0;
for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
if (skb_is_gso(skb)) {
if (pkt_cnt) {
/* handle previous packets first */
break;
}
- length = skb->len;
+ count = 1;
+ length = skb->len - TX_OVERHEAD;
skb2 = skb_dequeue(tqp);
goto gso_skb;
}
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
for (count = pos = 0; count < pkt_cnt; count++) {
skb2 = skb_dequeue(tqp);
if (skb2) {
+ length += (skb2->len - TX_OVERHEAD);
memcpy(skb->data + pos, skb2->data, skb2->len);
pos += roundup(skb2->len, sizeof(u32));
dev_kfree_skb(skb2);
}
}
- length = skb_totallen;
-
gso_skb:
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
entry->urb = urb;
entry->dev = dev;
entry->length = length;
+ entry->num_of_packet = count;
spin_lock_irqsave(&dev->txq.lock, flags);
ret = usb_autopm_get_interface_async(dev->intf);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f84080215915..82129eef7774 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
int ret;
read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
- data[0] = 0xc9;
+ data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
data[1] |= 0x20; /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
pkt_len = buf[count - 3] << 8;
pkt_len += buf[count - 4];
pkt_len &= 0xfff;
- pkt_len -= 8;
+ pkt_len -= 4;
}
/*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
goon:
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
- pegasus->rx_skb->data, PEGASUS_MTU + 8,
+ pegasus->rx_skb->data, PEGASUS_MTU,
read_bulk_callback, pegasus);
rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
}
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
- pegasus->rx_skb->data, PEGASUS_MTU + 8,
+ pegasus->rx_skb->data, PEGASUS_MTU,
read_bulk_callback, pegasus);
try_again:
status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
- pegasus->rx_skb->data, PEGASUS_MTU + 8,
+ pegasus->rx_skb->data, PEGASUS_MTU,
read_bulk_callback, pegasus);
if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
if (res == -ENODEV)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 30033dbe6662..c369db99c005 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -29,6 +29,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/of_net.h>
#include "smsc75xx.h"
#define SMSC_CHIPNAME "smsc75xx"
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
+ const u8 *mac_addr;
+
+ /* maybe the boot loader passed the MAC address in devicetree */
+ mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ return;
+ }
+
/* try reading mac address from EEPROM */
if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
dev->net->dev_addr) == 0) {
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
}
}
- /* no eeprom, or eeprom values are invalid. generate random MAC */
+ /* no useful static MAC address found. generate a random one */
eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 66b3ab9f614e..2edc2bc6d1b9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -29,6 +29,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/of_net.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
+ const u8 *mac_addr;
+
+ /* maybe the boot loader passed the MAC address in devicetree */
+ mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ return;
+ }
+
/* try reading mac address from EEPROM */
if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
dev->net->dev_addr) == 0) {
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
}
}
- /* no eeprom, or eeprom values are invalid. generate random MAC */
+ /* no useful static MAC address found. generate a random one */
eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
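
Both SMSC drivers now share the same MAC address selection order: a device-tree provided address wins, then a valid EEPROM address, then a random locally administered one. A short sketch of that fallback, where example_read_eeprom_mac() stands in for the driver-specific EEPROM reader:

static void example_init_mac_address(struct usbnet *dev)
{
        const u8 *mac = of_get_mac_address(dev->udev->dev.of_node);

        if (mac) {
                memcpy(dev->net->dev_addr, mac, ETH_ALEN);      /* DT wins */
                return;
        }

        if (example_read_eeprom_mac(dev, dev->net->dev_addr) == 0 &&
            is_valid_ether_addr(dev->net->dev_addr))
                return;                                         /* EEPROM next */

        eth_hw_addr_random(dev->net);                           /* last resort */
}
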
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8f8793004b9f..1b271b99c49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int tmp, new;
int i;
- int8_t mask_m[123];
- int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index db6624527d99..53d7445a5d12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
int i;
struct chan_centers centers;
- int8_t mask_m[123];
- int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index a9212a12f4da..2d20556ce22d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -89,7 +89,7 @@
#define IWL8260_SMEM_OFFSET 0x400000
#define IWL8260_SMEM_LEN 0x68000
-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
#define IWL8000_MODULE_FIRMWARE(api) \
IWL8000_FW_PRE "-" __stringify(api) ".ucode"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4f495d9153a6..ff18b0658677 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -240,19 +240,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
name_pre, tag);
- /*
- * Starting 8000B - FW name format has changed. This overwrites the
- * previous name and uses the new format.
- */
- if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
- if (rev_step != 'A')
- snprintf(drv->firmware_name,
- sizeof(drv->firmware_name), "%s%c-%s.ucode",
- name_pre, rev_step, tag);
- }
-
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
? "EXPERIMENTAL " : "",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index cbb5947b3fab..e25171f9b407 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -609,7 +609,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Make room for fw's virtual image pages, if it exists */
- if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+ mvm->fw_paging_db[0].fw_paging_block)
file_len += mvm->num_of_paging_blk *
(sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -750,7 +751,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Dump fw's virtual image */
- if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+ mvm->fw_paging_db[0].fw_paging_block) {
for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 9e97cf4ff1c5..b70f4530f960 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -149,9 +149,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
__free_pages(mvm->fw_paging_db[i].fw_paging_block,
get_order(mvm->fw_paging_db[i].fw_paging_size));
+ mvm->fw_paging_db[i].fw_paging_block = NULL;
}
kfree(mvm->trans->paging_download_buf);
mvm->trans->paging_download_buf = NULL;
+ mvm->trans->paging_db = NULL;
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
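The fw.c hunk above clears fw_paging_block (and trans->paging_db) after freeing, so the error-dump paths in fw-dbg.c can test the pointer instead of touching freed memory. Below is a minimal userspace sketch of that free-and-clear pattern; it is illustrative only, and the struct and function names are made up, not the driver's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct paging_block {
        unsigned char *mem;
        size_t size;
};

static void free_paging(struct paging_block *pb)
{
        free(pb->mem);
        pb->mem = NULL;         /* later checks see "no paging memory" */
        pb->size = 0;
}

static void dump_paging(const struct paging_block *pb)
{
        if (!pb->mem) {         /* same kind of test the dump path adds */
                printf("no paging memory, skipping dump\n");
                return;
        }
        printf("dumping %zu bytes\n", pb->size);
}

int main(void)
{
        struct paging_block pb = { .mem = malloc(4096), .size = 4096 };

        if (!pb.mem)
                return 1;
        memset(pb.mem, 0, pb.size);
        dump_paging(&pb);
        free_paging(&pb);
        dump_paging(&pb);       /* safely skipped after free */
        free_paging(&pb);       /* harmless: free(NULL) is a no-op */
        return 0;
}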
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 41c6dd5b9ccc..de42066fa49b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
-#define HCI_HOTKEY_ENABLE 0x01
+#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5d4d91846357..96168b819044 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2669,9 +2669,9 @@ static int __init mport_init(void)
/* Create device class needed by udev */
dev_class = class_create(THIS_MODULE, DRV_NAME);
- if (!dev_class) {
+ if (IS_ERR(dev_class)) {
rmcd_error("Unable to create " DRV_NAME " class");
- return -EINVAL;
+ return PTR_ERR(dev_class);
}
ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
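class_create() reports failure through an error-encoding pointer rather than NULL, which is why the hunk above switches to IS_ERR()/PTR_ERR(). The standalone sketch below mimics that convention in plain C; the helpers are reimplemented here purely for illustration and are not the kernel's definitions.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        /* error pointers live in the top MAX_ERRNO bytes of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a constructor that reports failure via ERR_PTR(). */
static void *create_class(int fail)
{
        static int dummy;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

int main(void)
{
        void *cls = create_class(1);

        if (IS_ERR(cls)) {
                printf("create failed: %ld\n", PTR_ERR(cls));   /* -12 */
                return 1;
        }
        return 0;
}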
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
+ unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+ copied = PAGE_SIZE -
+ copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+ if (offsetof(struct sccb_header, length) +
+ sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
- if (sccb->length > PAGE_SIZE || sccb->length < 8)
- return -EINVAL;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
- rc = -EFAULT;
+ if (sccb->length < 8) {
+ rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
static int vpfe_update_pipe_state(struct vpfe_video_device *video)
{
struct vpfe_pipeline *pipe = &video->pipe;
+ int ret;
- if (vpfe_prepare_pipeline(video))
- return vpfe_prepare_pipeline(video);
+ ret = vpfe_prepare_pipeline(video);
+ if (ret)
+ return ret;
/*
* Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
*/
if (pipe->input_num == 0) {
pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
- if (vpfe_update_current_ext_subdev(video)) {
+ ret = vpfe_update_current_ext_subdev(video);
+ if (ret) {
pr_err("Invalid external subdev\n");
- return vpfe_update_current_ext_subdev(video);
+ return ret;
}
} else {
pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
struct v4l2_subdev *subdev;
struct v4l2_format format;
struct media_pad *remote;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
sd_fmt.pad = remote->index;
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
/* get output format of remote subdev */
- if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote subdev for video node\n");
- return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ return ret;
}
/* convert to pix format */
mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
/* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
return -EBUSY;
}
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
video->fmt = *fmt;
return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
/*
* If streaming is started return device busy
* error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
/* Call decoder driver function to set the standard */
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
sdinfo = video->current_ext_subdev;
/* If streaming is started, return device busy error */
if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
if (video->io_usrs != 0) {
v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- if (vb2_queue_init(q)) {
+ ret = vb2_queue_init(q);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
- return vb2_queue_init(q);
+ return ret;
}
fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
vpfe_stop_capture(video);
ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <rdma/ib.h>
+
#include "hfi.h"
#include "pio.h"
#include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
int uctxt_required = 1;
int must_be_root = 0;
+ /* FIXME: This interface cannot continue out of staging */
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
+
+ hfi1_user_exp_rcv_free(fdata);
+ hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
uctxt->rcvwait_to = 0;
uctxt->piowait_to = 0;
uctxt->rcvnowait = 0;
uctxt->pionowait = 0;
uctxt->event_flags = 0;
- hfi1_user_exp_rcv_free(fdata);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
hfi1_stats.sps_ctxts--;
if (++dd->freectxts == dd->num_user_contexts)
aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
static int user_init(struct file *fp)
{
- int ret;
unsigned int rcvctrl_ops = 0;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
/* make sure that the context has already been setup */
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
- ret = -EFAULT;
- goto done;
- }
-
- /*
- * Subctxts don't need to initialize anything since master
- * has done it.
- */
- if (fd->subctxt) {
- ret = wait_event_interruptible(uctxt->wait, !test_bit(
- HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- goto expected;
- }
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return -EFAULT;
/* initialize poll variables... */
uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
wake_up(&uctxt->wait);
}
-expected:
- /*
- * Expected receive has to be setup for all processes (including
- * shared contexts). However, it has to be done after the master
- * context has been fully configured as it depends on the
- * eager/expected split of the RcvArray entries.
- * Setting it up here ensures that the subcontexts will be waiting
- * (due to the above wait_event_interruptible() until the master
- * is setup.
- */
- ret = hfi1_user_exp_rcv_init(fp);
-done:
- return ret;
+ return 0;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
int ret = 0;
/*
- * Context should be set up only once (including allocation and
+ * Context should be set up only once, including allocation and
* programming of eager buffers. This is done if context sharing
* is not requested or by the master process.
*/
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
if (ret)
goto done;
}
+ } else {
+ ret = wait_event_interruptible(uctxt->wait, !test_bit(
+ HFI1_CTXT_MASTER_UNINIT,
+ &uctxt->event_flags));
+ if (ret)
+ goto done;
}
+
ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
if (ret)
goto done;
+ /*
+ * Expected receive has to be setup for all processes (including
+ * shared contexts). However, it has to be done after the master
+ * context has been fully configured as it depends on the
+ * eager/expected split of the RcvArray entries.
+ * Setting it up here ensures that the subcontexts will be waiting
+ * (due to the above wait_event_interruptible()) until the master
+ * is set up.
+ */
+ ret = hfi1_user_exp_rcv_init(fp);
+ if (ret)
+ goto done;
set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
struct hfi1_devdata *dd = filp->private_data;
- switch (whence) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += filp->f_pos;
- break;
- case SEEK_END:
- offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
- offset;
- break;
- default:
- return -EINVAL;
- }
-
- if (offset < 0)
- return -EINVAL;
-
- if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
- return -EINVAL;
-
- filp->f_pos = offset;
-
- return filp->f_pos;
+ return fixed_size_llseek(filp, offset, whence,
+ (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}
/* NOTE: assumes unsigned long is 8 bytes */
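fixed_size_llseek() collapses the open-coded SEEK_SET/SEEK_CUR/SEEK_END handling into one helper that resolves the offset and clamps it to a fixed region size. The userspace approximation below is only a sketch of that behaviour, not the kernel helper itself.

#include <stdio.h>

enum { MY_SEEK_SET, MY_SEEK_CUR, MY_SEEK_END };

static long long fixed_size_seek(long long *pos, long long offset,
                                 int whence, long long size)
{
        long long new_pos;

        switch (whence) {
        case MY_SEEK_SET:
                new_pos = offset;
                break;
        case MY_SEEK_CUR:
                new_pos = *pos + offset;
                break;
        case MY_SEEK_END:
                new_pos = size + offset;
                break;
        default:
                return -1;      /* -EINVAL in the kernel */
        }

        if (new_pos < 0 || new_pos > size)
                return -1;

        *pos = new_pos;
        return new_pos;
}

int main(void)
{
        long long pos = 0, size = 4096;

        printf("%lld\n", fixed_size_seek(&pos, 100, MY_SEEK_SET, size));  /* 100 */
        printf("%lld\n", fixed_size_seek(&pos, -20, MY_SEEK_CUR, size));  /* 80 */
        printf("%lld\n", fixed_size_seek(&pos, -96, MY_SEEK_END, size));  /* 4000 */
        printf("%lld\n", fixed_size_seek(&pos, 1, MY_SEEK_END, size));    /* -1 */
        return 0;
}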
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+ struct mm_struct *,
unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
rbnode = rb_entry(node, struct mmu_rb_node, node);
rb_erase(node, root);
if (handler->ops->remove)
- handler->ops->remove(root, rbnode, false);
+ handler->ops->remove(root, rbnode, NULL);
}
}
@@ -176,7 +177,7 @@ unlock:
return ret;
}
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
unsigned long addr,
unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}
+/* Caller must *not* hold handler lock. */
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, bool arg)
+ struct mmu_rb_node *node, struct mm_struct *mm)
{
+ unsigned long flags;
+
/* Validity of handler and node pointers has been checked by caller. */
hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
node->len);
+ spin_lock_irqsave(&handler->lock, flags);
__mmu_int_rb_remove(node, handler->root);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
if (handler->ops->remove)
- handler->ops->remove(handler->root, node, arg);
+ handler->ops->remove(handler->root, node, mm);
}
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
struct mmu_rb_handler *handler = find_mmu_handler(root);
- unsigned long flags;
if (!handler || !node)
return;
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_rb_remove(handler, node, false);
- spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, NULL);
}
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
static inline void mmu_notifier_page(struct mmu_notifier *mn,
struct mm_struct *mm, unsigned long addr)
{
- mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+ mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- mmu_notifier_mem_invalidate(mn, start, end);
+ mmu_notifier_mem_invalidate(mn, mm, start, end);
}
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+ struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
struct rb_root *root = handler->root;
- struct mmu_rb_node *node;
+ struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
spin_lock_irqsave(&handler->lock, flags);
- for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
- node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+ for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+ node; node = ptr) {
+ /* Guard against node removal. */
+ ptr = __mmu_int_rb_iter_next(node, start, end - 1);
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
- if (handler->ops->invalidate(root, node))
- __mmu_rb_remove(handler, node, true);
+ if (handler->ops->invalidate(root, node)) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, mm);
+ spin_lock_irqsave(&handler->lock, flags);
+ }
}
spin_unlock_irqrestore(&handler->lock, flags);
}
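The invalidate loop above has to drop the handler lock around the remove callback, so it saves the next interval-tree node before the current one can disappear. The same fetch-next-before-remove idea is shown below on a plain singly linked list; it is a userspace sketch with invented names, not the hfi1 code.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int addr;
        struct node *next;
};

static void remove_node(struct node **head, struct node *victim)
{
        struct node **pp;

        for (pp = head; *pp; pp = &(*pp)->next) {
                if (*pp == victim) {
                        *pp = victim->next;
                        free(victim);
                        return;
                }
        }
}

int main(void)
{
        struct node *head = NULL, *n, *next;
        int i;

        for (i = 4; i >= 0; i--) {
                n = malloc(sizeof(*n));
                n->addr = i;
                n->next = head;
                head = n;
        }

        /* Invalidate every even address; grab "next" before removal. */
        for (n = head; n; n = next) {
                next = n->next;
                if (n->addr % 2 == 0)
                        remove_node(&head, n);
        }

        for (n = head; n; n = n->next)
                printf("kept %d\n", n->addr);   /* 1, 3 */

        while (head) {
                n = head->next;
                free(head);
                head = n;
        }
        return 0;
}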
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
struct mmu_rb_ops {
bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+ void (*remove)(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
};
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
* do the flush work until that QP's
* sdma work has finished.
*/
+ spin_lock(&qp->s_lock);
if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
+ spin_unlock(&qp->s_lock);
}
/**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
struct tid_group *, struct page **, unsigned);
static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_group *grp, *gptr;
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return 0;
/*
* The notifier would have been removed when the process'es mm
* was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+ mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
continue;
if (HFI1_CAP_IS_USET(TID_UNMAP))
mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, false);
+ &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root,
&node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
}
static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- bool notifier)
+ struct mm_struct *mm)
{
struct hfi1_filedata *fdata =
container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+ unsigned);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node)
+ if (rb_node && !IS_ERR(rb_node))
node = container_of(rb_node, struct sdma_mmu_node, rb);
+ else
+ rb_node = NULL;
if (!node) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, pinned);
+ unpin_vector_pages(current->mm, pages, node->npages,
+ pinned);
ret = -EFAULT;
goto bail;
}
@@ -1147,9 +1152,9 @@ bail:
}
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned npages)
+ unsigned start, unsigned npages)
{
- hfi1_release_user_pages(mm, pages, npages, 0);
+ hfi1_release_user_pages(mm, pages + start, npages, 0);
kfree(pages);
}
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
&req->pq->sdma_rb_root,
(unsigned long)req->iovs[i].iov.iov_base,
req->iovs[i].iov.iov_len);
- if (!mnode)
+ if (!mnode || IS_ERR(mnode))
continue;
node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
}
static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- bool notifier)
+ struct mm_struct *mm)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
node->pq->n_locked -= node->npages;
spin_unlock(&node->pq->evict_lock);
- unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+ /*
+ * If mm is set, we are being called by the MMU notifier and we
+ * should not pass a mm_struct to unpin_vector_page(). This is to
+ * prevent a deadlock when hfi1_release_user_pages() attempts to
+ * take the mmap_sem, which the MMU notifier has already taken.
+ */
+ unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
node->npages);
/*
* If called by the MMU notifier, we have to adjust the pinned
* page count ourselves.
*/
- if (notifier)
- current->mm->pinned_vm -= node->npages;
+ if (mm)
+ mm->pinned_vm -= node->npages;
kfree(node);
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
* Every step equals (1 * 200) / 255 celsius, and finally
* need convert to millicelsius.
*/
- return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+ return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
}
static inline long _temp_to_step(long temp)
{
- return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+ return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
}
static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
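The hisi_thermal change moves the divide into millicelsius so sub-degree steps are not truncated away before the final scaling. The small arithmetic check below compares the two formulas; TEMP_BASE is an illustrative constant for the sketch, not the driver's HISI_TEMP_BASE.

#include <stdio.h>

#define TEMP_BASE       (-60)   /* degrees C, illustrative only */

static int step_to_mc_old(int step)
{
        return (TEMP_BASE + (step * 200 / 255)) * 1000;
}

static int step_to_mc_new(int step)
{
        return TEMP_BASE * 1000 + (step * 200000 / 255);
}

int main(void)
{
        int step;

        for (step = 100; step <= 103; step++)
                printf("step %d: old %d mC, new %d mC\n",
                       step, step_to_mc_old(step), step_to_mc_new(step));
        /*
         * step 100: old 18000 mC, new 18431 mC
         * step 101: old 19000 mC, new 19215 mC
         * ...the old formula only moves in 1000 mC increments.
         */
        return 0;
}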
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f1db49625555..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
struct thermal_zone_device *tz = to_thermal_zone(dev); \
\
if (tz->tzp) \
- return sprintf(buf, "%u\n", tz->tzp->name); \
+ return sprintf(buf, "%d\n", tz->tzp->name); \
else \
return -EIO; \
} \
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 541ead4d8965..85b8517f17a0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
- ceph_auth_destroy_authorizer(
- s->s_mdsc->fsc->client->monc.auth,
- s->s_auth.authorizer);
+ ceph_auth_destroy_authorizer(s->s_auth.authorizer);
kfree(s);
}
}
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
struct ceph_auth_handshake *auth = &s->s_auth;
if (force_new && auth->authorizer) {
- ceph_auth_destroy_authorizer(ac, auth->authorizer);
+ ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 719924d6c706..dcad5e210525 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
*nbytesp = nbytes;
- return ret;
+ return ret < 0 ? ret : 0;
}
static inline int fuse_iter_npages(const struct iov_iter *ii_p)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9aed6e202201..13719d3f35f8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&dlm->spinlock);
+ ret = 0;
+
done:
dlm_put(dlm);
return ret;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 229cb546bee0..541583510cfb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return page;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct page *page;
+ int nid;
+
+ if (!pmd_present(pmd))
+ return NULL;
+
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (!page)
+ return NULL;
+
+ if (PageReserved(page))
+ return NULL;
+
+ nid = page_to_nid(page);
+ if (!node_isset(nid, node_states[N_MEMORY]))
+ return NULL;
+
+ return page;
+}
+#endif
+
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *orig_pte;
pte_t *pte;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
- pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats(huge_pte, vma, addr);
+ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
if (page)
- gather_stats(page, md, pte_dirty(huge_pte),
+ gather_stats(page, md, pmd_dirty(*pmd),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(ptl);
return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
+#endif
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index fa92fe839fda..36661acaf33b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -919,14 +919,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
#endif
}
- ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+ ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
if (ret < 0)
goto out_bh;
strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
- ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+ ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
if (ret < 0)
goto out_bh;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 972b70625614..263829ef1873 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int,
uint8_t *, int);
extern int udf_put_filename(struct super_block *, const uint8_t *, int,
uint8_t *, int);
-extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
+extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 3ff42f4437f3..695389a4fc23 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -335,9 +335,21 @@ try_again:
return u_len;
}
-int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
+ const uint8_t *ocu_i, int i_len)
{
- return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+ int s_len = 0;
+
+ if (i_len > 0) {
+ s_len = ocu_i[i_len - 1];
+ if (s_len >= i_len) {
+ pr_err("incorrect dstring lengths (%d/%d)\n",
+ s_len, i_len);
+ return -EINVAL;
+ }
+ }
+
+ return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
udf_uni2char_utf8, 0);
}
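A UDF dstring records the number of bytes actually used in the last byte of its fixed-size field, which is what udf_dstrCS0toUTF8() now validates before converting. The userspace sketch below shows that layout and check; the helper name and buffer contents are invented for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Return the payload length, or -1 if the recorded length is bogus. */
static int dstring_payload_len(const uint8_t *field, int field_len)
{
        int s_len;

        if (field_len <= 0)
                return 0;

        s_len = field[field_len - 1];
        if (s_len >= field_len)
                return -1;      /* -EINVAL in the kernel */
        return s_len;
}

int main(void)
{
        uint8_t vol[32] = { 0 };

        memcpy(vol, "\x08" "LinuxUDF", 9);      /* compression ID + name */
        vol[31] = 9;                            /* recorded length */
        printf("payload = %d bytes\n", dstring_payload_len(vol, sizeof(vol)));

        vol[31] = 40;                           /* corrupted on-disk length */
        printf("payload = %d\n", dstring_payload_len(vol, sizeof(vol)));
        return 0;
}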
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f63afdc43bec..8ee27b8afe81 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -180,12 +180,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
*/
struct ceph_auth_client;
-struct ceph_authorizer;
struct ceph_msg;
+struct ceph_authorizer {
+ void (*destroy)(struct ceph_authorizer *);
+};
+
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
- void (*destroy_authorizer)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
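With the destroy callback moved into struct ceph_authorizer itself, callers can drop an authorizer without holding a pointer to the ceph_auth_client that created it. The plain C sketch below shows that embedded-destructor pattern with made-up names; it is not the ceph code.

#include <stdio.h>
#include <stdlib.h>

struct authorizer {
        void (*destroy)(struct authorizer *);
        char payload[32];
};

static void authorizer_destroy_impl(struct authorizer *a)
{
        printf("destroying authorizer %p\n", (void *)a);
        free(a);
}

/* The factory fills in the destructor; callers never need the factory again. */
static struct authorizer *authorizer_create(void)
{
        struct authorizer *a = calloc(1, sizeof(*a));

        if (a)
                a->destroy = authorizer_destroy_impl;
        return a;
}

static void destroy_authorizer(struct authorizer *a)
{
        a->destroy(a);  /* mirrors the one-argument destroy call above */
}

int main(void)
{
        struct authorizer *a = authorizer_create();

        if (!a)
                return 1;
        destroy_authorizer(a);
        return 0;
}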
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
-struct ceph_authorizer;
/*
* completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
+ void (*post_attach)(void);
int (*can_fork)(struct task_struct *task);
void (*cancel_fork)(struct task_struct *task);
void (*fork)(struct task_struct *task);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
-extern void cpuset_post_attach_flush(void);
-
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 1afde47e1528..79c52fa81cac 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -32,12 +32,28 @@
#error Wordsize not 32 or 64
#endif
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_64;
#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
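The new GOLDEN_RATIO_64 constant feeds a multiplicative (Fibonacci-style) hash: multiply by a fixed odd constant derived from the golden ratio and keep the top bits, where the mixing is strongest. Below is a standalone sketch of a hash_64()-style function built on that constant, for illustration only.

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

static inline uint64_t hash_64(uint64_t val, unsigned int bits)
{
        /* multiply, then take the top "bits" bits of the product */
        return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
        uint64_t key;

        /* Nearby keys spread across an 8-bit (256-bucket) table. */
        for (key = 1000; key < 1005; key++)
                printf("key %llu -> bucket %llu\n",
                       (unsigned long long)key,
                       (unsigned long long)hash_64(key, 8));
        return 0;
}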
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623e24b1..d7b9e5346fba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
}
struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline void put_huge_zero_page(void)
+{
+ BUILD_BUG();
+}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d5569734f672..548fd535fd02 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..d10ef06971b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
- u8 irq_context;
- u8 depth;
- u16 base;
+ /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ unsigned int irq_context : 2,
+ depth : 6,
+ base : 24;
+ /* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 6bd429b53b77..8fecd6d6f814 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -393,6 +393,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a55e5be0894f..864d7221de84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page)
page = compound_head(page);
if (atomic_read(compound_mapcount_ptr(page)) >= 0)
return true;
+ if (PageHuge(page))
+ return false;
for (i = 0; i < hpage_nr_pages(page); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
return true;
@@ -1138,6 +1140,8 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
diff --git a/include/linux/net.h b/include/linux/net.h
index 72c1e0622ce2..9aa49a05fe38 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -245,7 +245,15 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ net_ratelimit()) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f2182594160e..bcf012637d10 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3992,7 +3992,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
/**
* struct vb2_ops - driver-specific callbacks
*
+ * @verify_planes_array: Verify that a given user space structure contains
+ * enough planes for the buffer. This is called
+ * for each dequeued buffer.
* @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
* For V4L2 this is a struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
* the vb2_buffer struct.
*/
struct vb2_buf_ops {
+ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ * has not been called. This is a vb1 idiom that has also been
+ * adopted by vb2.
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
+ unsigned quirk_poll_must_check_waiting_for_buffers:1;
struct mutex *lock;
void *owner;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 673e9f9e6da7..b8803165df91 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -317,7 +317,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
(skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
(skb_inner_mac_header(skb) - skb_transport_header(skb) !=
- sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+ (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
#endif /* _RDMA_IB_H */
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..f5842bcd9c94 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,7 +9,7 @@
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
bool *audio_enabled, char *buffer, int max_bytes);
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
- return 0;
}
static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
int rate)
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index f2ece3c174a5..8f94ca1860cf 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
{
switch (type) {
case BPF_TYPE_PROG:
- atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+ raw = bpf_prog_inc(raw);
break;
case BPF_TYPE_MAP:
- bpf_map_inc(raw, true);
+ raw = bpf_map_inc(raw, true);
break;
default:
WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
goto out;
raw = bpf_any_get(inode->i_private, *type);
- touch_atime(&path);
+ if (!IS_ERR(raw))
+ touch_atime(&path);
path_put(&path);
return raw;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index adc5e4bd74f8..cf5e9f7ad13a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data;
}
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
- atomic_inc(&map->refcnt);
+ if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+ atomic_dec(&map->refcnt);
+ return ERR_PTR(-EBUSY);
+ }
if (uref)
atomic_inc(&map->usercnt);
+ return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
if (IS_ERR(map))
return map;
- bpf_map_inc(map, true);
+ map = bpf_map_inc(map, true);
fdput(f);
return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
return f.file->private_data;
}
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+ atomic_dec(&prog->aux->refcnt);
+ return ERR_PTR(-EBUSY);
+ }
+ return prog;
+}
+
/* called by sockets/tracing/seccomp before attaching program to an event
* pairs with bpf_prog_put()
*/
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
if (IS_ERR(prog))
return prog;
- atomic_inc(&prog->aux->refcnt);
+ prog = bpf_prog_inc(prog);
fdput(f);
return prog;
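bpf_map_inc() and bpf_prog_inc() now refuse to take a reference once BPF_MAX_REFCNT is reached, so user space cannot wrap the counter and provoke a premature free. The userspace sketch below shows the same bounded-refcount idea with C11 atomics; the object and helper names are invented for illustration.

#include <stdio.h>
#include <stdatomic.h>
#include <errno.h>

#define MAX_REFCNT      32768

struct object {
        atomic_int refcnt;
};

/* Returns 0 on success, -EBUSY if the limit would be exceeded. */
static int object_get(struct object *obj)
{
        if (atomic_fetch_add(&obj->refcnt, 1) + 1 > MAX_REFCNT) {
                atomic_fetch_sub(&obj->refcnt, 1);
                return -EBUSY;
        }
        return 0;
}

static void object_put(struct object *obj)
{
        if (atomic_fetch_sub(&obj->refcnt, 1) == 1)
                printf("last reference dropped, object would be freed\n");
}

int main(void)
{
        struct object obj = { .refcnt = 1 };    /* creator holds one ref */
        int i, taken = 0;

        for (i = 0; i < MAX_REFCNT + 10; i++)
                if (object_get(&obj) == 0)
                        taken++;

        printf("granted %d extra references (limit %d)\n", taken, MAX_REFCNT);
        while (taken--)
                object_put(&obj);
        object_put(&obj);
        return 0;
}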
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 56f18068b52b..63554b6d4e25 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -249,16 +249,6 @@ static const char * const reg_type_str[] = {
[CONST_IMM] = "imm",
};
-static const struct {
- int map_type;
- int func_id;
-} func_limit[] = {
- {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
- {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
static void print_verifier_state(struct verifier_env *env)
{
enum bpf_reg_type t;
@@ -943,27 +933,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
- bool bool_map, bool_func;
- int i;
-
if (!map)
return 0;
- for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
- bool_map = (map->map_type == func_limit[i].map_type);
- bool_func = (func_id == func_limit[i].func_id);
- /* only when map & func pair match it can continue.
- * don't allow any other map type to be passed into
- * the special func;
- */
- if (bool_func && bool_map != bool_func) {
- verbose("cannot pass map_type %d into func %d\n",
- map->map_type, func_id);
- return -EINVAL;
- }
+ /* We need a two way check, first is from map perspective ... */
+ switch (map->map_type) {
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ if (func_id != BPF_FUNC_tail_call)
+ goto error;
+ break;
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ if (func_id != BPF_FUNC_perf_event_read &&
+ func_id != BPF_FUNC_perf_event_output)
+ goto error;
+ break;
+ case BPF_MAP_TYPE_STACK_TRACE:
+ if (func_id != BPF_FUNC_get_stackid)
+ goto error;
+ break;
+ default:
+ break;
+ }
+
+ /* ... and second from the function itself. */
+ switch (func_id) {
+ case BPF_FUNC_tail_call:
+ if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+ goto error;
+ break;
+ case BPF_FUNC_perf_event_read:
+ case BPF_FUNC_perf_event_output:
+ if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+ goto error;
+ break;
+ case BPF_FUNC_get_stackid:
+ if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+ goto error;
+ break;
+ default:
+ break;
}
return 0;
+error:
+ verbose("cannot pass map_type %d into func %d\n",
+ map->map_type, func_id);
+ return -EINVAL;
}
static int check_raw_mode(const struct bpf_func_proto *fn)
@@ -2111,15 +2126,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
return -E2BIG;
}
- /* remember this map */
- env->used_maps[env->used_map_cnt++] = map;
-
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
- bpf_map_inc(map, false);
+ map = bpf_map_inc(map, false);
+ if (IS_ERR(map)) {
+ fdput(f);
+ return PTR_ERR(map);
+ }
+ env->used_maps[env->used_map_cnt++] = map;
+
fdput(f);
next_insn:
insn++;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 671dc05c0b0f..909a7d31ffd3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off, bool threadgroup)
{
struct task_struct *tsk;
+ struct cgroup_subsys *ss;
struct cgroup *cgrp;
pid_t pid;
- int ret;
+ int ssid, ret;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
@@ -2875,8 +2876,10 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
cgroup_kn_unlock(of->kn);
- cpuset_post_attach_flush();
return ret ?: nbytes;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 00ab5c2b7c5b..1902956baba1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -58,7 +58,6 @@
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
}
}
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.can_attach = cpuset_can_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
+ .post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.legacy_cftypes = files,
.early_init = true,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9eb23dc27462..0bdc6e7d4908 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
if (ret || !write)
return ret;
- if (sysctl_perf_cpu_time_max_percent == 100) {
+ if (sysctl_perf_cpu_time_max_percent == 100 ||
+ sysctl_perf_cpu_time_max_percent == 0) {
printk(KERN_WARNING
"perf: Dynamic interrupt throttling disabled, can hang your system!\n");
WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
* function.
*
* Lock order:
+ * cred_guard_mutex
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
@@ -3420,7 +3422,6 @@ static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
- int err;
rcu_read_lock();
if (!vpid)
@@ -3434,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
if (!task)
return ERR_PTR(-ESRCH);
- /* Reuse ptrace permission checks for now. */
- err = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
- goto errout;
-
return task;
-errout:
- put_task_struct(task);
- return ERR_PTR(err);
-
}
/*
@@ -8446,6 +8438,24 @@ SYSCALL_DEFINE5(perf_event_open,
get_online_cpus();
+ if (task) {
+ err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+ if (err)
+ goto err_cpus;
+
+ /*
+ * Reuse ptrace permission checks for now.
+ *
+ * We must hold cred_guard_mutex across this and any potential
+ * perf_install_in_context() call for this new event to
+ * serialize against exec() altering our credentials (and the
+ * perf_event_exit_task() that could imply).
+ */
+ err = -EACCES;
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ goto err_cred;
+ }
+
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
@@ -8453,7 +8463,7 @@ SYSCALL_DEFINE5(perf_event_open,
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
- goto err_cpus;
+ goto err_cred;
}
if (is_sampling_event(event)) {
@@ -8512,11 +8522,6 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context;
}
- if (task) {
- put_task_struct(task);
- task = NULL;
- }
-
/*
* Look up the group leader (we will attach this event to it):
*/
@@ -8614,6 +8619,11 @@ SYSCALL_DEFINE5(perf_event_open,
WARN_ON_ONCE(ctx->parent_ctx);
+ /*
+	 * This is the point of no return; we cannot fail hereafter. This is
+ * where we start modifying current state.
+ */
+
if (move_group) {
/*
* See perf_event_ctx_lock() for comments on the details
@@ -8685,6 +8695,11 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_unlock(&gctx->mutex);
mutex_unlock(&ctx->mutex);
+ if (task) {
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ put_task_struct(task);
+ }
+
put_online_cpus();
mutex_lock(&current->perf_event_mutex);
@@ -8717,6 +8732,9 @@ err_alloc:
*/
if (!event_file)
free_event(event);
+err_cred:
+ if (task)
+ mutex_unlock(&task->signal->cred_guard_mutex);
err_cpus:
put_online_cpus();
err_task:
@@ -9001,6 +9019,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
/*
* When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
*/
void perf_event_exit_task(struct task_struct *child)
{
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3efbee0834a8..a02f2dddd1d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,5 +1,6 @@
#define pr_fmt(fmt) "kcov: " fmt
+#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
@@ -43,7 +44,7 @@ struct kcov {
* Entry point from instrumented code.
* This is called once per basic-block/edge.
*/
-void __sanitizer_cov_trace_pc(void)
+void notrace __sanitizer_cov_trace_pc(void)
{
struct task_struct *t;
enum kcov_mode mode;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8d34308ea449..1391d3ee3b86 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(page, lru);
VMCOREINFO_OFFSET(page, _mapcount);
VMCOREINFO_OFFSET(page, private);
+ VMCOREINFO_OFFSET(page, compound_dtor);
+ VMCOREINFO_OFFSET(page, compound_order);
+ VMCOREINFO_OFFSET(page, compound_head);
VMCOREINFO_OFFSET(pglist_data, node_zones);
VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void)
#ifdef CONFIG_X86
VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
#endif
-#ifdef CONFIG_HUGETLBFS
- VMCOREINFO_SYMBOL(free_huge_page);
+#ifdef CONFIG_HUGETLB_PAGE
+ VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
#endif
arch_crash_save_vmcoreinfo();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ed9410936a22..78c1c0ee6dc1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2176,15 +2176,37 @@ cache_hit:
chain->irq_context = hlock->irq_context;
i = get_first_held_lock(curr, hlock);
chain->depth = curr->lockdep_depth + 1 - i;
+
+ BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+ BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
+ BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
+
if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = nr_chain_hlocks;
- nr_chain_hlocks += chain->depth;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx - 1;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
}
+
+ if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+ nr_chain_hlocks += chain->depth;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+ /*
+ * Important for check_no_collision().
+ */
+ if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ if (debug_locks_off_graph_unlock())
+ return 0;
+
+ print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+ dump_stack();
+ return 0;
+ }
+#endif
+
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
@@ -2932,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
return 1;
}
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
static int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
@@ -2940,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr,
/*
* Keep track of points where we cross into an interrupt context:
*/
- hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
- curr->softirq_context;
if (depth) {
struct held_lock *prev_hlock;
@@ -2973,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr,
return 1;
}
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 0;
+}
+
static inline int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
@@ -3241,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
+ hlock->irq_context = task_irq_context(curr);
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
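
The task_irq_context() helper added above packs the acquiring task's interrupt
state into the two low bits of hlock->irq_context: bit 1 is set when in hardirq
context, bit 0 when in softirq context. A minimal stand-alone sketch of that
encoding (hypothetical helper, not part of the patch):

	/* 0 = process context, 1 = softirq, 2 = hardirq, 3 = hardirq nested in softirq */
	static inline unsigned int irq_context_sketch(int in_hardirq, int in_softirq)
	{
		return 2 * !!in_hardirq + !!in_softirq;
	}

When irq-state tracking is compiled out, the stub variant in the hunk above
always returns 0, so every lock is treated as taken in a single context.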
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dbb61a302548..a0f61effad25 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v)
int i;
if (v == SEQ_START_TOKEN) {
+ if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
+ seq_printf(m, "(buggered) ");
seq_printf(m, "all lock chains:\n");
return 0;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+ /*
+ * The following mb guarantees that previous clear of a PENDING bit
+ * will not be reordered with any speculative LOADS or STORES from
+ * work->current_func, which is executed afterwards. This possible
+	 * reordering can lead to a missed execution on an attempt to queue
+ * the same @work. E.g. consider this case:
+ *
+ * CPU#0 CPU#1
+ * ---------------------------- --------------------------------
+ *
+ * 1 STORE event_indicated
+ * 2 queue_work_on() {
+ * 3 test_and_set_bit(PENDING)
+ * 4 } set_..._and_clear_pending() {
+ * 5 set_work_data() # clear bit
+ * 6 smp_mb()
+ * 7 work->current_func() {
+ * 8 LOAD event_indicated
+ * }
+ *
+ * Without an explicit full barrier speculative LOAD on line 8 can
+ * be executed before CPU#0 does STORE on line 1. If that happens,
+ * CPU#0 observes the PENDING bit is still set and new execution of
+	 * a @work is not queued, in the hope that CPU#1 will eventually
+ * finish the queued @work. Meanwhile CPU#1 does not see
+ * event_indicated is set, because speculative LOAD was executed
+ * before actual STORE.
+ */
+ smp_mb();
}
static void clear_work_data(struct work_struct *work)
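
The smp_mb() added above pairs with the full barrier implied by the atomic
test_and_set_bit(PENDING) on the queueing side. A rough user-space analogue of
the two sides, written with C11 atomics as a sketch (hypothetical names, not
the kernel primitives):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool pending;
	static atomic_int  event_indicated;

	/* CPU#0: publish the event, then try to queue; the RMW is a full barrier */
	static bool queue_work_sketch(void)
	{
		atomic_store(&event_indicated, 1);
		return !atomic_exchange(&pending, true); /* queued only if PENDING was clear */
	}

	/* CPU#1: clear PENDING, fence, then run the work function */
	static void run_work_sketch(void (*current_func)(void))
	{
		atomic_store(&pending, false);
		atomic_thread_fence(memory_order_seq_cst); /* analogue of the smp_mb() above */
		current_func();                            /* may load event_indicated */
	}

Without the fence on CPU#1, the load of event_indicated inside current_func()
could be satisfied early while CPU#0 still observes PENDING set and skips the
re-queue, which is exactly the missed-execution scenario the comment describes.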
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d87e83a..9e0b0315a724 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -210,10 +210,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
goto fast_exit;
hash = hash_stack(trace->entries, trace->nr_entries);
- /* Bad luck, we won't store this stack. */
- if (hash == 0)
- goto exit;
-
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b82f8e..df67b53ae3c5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
return READ_ONCE(huge_zero_page);
}
-static void put_huge_zero_page(void)
+void put_huge_zero_page(void)
{
/*
* Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (vma_is_dax(vma)) {
spin_unlock(ptl);
if (is_huge_zero_pmd(orig_pmd))
- put_huge_zero_page();
+ tlb_remove_page(tlb, pmd_page(orig_pmd));
} else if (is_huge_zero_pmd(orig_pmd)) {
pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
atomic_long_dec(&tlb->mm->nr_ptes);
spin_unlock(ptl);
- put_huge_zero_page();
+ tlb_remove_page(tlb, pmd_page(orig_pmd));
} else {
struct page *page = pmd_page(orig_pmd);
page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
* page fault if needed.
*/
return 0;
- if (vma->vm_ops)
+ if (vma->vm_ops || (vm_flags & VM_NO_THP))
/* khugepaged not yet working on file or special mappings */
return 0;
- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
return false;
if (is_vma_temporary_stack(vma))
return false;
- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
- return true;
+ return !(vma->vm_flags & VM_NO_THP);
}
static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05fa8acb..fe787f5c41bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
spinlock_t lock; /* for from, to */
+ struct mm_struct *mm;
struct mem_cgroup *from;
struct mem_cgroup *to;
unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
static void mem_cgroup_clear_mc(void)
{
+ struct mm_struct *mm = mc.mm;
+
/*
* we must clear moving_task before waking up waiters at the end of
* task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
spin_lock(&mc.lock);
mc.from = NULL;
mc.to = NULL;
+ mc.mm = NULL;
spin_unlock(&mc.lock);
+
+ mmput(mm);
}
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
VM_BUG_ON(mc.moved_swap);
spin_lock(&mc.lock);
+ mc.mm = mm;
mc.from = from;
mc.to = memcg;
mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
ret = mem_cgroup_precharge_mc(mm);
if (ret)
mem_cgroup_clear_mc();
+ } else {
+ mmput(mm);
}
- mmput(mm);
return ret;
}
@@ -4852,11 +4860,11 @@ put: /* get_mctgt_type() gets the page */
return ret;
}
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
{
struct mm_walk mem_cgroup_move_charge_walk = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mm,
+ .mm = mc.mm,
};
lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
/*
* Someone who are holding the mmap_sem might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
* additional charge, the page walk just aborts.
*/
walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
- up_read(&mm->mmap_sem);
+ up_read(&mc.mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
- struct cgroup_subsys_state *css;
- struct task_struct *p = cgroup_taskset_first(tset, &css);
- struct mm_struct *mm = get_task_mm(p);
-
- if (mm) {
- if (mc.to)
- mem_cgroup_move_charge(mm);
- mmput(mm);
- }
- if (mc.to)
+ if (mc.to) {
+ mem_cgroup_move_charge();
mem_cgroup_clear_mc();
+ }
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
}
#endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
.css_reset = mem_cgroup_css_reset,
.can_attach = mem_cgroup_can_attach,
.cancel_attach = mem_cgroup_cancel_attach,
- .attach = mem_cgroup_move_task,
+ .post_attach = mem_cgroup_move_task,
.bind = mem_cgroup_bind,
.dfl_cftypes = memory_files,
.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 78f5f2641b91..ca5acee53b7a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
}
}
- return get_page_unless_zero(head);
+ if (get_page_unless_zero(head)) {
+ if (head == compound_head(page))
+ return 1;
+
+ pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
+ put_page(head);
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(get_hwpoison_page);
diff --git a/mm/memory.c b/mm/memory.c
index 93897f23cc11..305537fc8640 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
return pfn_to_page(pfn);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ /*
+ * There is no pmd_special() but there may be special pmds, e.g.
+ * in a direct-access (dax) mapping, so let's just replicate the
+ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+ */
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (is_zero_pfn(pfn))
+ return NULL;
+ if (unlikely(pfn > highest_memmap_pfn))
+ return NULL;
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+	 * e.g. VDSO mappings can cause them to exist.
+ */
+out:
+ return pfn_to_page(pfn);
+}
+#endif
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7b27e0..f9dfb18a4eba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
/* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE) {
+ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+ /*
+			 * With this release, we free the successfully migrated
+			 * page and set PG_HWPoison on the just-freed page
+			 * intentionally. Although it's rather weird, this is how
+			 * the HWPoison flag works at the moment.
+ */
put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
diff --git a/mm/page_io.c b/mm/page_io.c
index cd92e3d67a32..985f23cfa79b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
if (!ret) {
- swap_slot_free_notify(page);
+ if (trylock_page(page)) {
+ swap_slot_free_notify(page);
+ unlock_page(page);
+ }
+
count_vm_event(PSWPIN);
return 0;
}
diff --git a/mm/swap.c b/mm/swap.c
index a0bc206b4ac6..03aacbcb013f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
zone = NULL;
}
+ if (is_huge_zero_page(page)) {
+ put_huge_zero_page();
+ continue;
+ }
+
page = compound_head(page);
if (!put_page_testzero(page))
continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223eaa45..142cb61f4822 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
sc->gfp_mask |= __GFP_HIGHMEM;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- requested_highidx, sc->nodemask) {
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
enum zone_type classzone_idx;
if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
/* Try to sleep for a short interval */
if (prepare_kswapd_sleep(pgdat, order, remaining,
balanced_classzone_idx)) {
+ /*
+ * Compaction records what page blocks it recently failed to
+ * isolate pages from and skips them in the future scanning.
+ * When kswapd is going to sleep, it is reasonable to assume
+ * that pages and compaction may succeed so reset the cache.
+ */
+ reset_isolation_suitable(pgdat);
+
+ /*
+ * We have freed the memory, now we should compact it to make
+ * allocation of the requested order possible.
+ */
+ wakeup_kcompactd(pgdat, order, classzone_idx);
+
remaining = schedule_timeout(HZ/10);
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- /*
- * Compaction records what page blocks it recently failed to
- * isolate pages from and skips them in the future scanning.
- * When kswapd is going to sleep, it is reasonable to assume
- * that pages and compaction may succeed so reset the cache.
- */
- reset_isolation_suitable(pgdat);
-
- /*
- * We have freed the memory, now we should compact it to make
- * allocation of the requested order possible.
- */
- wakeup_kcompactd(pgdat, order, classzone_idx);
-
if (!kthread_should_stop())
schedule();
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 3315b9a598af..4026f198a734 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -32,10 +32,21 @@
#include "bat_v_elp.h"
#include "bat_v_ogm.h"
+#include "hard-interface.h"
#include "hash.h"
#include "originator.h"
#include "packet.h"
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+ /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+ * set the interface as ACTIVE right away, without any risk of race
+ * condition
+ */
+ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+ hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
{
int ret;
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.name = "BATMAN_V",
+ .bat_iface_activate = batadv_v_iface_activate,
.bat_iface_enable = batadv_v_iface_enable,
.bat_iface_disable = batadv_v_iface_disable,
.bat_iface_update_mac = batadv_v_iface_update_mac,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index e96d7c745b4a..3e6b2624f980 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* be sent to
* @bat_priv: the bat priv with all the soft interface information
* @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
*
* An originator O is selected if and only if its DHT_ID value is one of three
* closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
*/
static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+ unsigned short vid)
{
int select;
batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
return NULL;
dat.ip = ip_dst;
- dat.vid = 0;
+ dat.vid = vid;
ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
BATADV_DAT_ADDR_MAX);
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @ip: the DHT key
+ * @vid: VLAN identifier
* @packet_subtype: unicast4addr packet subtype to use
*
* This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
*/
static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct sk_buff *skb, __be32 ip,
- int packet_subtype)
+ unsigned short vid, int packet_subtype)
{
int i;
bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct sk_buff *tmp_skb;
struct batadv_dat_candidate *cand;
- cand = batadv_dat_select_candidates(bat_priv, ip);
+ cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
goto out;
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
ret = true;
} else {
/* Send the request to the DHT */
- ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+ ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
BATADV_P_DAT_DHT_GET);
}
out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
/* Send the ARP reply to the candidates for both the IP addresses that
* the node obtained from the ARP reply
*/
- batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
- batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
}
/**
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b22b2775a0a5..0a7deaf2670a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
batadv_update_min_mtu(hard_iface->soft_iface);
+ if (bat_priv->bat_algo_ops->bat_iface_activate)
+ bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
out:
if (primary_if)
batadv_hardif_put(primary_if);
@@ -572,8 +575,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hard_iface *primary_if = NULL;
- if (hard_iface->if_status == BATADV_IF_ACTIVE)
- batadv_hardif_deactivate_interface(hard_iface);
+ batadv_hardif_deactivate_interface(hard_iface);
if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index e4cbb0753e37..c355a824713c 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
- struct batadv_hardif_neigh_node *hardif_neigh;
struct batadv_neigh_ifinfo *neigh_ifinfo;
struct batadv_algo_ops *bao;
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
batadv_neigh_ifinfo_put(neigh_ifinfo);
}
- hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
- neigh_node->addr);
- if (hardif_neigh) {
- /* batadv_hardif_neigh_get() increases refcount too */
- batadv_hardif_neigh_put(hardif_neigh);
- batadv_hardif_neigh_put(hardif_neigh);
- }
+ batadv_hardif_neigh_put(neigh_node->hardif_neigh);
if (bao->bat_neigh_free)
bao->bat_neigh_free(neigh_node);
@@ -663,6 +656,11 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
ether_addr_copy(neigh_node->addr, neigh_addr);
neigh_node->if_incoming = hard_iface;
neigh_node->orig_node = orig_node;
+ neigh_node->last_seen = jiffies;
+
+ /* increment unique neighbor refcount */
+ kref_get(&hardif_neigh->refcount);
+ neigh_node->hardif_neigh = hardif_neigh;
/* extra reference for return */
kref_init(&neigh_node->refcount);
@@ -672,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
spin_unlock_bh(&orig_node->neigh_list_lock);
- /* increment unique neighbor refcount */
- kref_get(&hardif_neigh->refcount);
-
batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
"Creating new neighbor %pM for orig_node %pM on interface %s\n",
neigh_addr, orig_node->orig, hard_iface->net_dev->name);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 4dd646a52f1a..b781bf753250 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -105,6 +105,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
neigh_node = NULL;
spin_lock_bh(&orig_node->neigh_list_lock);
+ /* curr_router used earlier may not be the current orig_ifinfo->router
+ * anymore because it was dereferenced outside of the neigh_list_lock
+	 * protected region. After the new best neighbor has replaced the current
+	 * best neighbor, the reference counter needs to decrease. Consequently,
+ * the code needs to ensure the curr_router variable contains a pointer
+ * to the replaced best neighbor.
+ */
+ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
rcu_assign_pointer(orig_ifinfo->router, neigh_node);
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_put(orig_ifinfo);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3ce06e0a91b1..76417850d3fc 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -675,6 +675,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
+ if (!forw_packet->own)
+ atomic_inc(&bat_priv->bcast_queue_left);
+
batadv_forw_packet_free(forw_packet);
}
}
@@ -702,6 +705,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
+ if (!forw_packet->own)
+ atomic_inc(&bat_priv->batman_queue_left);
+
batadv_forw_packet_free(forw_packet);
}
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0710379491bf..8a136b6a1ff0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -408,11 +408,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
*/
nf_reset(skb);
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ goto dropped;
+
vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+ goto dropped;
+
vhdr = (struct vlan_ethhdr *)skb->data;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -424,8 +430,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
}
/* skb->dev & skb->pkt_type are set here */
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- goto dropped;
skb->protocol = eth_type_trans(skb, soft_iface);
/* should not be necessary anymore as we use skb_pull_rcsum()
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0b43e86328a5..9b4551a86535 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
common.refcount);
+ batadv_softif_vlan_put(tt_local_entry->vlan);
+
kfree_rcu(tt_local_entry, common.rcu);
}
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
kref_get(&tt_local->common.refcount);
tt_local->last_seen = jiffies;
tt_local->common.added_at = tt_local->last_seen;
+ tt_local->vlan = vlan;
/* the batman interface mac and multicast addresses should never be
* purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
struct batadv_hard_iface *primary_if;
- struct batadv_softif_vlan *vlan;
struct hlist_head *head;
unsigned short vid;
u32 i;
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
last_seen_msecs = last_seen_msecs % 1000;
no_purge = tt_common_entry->flags & np_flag;
-
- vlan = batadv_softif_vlan_get(bat_priv, vid);
- if (!vlan) {
- seq_printf(seq, "Cannot retrieve VLAN %d\n",
- BATADV_PRINT_VID(vid));
- continue;
- }
-
seq_printf(seq,
" * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n",
tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
no_purge ? 0 : last_seen_secs,
no_purge ? 0 : last_seen_msecs,
- vlan->tt.crc);
-
- batadv_softif_vlan_put(vlan);
+ tt_local->vlan->tt.crc);
}
rcu_read_unlock();
}
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
{
struct batadv_tt_local_entry *tt_local_entry;
u16 flags, curr_flags = BATADV_NO_FLAGS;
- struct batadv_softif_vlan *vlan;
void *tt_entry_exists;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
/* extra call to free the local tt entry */
batadv_tt_local_entry_put(tt_local_entry);
- /* decrease the reference held for this vlan */
- vlan = batadv_softif_vlan_get(bat_priv, vid);
- if (!vlan)
- goto out;
-
- batadv_softif_vlan_put(vlan);
- batadv_softif_vlan_put(vlan);
-
out:
if (tt_local_entry)
batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
- struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
struct batadv_tt_local_entry,
common);
- /* decrease the reference held for this vlan */
- vlan = batadv_softif_vlan_get(bat_priv,
- tt_common_entry->vid);
- if (vlan) {
- batadv_softif_vlan_put(vlan);
- batadv_softif_vlan_put(vlan);
- }
-
batadv_tt_local_entry_put(tt_local);
}
spin_unlock_bh(list_lock);
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_local_entry *tt_local;
- struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
struct batadv_tt_local_entry,
common);
- /* decrease the reference held for this vlan */
- vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
- if (vlan) {
- batadv_softif_vlan_put(vlan);
- batadv_softif_vlan_put(vlan);
- }
-
batadv_tt_local_entry_put(tt_local);
}
spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9abfb3e73c34..1e47fbe8bb7b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
* @ifinfo_lock: lock protecting private ifinfo members and list
* @if_incoming: pointer to incoming hard-interface
* @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
*/
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
struct batadv_hard_iface *if_incoming;
unsigned long last_seen;
+ struct batadv_hardif_neigh_node *hardif_neigh;
struct kref refcount;
struct rcu_head rcu;
};
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry {
* struct batadv_tt_local_entry - translation table local entry data
* @common: general translation table data
* @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
*/
struct batadv_tt_local_entry {
struct batadv_tt_common_entry common;
unsigned long last_seen;
+ struct batadv_softif_vlan *vlan;
};
/**
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet {
* struct batadv_algo_ops - mesh algorithm callbacks
* @list: list node for the batadv_algo_list
* @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ * up
* @bat_iface_enable: init routing info when hard-interface is enabled
* @bat_iface_disable: de-init routing info when hard-interface is disabled
* @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet {
struct batadv_algo_ops {
struct hlist_node list;
char *name;
+ void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bcaa2a4..2bc5965fdd1e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
}
EXPORT_SYMBOL(ceph_auth_create_authorizer);
-void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
{
- mutex_lock(&ac->mutex);
- if (ac->ops && ac->ops->destroy_authorizer)
- ac->ops->destroy_authorizer(ac, a);
- mutex_unlock(&ac->mutex);
+ a->destroy(a);
}
EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8d81bc..5f836f02ae36 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
struct ceph_auth_none_info *xi = ac->private;
xi->starting = true;
- xi->built_authorizer = false;
}
static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
return xi->starting;
}
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+ struct ceph_none_authorizer *au)
+{
+ void *p = au->buf;
+ void *const end = p + sizeof(au->buf);
+ int ret;
+
+ ceph_encode_8_safe(&p, end, 1, e_range);
+ ret = ceph_entity_name_encode(ac->name, &p, end);
+ if (ret < 0)
+ return ret;
+
+ ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+ au->buf_len = p - (void *)au->buf;
+ dout("%s built authorizer len %d\n", __func__, au->buf_len);
+ return 0;
+
+e_range:
+ return -ERANGE;
+}
+
static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
{
return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
return result;
}
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+ kfree(a);
+}
+
/*
- * build an 'authorizer' with our entity_name and global_id. we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id. it is
+ * identical for all services we connect to.
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
{
- struct ceph_auth_none_info *ai = ac->private;
- struct ceph_none_authorizer *au = &ai->au;
- void *p, *end;
+ struct ceph_none_authorizer *au;
int ret;
- if (!ai->built_authorizer) {
- p = au->buf;
- end = p + sizeof(au->buf);
- ceph_encode_8(&p, 1);
- ret = ceph_entity_name_encode(ac->name, &p, end - 8);
- if (ret < 0)
- goto bad;
- ceph_decode_need(&p, end, sizeof(u64), bad2);
- ceph_encode_64(&p, ac->global_id);
- au->buf_len = p - (void *)au->buf;
- ai->built_authorizer = true;
- dout("built authorizer len %d\n", au->buf_len);
+ au = kmalloc(sizeof(*au), GFP_NOFS);
+ if (!au)
+ return -ENOMEM;
+
+ au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+ ret = ceph_auth_none_build_authorizer(ac, au);
+ if (ret) {
+ kfree(au);
+ return ret;
}
auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
return 0;
-
-bad2:
- ret = -ERANGE;
-bad:
- return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
-{
- /* nothing to do */
}
static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
.build_request = build_request,
.handle_reply = handle_reply,
.create_authorizer = ceph_auth_none_create_authorizer,
- .destroy_authorizer = ceph_auth_none_destroy_authorizer,
};
int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
return -ENOMEM;
xi->starting = true;
- xi->built_authorizer = false;
ac->protocol = CEPH_AUTH_NONE;
ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce4b53f..62021535ae4a 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
*/
struct ceph_none_authorizer {
+ struct ceph_authorizer base;
char buf[128];
int buf_len;
char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
struct ceph_auth_none_info {
bool starting;
- bool built_authorizer;
- struct ceph_none_authorizer au; /* we only need one; it's static */
};
int ceph_auth_none_init(struct ceph_auth_client *ac);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a315e662..a0905f04bd13 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
return -EAGAIN;
}
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+ struct ceph_x_authorizer *au = (void *)a;
+
+ ceph_x_authorizer_cleanup(au);
+ kfree(au);
+}
+
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
if (!au)
return -ENOMEM;
+ au->base.destroy = ceph_x_destroy_authorizer;
+
ret = ceph_x_build_authorizer(ac, th, au);
if (ret) {
kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
return ret;
}
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
-{
- struct ceph_x_authorizer *au = (void *)a;
-
- ceph_x_authorizer_cleanup(au);
- kfree(au);
-}
-
-
static void ceph_x_reset(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
.create_authorizer = ceph_x_create_authorizer,
.update_authorizer = ceph_x_update_authorizer,
.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
- .destroy_authorizer = ceph_x_destroy_authorizer,
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3cf7397..21a5af904bae 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
struct ceph_x_authorizer {
+ struct ceph_authorizer base;
struct ceph_crypto_key session_key;
struct ceph_buffer *buf;
unsigned int service;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d0103..40a53a70efdf 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
if (atomic_dec_and_test(&osd->o_ref)) {
- struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
if (osd->o_auth.authorizer)
- ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+ ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
kfree(osd);
}
}
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
struct ceph_auth_handshake *auth = &o->o_auth;
if (force_new && auth->authorizer) {
- ceph_auth_destroy_authorizer(ac, auth->authorizer);
+ ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer) {
diff --git a/net/core/dev.c b/net/core/dev.c
index d91dfbec0fc6..673d1f118bfb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2815,7 +2815,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, type)) {
- features &= ~NETIF_F_CSUM_MASK;
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
} else if (illegal_highdma(skb->dev, skb)) {
features &= ~NETIF_F_SG;
}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3177211ab651..77c20a489218 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -438,6 +438,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
const struct sock *sk2,
bool match_wildcard))
{
+ struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
@@ -446,6 +447,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+ inet_csk(sk2)->icsk_bind_hash == tb &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
saddr_same(sk, sk2, false))
return reuseport_add_sock(sk, sk2);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2480d79b0e37..b99213c46aac 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -369,7 +369,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
return ip_route_output_key(net, fl);
}
-static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+ __be16 proto)
{
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
@@ -418,7 +419,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_free_rt;
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
- gre_build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+ gre_build_header(skb, tunnel_hlen, flags, proto,
tunnel_id_to_key(tun_info->key.tun_id), 0);
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -459,7 +460,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
const struct iphdr *tnl_params;
if (tunnel->collect_md) {
- gre_fb_xmit(skb, dev);
+ gre_fb_xmit(skb, dev, skb->protocol);
return NETDEV_TX_OK;
}
@@ -501,7 +502,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
if (tunnel->collect_md) {
- gre_fb_xmit(skb, dev);
+ gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
return NETDEV_TX_OK;
}
@@ -732,7 +733,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
netif_keep_dst(dev);
dev->addr_len = 4;
- if (iph->daddr) {
+ if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
if (!iph->saddr)
@@ -741,8 +742,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
dev->header_ops = &ipgre_header_ops;
}
#endif
- } else
+ } else if (!tunnel->collect_md) {
dev->header_ops = &ipgre_header_ops;
+ }
return ip_tunnel_init(dev);
}
@@ -785,6 +787,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
if (flags & (GRE_VERSION|GRE_ROUTING))
return -EINVAL;
+ if (data[IFLA_GRE_COLLECT_METADATA] &&
+ data[IFLA_GRE_ENCAP_TYPE] &&
+ nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
+ return -EINVAL;
+
return 0;
}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 6aad0192443d..a69ed94bda1b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
if (!IS_ERR(rt)) {
tdev = rt->dst.dev;
- dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
- fl4.saddr);
ip_rt_put(rt);
}
if (dev->type != ARPHRD_ETHER)
dev->flags |= IFF_POINTOPOINT;
+
+ dst_cache_reset(&tunnel->dst_cache);
}
if (!tdev && tunnel->parms.link)
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 4985e1a735a6..17038e1ede98 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -144,8 +144,7 @@ nla_put_failure:
static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
{
- /* No encapsulation overhead */
- return 0;
+ return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
}
static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index afca2eb4dfa7..6edfa9980314 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
sizeof(udp_conf.peer_ip6));
udp_conf.use_udp6_tx_checksums =
- cfg->udp6_zero_tx_checksums;
+ ! cfg->udp6_zero_tx_checksums;
udp_conf.use_udp6_rx_checksums =
- cfg->udp6_zero_rx_checksums;
+ ! cfg->udp6_zero_rx_checksums;
} else
#endif
{
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 6a33f0b4d839..c59af3eb9fa4 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0) {
- free_netdev(ndev);
+ ieee80211_if_free(ndev);
return ret;
}
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = register_netdevice(ndev);
if (ret) {
- free_netdev(ndev);
+ ieee80211_if_free(ndev);
return ret;
}
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 61ed2a8764ba..86187dad1440 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
/*
* This is the only path that sets tc->t_sock. Send and receive trust that
- * it is set. The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set. The RDS_CONN_UP bit protects those paths from being
* called while it isn't set.
*/
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
if (!tc)
return -ENOMEM;
+ mutex_init(&tc->t_conn_lock);
tc->t_sock = NULL;
tc->t_tinc = NULL;
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 64f873c0c6b6..41c228300525 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
struct list_head t_tcp_node;
struct rds_connection *conn;
+ /* t_conn_lock synchronizes the connection establishment between
+ * rds_tcp_accept_one and rds_tcp_conn_connect
+ */
+ struct mutex t_conn_lock;
struct socket *t_sock;
void *t_orig_write_space;
void *t_orig_data_ready;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 5cb16875c460..49a3fcfed360 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
struct socket *sock = NULL;
struct sockaddr_in src, dest;
int ret;
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+
+ mutex_lock(&tc->t_conn_lock);
+ if (rds_conn_up(conn)) {
+ mutex_unlock(&tc->t_conn_lock);
+ return 0;
+ }
ret = sock_create_kern(rds_conn_net(conn), PF_INET,
SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
}
out:
+ mutex_unlock(&tc->t_conn_lock);
if (sock)
sock_release(sock);
return ret;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a32b47..be263cdf268b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
struct rds_connection *conn;
int ret;
struct inet_sock *inet;
- struct rds_tcp_connection *rs_tcp;
+ struct rds_tcp_connection *rs_tcp = NULL;
+ int conn_state;
+ struct sock *nsk;
ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
* rds_tcp_state_change() will do that cleanup
*/
rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
- if (rs_tcp->t_sock &&
- ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
- struct sock *nsk = new_sock->sk;
-
- nsk->sk_user_data = NULL;
- nsk->sk_prot->disconnect(nsk, 0);
- tcp_done(nsk);
- new_sock = NULL;
- ret = 0;
- goto out;
- } else if (rs_tcp->t_sock) {
- rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
- conn->c_outgoing = 0;
- }
-
rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+ mutex_lock(&rs_tcp->t_conn_lock);
+ conn_state = rds_conn_state(conn);
+ if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+ goto rst_nsk;
+ if (rs_tcp->t_sock) {
+ /* Need to resolve a duelling SYN between peers.
+ * We have an outstanding SYN to this peer, which may
+ * potentially have transitioned to the RDS_CONN_UP state,
+ * so we must quiesce any send threads before resetting
+ * c_transport_data.
+ */
+ wait_event(conn->c_waitq,
+ !test_bit(RDS_IN_XMIT, &conn->c_flags));
+ if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+ goto rst_nsk;
+ } else if (rs_tcp->t_sock) {
+ rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+ conn->c_outgoing = 0;
+ }
+ }
rds_tcp_set_callbacks(new_sock, conn);
- rds_connect_complete(conn);
+ rds_connect_complete(conn); /* marks RDS_CONN_UP */
+ new_sock = NULL;
+ ret = 0;
+ goto out;
+rst_nsk:
+ /* reset the newly returned accept sock and bail */
+ nsk = new_sock->sk;
+ rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+ nsk->sk_user_data = NULL;
+ nsk->sk_prot->disconnect(nsk, 0);
+ tcp_done(nsk);
new_sock = NULL;
ret = 0;
-
out:
+ if (rs_tcp)
+ mutex_unlock(&rs_tcp->t_conn_lock);
if (new_sock)
sock_release(new_sock);
return ret;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 491d6fd6430c..205bed00dd34 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
sch->q.qlen++;
}
+/* netem can't properly corrupt a megapacket (like we get from GSO), so when
+ * we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames.
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct sk_buff *segs;
+ netdev_features_t features = netif_skb_features(skb);
+
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+ if (IS_ERR_OR_NULL(segs)) {
+ qdisc_reshape_fail(skb, sch);
+ return NULL;
+ }
+ consume_skb(skb);
+ return segs;
+}
+
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
+ struct sk_buff *segs = NULL;
+ unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+ int nb = 0;
int count = 1;
+ int rc = NET_XMIT_SUCCESS;
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* do it now in software before we mangle it.
*/
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+ if (skb_is_gso(skb)) {
+ segs = netem_segment(skb, sch);
+ if (!segs)
+ return NET_XMIT_DROP;
+ } else {
+ segs = skb;
+ }
+
+ skb = segs;
+ segs = segs->next;
+
if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb)))
- return qdisc_drop(skb, sch);
+ skb_checksum_help(skb))) {
+ rc = qdisc_drop(skb, sch);
+ goto finish_segs;
+ }
skb->data[prandom_u32() % skb_headlen(skb)] ^=
1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.requeues++;
}
+finish_segs:
+ if (segs) {
+ while (segs) {
+ skb2 = segs->next;
+ segs->next = NULL;
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ last_len = segs->len;
+ rc = qdisc_enqueue(segs, sch);
+ if (rc != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(rc))
+ qdisc_qstats_drop(sch);
+ } else {
+ nb++;
+ len += last_len;
+ }
+ segs = skb2;
+ }
+ sch->q.qlen += nb;
+ if (nb > 1)
+ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ }
return NET_XMIT_SUCCESS;
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 29cc85319327..d903f560e2fd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1469,6 +1469,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
int bearer_id = b->identity;
struct tipc_link_entry *le;
u16 bc_ack = msg_bcast_ack(hdr);
+ u32 self = tipc_own_addr(net);
int rc = 0;
__skb_queue_head_init(&xmitq);
@@ -1485,6 +1486,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
return tipc_node_bc_rcv(net, skb, bearer_id);
}
+ /* Discard unicast link messages destined for another node */
+ if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+ goto discard;
+
/* Locate neighboring node that sent packet */
n = tipc_node_find(net, msg_prevnode(hdr));
if (unlikely(!n))
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 8d8d1ec429eb..9b96f4fb8cea 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
u64 cookie;
} data;
- memset(&data, 0, sizeof(data));
data.pid = bpf_get_current_pid_tgid();
data.cookie = 0x12345678;
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 023cc4cad5c1..626f3bb24c55 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
*/
void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
{
- struct hdac_stream *s;
+ struct hdac_stream *s, *_s;
struct hdac_ext_stream *stream;
struct hdac_bus *bus = ebus_to_hbus(ebus);
- while (!list_empty(&bus->stream_list)) {
- s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
stream = stream_to_hdac_ext_stream(s);
snd_hdac_ext_stream_decouple(ebus, stream, false);
list_del(&s->list);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 54babe1c0b16..607bbeaebddf 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -20,6 +20,7 @@
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_register.h>
static struct i915_audio_component *hdac_acomp;
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
}
EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+ ((pci)->device == 0x0c0c) || \
+ ((pci)->device == 0x0d0c) || \
+ ((pci)->device == 0x160c))
+
/**
- * snd_hdac_get_display_clk - Get CDCLK in kHz
+ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
* @bus: HDA core bus
*
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
+ * The Intel HSW/BDW display HDA controller is in the GPU. Both its power and
+ * link BCLK depend on the GPU. Two Extended Mode registers, EM4 (M value) and
+ * EM5 (N value), are used to convert CDCLK (Core Display Clock) to the 24 MHz
+ * BCLK:
+ * BCLK = CDCLK * M / N
+ * The values will be lost when the display power well is disabled and need to
+ * be restored to avoid abnormal playback speed.
*
- * This function queries CDCLK value in kHz from the graphics driver and
- * returns the value. A negative code is returned in error.
+ * Call this function when initializing and when changing the power well, as
+ * well as from the ELD notifier on hotplug.
*/
-int snd_hdac_get_display_clk(struct hdac_bus *bus)
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
struct i915_audio_component *acomp = bus->audio_component;
+ struct pci_dev *pci = to_pci_dev(bus->dev);
+ int cdclk_freq;
+ unsigned int bclk_m, bclk_n;
+
+ if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
+ return; /* only for i915 binding */
+ if (!CONTROLLER_IN_GPU(pci))
+ return; /* only HSW/BDW */
+
+ cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
+ switch (cdclk_freq) {
+ case 337500:
+ bclk_m = 16;
+ bclk_n = 225;
+ break;
+
+ case 450000:
+ default: /* default CDCLK 450MHz */
+ bclk_m = 4;
+ bclk_n = 75;
+ break;
+
+ case 540000:
+ bclk_m = 4;
+ bclk_n = 90;
+ break;
+
+ case 675000:
+ bclk_m = 8;
+ bclk_n = 225;
+ break;
+ }
- if (!acomp || !acomp->ops)
- return -ENODEV;
-
- return acomp->ops->get_cdclk_freq(acomp->dev);
+ snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
+ snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
}
-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
+EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
/* There is a fixed mapping between audio pin node and display port
* on current Intel platforms:
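
A quick arithmetic check of the EM4/EM5 table above (illustrative only, not part of the patch): every CDCLK entry in the switch maps back to the 24MHz BCLK the kernel-doc describes.

#include <stdio.h>

int main(void)
{
	static const struct { unsigned int cdclk_khz, m, n; } tbl[] = {
		{ 337500, 16, 225 },
		{ 450000,  4,  75 },
		{ 540000,  4,  90 },
		{ 675000,  8, 225 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("CDCLK %6u kHz -> BCLK %u kHz\n", tbl[i].cdclk_khz,
		       tbl[i].cdclk_khz * tbl[i].m / tbl[i].n);
	/* every row prints 24000 kHz, i.e. BCLK = CDCLK * M / N = 24 MHz */
	return 0;
}
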
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 637b8a0e2a91..9a0d1445ca5c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
- * BCLK = CDCLK * M / N
- * The values will be lost when the display power well is disabled and need to
- * be restored to avoid abnormal playback speed.
- */
-static void haswell_set_bclk(struct hda_intel *hda)
-{
- struct azx *chip = &hda->chip;
- int cdclk_freq;
- unsigned int bclk_m, bclk_n;
-
- if (!hda->need_i915_power)
- return;
-
- cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
- switch (cdclk_freq) {
- case 337500:
- bclk_m = 16;
- bclk_n = 225;
- break;
-
- case 450000:
- default: /* default CDCLK 450MHz */
- bclk_m = 4;
- bclk_n = 75;
- break;
-
- case 540000:
- bclk_m = 4;
- bclk_n = 90;
- break;
-
- case 675000:
- bclk_m = 8;
- bclk_n = 225;
- break;
- }
-
- azx_writew(chip, HSW_EM4, bclk_m);
- azx_writew(chip, HSW_EM5, bclk_n);
-}
-
#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
/*
* power management
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
&& hda->need_i915_power) {
snd_hdac_display_power(azx_bus(chip), true);
- haswell_set_bclk(hda);
+ snd_hdac_i915_set_bclk(azx_bus(chip));
}
if (chip->msi)
if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
bus = azx_bus(chip);
if (hda->need_i915_power) {
snd_hdac_display_power(bus, true);
- haswell_set_bclk(hda);
+ snd_hdac_i915_set_bclk(bus);
} else {
/* toggle codec wakeup bit for STATESTS read */
snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
/* initialize chip */
azx_init_pci(chip);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- struct hda_intel *hda;
-
- hda = container_of(chip, struct hda_intel, chip);
- haswell_set_bclk(hda);
- }
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+ snd_hdac_i915_set_bclk(bus);
hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 40933aa33afe..1483f85999ec 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2232,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
if (atomic_read(&(codec)->core.in_pm))
return;
+ snd_hdac_i915_set_bclk(&codec->bus->core);
check_presence_and_report(codec, pin_nid);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 810bceee4fd2..ac4490a96863 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5584,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 649e92a252ae..7ef3a0c16478 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -629,6 +629,7 @@ config SND_SOC_RT5514
config SND_SOC_RT5616
tristate "Realtek RT5616 CODEC"
+ depends on I2C
config SND_SOC_RT5631
tristate "Realtek ALC5631/RT5631 CODEC"
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 92d22a018d68..83959312f7a0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(arizona_init_spk);
+int arizona_free_spk(struct snd_soc_codec *codec)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
+ arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_free_spk);
+
static const struct snd_soc_dapm_route arizona_mono_routes[] = {
{ "OUT1R", NULL, "OUT1L" },
{ "OUT2R", NULL, "OUT2L" },
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1ea8e4ecf8d4..ce0531b8c632 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec);
extern int arizona_init_gpio(struct snd_soc_codec *codec);
extern int arizona_init_mono(struct snd_soc_codec *codec);
+extern int arizona_free_spk(struct snd_soc_codec *codec);
+
extern int arizona_init_dai(struct arizona_priv *priv, int dai);
int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 44c30fe3e315..287d13740be4 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
pdata->sdout_share = val;
- of_property_read_u32(np, "cirrus,boost-manager", &val);
+ if (of_property_read_u32(np, "cirrus,boost-manager", &val))
+ val = -1u;
+
switch (val) {
case CS35L32_BOOST_MGR_AUTO:
case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_BOOST_MGR_FIXED:
pdata->boost_mng = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,boost-manager DT value %d\n", val);
pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
}
- of_property_read_u32(np, "cirrus,sdout-datacfg", &val);
+ if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
+ val = -1u;
switch (val) {
case CS35L32_DATA_CFG_LR_VP:
case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_DATA_CFG_LR_VPSTAT:
pdata->sdout_datacfg = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,sdout-datacfg DT value %d\n", val);
pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
}
- of_property_read_u32(np, "cirrus,battery-threshold", &val);
+ if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
+ val = -1u;
switch (val) {
case CS35L32_BATT_THRESH_3_1V:
case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_BATT_THRESH_3_4V:
pdata->batt_thresh = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,battery-threshold DT value %d\n", val);
pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
}
- of_property_read_u32(np, "cirrus,battery-recovery", &val);
+ if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
+ val = -1u;
switch (val) {
case CS35L32_BATT_RECOV_3_1V:
case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_BATT_RECOV_3_6V:
pdata->batt_recov = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,battery-recovery DT value %d\n", val);
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 576087bda330..00e9b6fc1b5c 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 26f9459cb3bc..aaa038ffc8a5 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
}
#ifdef CONFIG_PM
-static int hdmi_codec_resume(struct snd_soc_codec *codec)
+static int hdmi_codec_prepare(struct device *dev)
{
- struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
+ struct hdac_ext_device *edev = to_hda_ext_device(dev);
+ struct hdac_device *hdac = &edev->hdac;
+
+ pm_runtime_get_sync(&edev->hdac.dev);
+
+ /*
+ * Power down afg.
+ * codec_read is preferred over codec_write to set the power state.
+ * This way the verb is sent to set the power state and the response
+ * is received, so the power-state change is confirmed without a loop
+ * that polls the state.
+ */
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D3);
+
+ return 0;
+}
+
+static void hdmi_codec_complete(struct device *dev)
+{
+ struct hdac_ext_device *edev = to_hda_ext_device(dev);
struct hdac_hdmi_priv *hdmi = edev->private_data;
struct hdac_hdmi_pin *pin;
struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
- int err;
- unsigned long timeout;
-
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
/* Power up afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) {
-
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D0);
- /* Wait till power state is set to D0 */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
- && time_before(jiffies, timeout)) {
- msleep(50);
- }
- }
+ hdac_hdmi_skl_enable_all_pins(&edev->hdac);
+ hdac_hdmi_skl_enable_dp12(&edev->hdac);
/*
* As the ELD notify callback request is not entertained while the
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec)
list_for_each_entry(pin, &hdmi->pin_list, head)
hdac_hdmi_present_sense(pin, 1);
- /*
- * Codec power is turned ON during controller resume.
- * Turn it OFF here
- */
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(bus->dev,
- "Cannot turn OFF display power on i915, err: %d\n",
- err);
- return err;
- }
-
- return 0;
+ pm_runtime_put_sync(&edev->hdac.dev);
}
#else
-#define hdmi_codec_resume NULL
+#define hdmi_codec_prepare NULL
+#define hdmi_codec_complete NULL
#endif
static struct snd_soc_codec_driver hdmi_hda_codec = {
.probe = hdmi_codec_probe,
.remove = hdmi_codec_remove,
- .resume = hdmi_codec_resume,
.idle_bias_off = true,
};
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
struct hdac_ext_device *edev = to_hda_ext_device(dev);
struct hdac_device *hdac = &edev->hdac;
struct hdac_bus *bus = hdac->bus;
- unsigned long timeout;
int err;
dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
if (!bus)
return 0;
- /* Power down afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) {
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-
- /* Wait till power state is set to D3 */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)
- && time_before(jiffies, timeout)) {
-
- msleep(50);
- }
- }
-
+ /*
+ * Power down afg.
+ * codec_read is preferred over codec_write to set the power state.
+ * This way the verb is sent to set the power state and the response
+ * is received, so the power-state change is confirmed without a loop
+ * that polls the state.
+ */
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D3);
err = snd_hdac_display_power(bus, false);
if (err < 0) {
dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
hdac_hdmi_skl_enable_dp12(&edev->hdac);
/* Power up afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0))
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D0);
return 0;
}
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
static const struct dev_pm_ops hdac_hdmi_pm = {
SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
+ .prepare = hdmi_codec_prepare,
+ .complete = hdmi_codec_complete,
};
static const struct hda_device_id hdmi_list[] = {
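
The hdac_hdmi changes drop the codec-level .resume handler in favour of system-sleep prepare/complete callbacks that sit next to the existing runtime PM ops; prepare() runs before the devices suspend and complete() after they resume, which is why the driver can take a runtime PM reference in one and release it in the other. A minimal kernel-style sketch of that dev_pm_ops shape (my_* names are hypothetical, not the driver's):

#include <linux/pm.h>

static int my_prepare(struct device *dev)
{
	/* entered before system suspend; the hunk above uses this spot for
	 * pm_runtime_get_sync() plus the D3 power-state verb */
	return 0;
}

static void my_complete(struct device *dev)
{
	/* entered after system resume; the hunk restores D0, re-enables the
	 * pins and drops the runtime PM reference here */
}

static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	.prepare = my_prepare,
	.complete = my_complete,
};
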
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 1c8729984c2b..683769f0f246 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
0),
- /* ADC for button press detection */
- SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL,
- NAU8825_SAR_ADC_EN_SFT, 0),
+ /* ADC for button press detection. A dapm supply widget is used to
+ * prevent dapm_power_widgets keeping the codec at SND_SOC_BIAS_ON
+ * during suspend.
+ */
+ SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
+ NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap)
static void nau8825_restart_jack_detection(struct regmap *regmap)
{
+ /* Chip needs one FSCLK cycle in order to generate interrupts,
+ * as we cannot guarantee one will be provided by the system. Turning
+ * master mode on then off enables us to generate that FSCLK cycle
+ * with a minimum of contention on the clock bus.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
/* this will restart the entire jack detection process including MIC/GND
* switching and create interrupts. We have to go from 0 to 1 and back
* to 0 to restart.
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
struct regmap *regmap = nau8825->regmap;
int active_irq, clear_irq = 0, event = 0, event_mask = 0;
- regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+ if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
+ dev_err(nau8825->dev, "failed to read irq status\n");
+ return IRQ_NONE;
+ }
if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
-
- ret = regcache_sync(nau8825->regmap);
- if (ret) {
- dev_err(codec->dev,
- "Failed to sync cache: %d\n", ret);
- return ret;
- }
}
-
break;
case SND_SOC_BIAS_OFF:
if (nau8825->mclk_freq)
clk_disable_unprepare(nau8825->mclk);
-
- regcache_mark_dirty(nau8825->regmap);
break;
}
return 0;
}
+#ifdef CONFIG_PM
+static int nau8825_suspend(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ disable_irq(nau8825->irq);
+ regcache_cache_only(nau8825->regmap, true);
+ regcache_mark_dirty(nau8825->regmap);
+
+ return 0;
+}
+
+static int nau8825_resume(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ /* The chip may lose power and reset in S3. regcache_sync restores
+ * register values including configurations for sysclk, irq, and
+ * jack/button detection.
+ */
+ regcache_cache_only(nau8825->regmap, false);
+ regcache_sync(nau8825->regmap);
+
+ /* Check the jack plug status directly. If the headset is unplugged
+ * during S3 when the chip has no power, there will be no jack
+ * detection irq even after the nau8825_restart_jack_detection below,
+ * because the chip just thinks no headset has ever been plugged in.
+ */
+ if (!nau8825_is_jack_inserted(nau8825->regmap)) {
+ nau8825_eject_jack(nau8825);
+ snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+ }
+
+ enable_irq(nau8825->irq);
+
+ /* Run jack detection to check the type (OMTP or CTIA) of the headset
+ * if there is one. This handles the case where a different type of
+ * headset is plugged in during S3. This triggers an IRQ iff a headset
+ * is already plugged in.
+ */
+ nau8825_restart_jack_detection(nau8825->regmap);
+
+ return 0;
+}
+#else
+#define nau8825_suspend NULL
+#define nau8825_resume NULL
+#endif
+
static struct snd_soc_codec_driver nau8825_codec_driver = {
.probe = nau8825_codec_probe,
.set_sysclk = nau8825_set_sysclk,
.set_pll = nau8825_set_pll,
.set_bias_level = nau8825_set_bias_level,
.suspend_bias_off = true,
+ .suspend = nau8825_suspend,
+ .resume = nau8825_resume,
.controls = nau8825_controls,
.num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825)
regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
- /* Chip needs one FSCLK cycle in order to generate interrupts,
- * as we cannot guarantee one will be provided by the system. Turning
- * master mode on then off enables us to generate that FSCLK cycle
- * with a minimum of contention on the clock bus.
- */
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"nau8825", nau8825);
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int nau8825_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
- disable_irq(client->irq);
- regcache_cache_only(nau8825->regmap, true);
- regcache_mark_dirty(nau8825->regmap);
-
- return 0;
-}
-
-static int nau8825_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
- regcache_cache_only(nau8825->regmap, false);
- regcache_sync(nau8825->regmap);
- enable_irq(client->irq);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops nau8825_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
-};
-
static const struct i2c_device_id nau8825_i2c_ids[] = {
{ "nau8825", 0 },
{ }
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = {
.name = "nau8825",
.of_match_table = of_match_ptr(nau8825_of_ids),
.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
- .pm = &nau8825_pm,
},
.probe = nau8825_i2c_probe,
.remove = nau8825_i2c_remove,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index e8b5ba04417a..09e8988bbb2d 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
/* Interface data select */
static const char * const rt5640_data_select[] = {
- "Normal", "left copy to right", "right copy to left", "Swap"};
+ "Normal", "Swap", "left copy to right", "right copy to left"};
static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 1761c3a98b76..58b664b06c16 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -443,39 +443,39 @@
#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
#define RT5640_IF1_DAC_SEL_SFT 14
#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
#define RT5640_IF1_ADC_SEL_SFT 12
#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
#define RT5640_IF2_DAC_SEL_SFT 10
#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
#define RT5640_IF2_ADC_SEL_SFT 8
#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
#define RT5640_IF3_DAC_SEL_SFT 6
#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
#define RT5640_IF3_ADC_SEL_SFT 4
#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
/* REC Left Mixer Control 1 (0x3b) */
#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
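
The rt5640 changes reorder both the control texts and the *_SEL_* defines so that register value 0x1 now means Swap while 0x2/0x3 are the two copy modes. A trivial check of the resulting mapping (illustrative only):

#include <stdio.h>

static const char * const rt5640_data_select[] = {
	"Normal", "Swap", "left copy to right", "right copy to left"
};

int main(void)
{
	unsigned int v;

	/* SOC_ENUM_SINGLE_DECL maps text index straight to the field value,
	 * so the array order above must follow the register encoding */
	for (v = 0; v < 4; v++)
		printf("field value 0x%x -> \"%s\"\n", v, rt5640_data_select[v]);
	return 0;
}
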
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index a8b3e3f701f9..1bae17ee8817 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe:
static int wm5102_codec_remove(struct snd_soc_codec *codec)
{
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
priv->core.arizona->dapm = NULL;
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 83ba70fe16e6..2728ac545ffe 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 88223608a33f..720a14e0687d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
break;
default:
dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
- dspclk = wm8962->sysclk;
+ dspclk = wm8962->sysclk_rate;
}
dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 52d766efe14f..6b0785b5a5c5 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 012396074a8a..449f66636205 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3e6c2300457..1120f4f4d011 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE
tristate
select SND_HDA_EXT_CORE
select SND_SOC_TOPOLOGY
- select SND_HDA_I915
select SND_SOC_INTEL_SST
config SND_SOC_INTEL_SKL_RT286_MACH
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index ac60f1301e21..91565229d074 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
return 0;
/* wait for pause to complete before we reset the stream */
- while (stream->running && tries--)
+ while (stream->running && --tries)
msleep(1);
if (!tries) {
dev_err(hsw->dev, "error: reset stream %d still running\n",
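
The sst-haswell-ipc change swaps tries-- for --tries so the timeout check after the loop still works: with post-decrement the counter ends at -1 when the wait expires, so if (!tries) can never report the failure. A stand-alone demonstration (illustrative only, not the driver):

#include <stdio.h>

static int still_running = 1;	/* simulate a stream that never stops */

int main(void)
{
	int tries;

	tries = 10;
	while (still_running && tries--)
		;	/* msleep(1) in the driver */
	printf("post-decrement: tries ends at %d, timeout %sreported\n",
	       tries, !tries ? "" : "NOT ");

	tries = 10;
	while (still_running && --tries)
		;
	printf("pre-decrement:  tries ends at %d, timeout %sreported\n",
	       tries, !tries ? "" : "NOT ");
	return 0;
}
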
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index a5267e8a96e0..2962ef22fc84 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp)
skl_ipc_int_disable(dsp);
free_irq(dsp->irq, dsp);
+ dsp->cl_dev.ops.cl_cleanup_controller(dsp);
+ skl_cldma_int_disable(dsp);
+ skl_ipc_op_int_disable(dsp);
+ skl_ipc_int_disable(dsp);
+
skl_dsp_disable_core(dsp);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 545b4e77b8aa..cdb78b7e5a14 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
{
int multiplier = 1;
struct skl_module_fmt *in_fmt, *out_fmt;
+ int in_rate, out_rate;
/* Since fixups is applied to pin 0 only, ibs, obs needs
@@ -249,15 +250,24 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
multiplier = 5;
- mcfg->ibs = (in_fmt->s_freq / 1000) *
- (mcfg->in_fmt->channels) *
- (mcfg->in_fmt->bit_depth >> 3) *
- multiplier;
-
- mcfg->obs = (mcfg->out_fmt->s_freq / 1000) *
- (mcfg->out_fmt->channels) *
- (mcfg->out_fmt->bit_depth >> 3) *
- multiplier;
+
+ if (in_fmt->s_freq % 1000)
+ in_rate = (in_fmt->s_freq / 1000) + 1;
+ else
+ in_rate = (in_fmt->s_freq / 1000);
+
+ mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
+ (mcfg->in_fmt->bit_depth >> 3) *
+ multiplier;
+
+ if (mcfg->out_fmt->s_freq % 1000)
+ out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
+ else
+ out_rate = (mcfg->out_fmt->s_freq / 1000);
+
+ mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
+ (mcfg->out_fmt->bit_depth >> 3) *
+ multiplier;
}
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
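
The skl-topology hunk rounds the per-millisecond frame count up when the sample rate is not a whole multiple of 1000Hz, so ibs/obs are no longer undersized for rates such as 44.1kHz. A worked example of the new arithmetic (illustrative only; channel count, bit depth and multiplier are assumed values):

#include <stdio.h>

/* equivalent to DIV_ROUND_UP(s_freq, 1000) */
static unsigned int frames_per_ms(unsigned int s_freq)
{
	if (s_freq % 1000)
		return s_freq / 1000 + 1;
	return s_freq / 1000;
}

int main(void)
{
	unsigned int channels = 2, bit_depth = 16, multiplier = 1;
	unsigned int rates[] = { 48000, 44100 };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int rate = frames_per_ms(rates[i]);
		unsigned int ibs = rate * channels * (bit_depth >> 3) * multiplier;

		printf("%u Hz -> %u frames/ms -> ibs = %u bytes\n",
		       rates[i], rate, ibs);
	}
	/* 48000 Hz -> 48 -> 192 bytes; 44100 Hz -> 45 (was 44) -> 180 bytes */
	return 0;
}
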
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
mconfig->id.module_id, mconfig->guid);
if (ret < 0)
return ret;
+
+ mconfig->m_state = SKL_MODULE_LOADED;
}
/* update blob if blob is null for be with default value */
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
return ret;
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
}
return 0;
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
list_for_each_entry(w_module, &pipe->w_list, node) {
mconfig = w_module->w->priv;
- if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod)
+ if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
+ mconfig->m_state > SKL_MODULE_UNINIT)
return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
mconfig->id.module_id);
}
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
if (!skl_is_pipe_mem_avail(skl, mconfig))
return -ENOMEM;
+ skl_tplg_alloc_pipe_mem(skl, mconfig);
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
/*
* Create a list of modules for pipe.
* This list contains modules from source to sink
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
src_module = dst_module;
}
- skl_tplg_alloc_pipe_mem(skl, mconfig);
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
return 0;
}
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index de3c401284d9..d2d923002d5c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -274,10 +274,10 @@ struct skl_pipe {
enum skl_module_state {
SKL_MODULE_UNINIT = 0,
- SKL_MODULE_INIT_DONE = 1,
- SKL_MODULE_LOADED = 2,
- SKL_MODULE_UNLOADED = 3,
- SKL_MODULE_BIND_DONE = 4
+ SKL_MODULE_LOADED = 1,
+ SKL_MODULE_INIT_DONE = 2,
+ SKL_MODULE_BIND_DONE = 3,
+ SKL_MODULE_UNLOADED = 4,
};
struct skl_module_cfg {
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index ab5e25aaeee3..3982f5536f2d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev)
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
+ int ret = 0;
/*
* Do not suspend if streams which are marked ignore suspend are
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev)
enable_irq_wake(bus->irq);
pci_save_state(pci);
pci_disable_device(pci);
- return 0;
} else {
- return _skl_suspend(ebus);
+ ret = _skl_suspend(ebus);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+ ret = snd_hdac_display_power(bus, false);
+ if (ret < 0)
+ dev_err(bus->dev,
+ "Cannot turn OFF display power on i915\n");
}
+
+ return ret;
}
static int skl_resume(struct device *dev)
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus)
if (bus->irq >= 0)
free_irq(bus->irq, (void *)bus);
- if (bus->remap_addr)
- iounmap(bus->remap_addr);
-
snd_hdac_bus_free_stream_pages(bus);
snd_hdac_stream_free_all(ebus);
snd_hdac_link_free_all(ebus);
+
+ if (bus->remap_addr)
+ iounmap(bus->remap_addr);
+
pci_release_regions(skl->pci);
pci_disable_device(skl->pci);
snd_hdac_ext_bus_exit(ebus);
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_i915_exit(&ebus->bus);
return 0;
}
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci)
if (skl->tplg)
release_firmware(skl->tplg);
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_i915_exit(&ebus->bus);
-
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
- pci_dev_put(pci);
+
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(ebus);
+
skl_platform_unregister(&pci->dev);
skl_free_dsp(skl);
skl_machine_device_unregister(skl);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 801ae1a81dfd..c4464858bf01 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
int count = 0;
char *state = "not set";
+ /* card won't be set for the dummy component, as a spot fix
+ * we're checking for that case specifically here but in future
+ * we will ensure that the dummy component looks like others.
+ */
+ if (!cmpnt->card)
+ return 0;
+
list_for_each_entry(w, &cmpnt->card->widgets, list) {
if (w->dapm != dapm)
continue;