Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/atomic-long.h263
-rw-r--r--include/asm-generic/atomic.h11
-rw-r--r--include/asm-generic/atomic64.h4
-rw-r--r--include/asm-generic/barrier.h4
-rw-r--r--include/asm-generic/dma-mapping-common.h118
-rw-r--r--include/asm-generic/early_ioremap.h8
-rw-r--r--include/asm-generic/fixmap.h3
-rw-r--r--include/asm-generic/memory_model.h6
-rw-r--r--include/asm-generic/qrwlock.h78
-rw-r--r--include/asm-generic/qspinlock.h4
-rw-r--r--include/asm-generic/rtc.h29
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/crypto/pkcs7.h13
-rw-r--r--include/crypto/public_key.h18
-rw-r--r--include/drm/bridge/dw_hdmi.h7
-rw-r--r--include/drm/drmP.h57
-rw-r--r--include/drm/drm_atomic.h3
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_crtc.h83
-rw-r--r--include/drm/drm_crtc_helper.h8
-rw-r--r--include/drm/drm_dp_helper.h3
-rw-r--r--include/drm/drm_fb_helper.h212
-rw-r--r--include/drm/drm_modeset_lock.h1
-rw-r--r--include/drm/drm_plane_helper.h45
-rw-r--r--include/drm/i915_component.h11
-rw-r--r--include/drm/intel-gtt.h4
-rw-r--r--include/dt-bindings/dma/axi-dmac.h48
-rw-r--r--include/dt-bindings/dma/jz4780-dma.h49
-rw-r--r--include/dt-bindings/i2c/i2c.h18
-rw-r--r--include/dt-bindings/media/c8sectpfe.h12
-rw-r--r--include/dt-bindings/mfd/st-lpc.h1
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h51
-rw-r--r--include/keys/system_keyring.h7
-rw-r--r--include/kvm/arm_arch_timer.h7
-rw-r--r--include/kvm/arm_vgic.h45
-rw-r--r--include/linux/amba/serial.h14
-rw-r--r--include/linux/asn1_ber_bytecode.h16
-rw-r--r--include/linux/atomic.h361
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/average.h61
-rw-r--r--include/linux/backing-dev.h26
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/blk-cgroup.h340
-rw-r--r--include/linux/blk_types.h5
-rw-r--r--include/linux/blkdev.h10
-rw-r--r--include/linux/bpf.h12
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/messenger.h6
-rw-r--r--include/linux/ceph/msgr.h4
-rw-r--r--include/linux/cgroup_subsys.h2
-rw-r--r--include/linux/clockchips.h29
-rw-r--r--include/linux/compiler.h7
-rw-r--r--include/linux/cred.h8
-rw-r--r--include/linux/dax.h39
-rw-r--r--include/linux/debugfs.h20
-rw-r--r--include/linux/dmaengine.h75
-rw-r--r--include/linux/dmapool.h6
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/f2fs_fs.h16
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/filter.h17
-rw-r--r--include/linux/fs.h54
-rw-r--r--include/linux/fsl_ifc.h50
-rw-r--r--include/linux/fsnotify_backend.h59
-rw-r--r--include/linux/genalloc.h6
-rw-r--r--include/linux/gfp.h31
-rw-r--r--include/linux/gpio/consumer.h82
-rw-r--r--include/linux/gpio/driver.h37
-rw-r--r--include/linux/gpio/machine.h1
-rw-r--r--include/linux/huge_mm.h20
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/i2c.h19
-rw-r--r--include/linux/ieee80211.h2
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/input/touchscreen.h11
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/io-mapping.h2
-rw-r--r--include/linux/io.h33
-rw-r--r--include/linux/ipmi_smi.h7
-rw-r--r--include/linux/ipv6.h5
-rw-r--r--include/linux/irq.h81
-rw-r--r--include/linux/irqchip/arm-gic-v3.h12
-rw-r--r--include/linux/irqchip/arm-gic.h7
-rw-r--r--include/linux/irqchip/mips-gic.h14
-rw-r--r--include/linux/irqdesc.h41
-rw-r--r--include/linux/irqhandler.h2
-rw-r--r--include/linux/jbd.h1047
-rw-r--r--include/linux/jbd2.h44
-rw-r--r--include/linux/jbd_common.h46
-rw-r--r--include/linux/jump_label.h259
-rw-r--r--include/linux/kernfs.h4
-rw-r--r--include/linux/kexec.h17
-rw-r--r--include/linux/kmod.h2
-rw-r--r--include/linux/kthread.h2
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/libnvdimm.h4
-rw-r--r--include/linux/list.h5
-rw-r--r--include/linux/llist.h2
-rw-r--r--include/linux/lsm_audit.h7
-rw-r--r--include/linux/lsm_hooks.h6
-rw-r--r--include/linux/mailbox_controller.h7
-rw-r--r--include/linux/memblock.h4
-rw-r--r--include/linux/memcontrol.h392
-rw-r--r--include/linux/memory_hotplug.h5
-rw-r--r--include/linux/mfd/88pm80x.h162
-rw-r--r--include/linux/mfd/arizona/core.h3
-rw-r--r--include/linux/mfd/arizona/pdata.h14
-rw-r--r--include/linux/mfd/arizona/registers.h257
-rw-r--r--include/linux/mfd/axp20x.h67
-rw-r--r--include/linux/mfd/da9062/core.h50
-rw-r--r--include/linux/mfd/da9062/registers.h1108
-rw-r--r--include/linux/mfd/da9063/core.h1
-rw-r--r--include/linux/mfd/lpc_ich.h6
-rw-r--r--include/linux/mfd/mt6397/core.h1
-rw-r--r--include/linux/microchipphy.h73
-rw-r--r--include/linux/mlx4/cq.h3
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx4/driver.h1
-rw-r--r--include/linux/mlx4/qp.h3
-rw-r--r--include/linux/mlx5/device.h21
-rw-r--r--include/linux/mlx5/driver.h30
-rw-r--r--include/linux/mlx5/mlx5_ifc.h24
-rw-r--r--include/linux/mm.h102
-rw-r--r--include/linux/mm_types.h14
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/dw_mmc.h9
-rw-r--r--include/linux/mmc/host.h3
-rw-r--r--include/linux/mmu_notifier.h46
-rw-r--r--include/linux/mmzone.h31
-rw-r--r--include/linux/mpls_iptunnel.h6
-rw-r--r--include/linux/mtd/map.h2
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/netdevice.h176
-rw-r--r--include/linux/netfilter.h44
-rw-r--r--include/linux/netfilter/nf_conntrack_zones_common.h23
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h3
-rw-r--r--include/linux/netfilter/x_tables.h8
-rw-r--r--include/linux/netfilter_bridge.h12
-rw-r--r--include/linux/netfilter_ipv6.h18
-rw-r--r--include/linux/netlink.h13
-rw-r--r--include/linux/nfs4.h18
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_fs_sb.h5
-rw-r--r--include/linux/nfs_xdr.h8
-rw-r--r--include/linux/nmi.h21
-rw-r--r--include/linux/ntb.h9
-rw-r--r--include/linux/ntb_transport.h1
-rw-r--r--include/linux/of_gpio.h4
-rw-r--r--include/linux/oid_registry.h7
-rw-r--r--include/linux/oom.h38
-rw-r--r--include/linux/page-flags.h11
-rw-r--r--include/linux/page-isolation.h5
-rw-r--r--include/linux/page_ext.h4
-rw-r--r--include/linux/page_idle.h110
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/linux/percpu-rwsem.h20
-rw-r--r--include/linux/perf/arm_pmu.h154
-rw-r--r--include/linux/perf_event.h10
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/phy_fixed.h8
-rw-r--r--include/linux/platform_data/atmel_mxt_ts.h (renamed from include/linux/i2c/atmel_mxt_ts.h)12
-rw-r--r--include/linux/platform_data/gpio-em.h11
-rw-r--r--include/linux/platform_data/i2c-mux-reg.h44
-rw-r--r--include/linux/platform_data/itco_wdt.h19
-rw-r--r--include/linux/platform_data/lp855x.h2
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/platform_data/pixcir_i2c_ts.h (renamed from include/linux/input/pixcir_ts.h)1
-rw-r--r--include/linux/platform_data/st_nci.h29
-rw-r--r--include/linux/platform_data/zforce_ts.h3
-rw-r--r--include/linux/pm_opp.h6
-rw-r--r--include/linux/pmem.h115
-rw-r--r--include/linux/poison.h11
-rw-r--r--include/linux/printk.h14
-rw-r--r--include/linux/property.h4
-rw-r--r--include/linux/psci.h52
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/pwm.h99
-rw-r--r--include/linux/quotaops.h5
-rw-r--r--include/linux/regmap.h385
-rw-r--r--include/linux/reset.h14
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/sched.h23
-rw-r--r--include/linux/seccomp.h2
-rw-r--r--include/linux/seq_file.h58
-rw-r--r--include/linux/serio.h2
-rw-r--r--include/linux/shdma-base.h5
-rw-r--r--include/linux/skbuff.h153
-rw-r--r--include/linux/slab.h10
-rw-r--r--include/linux/smpboot.h11
-rw-r--r--include/linux/stmmac.h22
-rw-r--r--include/linux/string_helpers.h14
-rw-r--r--include/linux/sunrpc/addr.h27
-rw-r--r--include/linux/sunrpc/auth.h8
-rw-r--r--include/linux/sunrpc/cache.h9
-rw-r--r--include/linux/sunrpc/svc.h68
-rw-r--r--include/linux/sunrpc/svc_rdma.h92
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/swap.h19
-rw-r--r--include/linux/swapops.h37
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/thermal.h26
-rw-r--r--include/linux/tick.h9
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/userfaultfd_k.h85
-rw-r--r--include/linux/verify_pefile.h6
-rw-r--r--include/linux/wait.h5
-rw-r--r--include/linux/watchdog.h8
-rw-r--r--include/linux/zbud.h2
-rw-r--r--include/linux/zpool.h6
-rw-r--r--include/linux/zsmalloc.h6
-rw-r--r--include/media/media-devnode.h4
-rw-r--r--include/media/omap3isp.h158
-rw-r--r--include/media/rc-core.h6
-rw-r--r--include/media/rc-map.h38
-rw-r--r--include/media/tc358743.h131
-rw-r--r--include/media/v4l2-async.h8
-rw-r--r--include/media/v4l2-ctrls.h1018
-rw-r--r--include/media/v4l2-dv-timings.h141
-rw-r--r--include/media/v4l2-event.h47
-rw-r--r--include/media/v4l2-flash-led-class.h12
-rw-r--r--include/media/v4l2-mediabus.h4
-rw-r--r--include/media/v4l2-mem2mem.h20
-rw-r--r--include/media/v4l2-subdev.h376
-rw-r--r--include/media/videobuf2-core.h10
-rw-r--r--include/media/videobuf2-memops.h14
-rw-r--r--include/misc/cxl.h10
-rw-r--r--include/net/6lowpan.h23
-rw-r--r--include/net/act_api.h16
-rw-r--r--include/net/addrconf.h35
-rw-r--r--include/net/bluetooth/hci_core.h31
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/cfg80211.h3
-rw-r--r--include/net/cfg802154.h10
-rw-r--r--include/net/checksum.h8
-rw-r--r--include/net/cls_cgroup.h29
-rw-r--r--include/net/dsa.h33
-rw-r--r--include/net/dst.h29
-rw-r--r--include/net/dst_metadata.h108
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow.h29
-rw-r--r--include/net/flow_dissector.h67
-rw-r--r--include/net/geneve.h35
-rw-r--r--include/net/gre.h92
-rw-r--r--include/net/gro_cells.h18
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/inet_timewait_sock.h8
-rw-r--r--include/net/inetpeer.h118
-rw-r--r--include/net/ip.h31
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip_fib.h5
-rw-r--r--include/net/ip_tunnels.h145
-rw-r--r--include/net/ip_vs.h23
-rw-r--r--include/net/ipv6.h76
-rw-r--r--include/net/lwtunnel.h175
-rw-r--r--include/net/mac80211.h73
-rw-r--r--include/net/mac802154.h17
-rw-r--r--include/net/mpls_iptunnel.h29
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/br_netfilter.h2
-rw-r--r--include/net/netfilter/ipv4/nf_dup_ipv4.h7
-rw-r--r--include/net/netfilter/ipv6/nf_dup_ipv6.h7
-rw-r--r--include/net/netfilter/nf_conntrack.h11
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h11
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h4
-rw-r--r--include/net/netfilter/nf_conntrack_zones.h86
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netfilter/nft_dup.h9
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h1
-rw-r--r--include/net/nfc/nci_core.h3
-rw-r--r--include/net/nfc/nfc.h41
-rw-r--r--include/net/nl802154.h4
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/net/route.h7
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sch_generic.h32
-rw-r--r--include/net/sock.h51
-rw-r--r--include/net/switchdev.h10
-rw-r--r--include/net/tc_act/tc_bpf.h2
-rw-r--r--include/net/tc_act/tc_gact.h7
-rw-r--r--include/net/tc_act/tc_mirred.h2
-rw-r--r--include/net/tcp.h24
-rw-r--r--include/net/timewait_sock.h3
-rw-r--r--include/net/udp_tunnel.h7
-rw-r--r--include/net/vrf.h178
-rw-r--r--include/net/vxlan.h90
-rw-r--r--include/net/xfrm.h7
-rw-r--r--include/rdma/ib_cm.h25
-rw-r--r--include/rdma/ib_mad.h82
-rw-r--r--include/rdma/ib_pack.h2
-rw-r--r--include/rdma/ib_smi.h47
-rw-r--r--include/rdma/ib_verbs.h249
-rw-r--r--include/rdma/opa_port_info.h433
-rw-r--r--include/rdma/opa_smi.h47
-rw-r--r--include/rdma/rdma_netlink.h7
-rw-r--r--include/scsi/scsi_common.h5
-rw-r--r--include/scsi/scsi_device.h27
-rw-r--r--include/scsi/scsi_dh.h29
-rw-r--r--include/scsi/scsi_eh.h6
-rw-r--r--include/soc/tegra/mc.h8
-rw-r--r--include/sound/ac97_codec.h2
-rw-r--r--include/sound/hda_i915.h9
-rw-r--r--include/sound/hda_register.h4
-rw-r--r--include/sound/hdaudio.h19
-rw-r--r--include/sound/hdaudio_ext.h71
-rw-r--r--include/sound/rcar_snd.h14
-rw-r--r--include/sound/rt298.h20
-rw-r--r--include/sound/soc-dapm.h84
-rw-r--r--include/sound/soc-topology.h13
-rw-r--r--include/sound/soc.h29
-rw-r--r--include/target/iscsi/iscsi_target_core.h15
-rw-r--r--include/target/iscsi/iscsi_target_stat.h2
-rw-r--r--include/target/iscsi/iscsi_transport.h2
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/target/target_core_base.h27
-rw-r--r--include/target/target_core_fabric.h14
-rw-r--r--include/trace/events/asoc.h53
-rw-r--r--include/trace/events/ext3.h866
-rw-r--r--include/trace/events/f2fs.h12
-rw-r--r--include/trace/events/fib.h113
-rw-r--r--include/trace/events/jbd.h194
-rw-r--r--include/trace/events/kvm.h30
-rw-r--r--include/trace/events/sunrpc.h21
-rw-r--r--include/trace/events/task.h2
-rw-r--r--include/trace/events/thermal_power_allocator.h6
-rw-r--r--include/trace/events/tlb.h3
-rw-r--r--include/trace/events/v4l2.h257
-rw-r--r--include/trace/events/writeback.h180
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/drm_fourcc.h7
-rw-r--r--include/uapi/drm/i915_drm.h16
-rw-r--r--include/uapi/drm/vmwgfx_drm.h38
-rw-r--r--include/uapi/linux/Kbuild3
-rw-r--r--include/uapi/linux/audit.h8
-rw-r--r--include/uapi/linux/bpf.h29
-rw-r--r--include/uapi/linux/dlm_device.h2
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/elf-em.h3
-rw-r--r--include/uapi/linux/ethtool.h5
-rw-r--r--include/uapi/linux/fib_rules.h2
-rw-r--r--include/uapi/linux/if_bridge.h1
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h16
-rw-r--r--include/uapi/linux/if_packet.h3
-rw-r--r--include/uapi/linux/if_tunnel.h1
-rw-r--r--include/uapi/linux/ila.h15
-rw-r--r--include/uapi/linux/ip_vs.h5
-rw-r--r--include/uapi/linux/ipv6.h3
-rw-r--r--include/uapi/linux/kernel-page-flags.h1
-rw-r--r--include/uapi/linux/kvm.h5
-rw-r--r--include/uapi/linux/lwtunnel.h47
-rw-r--r--include/uapi/linux/membarrier.h53
-rw-r--r--include/uapi/linux/mpls.h2
-rw-r--r--include/uapi/linux/mpls_iptunnel.h28
-rw-r--r--include/uapi/linux/ndctl.h12
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h23
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/netfilter/xt_CT.h8
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h4
-rw-r--r--include/uapi/linux/netlink.h1
-rw-r--r--include/uapi/linux/nfs4.h2
-rw-r--r--include/uapi/linux/nfsacl.h1
-rw-r--r--include/uapi/linux/openvswitch.h60
-rw-r--r--include/uapi/linux/prctl.h7
-rw-r--r--include/uapi/linux/ptrace.h6
-rw-r--r--include/uapi/linux/rtnetlink.h13
-rw-r--r--include/uapi/linux/securebits.h11
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--include/uapi/linux/toshiba.h32
-rw-r--r--include/uapi/linux/userfaultfd.h169
-rw-r--r--include/uapi/linux/v4l2-controls.h4
-rw-r--r--include/uapi/linux/vsp1.h2
-rw-r--r--include/uapi/misc/cxl.h4
-rw-r--r--include/uapi/rdma/Kbuild1
-rw-r--r--include/uapi/rdma/hfi/Kbuild2
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h427
-rw-r--r--include/uapi/rdma/rdma_netlink.h82
-rw-r--r--include/uapi/xen/privcmd.h4
-rw-r--r--include/video/samsung_fimd.h1
-rw-r--r--include/video/vga.h2
-rw-r--r--include/xen/events.h1
-rw-r--r--include/xen/interface/io/netif.h8
-rw-r--r--include/xen/interface/platform.h18
-rw-r--r--include/xen/interface/xen.h37
-rw-r--r--include/xen/interface/xenpmu.h94
-rw-r--r--include/xen/page.h8
-rw-r--r--include/xen/xen-ops.h10
400 files changed, 11835 insertions(+), 5611 deletions(-)
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf0206b8..a94cbebbc33d 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -23,236 +23,159 @@
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x) atomic64 ## x
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else /* BITS_PER_LONG == 64 */
+#else
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic_t *v = (atomic_t *)l;
-
- atomic_set(v, i);
-}
+#define ATOMIC_LONG_PFX(x) atomic ## x
+
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo) \
+static inline long atomic_long_read##mo(atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
+}
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
+
+#undef ATOMIC_LONG_READ_OP
+
+#define ATOMIC_LONG_SET_OP(mo) \
+static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_set##mo)(v, i); \
+}
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+ (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
static inline void atomic_long_inc(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_inc(v);
+ ATOMIC_LONG_PFX(_inc)(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_dec(v);
+ ATOMIC_LONG_PFX(_dec)(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_add(i, v);
+ ATOMIC_LONG_PFX(_add)(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_sub(i, v);
+ ATOMIC_LONG_PFX(_sub)(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_sub_and_test(i, v);
+ return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_dec_and_test(v);
+ return ATOMIC_LONG_PFX(_dec_and_test)(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_inc_and_test(v);
+ return ATOMIC_LONG_PFX(_inc_and_test)(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_sub_return(i, v);
+ return ATOMIC_LONG_PFX(_add_negative)(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_inc_return(v);
+ return (long)ATOMIC_LONG_PFX(_inc_return)(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_dec_return(v);
+ return (long)ATOMIC_LONG_PFX(_dec_return)(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_add_unless(v, a, u);
+ return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
}
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic_xchg((atomic_t *)(v), (new)))
-
-#endif /* BITS_PER_LONG == 64 */
+#define atomic_long_inc_not_zero(l) \
+ ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
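For illustration, the ATOMIC_LONG_PFX() token-pasting templates above generate both the atomic_t- and atomic64_t-backed variants from a single definition. On a 64-bit configuration (BITS_PER_LONG == 64), the ATOMIC_LONG_READ_OP(_acquire) instantiation expands roughly to:

    /* Expansion sketch of ATOMIC_LONG_READ_OP(_acquire), assuming BITS_PER_LONG == 64 */
    static inline long atomic_long_read_acquire(atomic_long_t *l)
    {
            atomic64_t *v = (atomic64_t *)l;

            return (long)atomic64_read_acquire(v);
    }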
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1973ad2b13f4..d4d7e337fdcb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)
#endif
-#ifndef atomic_clear_mask
+#ifndef atomic_and
ATOMIC_OP(and, &)
-#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif
-#ifndef atomic_set_mask
-#define CONFIG_ARCH_HAS_ATOMIC_OR
+#ifndef atomic_or
ATOMIC_OP(or, |)
-#define atomic_set_mask(i, v) atomic_or((i), (v))
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
#endif
#undef ATOMIC_OP_RETURN
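With atomic_clear_mask()/atomic_set_mask() dropped from this header, callers are expected to use the bitwise atomics directly. A hypothetical caller (a sketch only; IRQ_PENDING and dev_flags are invented names) would do:

    /* Sketch: replacing the old mask helpers with the bitwise atomics. */
    atomic_and(~IRQ_PENDING, &dev_flags);   /* was atomic_clear_mask(IRQ_PENDING, &dev_flags) */
    atomic_or(IRQ_PENDING, &dev_flags);     /* was atomic_set_mask(IRQ_PENDING, &dev_flags) */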
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 30ad9c86cebb..d48e78ccad3d 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 55e3abc2d027..b42afada1280 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -108,12 +108,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
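The release/acquire helpers now go through WRITE_ONCE()/READ_ONCE() rather than ACCESS_ONCE(). A typical producer/consumer pairing (a generic sketch; data, ready and consume() are hypothetical) looks like:

    /* Producer: publish the data, then the flag, with release semantics. */
    data = 42;
    smp_store_release(&ready, 1);

    /* Consumer: an acquire load that observes ready == 1 guarantees data is visible. */
    if (smp_load_acquire(&ready))
            consume(data);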
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 940d5ec122c9..b1bc954eccf3 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -6,6 +6,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
+#include <asm-generic/dma-coherent.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
@@ -237,4 +238,121 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag) (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ if (!arch_dma_alloc_attrs(&dev, &flag))
+ return NULL;
+ if (!ops->alloc)
+ return NULL;
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ WARN_ON(irqs_disabled());
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ if (!ops->free)
+ return;
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (get_dma_ops(dev)->mapping_error)
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+ return dma_addr == DMA_ERROR_CODE;
+#else
+ return 0;
+#endif
+}
+
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops)
+ return 0;
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+#endif
+
#endif
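With the coherent allocation helpers now living in the common header, a driver's allocation path (a generic usage sketch, not part of this patch; dev is a hypothetical struct device *) is:

    /* Sketch: allocate, use and free a coherent DMA buffer. */
    dma_addr_t dma_handle;
    void *buf = dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);

    if (!buf)
            return -ENOMEM;
    /* ... program dma_handle into the device and perform I/O ... */
    dma_free_coherent(dev, 4096, buf, dma_handle);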
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index a5de55c04fb2..734ad4db388c 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -11,6 +11,8 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+ unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
@@ -33,6 +35,12 @@ extern void early_ioremap_setup(void);
*/
extern void early_ioremap_reset(void);
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index f23174fb9ec4..1cbb8338edf3 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -46,6 +46,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#ifndef FIXMAP_PAGE_NORMAL
#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
#ifndef FIXMAP_PAGE_NOCACHE
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
#endif
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 14909b0b9cae..f20f407ce45d 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -69,6 +69,12 @@
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
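The new helpers are plain shifts by PAGE_SHIFT. As a worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

    __phys_to_pfn(0x80001000)   /* == 0x80001 */
    __pfn_to_phys(0x80001)      /* == 0x80001000 */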
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54bf983..54a8e65e18b6 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
/*
* External function declarations
*/
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock- would read_trylock() succeed?
* @lock: Pointer to queue rwlock structure
*/
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
{
return !(atomic_read(&lock->cnts) & _QW_WMASK);
}
/**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock- would write_trylock() succeed?
* @lock: Pointer to queue rwlock structure
*/
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
{
return !atomic_read(&lock->cnts);
}
/**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
{
u32 cnts;
cnts = atomic_read(&lock->cnts);
if (likely(!(cnts & _QW_WMASK))) {
- cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+ cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
return 1;
atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@ static inline int queue_read_trylock(struct qrwlock *lock)
}
/**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
{
u32 cnts;
@@ -89,78 +89,70 @@ static inline int queue_write_trylock(struct qrwlock *lock)
if (unlikely(cnts))
return 0;
- return likely(atomic_cmpxchg(&lock->cnts,
- cnts, cnts | _QW_LOCKED) == cnts);
+ return likely(atomic_cmpxchg_acquire(&lock->cnts,
+ cnts, cnts | _QW_LOCKED) == cnts);
}
/**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
* @lock: Pointer to queue rwlock structure
*/
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
{
u32 cnts;
- cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+ cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
return;
/* The slowpath will decrement the reader count, if necessary. */
- queue_read_lock_slowpath(lock);
+ queued_read_lock_slowpath(lock, cnts);
}
/**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
{
/* Optimize for the unfair lock case where the fair flag is 0. */
- if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+ if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
return;
- queue_write_lock_slowpath(lock);
+ queued_write_lock_slowpath(lock);
}
/**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
{
/*
* Atomically decrement the reader count
*/
- smp_mb__before_atomic();
- atomic_sub(_QR_BIAS, &lock->cnts);
+ (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
-#ifndef queue_write_unlock
/**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
{
- /*
- * If the writer field is atomic, it can be cleared directly.
- * Otherwise, an atomic subtraction will be used to clear it.
- */
- smp_mb__before_atomic();
- atomic_sub(_QW_LOCKED, &lock->cnts);
+ smp_store_release((u8 *)&lock->cnts, 0);
}
-#endif
/*
* Remapping rwlock architecture specific functions to the corresponding
* queue rwlock functions.
*/
-#define arch_read_can_lock(l) queue_read_can_lock(l)
-#define arch_write_can_lock(l) queue_write_can_lock(l)
-#define arch_read_lock(l) queue_read_lock(l)
-#define arch_write_lock(l) queue_write_lock(l)
-#define arch_read_trylock(l) queue_read_trylock(l)
-#define arch_write_trylock(l) queue_write_trylock(l)
-#define arch_read_unlock(l) queue_read_unlock(l)
-#define arch_write_unlock(l) queue_write_unlock(l)
+#define arch_read_can_lock(l) queued_read_can_lock(l)
+#define arch_write_can_lock(l) queued_write_can_lock(l)
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
#endif /* __ASM_GENERIC_QRWLOCK_H */
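Beyond the queue_* to queued_* rename, the fast paths now rely on _acquire/_release atomics instead of explicit smp_mb__before_atomic() barriers. The arch wrappers are unchanged for callers; a reader-side critical section (sketch, lock is a hypothetical qrwlock) still reads:

    /* Sketch: reader side through the arch wrappers. */
    arch_read_lock(&lock);      /* -> queued_read_lock(): acquire-ordered _QR_BIAS add */
    /* ... read-side critical section ... */
    arch_read_unlock(&lock);    /* -> queued_read_unlock(): release-ordered decrement */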
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5bf1..e2aadbc7151f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
cpu_relax();
}
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
return false;
}
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index fa86f240c874..4e3b6558331e 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -16,6 +16,9 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
@@ -46,6 +49,7 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
+ unsigned char century = 0;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
@@ -79,6 +83,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
+#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
@@ -90,12 +99,16 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
+ century = bcd2bin(century);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
+ if (century)
+ time->tm_year += (century - 19) * 100;
+
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
@@ -122,6 +135,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
+ unsigned char century = 0;
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
@@ -150,6 +164,15 @@ static inline int __set_rtc_time(struct rtc_time *time)
yrs = 73;
}
#endif
+
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century) {
+ century = (yrs + 1900) / 100;
+ yrs %= 100;
+ }
+#endif
+
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
@@ -169,6 +192,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
+ century = bin2bcd(century);
}
save_control = CMOS_READ(RTC_CONTROL);
@@ -185,6 +209,11 @@ static inline int __set_rtc_time(struct rtc_time *time)
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ CMOS_WRITE(century, acpi_gbl_FADT.century);
+#endif
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
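With the ACPI FADT century register wired in, a worked decode for BCD register values century = 0x20 and year = 0x15 follows the read path above:

    /* Worked example (BCD mode): century register = 0x20, year register = 0x15 */
    century = bcd2bin(0x20);                /* 20 */
    time->tm_year = bcd2bin(0x15);          /* 15 */
    time->tm_year += (century - 19) * 100;  /* 15 + 100 = 115, i.e. 1900 + 115 = 2015 */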
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8bd374d3cf21..1781e54ea6d3 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -412,12 +412,10 @@
* during second ld run in second ld pass when generating System.map */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot) \
- *(.text .text.fixup) \
+ *(.text.hot .text .text.fixup .text.unlikely) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
- *(.text.unlikely)
/* sched.text is aling to function alignment to secure we have same
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 691c79172a26..441aff9b5aa7 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -9,6 +9,11 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#ifndef _CRYPTO_PKCS7_H
+#define _CRYPTO_PKCS7_H
+
+#include <crypto/public_key.h>
+
struct key;
struct pkcs7_message;
@@ -33,4 +38,10 @@ extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
/*
* pkcs7_verify.c
*/
-extern int pkcs7_verify(struct pkcs7_message *pkcs7);
+extern int pkcs7_verify(struct pkcs7_message *pkcs7,
+ enum key_being_used_for usage);
+
+extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+ const void *data, size_t datalen);
+
+#endif /* _CRYPTO_PKCS7_H */
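pkcs7_verify() now takes the intended use of the key as a second argument. A caller verifying a module signature (a sketch; pkcs7 and ret are assumed to exist in the caller) would pass the matching enum value:

    /* Sketch: verify a parsed PKCS#7 message for the module-signing use case. */
    ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
    if (ret < 0)
            return ret;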
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 54add2069901..067c242b1e15 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -33,12 +33,27 @@ extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
enum pkey_id_type {
PKEY_ID_PGP, /* OpenPGP generated key ID */
PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
+ PKEY_ID_PKCS7, /* Signature in PKCS#7 message */
PKEY_ID_TYPE__LAST
};
extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
/*
+ * The use to which an asymmetric key is being put.
+ */
+enum key_being_used_for {
+ VERIFYING_MODULE_SIGNATURE,
+ VERIFYING_FIRMWARE_SIGNATURE,
+ VERIFYING_KEXEC_PE_SIGNATURE,
+ VERIFYING_KEY_SIGNATURE,
+ VERIFYING_KEY_SELF_SIGNATURE,
+ VERIFYING_UNSPECIFIED_SIGNATURE,
+ NR__KEY_BEING_USED_FOR
+};
+extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
+
+/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
* Note that this may include private part of the key as well as the public
@@ -101,7 +116,8 @@ extern int verify_signature(const struct key *key,
struct asymmetric_key_id;
extern struct key *x509_request_asymmetric_key(struct key *keyring,
- const struct asymmetric_key_id *kid,
+ const struct asymmetric_key_id *id,
+ const struct asymmetric_key_id *skid,
bool partial);
#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index de13bfc35634..bae79f3c4d28 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -12,6 +12,8 @@
#include <drm/drmP.h>
+struct dw_hdmi;
+
enum {
DW_HDMI_RES_8,
DW_HDMI_RES_10,
@@ -59,4 +61,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
void *data, struct drm_encoder *encoder,
struct resource *iores, int irq,
const struct dw_hdmi_plat_data *plat_data);
+
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5aa519711e0b..8b5ce7c5d9bb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -137,17 +137,18 @@ void drm_err(const char *format, ...);
/*@{*/
/* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP 0x1
-#define DRIVER_PCI_DMA 0x8
-#define DRIVER_SG 0x10
-#define DRIVER_HAVE_DMA 0x20
-#define DRIVER_HAVE_IRQ 0x40
-#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_GEM 0x1000
-#define DRIVER_MODESET 0x2000
-#define DRIVER_PRIME 0x4000
-#define DRIVER_RENDER 0x8000
-#define DRIVER_ATOMIC 0x10000
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
/***********************************************************************/
/** \name Macros to make printk easier */
@@ -675,13 +676,12 @@ struct drm_minor {
/* currently active master for this node. Protected by master_mutex */
struct drm_master *master;
- struct drm_mode_group mode_group;
};
struct drm_pending_vblank_event {
struct drm_pending_event base;
- int pipe;
+ unsigned int pipe;
struct drm_event_vblank event;
};
@@ -700,7 +700,7 @@ struct drm_vblank_crtc {
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
- int crtc; /* crtc index */
+ unsigned int pipe; /* crtc index */
bool enabled; /* so we don't call enable more than
once per disable */
};
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
/*@{*/
/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
-extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
- struct drm_pending_vblank_event *e);
+extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, int crtc);
-extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
-extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- int crtc, int *max_error,
+ unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
}
/* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
/* Stub support (drm_stub.h) */
extern struct drm_master *drm_master_get(struct drm_master *master);
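The vblank API now consistently takes an unsigned int pipe index instead of a signed crtc number; the call pattern for drivers is otherwise unchanged (sketch, dev and the pipe value are hypothetical):

    /* Sketch: pipe indices are now unsigned int throughout the vblank API. */
    unsigned int pipe = 0;              /* hypothetical pipe index */

    drm_wait_one_vblank(dev, pipe);     /* was drm_wait_one_vblank(dev, crtc) with int crtc */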
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a3a913320eb..e67aeac2aee0 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -166,7 +166,8 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{
- return state->mode_changed || state->active_changed;
+ return state->mode_changed || state->active_changed ||
+ state->connectors_changed;
}
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index cc1fee8a12d0..11266d147a29 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -87,8 +87,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode);
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode);
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3b4d8a4a23fb..faaeff7db684 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -255,12 +255,13 @@ struct drm_atomic_state;
* @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
* @active: whether the CRTC is actively displaying (used for DPMS)
- * @mode_changed: for use by helpers and drivers when computing state updates
- * @active_changed: for use by helpers and drivers when computing state updates
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@ struct drm_crtc_state {
bool planes_changed : 1;
bool mode_changed : 1;
bool active_changed : 1;
+ bool connectors_changed : 1;
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@ struct drm_connector_state {
* etc.
*/
struct drm_connector_funcs {
- void (*dpms)(struct drm_connector *connector, int mode);
+ int (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
void (*reset)(struct drm_connector *connector);
@@ -861,7 +863,7 @@ struct drm_plane {
uint32_t possible_crtcs;
uint32_t *format_types;
- uint32_t format_count;
+ unsigned int format_count;
bool format_default;
struct drm_crtc *crtc;
@@ -1016,29 +1018,6 @@ struct drm_mode_config_funcs {
};
/**
- * struct drm_mode_group - group of mode setting resources for potential sub-grouping
- * @num_crtcs: CRTC count
- * @num_encoders: encoder count
- * @num_connectors: connector count
- * @num_bridges: bridge count
- * @id_list: list of KMS object IDs in this group
- *
- * Currently this simply tracks the global mode setting state. But in the
- * future it could allow groups of objects to be set aside into independent
- * control groups for use by different user level processes (e.g. two X servers
- * running simultaneously on different heads, each with their own mode
- * configuration and freedom of mode setting).
- */
-struct drm_mode_group {
- uint32_t num_crtcs;
- uint32_t num_encoders;
- uint32_t num_connectors;
-
- /* list of object IDs for this group */
- uint32_t *id_list;
-};
-
-/**
* struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
* @connection_mutex: ww mutex protecting connector state and routing
@@ -1289,13 +1268,13 @@ extern int drm_universal_plane_init(struct drm_device *dev,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
- uint32_t format_count,
+ unsigned int format_count,
enum drm_plane_type type);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
+ const uint32_t *formats, unsigned int format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
@@ -1322,9 +1301,6 @@ extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern void drm_property_destroy_user_blobs(struct drm_device *dev,
struct drm_file *file_priv);
-extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
-extern void drm_mode_group_destroy(struct drm_mode_group *group);
-extern void drm_reinit_primary_mode_group(struct drm_device *dev);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
@@ -1577,8 +1553,45 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
}
/* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, planelist) \
- list_for_each_entry(plane, planelist, head) \
+#define drm_for_each_legacy_plane(plane, dev) \
+ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+#define drm_for_each_plane(plane, dev) \
+ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+#define drm_for_each_crtc(crtc, dev) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+static inline void
+assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
+{
+ /*
+ * The connector hotadd/remove code currently grabs both locks when
+ * updating lists. Hence readers need only hold either of them to be
+ * safe and the check amounts to
+ *
+ * WARN_ON(not_holding(A) && not_holding(B)).
+ */
+ WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
+ !drm_modeset_is_locked(&mode_config->connection_mutex));
+}
+
+#define drm_for_each_connector(connector, dev) \
+ for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
+ connector = list_first_entry(&(dev)->mode_config.connector_list, \
+ struct drm_connector, head); \
+ &connector->head != (&(dev)->mode_config.connector_list); \
+ connector = list_next_entry(connector, head))
+
+#define drm_for_each_encoder(encoder, dev) \
+ list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#define drm_for_each_fb(fb, dev) \
+ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
+ fb = list_first_entry(&(dev)->mode_config.fb_list, \
+ struct drm_framebuffer, head); \
+ &fb->head != (&(dev)->mode_config.fb_list); \
+ fb = list_next_entry(fb, head))
+
#endif /* __DRM_CRTC_H__ */
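drm_for_each_legacy_plane() now takes the device instead of a list head, and the new connector iterator asserts that one of the relevant locks is held. A driver loop (sketch; setup_connector() is a hypothetical per-connector helper) becomes:

    /* Sketch: iterating connectors with the new locked iterator. */
    mutex_lock(&dev->mode_config.mutex);
    drm_for_each_connector(connector, dev) {
            /* assert_drm_connector_list_read_locked() warns if neither lock is held */
            setup_connector(connector);
    }
    mutex_unlock(&dev->mode_config.mutex);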
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 918aa68b5199..2a747a91fded 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -108,8 +108,10 @@ struct drm_crtc_helper_funcs {
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
- void (*atomic_begin)(struct drm_crtc *crtc);
- void (*atomic_flush)(struct drm_crtc *crtc);
+ void (*atomic_begin)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
+ void (*atomic_flush)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
};
/**
@@ -190,7 +192,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
-extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
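
With the extra old_crtc_state argument, drivers using the atomic helpers have to update their hook prototypes accordingly. A hypothetical driver's CRTC helper table would now look roughly like this (all "foo_" names are made up):

    #include <drm/drm_crtc_helper.h>

    static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
                                      struct drm_crtc_state *old_crtc_state)
    {
            /* e.g. arm a vblank event, prepare shadow registers */
    }

    static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
                                      struct drm_crtc_state *old_crtc_state)
    {
            /* e.g. commit the registers prepared in atomic_begin() */
    }

    static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
            .atomic_begin = foo_crtc_atomic_begin,
            .atomic_flush = foo_crtc_atomic_flush,
    };
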
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2e86f642fc33..499e9f625aef 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -420,7 +420,7 @@
#define DP_TEST_SINK_MISC 0x246
# define DP_TEST_CRC_SUPPORTED (1 << 5)
-# define DP_TEST_COUNT_MASK 0x7
+# define DP_TEST_COUNT_MASK 0xf
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
@@ -578,6 +578,7 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
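
DP_BRANCH_OUI_HEADER_SIZE covers the branch device OUI plus identification bytes starting at DPCD 0x500. A sketch of reading it through the AUX helpers, assuming DP_BRANCH_OUI (defined elsewhere in this header) and drm_dp_dpcd_read(); the function name is illustrative:

    #include <drm/drmP.h>
    #include <drm/drm_dp_helper.h>

    static void example_log_branch_oui(struct drm_dp_aux *aux)
    {
            u8 buf[DP_BRANCH_OUI_HEADER_SIZE];

            /* Dump the branch device OUI if the read returns the full header. */
            if (drm_dp_dpcd_read(aux, DP_BRANCH_OUI, buf, sizeof(buf)) == sizeof(buf))
                    DRM_DEBUG_KMS("branch OUI %02x%02x%02x\n",
                                  buf[0], buf[1], buf[2]);
    }
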
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def593..dbab4622b58f 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@ struct drm_fb_helper {
bool delayed_hotplug;
};
+#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
+
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect);
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area);
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+ const struct fb_image *image);
+
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect);
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area);
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+ const struct fb_image *image);
+
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
+#else
+static inline void drm_fb_helper_prepare(struct drm_device *dev,
+ struct drm_fb_helper *helper,
+ const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn)
+{
+ return 0;
+}
+
+static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
+{
+}
+
+static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_set_par(struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static inline bool
+drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ return true;
+}
+
+static inline struct fb_info *
+drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+ return NULL;
+}
+
+static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_fill_var(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
+{
+}
+
+static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
+{
+}
+
+static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
+ int state)
+{
+}
+
+static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
+ int bpp_sel)
+{
+ return 0;
+}
+
+static inline int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+ return 0;
+}
+
+static inline struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+ int width, int height)
+{
+ return NULL;
+}
+
+static inline struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ return NULL;
+}
+
+static inline int
+drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static inline int
+drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+#endif
#endif
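
The point of the new #else branch is that drivers can call the fbdev helpers unconditionally; when CONFIG_DRM_FBDEV_EMULATION is disabled the calls resolve to the no-op stubs above. A driver-side sketch with illustrative names:

    #include <drm/drm_fb_helper.h>

    /*
     * No #ifdef needed in the driver: without CONFIG_DRM_FBDEV_EMULATION
     * these calls compile to empty static inlines.
     */
    static void example_fbdev_fini(struct drm_fb_helper *helper)
    {
            drm_fb_helper_unregister_fbi(helper);
            drm_fb_helper_release_fbi(helper);
            drm_fb_helper_fini(helper);
    }
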
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff565ba..5dd18bfdf601 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@ struct drm_crtc;
struct drm_plane;
void drm_modeset_lock_all(struct drm_device *dev);
-int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
struct drm_plane *plane);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e16283afb9..dda401bf910e 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
* planes.
*/
-extern int drm_crtc_init(struct drm_device *dev,
- struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
/**
* drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
plane->helper_private = funcs;
}
-extern int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- const struct drm_rect *clip,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible);
-extern int drm_primary_helper_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-extern int drm_primary_helper_disable(struct drm_plane *plane);
-extern void drm_primary_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible);
+int drm_primary_helper_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_primary_helper_disable(struct drm_plane *plane);
+void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c9a8b64aa33b..b2d56dd483d9 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,6 +34,17 @@ struct i915_audio_component {
void (*codec_wake_override)(struct device *, bool enable);
int (*get_cdclk_freq)(struct device *);
} *ops;
+
+ const struct i915_audio_component_audio_ops {
+ void *audio_ptr;
+ /**
+ * Call from i915 driver, notifying the HDA driver that
+ * pin sense and/or ELD information has changed.
+ * @audio_ptr: HDA driver object
+ * @port: Which port has changed (PORTA / PORTB / PORTC etc)
+ */
+ void (*pin_eld_notify)(void *audio_ptr, int port);
+ } *audio_ops;
};
#endif /* _I915_COMPONENT_H_ */
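
The audio_ops table is filled in by the HDA side so that i915 can notify it on hotplug. A rough sketch of that consumer side; everything except the structure and callback names is hypothetical:

    #include <drm/i915_component.h>

    static void example_pin_eld_notify(void *audio_ptr, int port)
    {
            /* re-read pin sense / ELD for the codec pin behind @port */
    }

    static const struct i915_audio_component_audio_ops example_audio_ops = {
            .audio_ptr      = NULL, /* would point at the HDA bus/codec object */
            .pin_eld_notify = example_pin_eld_notify,
    };
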
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b08bdade6002..9e9bddaa58a5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,8 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
- phys_addr_t *mappable_base, unsigned long *mappable_end);
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, u64 *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h
new file mode 100644
index 000000000000..ad9e6ecb9c2f
--- /dev/null
+++ b/include/dt-bindings/dma/axi-dmac.h
@@ -0,0 +1,48 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__
+#define __DT_BINDINGS_DMA_AXI_DMAC_H__
+
+#define AXI_DMAC_BUS_TYPE_AXI_MM 0
+#define AXI_DMAC_BUS_TYPE_AXI_STREAM 1
+#define AXI_DMAC_BUS_TYPE_FIFO 2
+
+#endif
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
deleted file mode 100644
index df017fdfb44e..000000000000
--- a/include/dt-bindings/dma/jz4780-dma.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
-#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
-
-/*
- * Request type numbers for the JZ4780 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define JZ4780_DMA_I2S1_TX 0x4
-#define JZ4780_DMA_I2S1_RX 0x5
-#define JZ4780_DMA_I2S0_TX 0x6
-#define JZ4780_DMA_I2S0_RX 0x7
-#define JZ4780_DMA_AUTO 0x8
-#define JZ4780_DMA_SADC_RX 0x9
-#define JZ4780_DMA_UART4_TX 0xc
-#define JZ4780_DMA_UART4_RX 0xd
-#define JZ4780_DMA_UART3_TX 0xe
-#define JZ4780_DMA_UART3_RX 0xf
-#define JZ4780_DMA_UART2_TX 0x10
-#define JZ4780_DMA_UART2_RX 0x11
-#define JZ4780_DMA_UART1_TX 0x12
-#define JZ4780_DMA_UART1_RX 0x13
-#define JZ4780_DMA_UART0_TX 0x14
-#define JZ4780_DMA_UART0_RX 0x15
-#define JZ4780_DMA_SSI0_TX 0x16
-#define JZ4780_DMA_SSI0_RX 0x17
-#define JZ4780_DMA_SSI1_TX 0x18
-#define JZ4780_DMA_SSI1_RX 0x19
-#define JZ4780_DMA_MSC0_TX 0x1a
-#define JZ4780_DMA_MSC0_RX 0x1b
-#define JZ4780_DMA_MSC1_TX 0x1c
-#define JZ4780_DMA_MSC1_RX 0x1d
-#define JZ4780_DMA_MSC2_TX 0x1e
-#define JZ4780_DMA_MSC2_RX 0x1f
-#define JZ4780_DMA_PCM0_TX 0x20
-#define JZ4780_DMA_PCM0_RX 0x21
-#define JZ4780_DMA_SMB0_TX 0x24
-#define JZ4780_DMA_SMB0_RX 0x25
-#define JZ4780_DMA_SMB1_TX 0x26
-#define JZ4780_DMA_SMB1_RX 0x27
-#define JZ4780_DMA_SMB2_TX 0x28
-#define JZ4780_DMA_SMB2_RX 0x29
-#define JZ4780_DMA_SMB3_TX 0x2a
-#define JZ4780_DMA_SMB3_RX 0x2b
-#define JZ4780_DMA_SMB4_TX 0x2c
-#define JZ4780_DMA_SMB4_RX 0x2d
-#define JZ4780_DMA_DES_TX 0x2e
-#define JZ4780_DMA_DES_RX 0x2f
-
-#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h
new file mode 100644
index 000000000000..1d5da81d90f1
--- /dev/null
+++ b/include/dt-bindings/i2c/i2c.h
@@ -0,0 +1,18 @@
+/*
+ * This header provides constants for I2C bindings
+ *
+ * Copyright (C) 2015 by Sang Engineering
+ * Copyright (C) 2015 by Renesas Electronics Corporation
+ *
+ * Wolfram Sang <wsa@sang-engineering.com>
+ *
+ * GPLv2 only
+ */
+
+#ifndef _DT_BINDINGS_I2C_I2C_H
+#define _DT_BINDINGS_I2C_I2C_H
+
+#define I2C_TEN_BIT_ADDRESS (1 << 31)
+#define I2C_OWN_SLAVE_ADDRESS (1 << 30)
+
+#endif
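
These flags are OR'd into the upper bits of a slave's "reg" value in the device tree; the I2C core is expected to mask them off and translate them into client flags, roughly along these lines (a sketch, not the actual core code):

    #include <dt-bindings/i2c/i2c.h>
    #include <linux/i2c.h>

    static void example_decode_reg(u32 reg, unsigned short *addr,
                                   unsigned short *flags)
    {
            if (reg & I2C_TEN_BIT_ADDRESS) {
                    reg &= ~I2C_TEN_BIT_ADDRESS;
                    *flags |= I2C_CLIENT_TEN;       /* from <linux/i2c.h> */
            }
            *addr = reg;
    }
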
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
new file mode 100644
index 000000000000..a0b5c7be683c
--- /dev/null
+++ b/include/dt-bindings/media/c8sectpfe.h
@@ -0,0 +1,12 @@
+#ifndef __DT_C8SECTPFE_H
+#define __DT_C8SECTPFE_H
+
+#define STV0367_TDA18212_NIMA_1 0
+#define STV0367_TDA18212_NIMA_2 1
+#define STV0367_TDA18212_NIMB_1 2
+#define STV0367_TDA18212_NIMB_2 3
+
+#define STV0903_6110_LNB24_NIMA 4
+#define STV0903_6110_LNB24_NIMB 5
+
+#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
index e3e6c75d8822..d05894afa7e7 100644
--- a/include/dt-bindings/mfd/st-lpc.h
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -11,5 +11,6 @@
#define ST_LPC_MODE_RTC 0
#define ST_LPC_MODE_WDT 1
+#define ST_LPC_MODE_CLKSRC 2
#endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index c10205491f8d..a15c1704d0ec 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -7,6 +7,47 @@
#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
/* power-source */
+
+/* Digital Input/Output: level [PM8058] */
+#define PM8058_MPP_VPH 0
+#define PM8058_MPP_S3 1
+#define PM8058_MPP_L2 2
+#define PM8058_MPP_L3 3
+
+/* Digital Input/Output: level [PM8901] */
+#define PM8901_MPP_MSMIO 0
+#define PM8901_MPP_DIG 1
+#define PM8901_MPP_L5 2
+#define PM8901_MPP_S4 3
+#define PM8901_MPP_VPH 4
+
+/* Digital Input/Output: level [PM8921] */
+#define PM8921_MPP_S4 1
+#define PM8921_MPP_L15 3
+#define PM8921_MPP_L17 4
+#define PM8921_MPP_VPH 7
+
+/* Digital Input/Output: level [PM8821] */
+#define PM8821_MPP_1P8 0
+#define PM8821_MPP_VPH 7
+
+/* Digital Input/Output: level [PM8018] */
+#define PM8018_MPP_L4 0
+#define PM8018_MPP_L14 1
+#define PM8018_MPP_S3 2
+#define PM8018_MPP_L6 3
+#define PM8018_MPP_L2 4
+#define PM8018_MPP_L5 5
+#define PM8018_MPP_VPH 7
+
+/* Digital Input/Output: level [PM8038] */
+#define PM8038_MPP_L20 0
+#define PM8038_MPP_L11 1
+#define PM8038_MPP_L5 2
+#define PM8038_MPP_L15 3
+#define PM8038_MPP_L17 4
+#define PM8038_MPP_VPH 7
+
#define PM8841_MPP_VPH 0
#define PM8841_MPP_S3 2
@@ -37,6 +78,16 @@
#define PMIC_MPP_AMUX_ROUTE_ABUS3 6
#define PMIC_MPP_AMUX_ROUTE_ABUS4 7
+/* Analog Output: level */
+#define PMIC_MPP_AOUT_LVL_1V25 0
+#define PMIC_MPP_AOUT_LVL_1V25_2 1
+#define PMIC_MPP_AOUT_LVL_0V625 2
+#define PMIC_MPP_AOUT_LVL_0V3125 3
+#define PMIC_MPP_AOUT_LVL_MPP 4
+#define PMIC_MPP_AOUT_LVL_ABUS1 5
+#define PMIC_MPP_AOUT_LVL_ABUS2 6
+#define PMIC_MPP_AOUT_LVL_ABUS3 7
+
/* To be used with "function" */
#define PMIC_MPP_FUNC_NORMAL "normal"
#define PMIC_MPP_FUNC_PAIRED "paired"
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 72665eb80692..b20cd885c1fd 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -15,6 +15,7 @@
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
#include <linux/key.h>
+#include <crypto/public_key.h>
extern struct key *system_trusted_keyring;
static inline struct key *get_system_trusted_keyring(void)
@@ -28,4 +29,10 @@ static inline struct key *get_system_trusted_keyring(void)
}
#endif
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+extern int system_verify_data(const void *data, unsigned long len,
+ const void *raw_pkcs7, size_t pkcs7_len,
+ enum key_being_used_for usage);
+#endif
+
#endif /* _KEYS_SYSTEM_KEYRING_H */
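
system_verify_data() checks a detached PKCS#7 signature against the system trusted keyring. A minimal caller sketch; the wrapper name is made up and VERIFYING_FIRMWARE_SIGNATURE is assumed to be one of the key_being_used_for values from <crypto/public_key.h>:

    #include <keys/system_keyring.h>

    #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
    static int example_verify_blob(const void *blob, unsigned long blob_len,
                                   const void *pkcs7, size_t pkcs7_len)
    {
            return system_verify_data(blob, blob_len, pkcs7, pkcs7_len,
                                      VERIFYING_FIRMWARE_SIGNATURE);
    }
    #endif
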
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index e5966758c093..e1e4d7c38dda 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -52,13 +52,16 @@ struct arch_timer_cpu {
/* Timer IRQ */
const struct kvm_irq_level *irq;
+
+ /* VGIC mapping */
+ struct irq_phys_map *map;
};
int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
void kvm_timer_init(struct kvm *kvm);
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq);
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 133ea00aa83b..4e14dac282bb 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,11 +35,7 @@
#define VGIC_V3_MAX_LRS 16
#define VGIC_MAX_IRQS 1024
#define VGIC_V2_MAX_CPUS 8
-
-/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 255)
-#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
-#endif
+#define VGIC_V3_MAX_CPUS 255
#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
@@ -95,11 +91,15 @@ enum vgic_type {
#define LR_STATE_ACTIVE (1 << 1)
#define LR_STATE_MASK (3 << 0)
#define LR_EOI_INT (1 << 2)
+#define LR_HW (1 << 3)
struct vgic_lr {
- u16 irq;
- u8 source;
- u8 state;
+ unsigned irq:10;
+ union {
+ unsigned hwirq:10;
+ unsigned source:3;
+ };
+ unsigned state:4;
};
struct vgic_vmcr {
@@ -155,6 +155,19 @@ struct vgic_io_device {
struct kvm_io_device dev;
};
+struct irq_phys_map {
+ u32 virt_irq;
+ u32 phys_irq;
+ u32 irq;
+ bool active;
+};
+
+struct irq_phys_map_entry {
+ struct list_head entry;
+ struct rcu_head rcu;
+ struct irq_phys_map map;
+};
+
struct vgic_dist {
spinlock_t lock;
bool in_kernel;
@@ -252,6 +265,10 @@ struct vgic_dist {
struct vgic_vm_ops vm_ops;
struct vgic_io_device dist_iodev;
struct vgic_io_device *redist_iodevs;
+
+ /* Virtual irq to hwirq mapping */
+ spinlock_t irq_phys_map_lock;
+ struct list_head irq_phys_map_list;
};
struct vgic_v2_cpu_if {
@@ -303,6 +320,9 @@ struct vgic_cpu {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
+
+ /* Protected by the distributor's irq_phys_map_lock */
+ struct list_head irq_phys_map_list;
};
#define LR_EMPTY 0xff
@@ -317,16 +337,25 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
+void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+ struct irq_phys_map *map, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+ int virt_irq, int irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
+void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
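
The new map/inject pair lets a virtual interrupt track a physical one, as the arch timer now does. A rough sketch of the flow, assuming kvm_vgic_map_phys_irq() returns NULL on failure; the wrapper name is illustrative:

    #include <kvm/arm_vgic.h>

    static int example_forward_irq(struct kvm_vcpu *vcpu, int host_irq,
                                   int guest_irq, bool level)
    {
            struct irq_phys_map *map;

            /* Tie the guest interrupt to the host one... */
            map = kvm_vgic_map_phys_irq(vcpu, guest_irq, host_irq);
            if (!map)
                    return -EINVAL;

            /* ...then raise/lower it through the mapped-IRQ path. */
            return kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id, map, level);
    }
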
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 6a0a89ed7f81..0ddb5c02ad8b 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -33,14 +33,12 @@
#define UART01x_DR 0x00 /* Data read or written from the interface. */
#define UART01x_RSR 0x04 /* Receive status register (Read). */
#define UART01x_ECR 0x04 /* Error clear register (Write). */
-#define ZX_UART01x_DR 0x04 /* Data read or written from the interface. */
#define UART010_LCRH 0x08 /* Line control register, high byte. */
#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
#define UART010_LCRM 0x0C /* Line control register, middle byte. */
#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
#define UART010_LCRL 0x10 /* Line control register, low byte. */
#define UART010_CR 0x14 /* Control register. */
-#define ZX_UART01x_FR 0x14 /* Flag register (Read only). */
#define UART01x_FR 0x18 /* Flag register (Read only). */
#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
@@ -51,21 +49,13 @@
#define UART011_LCRH 0x2c /* Line control register. */
#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */
#define UART011_CR 0x30 /* Control register. */
-#define ZX_UART011_LCRH_TX 0x30 /* Tx Line control register. */
#define UART011_IFLS 0x34 /* Interrupt fifo level select. */
-#define ZX_UART011_CR 0x34 /* Control register. */
-#define ZX_UART011_IFLS 0x38 /* Interrupt fifo level select. */
#define UART011_IMSC 0x38 /* Interrupt mask. */
#define UART011_RIS 0x3c /* Raw interrupt status. */
#define UART011_MIS 0x40 /* Masked interrupt status. */
-#define ZX_UART011_IMSC 0x40 /* Interrupt mask. */
#define UART011_ICR 0x44 /* Interrupt clear register. */
-#define ZX_UART011_RIS 0x44 /* Raw interrupt status. */
#define UART011_DMACR 0x48 /* DMA control register. */
-#define ZX_UART011_MIS 0x48 /* Masked interrupt status. */
-#define ZX_UART011_ICR 0x4c /* Interrupt clear register. */
#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
-#define ZX_UART011_DMACR 0x50 /* DMA control register. */
#define ST_UART011_XON1 0x54 /* XON1 register. */
#define ST_UART011_XON2 0x58 /* XON2 register. */
#define ST_UART011_XOFF1 0x5C /* XON1 register. */
@@ -85,19 +75,15 @@
#define UART01x_RSR_PE 0x02
#define UART01x_RSR_FE 0x01
-#define ZX_UART01x_FR_BUSY 0x300
#define UART011_FR_RI 0x100
#define UART011_FR_TXFE 0x080
#define UART011_FR_RXFF 0x040
#define UART01x_FR_TXFF 0x020
#define UART01x_FR_RXFE 0x010
#define UART01x_FR_BUSY 0x008
-#define ZX_UART01x_FR_DSR 0x008
#define UART01x_FR_DCD 0x004
#define UART01x_FR_DSR 0x002
-#define ZX_UART01x_FR_CTS 0x002
#define UART01x_FR_CTS 0x001
-#define ZX_UART011_FR_RI 0x001
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index 945d44ae529c..ab3a6c002f7b 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -45,23 +45,27 @@ enum asn1_opcode {
ASN1_OP_MATCH_JUMP = 0x04,
ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
ASN1_OP_MATCH_ANY = 0x08,
+ ASN1_OP_MATCH_ANY_OR_SKIP = 0x09,
ASN1_OP_MATCH_ANY_ACT = 0x0a,
+ ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b,
/* Everything before here matches unconditionally */
ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
ASN1_OP_COND_MATCH_ANY = 0x18,
+ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19,
ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
+ ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
/* Everything before here will want a tag from the data */
-#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT
+#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
/* These are here to help fill up space */
- ASN1_OP_COND_FAIL = 0x1b,
- ASN1_OP_COMPLETE = 0x1c,
- ASN1_OP_ACT = 0x1d,
- ASN1_OP_RETURN = 0x1e,
+ ASN1_OP_COND_FAIL = 0x1c,
+ ASN1_OP_COMPLETE = 0x1d,
+ ASN1_OP_ACT = 0x1e,
+ ASN1_OP_MAYBE_ACT = 0x1f,
/* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
ASN1_OP_END_SEQ = 0x20,
@@ -76,6 +80,8 @@ enum asn1_opcode {
#define ASN1_OP_END__OF 0x02
#define ASN1_OP_END__ACT 0x04
+ ASN1_OP_RETURN = 0x28,
+
ASN1_OP__NR
};
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..00a5763e850e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+#define __atomic_op_release(op, args...) \
+({ \
+ smp_mb__before_atomic(); \
+ op##_relaxed(args); \
+})
+
+#define __atomic_op_fence(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret; \
+ smp_mb__before_atomic(); \
+ __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_relaxed atomic_add_return
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define atomic_add_return_acquire(...) \
+ __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define atomic_add_return_release(...) \
+ __atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define atomic_add_return(...) \
+ __atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return
+#define atomic_sub_return_acquire atomic_sub_return
+#define atomic_sub_return_release atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define atomic_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define atomic_sub_return_release(...) \
+ __atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define atomic_sub_return(...) \
+ __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed atomic_xchg
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...) \
+ __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...) \
+ __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define atomic_xchg(...) \
+ __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...) \
+ __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...) \
+ __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed atomic64_add_return
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...) \
+ __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...) \
+ __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...) \
+ __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...) \
+ __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed atomic64_xchg
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...) \
+ __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...) \
+ __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...) \
+ __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed cmpxchg
+#define cmpxchg_acquire cmpxchg
+#define cmpxchg_release cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+ __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+ __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+ __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed cmpxchg64
+#define cmpxchg64_acquire cmpxchg64
+#define cmpxchg64_release cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+ __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+ __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+ __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define xchg_relaxed xchg
+#define xchg_acquire xchg
+#define xchg_release xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
/**
* atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+ atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_or(mask, v);
+}
+
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
- int old;
- int new;
-
- do {
- old = atomic_read(v);
- new = old | i;
- } while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+ atomic64_and(~i, v);
+}
+#endif
+
#endif /* _LINUX_ATOMIC_H */
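
The net effect of the boilerplate above is that an architecture defining only the _relaxed form of an operation gets the _acquire, _release and fully ordered versions generated from it (and the reverse when only the fully ordered form exists). The acquire/release accessors also support a simple publish/consume pattern without explicit barriers; a sketch with illustrative names:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    static atomic_t example_ready = ATOMIC_INIT(0);
    static int example_payload;

    static void example_publish(int value)
    {
            example_payload = value;
            /* RELEASE: the payload store is ordered before the flag store. */
            atomic_set_release(&example_ready, 1);
    }

    static int example_consume(int *value)
    {
            /* ACQUIRE: the flag load is ordered before the payload load. */
            if (!atomic_read_acquire(&example_ready))
                    return -EAGAIN;

            *value = example_payload;
            return 0;
    }
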
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c2e7e3a83965..b2abc996c25d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -27,6 +27,9 @@
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
+#define AUDIT_INO_UNSET ((unsigned long)-1)
+#define AUDIT_DEV_UNSET ((dev_t)-1)
+
struct audit_sig_info {
uid_t uid;
pid_t pid;
@@ -59,6 +62,7 @@ struct audit_krule {
struct audit_field *inode_f; /* quick access to an inode field */
struct audit_watch *watch; /* associated watch */
struct audit_tree *tree; /* associated watched tree */
+ struct audit_fsnotify_mark *exe;
struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
struct list_head list; /* for AUDIT_LIST* purposes only */
u64 prio;
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd742c1..d04aa58280de 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
/* Exponentially weighted moving average (EWMA) */
-/* For more documentation see lib/average.c */
-
-struct ewma {
- unsigned long internal;
- unsigned long factor;
- unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
- unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
- return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight) \
+ struct ewma_##name { \
+ unsigned long internal; \
+ }; \
+ static inline void ewma_##name##_init(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ e->internal = 0; \
+ } \
+ static inline unsigned long \
+ ewma_##name##_read(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ return e->internal >> ilog2(_factor); \
+ } \
+ static inline void ewma_##name##_add(struct ewma_##name *e, \
+ unsigned long val) \
+ { \
+ unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long weight = ilog2(_weight); \
+ unsigned long factor = ilog2(_factor); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ \
+ ACCESS_ONCE(e->internal) = internal ? \
+ (((internal << weight) - internal) + \
+ (val << factor)) >> weight : \
+ (val << factor); \
+ }
#endif /* _LINUX_AVERAGE_H */
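
DECLARE_EWMA() replaces the old struct ewma with a per-user type whose factor and weight are compile-time constants (both must be powers of two). A usage sketch; the "signal" average and the surrounding struct are illustrative:

    #include <linux/average.h>

    DECLARE_EWMA(signal, 16, 8)

    struct example_station {
            struct ewma_signal avg_signal;
    };

    static void example_station_init(struct example_station *sta)
    {
            ewma_signal_init(&sta->avg_signal);
    }

    static void example_rx(struct example_station *sta, unsigned long sig)
    {
            ewma_signal_add(&sta->avg_signal, sig);
    }

    static unsigned long example_avg(struct example_station *sta)
    {
            return ewma_signal_read(&sta->avg_signal);
    }
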
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0fe9df983ab7..5a5d79ee256f 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -286,7 +286,7 @@ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi
* %current's blkcg equals the effective blkcg of its memcg. No
* need to use the relatively expensive cgroup_get_e_css().
*/
- if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
+ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
return wb;
return NULL;
}
@@ -402,7 +402,7 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
}
struct wb_iter {
- int start_blkcg_id;
+ int start_memcg_id;
struct radix_tree_iter tree_iter;
void **slot;
};
@@ -414,9 +414,9 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
WARN_ON_ONCE(!rcu_read_lock_held());
- if (iter->start_blkcg_id >= 0) {
- iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
- iter->start_blkcg_id = -1;
+ if (iter->start_memcg_id >= 0) {
+ iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
+ iter->start_memcg_id = -1;
} else {
iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
}
@@ -430,30 +430,30 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
struct backing_dev_info *bdi,
- int start_blkcg_id)
+ int start_memcg_id)
{
- iter->start_blkcg_id = start_blkcg_id;
+ iter->start_memcg_id = start_memcg_id;
- if (start_blkcg_id)
+ if (start_memcg_id)
return __wb_iter_next(iter, bdi);
else
return &bdi->wb;
}
/**
- * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
+ * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
* @wb_cur: cursor struct bdi_writeback pointer
* @bdi: bdi to walk wb's of
* @iter: pointer to struct wb_iter to be used as iteration buffer
- * @start_blkcg_id: blkcg ID to start iteration from
+ * @start_memcg_id: memcg ID to start iteration from
*
* Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
- * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
+ * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
* to be used as temp storage during iteration. rcu_read_lock() must be
* held throughout iteration.
*/
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
- for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
+ for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
(wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
#else /* CONFIG_CGROUP_WRITEBACK */
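
Only the iterator's naming changes here (memcg rather than blkcg IDs); the calling convention stays the same. Roughly, with illustrative names:

    #include <linux/backing-dev.h>

    static int example_count_wbs(struct backing_dev_info *bdi)
    {
            struct bdi_writeback *wb;
            struct wb_iter iter;
            int nr = 0;

            rcu_read_lock();
            bdi_for_each_wb(wb, bdi, &iter, 0)
                    nr++;
            rcu_read_unlock();

            return nr;
    }
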
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea946e640..ed3768f4ecc7 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
+#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf65ca2..cf038431a5cc 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
- struct irq_domain *irq_domain;
#endif
};
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index a4cd1641e9e2..0a5cc7a1109b 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,12 +14,15 @@
*/
#include <linux/cgroup.h>
-#include <linux/u64_stats_sync.h>
+#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
+/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
+#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
@@ -45,7 +48,7 @@ struct blkcg {
struct blkcg_gq *blkg_hint;
struct hlist_head blkg_list;
- struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
+ struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -53,14 +56,19 @@ struct blkcg {
#endif
};
+/*
+ * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
+ * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
+ * to carry result values from read and sum operations.
+ */
struct blkg_stat {
- struct u64_stats_sync syncp;
- uint64_t cnt;
+ struct percpu_counter cpu_cnt;
+ atomic64_t aux_cnt;
};
struct blkg_rwstat {
- struct u64_stats_sync syncp;
- uint64_t cnt[BLKG_RWSTAT_NR];
+ struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
+ atomic64_t aux_cnt[BLKG_RWSTAT_NR];
};
/*
@@ -68,32 +76,28 @@ struct blkg_rwstat {
* request_queue (q). This is used by blkcg policies which need to track
* information per blkcg - q pair.
*
- * There can be multiple active blkcg policies and each has its private
- * data on each blkg, the size of which is determined by
- * blkcg_policy->pd_size. blkcg core allocates and frees such areas
- * together with blkg and invokes pd_init/exit_fn() methods.
- *
- * Such private data must embed struct blkg_policy_data (pd) at the
- * beginning and pd_size can't be smaller than pd.
+ * There can be multiple active blkcg policies and each blkg:policy pair is
+ * represented by a blkg_policy_data which is allocated and freed by each
+ * policy's pd_alloc/free_fn() methods. A policy can allocate private data
+ * area by allocating larger data structure which embeds blkg_policy_data
+ * at the beginning.
*/
struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
-
- /* used during policy activation */
- struct list_head alloc_node;
};
/*
- * Policies that need to keep per-blkcg data which is independent
- * from any request_queue associated to it must specify its size
- * with the cpd_size field of the blkcg_policy structure and
- * embed a blkcg_policy_data in it. cpd_init() is invoked to let
- * each policy handle per-blkcg data.
+ * Policies that need to keep per-blkcg data which is independent from any
+ * request_queue associated to it should implement cpd_alloc/free_fn()
+ * methods. A policy can allocate private data area by allocating larger
+ * data structure which embeds blkcg_policy_data at the beginning.
+ * cpd_init() is invoked to let each policy handle per-blkcg data.
*/
struct blkcg_policy_data {
- /* the policy id this per-policy data belongs to */
+ /* the blkcg and policy id this per-policy data belongs to */
+ struct blkcg *blkcg;
int plid;
};
@@ -123,40 +127,50 @@ struct blkcg_gq {
/* is this blkg online? protected by both blkcg and q locks */
bool online;
+ struct blkg_rwstat stat_bytes;
+ struct blkg_rwstat stat_ios;
+
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
};
-typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
-typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
+typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
+typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
struct blkcg_policy {
int plid;
- /* policy specific private data size */
- size_t pd_size;
- /* policy specific per-blkcg data size */
- size_t cpd_size;
/* cgroup files for the policy */
- struct cftype *cftypes;
+ struct cftype *dfl_cftypes;
+ struct cftype *legacy_cftypes;
/* operations */
+ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_free_cpd_fn *cpd_free_fn;
+ blkcg_pol_bind_cpd_fn *cpd_bind_fn;
+
+ blkcg_pol_alloc_pd_fn *pd_alloc_fn;
blkcg_pol_init_pd_fn *pd_init_fn;
blkcg_pol_online_pd_fn *pd_online_fn;
blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
+ struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -171,6 +185,7 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
+const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
struct blkg_policy_data *, int),
@@ -182,19 +197,24 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
int off);
+int blkg_print_stat_bytes(struct seq_file *sf, void *v);
+int blkg_print_stat_ios(struct seq_file *sf, void *v);
+int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
+int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
-struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
- int off);
+u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off);
struct blkg_conf_ctx {
struct gendisk *disk;
struct blkcg_gq *blkg;
- u64 v;
+ char *body;
};
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- const char *input, struct blkg_conf_ctx *ctx);
+ char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
@@ -205,7 +225,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
- return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
+ return css_to_blkcg(task_css(tsk, io_cgrp_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -218,7 +238,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
- return task_get_css(task, blkio_cgrp_id);
+ return task_get_css(task, io_cgrp_id);
}
/**
@@ -233,6 +253,52 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
}
/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update lookup hint with the result or not
+ *
+ * This is the internal version and shouldn't be used by policy
+ * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
+ * @q's bypass state. If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and lookup hint is updated on success.
+ */
+static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q,
+ bool update_hint)
+{
+ struct blkcg_gq *blkg;
+
+ if (blkcg == &blkcg_root)
+ return q->root_blkg;
+
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
+
+ return blkg_lookup_slowpath(blkcg, q, update_hint);
+}
+
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q, false);
+}
+
+/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
@@ -248,7 +314,7 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
struct blkcg_policy *pol)
{
- return blkcg ? blkcg->pd[pol->plid] : NULL;
+ return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
@@ -262,6 +328,11 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
return pd ? pd->blkg : NULL;
}
+static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
+{
+ return cpd ? cpd->blkcg : NULL;
+}
+
/**
* blkg_path - format cgroup path of blkg
* @blkg: blkg of interest
@@ -309,9 +380,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
-struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
- bool update_hint);
-
/**
* blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
@@ -373,8 +441,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
* or if either the blkcg or queue is going away. Fall back to
* root_rl in such cases.
*/
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg))
goto root_rl;
blkg_get(blkg);
@@ -394,8 +462,7 @@ root_rl:
*/
static inline void blk_put_rl(struct request_list *rl)
{
- /* root_rl may not have blkg set */
- if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+ if (rl->blkg->blkcg != &blkcg_root)
blkg_put(rl->blkg);
}
@@ -433,9 +500,21 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-static inline void blkg_stat_init(struct blkg_stat *stat)
+static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
- u64_stats_init(&stat->syncp);
+ int ret;
+
+ ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
+ if (ret)
+ return ret;
+
+ atomic64_set(&stat->aux_cnt, 0);
+ return 0;
+}
+
+static inline void blkg_stat_exit(struct blkg_stat *stat)
+{
+ percpu_counter_destroy(&stat->cpu_cnt);
}
/**
@@ -443,34 +522,21 @@ static inline void blkg_stat_init(struct blkg_stat *stat)
* @stat: target blkg_stat
* @val: value to add
*
- * Add @val to @stat. The caller is responsible for synchronizing calls to
- * this function.
+ * Add @val to @stat. The caller must ensure that IRQs on the same CPU
+ * don't re-enter this function for the same counter.
*/
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
- u64_stats_update_begin(&stat->syncp);
- stat->cnt += val;
- u64_stats_update_end(&stat->syncp);
+ __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_stat_read - read the current value of a blkg_stat
* @stat: blkg_stat to read
- *
- * Read the current value of @stat. This function can be called without
- * synchroniztion and takes care of u64 atomicity.
*/
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
- unsigned int start;
- uint64_t v;
-
- do {
- start = u64_stats_fetch_begin_irq(&stat->syncp);
- v = stat->cnt;
- } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
-
- return v;
+ return percpu_counter_sum_positive(&stat->cpu_cnt);
}
/**
@@ -479,24 +545,46 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
*/
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
- stat->cnt = 0;
+ percpu_counter_set(&stat->cpu_cnt, 0);
+ atomic64_set(&stat->aux_cnt, 0);
}
/**
- * blkg_stat_merge - merge a blkg_stat into another
+ * blkg_stat_add_aux - add a blkg_stat into another's aux count
* @to: the destination blkg_stat
* @from: the source
*
- * Add @from's count to @to.
+ * Add @from's count including the aux one to @to's aux count.
*/
-static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+static inline void blkg_stat_add_aux(struct blkg_stat *to,
+ struct blkg_stat *from)
{
- blkg_stat_add(to, blkg_stat_read(from));
+ atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
+ &to->aux_cnt);
}
-static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
- u64_stats_init(&rwstat->syncp);
+ int i, ret;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
+ if (ret) {
+ while (--i >= 0)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ return ret;
+ }
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+ return 0;
+}
+
+static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}
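The same fallibility applies to blkg_rwstat_init(), so policy-private data that embeds both kinds of counters needs an unwind path; a hypothetical sketch (struct and names are illustrative):

struct example_pd {
	struct blkg_stat srv_time;
	struct blkg_rwstat bytes;
};

static int example_pd_init(struct example_pd *pd, gfp_t gfp)
{
	int ret;

	ret = blkg_stat_init(&pd->srv_time, gfp);
	if (ret)
		return ret;

	ret = blkg_rwstat_init(&pd->bytes, gfp);
	if (ret)
		blkg_stat_exit(&pd->srv_time);	/* unwind the earlier init */
	return ret;
}

static void example_pd_exit(struct example_pd *pd)
{
	blkg_rwstat_exit(&pd->bytes);
	blkg_stat_exit(&pd->srv_time);
}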
/**
@@ -511,39 +599,38 @@ static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
int rw, uint64_t val)
{
- u64_stats_update_begin(&rwstat->syncp);
+ struct percpu_counter *cnt;
if (rw & REQ_WRITE)
- rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
- rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
+
+ __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+
if (rw & REQ_SYNC)
- rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
- rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
- u64_stats_update_end(&rwstat->syncp);
+ __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_rwstat_read - read the current values of a blkg_rwstat
* @rwstat: blkg_rwstat to read
*
- * Read the current snapshot of @rwstat and return it as the return value.
- * This function can be called without synchronization and takes care of
- * u64 atomicity.
+ * Read the current snapshot of @rwstat and return it in the aux counts.
*/
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
- unsigned int start;
- struct blkg_rwstat tmp;
-
- do {
- start = u64_stats_fetch_begin_irq(&rwstat->syncp);
- tmp = *rwstat;
- } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
+ struct blkg_rwstat result;
+ int i;
- return tmp;
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_set(&result.aux_cnt[i],
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
+ return result;
}
/**
@@ -558,7 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
- return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+ return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}
/**
@@ -567,26 +655,71 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
*/
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
- memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
}
/**
- * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
* @to: the destination blkg_rwstat
* @from: the source
*
- * Add @from's counts to @to.
+ * Add @from's count including the aux one to @to's aux count.
*/
-static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
+static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
{
struct blkg_rwstat v = blkg_rwstat_read(from);
int i;
- u64_stats_update_begin(&to->syncp);
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- to->cnt[i] += v.cnt[i];
- u64_stats_update_end(&to->syncp);
+ atomic64_add(atomic64_read(&v.aux_cnt[i]) +
+ atomic64_read(&from->aux_cnt[i]),
+ &to->aux_cnt[i]);
+}
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio);
+#else
+static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio) { return false; }
+#endif
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ bool throtl = false;
+
+ rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ throtl = blk_throtl_bio(q, blkg, bio);
+
+ if (!throtl) {
+ blkg = blkg ?: q->root_blkg;
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
+ bio->bi_iter.bi_size);
+ blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
+ }
+
+ rcu_read_unlock();
+ return !throtl;
}
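Sketch of the intended call site (the real hook-up lives in the block core, not in this header): the bio is only issued further when the cgroup side did not throttle it.

static void example_issue(struct request_queue *q, struct bio *bio)
{
	if (!blkcg_bio_issue_check(q, bio))
		return;			/* bio was queued by the throttler */

	q->make_request_fn(q, bio);
}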
#else /* CONFIG_BLK_CGROUP */
@@ -642,6 +775,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+ struct bio *bio) { return true; }
+
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4b7b4ebaa633..e8130138f29d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,9 +118,8 @@ struct bio {
#define BIO_USER_MAPPED 4 /* contains user pages */
#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
-#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
+#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
/*
* Flags starting here get preserved by bio_reset() - this includes
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1aac7316a4b5..99da9ebc7377 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -584,7 +584,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
+#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
/*
* Driver can handle struct request, if it either has an old style
@@ -1619,8 +1619,8 @@ struct block_device_operations {
int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t,
- void **, unsigned long *pfn, long size);
+ long (*direct_access)(struct block_device *, sector_t, void __pmem **,
+ unsigned long *pfn);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1638,8 +1638,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
- unsigned long *pfn, long size);
+extern long bdev_direct_access(struct block_device *, sector_t,
+ void __pmem **addr, unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476a0d48..f57d7fed9ec3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/perf_event.h>
struct bpf_map;
@@ -24,6 +25,10 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+ /* funcs called by prog_array and perf_event_array map */
+ void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+ void (*map_fd_put_ptr) (void *ptr);
};
struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
bool owner_jited;
union {
char value[0] __aligned(8);
- struct bpf_prog *prog[0] __aligned(8);
+ void *ptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 4763ad64e832..f89b31d45cc8 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9ebee53d3bf5..397c5cd09794 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -46,6 +46,7 @@ struct ceph_options {
unsigned long mount_timeout; /* jiffies */
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
+ unsigned long monc_ping_timeout; /* jiffies */
/*
* any type that can't be simply compared or doesn't need need
@@ -66,6 +67,7 @@ struct ceph_options {
#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
+#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 37753278987a..b2371d9b51fa 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -238,6 +238,8 @@ struct ceph_connection {
bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
/* message in temps */
struct ceph_msg_header in_hdr;
@@ -248,6 +250,8 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
+ struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
};
@@ -285,6 +289,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg);
extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
extern void ceph_con_keepalive(struct ceph_connection *con);
+extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
+ unsigned long interval);
extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
size_t length, size_t alignment);
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 1c1887206ffa..0fe2656ac415 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -84,10 +84,12 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_MSG 7 /* message */
#define CEPH_MSGR_TAG_ACK 8 /* message ack */
#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
-#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
+#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
+#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
+#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
/*
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 1f36945fd23d..1a96fdaa33d5 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -27,7 +27,7 @@ SUBSYS(cpuacct)
#endif
#if IS_ENABLED(CONFIG_BLK_CGROUP)
-SUBSYS(blkio)
+SUBSYS(io)
#endif
#if IS_ENABLED(CONFIG_MEMCG)
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 31ce435981fe..bdcf358dfce2 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -18,15 +18,6 @@
struct clock_event_device;
struct module;
-/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
-enum clock_event_mode {
- CLOCK_EVT_MODE_UNUSED,
- CLOCK_EVT_MODE_SHUTDOWN,
- CLOCK_EVT_MODE_PERIODIC,
- CLOCK_EVT_MODE_ONESHOT,
- CLOCK_EVT_MODE_RESUME,
-};
-
/*
* Possible states of a clock event device.
*
@@ -86,16 +77,14 @@ enum clock_event_state {
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
- * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
* @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
- * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
- * @set_state_periodic: switch state to periodic, if !set_mode
- * @set_state_oneshot: switch state to oneshot, if !set_mode
- * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
- * @set_state_shutdown: switch state to shutdown, if !set_mode
- * @tick_resume: resume clkevt device, if !set_mode
+ * @set_state_periodic: switch state to periodic
+ * @set_state_oneshot: switch state to oneshot
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped
+ * @set_state_shutdown: switch state to shutdown
+ * @tick_resume: resume clkevt device
* @broadcast: function to broadcast events
* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@ struct clock_event_device {
u64 min_delta_ns;
u32 mult;
u32 shift;
- enum clock_event_mode mode;
enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
- /*
- * State transition callback(s): Only one of the two groups should be
- * defined:
- * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
- * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
- */
- void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
int (*set_state_oneshot_stopped)(struct clock_event_device *);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae7c0a4..c836eb2dc44d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
- ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(x)) (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
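The added __force cast is presumably there so sparse-annotated (__bitwise) values can be stored without a cast warning; an illustrative, hypothetical use:

static void example_publish(__le32 *hw_word, int *flag, u32 val)
{
	WRITE_ONCE(*hw_word, cpu_to_le32(val));	/* no sparse complaint about __le32 */
	WRITE_ONCE(*flag, 1);			/* single, non-torn store */
}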
/**
* READ_ONCE_CTRL - Read a value heading a control dependency
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8b6c083e68a7..8d70e1361ecd 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -137,6 +137,7 @@ struct cred {
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
+ kernel_cap_t cap_ambient; /* Ambient capability set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
@@ -212,6 +213,13 @@ static inline void validate_process_creds(void)
}
#endif
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+{
+ return cap_issubset(cred->cap_ambient,
+ cap_intersect(cred->cap_permitted,
+ cred->cap_inheritable));
+}
+
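A hypothetical sketch of how the invariant is meant to be used: raising an ambient capability is only legal while ambient stays a subset of permitted & inheritable.

static int example_raise_ambient(struct cred *new, int cap)
{
	cap_raise(new->cap_ambient, cap);
	if (!cap_ambient_invariant_ok(new)) {
		cap_lower(new->cap_ambient, cap);	/* roll back */
		return -EPERM;
	}
	return 0;
}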
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
diff --git a/include/linux/dax.h b/include/linux/dax.h
new file mode 100644
index 000000000000..b415e521528d
--- /dev/null
+++ b/include/linux/dax.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_DAX_H
+#define _LINUX_DAX_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned int flags, get_block_t, dax_iodone_t);
+int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned int flags, get_block_t, dax_iodone_t);
+#else
+static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, unsigned int flags, get_block_t gb,
+ dax_iodone_t di)
+{
+ return VM_FAULT_FALLBACK;
+}
+#define __dax_pmd_fault dax_pmd_fault
+#endif
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
+#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
+
+static inline bool vma_is_dax(struct vm_area_struct *vma)
+{
+ return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+}
+#endif
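Sketch of how a filesystem is expected to route page faults through the new header (helper names are illustrative; the get_block callback is the filesystem's own, and NULL is passed for the unwritten-extent completion):

extern get_block_t example_get_block;	/* the fs's block-mapping callback (assumed) */

static int example_fs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	if (vma_is_dax(vma))
		return dax_fault(vma, vmf, example_get_block, NULL);
	return filemap_fault(vma, vmf);
}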
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 420311bcee38..9beb636b97eb 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -116,6 +116,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
bool debugfs_initialized(void);
+ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
#else
#include <linux/err.h>
@@ -282,6 +288,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
return ERR_PTR(-ENODEV);
}
+static inline ssize_t debugfs_read_file_bool(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t debugfs_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
#endif
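With the helpers exported, a driver can assemble its own bool-style debugfs file; a sketch of the fops wiring (the data pointer handed to debugfs_create_file() reaches the helpers through simple_open()):

static const struct file_operations example_bool_fops = {
	.read	= debugfs_read_file_bool,
	.write	= debugfs_write_file_bool,
	.open	= simple_open,
	.llseek	= default_llseek,
};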
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb419976..7ea9184eaa13 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@ enum dma_transaction_type {
DMA_XOR_VAL,
DMA_PQ_VAL,
DMA_MEMSET,
+ DMA_MEMSET_SG,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
@@ -183,6 +184,8 @@ struct dma_interleaved_template {
* operation it continues the calculation with new sources
* @DMA_PREP_FENCE - tell the driver that subsequent operations depend
* on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit it again
+ * until it is cleared or freed
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@ enum dma_ctrl_flags {
DMA_PREP_PQ_DISABLE_Q = (1 << 3),
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
+ DMA_CTRL_REUSE = (1 << 6),
};
/**
@@ -400,6 +404,8 @@ enum dma_residue_granularity {
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: whether a descriptor can be reused by the client and
+ * resubmitted multiple times
*/
struct dma_slave_caps {
u32 src_addr_widths;
@@ -408,6 +414,7 @@ struct dma_slave_caps {
bool cmd_pause;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
+ bool descriptor_reuse;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@ struct dma_async_tx_descriptor {
dma_addr_t phys;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@ struct dma_tx_state {
};
/**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@ struct dma_tx_state {
* @device_prep_dma_pq: prepares a pq operation
* @device_prep_dma_pq_val: prepares a pqzero_sum operation
* @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@ struct dma_device {
dma_cap_mask_t cap_mask;
unsigned short max_xor;
unsigned short max_pq;
- u8 copy_align;
- u8 xor_align;
- u8 pq_align;
- u8 fill_align;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -682,6 +705,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
return desc->tx_submit(desc);
}
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+ size_t off1, size_t off2, size_t len)
{
size_t mask;
@@ -1155,6 +1182,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_slave_caps caps;
+
+ dma_get_slave_caps(tx->chan, &caps);
+
+ if (caps.descriptor_reuse) {
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
+ } else {
+ return -EPERM;
+ }
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+ /* this is supported for reusable desc, so check that */
+ if (dmaengine_desc_test_reuse(desc))
+ return desc->desc_free(desc);
+ else
+ return -EPERM;
+}
+
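A hypothetical client flow for the reuse helpers (chan and txd are assumed to come from the usual prep path):

static int example_reuse_cycle(struct dma_chan *chan,
			       struct dma_async_tx_descriptor *txd)
{
	int ret;

	ret = dmaengine_desc_set_reuse(txd);	/* -EPERM if the channel can't do it */
	if (ret)
		return ret;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	/* ...after completion the same txd may be submitted again... */

	return dmaengine_desc_free(txd);	/* explicit free, reusable descs only */
}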
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
- struct device *dev, char *name)
+ struct device *dev, const char *name)
{
struct dma_chan *chan;
@@ -1177,6 +1237,9 @@ static inline struct dma_chan
if (chan)
return chan;
+ if (!fn || !fn_param)
+ return NULL;
+
return __dma_request_channel(mask, fn, fn_param);
}
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index e1043f79122f..53ba737505df 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
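Illustrative only ("pool" is assumed to be a dma_pool the driver already created): the new wrapper is dma_pool_alloc() plus zeroing in one call.

static void *example_get_cleared_block(struct dma_pool *pool, dma_addr_t *dma)
{
	return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}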
/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f8775208..eb049c622208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
- ((a[2] ^ b[2]) & m)) == 0;
+ (__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a21ffd..25c6324a0dd0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@ typedef __le32 f2fs_hash_t;
#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK 214
-
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+/*
+ * space utilization of regular dentry and inline dentry
+ * regular dentry inline dentry
+ * bitmap 1 * 27 = 27 1 * 23 = 23
+ * reserved 1 * 3 = 3 1 * 7 = 7
+ * dentry 11 * 214 = 2354 11 * 182 = 2002
+ * filename 8 * 214 = 1712 8 * 182 = 1456
+ * total 4096 3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry; when converting an inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 043f3283b71c..bc9afa74ee11 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -788,7 +788,7 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[64];
+extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6ea983..fa2cab985e57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
offsetof(struct bpf_prog, insns[proglen]));
}
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+ /* When classic BPF programs have been loaded and the arch
+ * does not have a classic BPF JIT (anymore), they have been
+ * converted via bpf_migrate_filter() to eBPF and thus always
+ * have an unspec program type.
+ */
+ return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
+ pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ proglen, pass, image, current->comm, task_pid_nr(current));
+
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fbd780c33c5f..72d8a844c692 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,7 +1,6 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
-
#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/kdev_t.h>
@@ -30,6 +29,8 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/blk_types.h>
+#include <linux/workqueue.h>
+#include <linux/percpu-rwsem.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -51,7 +52,6 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -636,7 +636,7 @@ struct inode {
unsigned long dirtied_time_when;
struct hlist_node i_hash;
- struct list_head i_wb_list; /* backing dev IO list */
+ struct list_head i_io_list; /* backing dev IO list */
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *i_wb; /* the associated cgroup wb */
@@ -943,12 +943,18 @@ struct lock_manager_operations {
struct lock_manager {
struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
};
struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(struct net *);
+int opens_in_grace(struct net *);
/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>
@@ -1275,16 +1281,9 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
- /* Counters for counting writers at each level */
- struct percpu_counter counter[SB_FREEZE_LEVELS];
- wait_queue_head_t wait; /* queue for waiting for
- writers / faults to finish */
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* queue for waiting for
- sb to be thawed */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map lock_map[SB_FREEZE_LEVELS];
-#endif
+ int frozen; /* Is sb frozen? */
+ wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
+ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
struct super_block {
@@ -1310,7 +1309,6 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
- struct list_head s_inodes; /* all inodes */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1376,11 +1374,18 @@ struct super_block {
struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
+ struct work_struct destroy_work;
+
+ struct mutex s_sync_lock; /* sync serialisation lock */
/*
* Indicates how deep in a filesystem stack this SB is
*/
int s_stack_depth;
+
+ /* s_inode_list_lock protects s_inodes */
+ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
+ struct list_head s_inodes; /* all inodes */
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1392,6 +1397,11 @@ extern struct timespec current_fs_time(struct super_block *sb);
void __sb_end_write(struct super_block *sb, int level);
int __sb_start_write(struct super_block *sb, int level, bool wait);
+#define __sb_writers_acquired(sb, lev) \
+ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev) \
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
@@ -1612,7 +1622,6 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -2609,7 +2618,7 @@ static inline void insert_inode_hash(struct inode *inode)
extern void __remove_inode_hash(struct inode *);
static inline void remove_inode_hash(struct inode *inode)
{
- if (!inode_unhashed(inode))
+ if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}
@@ -2668,19 +2677,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
- get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
-
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset);
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321eabbda..0023088b253b 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
u32 nand_stat;
wait_queue_head_t nand_wait;
+ bool little_endian;
};
extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+static inline u32 ifc_in32(void __iomem *addr)
+{
+ u32 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread32(addr);
+ else
+ val = ioread32be(addr);
+
+ return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+ u16 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread16(addr);
+ else
+ val = ioread16be(addr);
+
+ return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite16(val, addr);
+ else
+ iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+ iowrite8(val, addr);
+}
#endif /* __ASM_FSL_IFC_H */
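Sketch of driver usage (the register address is whatever the driver already ioremapped): the accessors pick ioread32()/ioread32be() from fsl_ifc_ctrl_dev->little_endian, so the same code serves both IFC endiannesses.

static u32 example_set_bits(void __iomem *addr, u32 set)
{
	u32 val = ifc_in32(addr);

	ifc_out32(val | set, addr);
	return val;
}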
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 65a517dd32f7..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -195,40 +195,49 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
- * a mark is simply an object attached to an in core inode which allows an
+ * A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
- * these are flushed when an inode is evicted from core and may be flushed
- * when the inode is modified (as seen by fsnotify_access). Some fsnotify users
- * (such as dnotify) will flush these when the open fd is closed and not at
- * inode eviction or modification.
+ * These are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access). Some fsnotify
+ * users (such as dnotify) will flush these when the open fd is closed and not
+ * at inode eviction or modification.
+ *
+ * Text in brackets shows the lock(s) protecting modifications of a
+ * particular entry. obj_lock means either inode->i_lock or
+ * mnt->mnt_root->d_lock depending on the mark type.
*/
struct fsnotify_mark {
- __u32 mask; /* mask this mark is for */
- /* we hold ref for each i_list and g_list. also one ref for each 'thing'
+ /* Mask this mark is for [mark->lock, group->mark_mutex] */
+ __u32 mask;
+ /* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
- atomic_t refcnt; /* active things looking at this mark */
- struct fsnotify_group *group; /* group this mark is for */
- struct list_head g_list; /* list of marks by group->i_fsnotify_marks
- * Also reused for queueing mark into
- * destroy_list when it's waiting for
- * the end of SRCU period before it can
- * be freed */
- spinlock_t lock; /* protect group and inode */
- struct hlist_node obj_list; /* list of marks for inode / vfsmount */
- struct list_head free_list; /* tmp list used when freeing this mark */
- union {
+ atomic_t refcnt;
+ /* Group this mark is for. Set on mark creation, stable until last ref
+ * is dropped */
+ struct fsnotify_group *group;
+ /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+ * mark into destroy_list when it's waiting for the end of SRCU period
+ * before it can be freed. [group->mark_mutex] */
+ struct list_head g_list;
+ /* Protects inode / mnt pointers, flags, masks */
+ spinlock_t lock;
+ /* List of marks for inode / vfsmount [obj_lock] */
+ struct hlist_node obj_list;
+ union { /* Object pointer [mark->lock, group->mark_mutex] */
struct inode *inode; /* inode this mark is associated with */
struct vfsmount *mnt; /* vfsmount this mark is associated with */
};
- __u32 ignored_mask; /* events types to ignore */
+ /* Events types to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignored_mask;
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
- unsigned int flags; /* vfsmount or inode mark? */
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
+ unsigned int flags; /* flags [mark->lock] */
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
@@ -345,8 +354,10 @@ extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
-extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
- struct fsnotify_group *group);
+/* detach mark from inode / mount list, group list, drop inode reference */
+extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
+/* free mark */
+extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */
@@ -357,7 +368,7 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct list_head *list);
+extern void fsnotify_unmount_inodes(struct super_block *sb);
/* put here because inotify does some weird stuff when destroying watches */
extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -393,7 +404,7 @@ static inline u32 fsnotify_get_cookie(void)
return 0;
}
-static inline void fsnotify_unmount_inodes(struct list_head *list)
+static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
#endif /* CONFIG_FSNOTIFY */
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5383bb1394a1..7ff168d06967 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -59,6 +59,8 @@ struct gen_pool {
genpool_algo_t algo; /* allocation function */
void *data;
+
+ const char *name;
};
/*
@@ -118,8 +120,8 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
- int min_alloc_order, int nid);
-extern struct gen_pool *gen_pool_get(struct device *dev);
+ int min_alloc_order, int nid, const char *name);
+extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ad35f300b9a4..f92cbd2f4450 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -63,7 +63,10 @@ struct vm_area_struct;
* but it is definitely preferable to use the flag rather than opencode endless
* loop around allocator.
*
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
+ * return NULL when direct reclaim and memory compaction have failed to allow
+ * the allocation to succeed. The OOM killer is not called with the current
+ * implementation.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
@@ -300,22 +303,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order)
+/*
+ * Allocate pages, preferring the node given as nid. The node must be valid and
+ * online. For a more general interface, see alloc_pages_node().
+ */
+static inline struct page *
+__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
- /* Unknown node is current node */
- if (nid < 0)
- nid = numa_node_id();
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ VM_WARN_ON(!node_online(nid));
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
-static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+/*
+ * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
+ * prefer the current CPU's closest node. Otherwise node must be valid and
+ * online.
+ */
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
- VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
- return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+ return __alloc_pages_node(nid, gfp_mask, order);
}
#ifdef CONFIG_NUMA
@@ -354,7 +366,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
-/* This is different from alloc_pages_exact_node !!! */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255aee86..14cac67c2012 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@ enum gpiod_flags {
int gpiod_count(struct device *dev, const char *con_id);
/* Acquire and dispose GPIOs */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
@@ -70,18 +70,18 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
@@ -146,31 +146,31 @@ static inline int gpiod_count(struct device *dev, const char *con_id)
return 0;
}
-static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
- const char *con_id,
- enum gpiod_flags flags)
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags flags)
+gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_optional(struct device *dev, const char *con_id,
- enum gpiod_flags flags)
+gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index, enum gpiod_flags flags)
+gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
@@ -206,7 +206,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get(struct device *dev,
+devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
@@ -214,7 +214,7 @@ __devm_gpiod_get(struct device *dev,
}
static inline
struct gpio_desc *__must_check
-__devm_gpiod_get_index(struct device *dev,
+devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
@@ -223,14 +223,14 @@ __devm_gpiod_get_index(struct device *dev,
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@ static inline struct gpio_desc *devm_get_gpiod_from_child(
#endif /* CONFIG_GPIOLIB */
-/*
- * Vararg-hacks! This is done to transition the kernel to always pass
- * the options flags argument to the below functions. During a transition
- * phase these vararg macros make both old-and-newstyle code compile,
- * but when all calls to the elder API are removed, these should go away
- * and the __gpiod_get() etc functions above be renamed just gpiod_get()
- * etc.
- */
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
-#define __gpiod_get_index(dev, con_id, index, flags, ...) \
- __gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
-#define __gpiod_get_optional(dev, con_id, flags, ...) \
- __gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get(dev, con_id, flags, ...) \
- __devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) \
- __devm_gpiod_get_index(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
- __devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
-
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
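With the vararg compatibility macros gone (removed just above), the flags argument is simply mandatory; a hypothetical consumer ("reset" is an assumed con_id):

static int example_take_out_of_reset(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 0);	/* release the line */
	return 0;
}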
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd4d44f..1aed31c5ffba 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
struct device;
@@ -64,6 +65,17 @@ struct seq_file;
* registers.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
+ * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
+ * @irqdomain: Interrupt translation domain; responsible for mapping
+ * between GPIO hwirq number and linux irq number
+ * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
+ * @irq_handler: the irq handler to use (often a predefined irq core function)
+ * for GPIO IRQs, provided by GPIO driver
+ * @irq_default_type: default IRQ triggering type applied during GPIO driver
+ * initialization, provided by GPIO driver
+ * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
+ * provided by GPIO driver
+ * @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -126,6 +138,7 @@ struct gpio_chip {
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
int irq_parent;
+ struct lock_class_key *lock_key;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler);
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type);
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_irqchip_add(...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
+ }) \
+)
+#else
+#define gpiochip_irqchip_add(...) \
+ _gpiochip_irqchip_add(__VA_ARGS__, NULL)
+#endif
#endif /* CONFIG_GPIOLIB_IRQCHIP */
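Call-site sketch (gc and irqchip are the driver's own, assumed set up already); under LOCKDEP the macro now mints a distinct lock_class_key per call site, so the call itself is unchanged.

static int example_setup_irqchip(struct gpio_chip *gc, struct irq_chip *irqchip)
{
	return gpiochip_irqchip_add(gc, irqchip, 0,
				    handle_simple_irq, IRQ_TYPE_NONE);
}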
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e2706140eaff..c0d712d22b07 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
}
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f10b20f05159..ecb080d6ff42 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
+int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned long pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
-extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
@@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
else
return 0;
}
-static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next)
-{
- if (!vma->anon_vma || vma->vm_ops)
- return;
- __vma_adjust_trans_huge(vma, start, end, adjust_next);
-}
static inline int hpage_nr_pages(struct page *page)
{
if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page)
return ACCESS_ONCE(huge_zero_page) == page;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return is_huge_zero_page(pmd_page(pmd));
+}
+
+struct page *get_huge_zero_page(void);
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
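vmf_insert_pfn_pmd() lets a fault handler install a PMD-sized pfn mapping directly, and is_huge_zero_pmd()/get_huge_zero_page() are exposed for the same callers. A sketch of a huge-fault path, assuming the pfn lookup (foo_get_pfn) is provided elsewhere:

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Placeholder for however the backing pfn is looked up (e.g. by a DAX fs). */
static unsigned long foo_get_pfn(unsigned long addr)
{
	return 0;
}

static int foo_huge_fault(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd, bool write)
{
	unsigned long pfn = foo_get_pfn(addr);

	if (!pfn)
		return VM_FAULT_SIGBUS;

	/* Map a full PMD worth of memory at the faulting address. */
	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn, write);
}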
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d891f949466a..5e35379f58a5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,9 @@ struct resv_map {
struct kref refs;
spinlock_t lock;
struct list_head regions;
+ long adds_in_progress;
+ struct list_head region_cache;
+ long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
-void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
+long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+ long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
+void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+extern struct mutex *hugetlb_fault_mutex_table;
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct address_space *mapping,
+ pgoff_t idx, unsigned long address);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@ struct huge_bootmem_page {
#endif
};
+struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+ pgoff_t idx);
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -471,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
+#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
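The new fault mutex table serializes hugetlb faults that target the same (mapping, index) pair: callers hash the fault parameters and take the corresponding mutex before instantiating the page. A minimal sketch, with the parameters assumed to come from the surrounding fault path:

#include <linux/mutex.h>
#include <linux/hugetlb.h>

static void foo_serialized_fault(struct hstate *h, struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 struct address_space *mapping,
				 pgoff_t idx, unsigned long address)
{
	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... look up or instantiate the huge page for idx here ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}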
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e83a738a3b87..768063baafbf 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
+extern s32
+i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
#endif /* I2C */
/**
@@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
/*flags for the client struct: */
-#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
-#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
+#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
+#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
-#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
-#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
/* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
/* must call put_device() when done with returned i2c_adapter device */
extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
{
return NULL;
}
+
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return NULL;
+}
#endif /* CONFIG_OF */
#endif /* _LINUX_I2C_H */
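i2c_smbus_read_i2c_block_data_or_emulated() gives client drivers a block read that falls back to emulation on adapters without native I2C block read support. A hedged example from a hypothetical client driver:

#include <linux/errno.h>
#include <linux/i2c.h>

static int foo_read_calibration(struct i2c_client *client, u8 *buf)
{
	s32 ret;

	/* 16 bytes starting at the (illustrative) register 0x10 */
	ret = i2c_smbus_read_i2c_block_data_or_emulated(client, 0x10, 16, buf);
	if (ret < 0)
		return ret;

	return ret == 16 ? 0 : -EIO;
}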
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897dc566..cfa906f28b7a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad488d3e2..908429216d9f 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
+extern int sysctl_igmp_llm_reports;
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
extern int sysctl_igmp_qrv;
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea6cd58..c91e1376132b 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
#ifndef _TOUCHSCREEN_H
#define _TOUCHSCREEN_H
-#include <linux/input.h>
+struct input_dev;
-#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
-#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev,
- bool multitouch)
-{
-}
-#endif
+void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
#endif
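The OF-only touchscreen_parse_of_params() becomes touchscreen_parse_properties(), which works on generic device properties and no longer needs a CONFIG_OF stub. A sketch of an input driver calling it during probe; the devm allocation is the usual pattern and the rest is illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/input/touchscreen.h>

static int foo_ts_setup(struct device *dev)
{
	struct input_dev *input = devm_input_allocate_device(dev);

	if (!input)
		return -ENOMEM;

	/* Fill in axis ranges etc. from generic touchscreen-* properties. */
	touchscreen_parse_properties(input, true);
	return 0;
}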
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d24e3b..6240063bdcac 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain **domains; /* ptr to domains */
+ struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c27dde7215b5..e399029b68c5 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -21,7 +21,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/page.h>
/*
diff --git a/include/linux/io.h b/include/linux/io.h
index fb5a99800e77..de64c1e53612 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
+struct resource;
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);
+void *devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+void devm_memunmap(struct device *dev, void *addr);
+
+void *__devm_memremap_pages(struct device *dev, struct resource *res);
+
+#ifdef CONFIG_ZONE_DEVICE
+void *devm_memremap_pages(struct device *dev, struct resource *res);
+#else
+static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+ /*
+ * Fail attempts to call devm_memremap_pages() without
+ * ZONE_DEVICE support enabled, this requires callers to fall
+ * back to plain devm_memremap() based on config
+ */
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
@@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+enum {
+ /* See memremap() kernel-doc for usage description... */
+ MEMREMAP_WB = 1 << 0,
+ MEMREMAP_WT = 1 << 1,
+};
+
+void *memremap(resource_size_t offset, size_t size, unsigned long flags);
+void memunmap(void *addr);
+
#endif /* _LINUX_IO_H */
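memremap()/devm_memremap() map RAM-like ranges with write-back or write-through semantics, while devm_memremap_pages() additionally arranges struct page backing (ZONE_DEVICE). The stub above returns ERR_PTR(-ENXIO) when ZONE_DEVICE is disabled, so callers are expected to fall back to a plain mapping. A sketch of that calling convention; the device and resource are assumptions here:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>

static void *foo_map_region(struct device *dev, struct resource *res)
{
	void *addr;

	addr = devm_memremap_pages(dev, res);
	if (!IS_ERR(addr))
		return addr;

	/* No ZONE_DEVICE: map write-back without struct page backing. */
	return devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
}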
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 0b1e569f5ff5..f8cea14485dd 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
implement it. */
void (*set_need_watch)(void *send_info, bool enable);
+ /*
+ * Called when flushing all pending messages.
+ */
+ void (*flush_messages)(void *send_info);
+
/* Called when the interface should go into "run to
completion" mode. If this call sets the value to true, the
interface should make sure that all messages are flushed
@@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
upper layer until the start_processing() function in the handlers
is called, and the lower layer must get the interface from that
call. */
-int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60aa42..f1f32af6d9b9 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
+ __s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
bool initialized;
struct in6_addr secret;
} stable_secret;
+ __s32 use_oif_addrs_only;
void *sysctl;
};
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
struct inet6_skb_parm {
int iif;
__be16 ra;
- __u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
+#define IP6SKB_HOPBYHOP 32
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6f8b34066442..11bf09288ddb 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -110,8 +110,8 @@ enum {
/*
* Return value for chip->irq_set_affinity()
*
- * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendent irqchips.
@@ -129,9 +129,19 @@ struct irq_domain;
* struct irq_common_data - per irq data shared by all irqchips
* @state_use_accessors: status information for irq chip functions.
* Use accessor functions to deal with it
+ * @node: node index useful for balancing
+ * @handler_data: per-IRQ data for the irq_chip methods
+ * @affinity: IRQ affinity on SMP
+ * @msi_desc: MSI descriptor
*/
struct irq_common_data {
unsigned int state_use_accessors;
+#ifdef CONFIG_NUMA
+ unsigned int node;
+#endif
+ void *handler_data;
+ struct msi_desc *msi_desc;
+ cpumask_var_t affinity;
};
/**
@@ -139,38 +149,26 @@ struct irq_common_data {
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
- * @node: node index useful for balancing
* @common: point to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
* @parent_data: pointer to parent struct irq_data to support hierarchy
* irq_domain
- * @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
- * @msi_desc: MSI descriptor
- * @affinity: IRQ affinity on SMP
- *
- * The fields here need to overlay the ones in irq_desc until we
- * cleaned up the direct references and switched everything over to
- * irq_data.
*/
struct irq_data {
u32 mask;
unsigned int irq;
unsigned long hwirq;
- unsigned int node;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_data *parent_data;
#endif
- void *handler_data;
void *chip_data;
- struct msi_desc *msi_desc;
- cpumask_var_t affinity;
};
/*
@@ -190,6 +188,7 @@ struct irq_data {
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
+ * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -204,6 +203,7 @@ enum {
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
+ IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
#define __irqd_to_state(d) ((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
+static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
+}
/*
* Functions for chained handlers which can be enabled/disabled by the
@@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq)
* Built-in IRQ handlers for various IRQ types,
* callable via desc->handle_irq()
*/
-extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(struct irq_desc *desc);
+extern void handle_fasteoi_irq(struct irq_desc *desc);
+extern void handle_edge_irq(struct irq_desc *desc);
+extern void handle_edge_eoi_irq(struct irq_desc *desc);
+extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_percpu_irq(struct irq_desc *desc);
+extern void handle_percpu_devid_irq(struct irq_desc *desc);
+extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
static inline void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->handler_data : NULL;
+ return d ? d->common->handler_data : NULL;
}
static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
- return d->handler_data;
+ return d->common->handler_data;
}
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->msi_desc : NULL;
+ return d ? d->common->msi_desc : NULL;
}
static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
- return d->msi_desc;
+ return d->common->msi_desc;
}
static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
-static inline int irq_data_get_node(struct irq_data *d)
+static inline int irq_common_data_get_node(struct irq_common_data *d)
{
+#ifdef CONFIG_NUMA
return d->node;
+#else
+ return 0;
+#endif
+}
+
+static inline int irq_data_get_node(struct irq_data *d)
+{
+ return irq_common_data_get_node(d->common);
}
static inline struct cpumask *irq_get_affinity_mask(int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->affinity : NULL;
+ return d ? d->common->affinity : NULL;
}
static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- return d->affinity;
+ return d->common->affinity;
}
unsigned int arch_dynirq_lower_bound(unsigned int from);
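Two related cleanups meet here: the per-irq handler_data/msi_desc/affinity/node fields move from irq_data into the shared irq_common_data (the accessors keep their names), and the flow handler prototypes lose the redundant irq argument. A sketch of a chained demux handler written against the new one-argument prototype; the bank readback is illustrative:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/printk.h>

static void foo_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);

	chained_irq_enter(chip, desc);
	pr_debug("demuxing parent irq %u\n", irq);
	/* ... read the bank status and generic_handle_irq() each child ... */
	chained_irq_exit(chip, desc);
}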
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index bf982e021fbd..9eeeb9589acf 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -104,6 +104,8 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_ISACTIVER GICD_ISACTIVER
+#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
@@ -268,9 +270,12 @@
#define ICH_LR_EOI (1UL << 41)
#define ICH_LR_GROUP (1UL << 60)
+#define ICH_LR_HW (1UL << 61)
#define ICH_LR_STATE (3UL << 62)
#define ICH_LR_PENDING_BIT (1UL << 62)
#define ICH_LR_ACTIVE_BIT (1UL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
@@ -288,6 +293,7 @@
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
@@ -385,6 +391,12 @@ static inline void gic_write_eoir(u64 irq)
isb();
}
+static inline void gic_write_dir(u64 irq)
+{
+ asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
+ isb();
+}
+
struct irq_domain;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
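ICC_DIR_EL1 and gic_write_dir() support EOImode == 1 operation, where gic_write_eoir() only performs the priority drop and deactivation is a separate step that can be skipped for interrupts forwarded to a VCPU (the guest deactivates them instead). A hedged sketch of that split, assuming arm64 and this header's accessors:

#include <linux/types.h>
#include <linux/irqchip/arm-gic-v3.h>

static void foo_gic_complete(u64 irqnr, bool forwarded_to_vcpu)
{
	gic_write_eoir(irqnr);		/* priority drop only (EOImode == 1) */
	if (!forwarded_to_vcpu)
		gic_write_dir(irqnr);	/* explicit deactivation */
}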
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 65da435d01c1..b8901dfd9e95 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,9 +20,13 @@
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
+#define GIC_CPU_DEACTIVATE 0x1000
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0
+
+#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
+
#define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023
#define GICC_DIS_BYPASS_MASK 0x1e0
@@ -71,11 +75,12 @@
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
-#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_CTRL_SHIFT 0
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad3734911..4e6861605050 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
+/* 64-bit counter register for CM3 */
+#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
#define GIC_SH_COUNTER_63_32_OFS 0x0014
#define GIC_SH_REVISIONID_OFS 0x0020
/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
-#define GIC_INTR_BIT(intr) ((intr) % 32)
+#define GIC_INTR_OFS(intr) ({ \
+ unsigned bits = mips_cm_is64 ? 64 : 32; \
+ unsigned reg_idx = (intr) / bits; \
+ unsigned reg_width = bits / 8; \
+ \
+ reg_idx * reg_width; \
+})
+#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
/* Polarity : Reset Value is always 0 */
#define GIC_SH_SET_POLARITY_OFS 0x0100
@@ -98,6 +106,8 @@
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
+/* 64-bit Compare register on CM3 */
+#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
#define GIC_VPE_COMPARE_HI_OFS 0x00a4
#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
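GIC_INTR_OFS()/GIC_INTR_BIT() now account for the 64-bit register layout on CM3 (mips_cm_is64). A short worked example; the register write helper is hypothetical:

#include <linux/irqchip/mips-gic.h>

/* Placeholder for a read-modify-write of a shared GIC register. */
static void foo_gic_set_bit(unsigned long reg_ofs, unsigned int bit)
{
}

static void foo_set_polarity(unsigned int intr)
{
	/*
	 * For intr == 40: with 32-bit registers the offset/bit pair is
	 * 0x104/8; with mips_cm_is64 it becomes 0x100/40.
	 */
	foo_gic_set_bit(GIC_SH_SET_POLARITY_OFS + GIC_INTR_OFS(intr),
			GIC_INTR_BIT(intr));
}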
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 5acfa26602e1..a587a33363c7 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- return irq_to_desc(data->irq);
-#else
- return container_of(data, struct irq_desc, irq_data);
-#endif
+ return container_of(data->common, struct irq_desc, irq_common_data);
}
static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
- return desc->irq_data.handler_data;
+ return desc->irq_common_data.handler_data;
}
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
- return desc->irq_data.msi_desc;
+ return desc->irq_common_data.msi_desc;
}
/*
* Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ * handle an interrupt.
*/
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
- desc->handle_irq(irq, desc);
+ desc->handle_irq(desc);
}
int generic_handle_irq(unsigned int irq);
@@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq)
return irq_desc_has_action(irq_to_desc(irq));
}
-/* caller has locked the irq_desc and both params are valid */
-static inline void __irq_set_handler_locked(unsigned int irq,
- irq_flow_handler_t handler)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- desc->handle_irq = handler;
-}
-
-/* caller has locked the irq_desc and both params are valid */
-static inline void
-__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handler, const char *name)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- irq_desc_get_irq_data(desc)->chip = chip;
- desc->handle_irq = handler;
- desc->name = name;
-}
-
/**
* irq_set_handler_locked - Set irq handler from a locked region
* @data: Pointer to the irq_data structure which identifies the irq
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 62d543004197..661bed0ed1f3 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
struct irq_desc;
struct irq_data;
-typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d32615280be9..000000000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs. Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks. By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
- unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
- __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...) /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
- return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
- free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle. Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process. To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time. When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s handle_t; /* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s journal_t; /* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK 1
-#define JFS_COMMIT_BLOCK 2
-#define JFS_SUPERBLOCK_V1 3
-#define JFS_SUPERBLOCK_V2 4
-#define JFS_REVOKE_BLOCK 5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
- __be32 h_magic;
- __be32 h_blocktype;
- __be32 h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
- __be32 t_blocknr; /* The on-disk block number */
- __be32 t_flags; /* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
- journal_header_t r_header;
- __be32 r_count; /* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
-#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
-
-
-/*
- * The journal superblock. All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
- journal_header_t s_header;
-
-/* 0x000C */
- /* Static information describing the journal */
- __be32 s_blocksize; /* journal device blocksize */
- __be32 s_maxlen; /* total blocks in journal file */
- __be32 s_first; /* first block of log information */
-
-/* 0x0018 */
- /* Dynamic information describing the current state of the log */
- __be32 s_sequence; /* first commit ID expected in log */
- __be32 s_start; /* blocknr of start of log */
-
-/* 0x0020 */
- /* Error value, as set by journal_abort(). */
- __be32 s_errno;
-
-/* 0x0024 */
- /* Remaining fields are only valid in a version-2 superblock */
- __be32 s_feature_compat; /* compatible feature set */
- __be32 s_feature_incompat; /* incompatible feature set */
- __be32 s_feature_ro_compat; /* readonly-compatible feature set */
-/* 0x0030 */
- __u8 s_uuid[16]; /* 128-bit uuid for journal */
-
-/* 0x0040 */
- __be32 s_nr_users; /* Nr of filesystems sharing log */
-
- __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
- __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
- __be32 s_max_trans_data; /* Limit of data blocks per trans. */
-
-/* 0x0050 */
- __u32 s_padding[44];
-
-/* 0x0100 */
- __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES 0
-#define JFS_KNOWN_ROCOMPAT_FEATURES 0
-#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
- BH_JBD /* Has an attached ext3 journal_head */
- = BH_PrivateStart,
- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
- BH_Freed, /* Has been freed (truncated) */
- BH_Revoked, /* Has been revoked from the log */
- BH_RevokeValid, /* Revoked flag is valid */
- BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
- BH_JBDPrivateStart, /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert) BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...) J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...) \
- ({ \
- int val = (expr); \
- if (!val) { \
- printk(KERN_ERR \
- "EXT3-fs unexpected failure: %s;\n",# expr); \
- printk(KERN_ERR why "\n"); \
- } \
- val; \
- })
-#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
- /* Which compound transaction is this update a part of? */
- transaction_t *h_transaction;
-
- /* Number of remaining buffers we are allowed to dirty: */
- int h_buffer_credits;
-
- /* Reference count on this handle */
- int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
- int h_err;
-
- /* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_aborted: 1; /* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism. It
- * tracks a compound transaction through its various states:
- *
- * RUNNING: accepting new updates
- * LOCKED: Updates still running but we don't accept new ones
- * RUNDOWN: Updates are tidying up but have finished requesting
- * new buffers to modify (state not used for now)
- * FLUSH: All updates complete, but we are still writing to disk
- * COMMIT: All data on disk, writing commit record
- * FINISHED: We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- * j_list_lock
- * ->jbd_lock_bh_journal_head() (This is "innermost")
- *
- * j_state_lock
- * ->jbd_lock_bh_state()
- *
- * jbd_lock_bh_state()
- * ->j_list_lock
- *
- * j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
- * ->j_list_lock (journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
- /* Pointer to the journal for this transaction. [no locking] */
- journal_t *t_journal;
-
- /* Sequence number for this transaction [no locking] */
- tid_t t_tid;
-
- /*
- * Transaction's current state
- * [no locking - only kjournald alters this]
- * [j_list_lock] guards transition of a transaction into T_FINISHED
- * state and subsequent call of __journal_drop_transaction()
- * FIXME: needs barriers
- * KLUDGE: [use j_state_lock]
- */
- enum {
- T_RUNNING,
- T_LOCKED,
- T_FLUSH,
- T_COMMIT,
- T_COMMIT_RECORD,
- T_FINISHED
- } t_state;
-
- /*
- * Where in the log does this transaction's commit start? [no locking]
- */
- unsigned int t_log_start;
-
- /* Number of buffers on the t_buffers list [j_list_lock] */
- int t_nr_buffers;
-
- /*
- * Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
- */
- struct journal_head *t_reserved_list;
-
- /*
- * Doubly-linked circular list of all buffers under writeout during
- * commit [j_list_lock]
- */
- struct journal_head *t_locked_list;
-
- /*
- * Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
- */
- struct journal_head *t_buffers;
-
- /*
- * Doubly-linked circular list of all data buffers still to be
- * flushed before this transaction can be committed [j_list_lock]
- */
- struct journal_head *t_sync_datalist;
-
- /*
- * Doubly-linked circular list of all forget buffers (superseded
- * buffers which we can un-checkpoint once this transaction commits)
- * [j_list_lock]
- */
- struct journal_head *t_forget;
-
- /*
- * Doubly-linked circular list of all buffers still to be flushed before
- * this transaction can be checkpointed. [j_list_lock]
- */
- struct journal_head *t_checkpoint_list;
-
- /*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
- * Doubly-linked circular list of temporary buffers currently undergoing
- * IO in the log [j_list_lock]
- */
- struct journal_head *t_iobuf_list;
-
- /*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
- */
- struct journal_head *t_shadow_list;
-
- /*
- * Doubly-linked circular list of control buffers being written to the
- * log. [j_list_lock]
- */
- struct journal_head *t_log_list;
-
- /*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
- * Number of outstanding updates running on this transaction
- * [t_handle_lock]
- */
- int t_updates;
-
- /*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
- */
- int t_outstanding_credits;
-
- /*
- * Forward and backward links for the circular list of all transactions
- * awaiting checkpoint. [j_list_lock]
- */
- transaction_t *t_cpnext, *t_cpprev;
-
- /*
- * When will the transaction expire (become due for commit), in jiffies?
- * [no locking]
- */
- unsigned long t_expires;
-
- /*
- * When this transaction started, in nanoseconds [no locking]
- */
- ktime_t t_start_time;
-
- /*
- * How many handles used this transaction? [t_handle_lock]
- */
- int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint: Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset for into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- * is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- * takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
- /* General journaling state flags [j_state_lock] */
- unsigned long j_flags;
-
- /*
- * Is there an outstanding uncleared error on the journal (from a prior
- * abort)? [j_state_lock]
- */
- int j_errno;
-
- /* The superblock buffer */
- struct buffer_head *j_sb_buffer;
- journal_superblock_t *j_superblock;
-
- /* Version of the superblock format */
- int j_format_version;
-
- /*
- * Protect the various scalars in the journal
- */
- spinlock_t j_state_lock;
-
- /*
- * Number of processes waiting to create a barrier lock [j_state_lock]
- */
- int j_barrier_count;
-
- /*
- * Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_running_transaction;
-
- /*
- * the transaction we are pushing to disk
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_committing_transaction;
-
- /*
- * ... and a linked circular list of all transactions waiting for
- * checkpointing. [j_list_lock]
- */
- transaction_t *j_checkpoint_transactions;
-
- /*
- * Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
- */
- wait_queue_head_t j_wait_transaction_locked;
-
- /* Wait queue for waiting for checkpointing to complete */
- wait_queue_head_t j_wait_logspace;
-
- /* Wait queue for waiting for commit to complete */
- wait_queue_head_t j_wait_done_commit;
-
- /* Wait queue to trigger checkpointing */
- wait_queue_head_t j_wait_checkpoint;
-
- /* Wait queue to trigger commit */
- wait_queue_head_t j_wait_commit;
-
- /* Wait queue to wait for updates to complete */
- wait_queue_head_t j_wait_updates;
-
- /* Semaphore for locking against concurrent checkpoints */
- struct mutex j_checkpoint_mutex;
-
- /*
- * Journal head: identifies the first unused block in the journal.
- * [j_state_lock]
- */
- unsigned int j_head;
-
- /*
- * Journal tail: identifies the oldest still-used block in the journal.
- * [j_state_lock]
- */
- unsigned int j_tail;
-
- /*
- * Journal free: how many free blocks are there in the journal?
- * [j_state_lock]
- */
- unsigned int j_free;
-
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
- */
- unsigned int j_first;
- unsigned int j_last;
-
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
- */
- struct block_device *j_dev;
- int j_blocksize;
- unsigned int j_blk_offset;
-
- /*
- * Device which holds the client fs. For internal journal this will be
- * equal to j_dev.
- */
- struct block_device *j_fs_dev;
-
- /* Total maximum capacity of the journal region on disk. */
- unsigned int j_maxlen;
-
- /*
- * Protects the buffer lists and internal buffer state.
- */
- spinlock_t j_list_lock;
-
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
- struct inode *j_inode;
-
- /*
- * Sequence number of the oldest transaction in the log [j_state_lock]
- */
- tid_t j_tail_sequence;
-
- /*
- * Sequence number of the next transaction to grant [j_state_lock]
- */
- tid_t j_transaction_sequence;
-
- /*
- * Sequence number of the most recently committed transaction
- * [j_state_lock].
- */
- tid_t j_commit_sequence;
-
- /*
- * Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
- */
- tid_t j_commit_request;
-
- /*
- * Sequence number of the most recent transaction someone is waiting
- * for to commit.
- * [j_state_lock]
- */
- tid_t j_commit_waited;
-
- /*
- * Journal uuid: identifies the object (filesystem, LVM volume etc)
- * backed by this journal. This will eventually be replaced by an array
- * of uuids, allowing us to index multiple devices within a single
- * journal and to perform atomic updates across them.
- */
- __u8 j_uuid[16];
-
- /* Pointer to the current commit thread for this journal */
- struct task_struct *j_task;
-
- /*
- * Maximum number of metadata buffers to allow in a single compound
- * commit transaction
- */
- int j_max_transaction_buffers;
-
- /*
- * What is the maximum transaction lifetime before we begin a commit?
- */
- unsigned long j_commit_interval;
-
- /* The timer used to wakeup the commit thread: */
- struct timer_list j_commit_timer;
-
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
- */
- spinlock_t j_revoke_lock;
- struct jbd_revoke_table_s *j_revoke;
- struct jbd_revoke_table_s *j_revoke_table[2];
-
- /*
- * array of bhs for journal_commit_transaction
- */
- struct buffer_head **j_wbuf;
- int j_wbufsize;
-
- /*
- * this is the pid of the last person to run a synchronous operation
- * through the journal.
- */
- pid_t j_last_sync_writer;
-
- /*
- * the average amount of time in nanoseconds it takes to commit a
- * transaction to the disk. [j_state_lock]
- */
- u64 j_average_commit_time;
-
- /*
- * An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
- */
- void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
-#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
-#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
-#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
-#define JFS_BARRIER 0x020 /* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t *transaction,
- struct journal_head *jh_in,
- struct journal_head **jh_out,
- unsigned int blocknr);
-
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase. j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe. We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
- return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int journal_restart (handle_t *, int nblocks);
-extern int journal_extend (handle_t *, int nblocks);
-extern int journal_get_write_access(handle_t *, struct buffer_head *);
-extern int journal_get_create_access (handle_t *, struct buffer_head *);
-extern int journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int journal_dirty_data (handle_t *, struct buffer_head *);
-extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void journal_release_buffer (handle_t *, struct buffer_head *);
-extern int journal_forget (handle_t *, struct buffer_head *);
-extern void journal_sync_buffer (struct buffer_head *);
-extern void journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int journal_stop(handle_t *);
-extern int journal_flush (journal_t *);
-extern void journal_lock_updates (journal_t *);
-extern void journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
- struct block_device *fs_dev,
- int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int journal_update_format (journal_t *);
-extern int journal_check_used_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_check_available_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_set_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_create (journal_t *);
-extern int journal_load (journal_t *journal);
-extern int journal_destroy (journal_t *);
-extern int journal_recover (journal_t *journal);
-extern int journal_wipe (journal_t *, int);
-extern int journal_skip_recovery (journal_t *);
-extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
- int);
-extern void journal_abort (journal_t *, int);
-extern int journal_errno (journal_t *);
-extern void journal_ack_err (journal_t *);
-extern int journal_clear_err (journal_t *);
-extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
- return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
- kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int journal_init_revoke(journal_t *, int);
-extern void journal_destroy_revoke_caches(void);
-extern int journal_init_revoke_caches(void);
-
-extern void journal_destroy_revoke(journal_t *);
-extern int journal_revoke (handle_t *,
- unsigned int, struct buffer_head *);
-extern int journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void journal_write_revoke_records(journal_t *,
- transaction_t *, int);
-
-/* Recovery revoke support */
-extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void journal_clear_revoke(journal_t *);
-extern void journal_switch_revoke_table(journal_t *journal);
-extern void journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void __journal_drop_transaction(journal_t *, transaction_t *);
-extern int cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_abort
- *
- * Simple test wrapper function to test the JFS_ABORT state flag. This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (eg. ext3), and that we and should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
- return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
- if (handle->h_aborted)
- return 1;
- return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
- handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__ */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference >= 0);
-}
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
- int nblocks = journal->j_max_transaction_buffers;
- if (journal->j_committing_transaction)
- nblocks += journal->j_committing_transaction->
- t_outstanding_credits;
- return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh) do {} while (0)
-#define print_buffer_fields(bh) do {} while (0)
-#define print_buffer_trace(bh) do {} while (0)
-#define BUFFER_TRACE(bh, info) do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
-#define JBUFFER_TRACE(jh, info) do {} while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640ae9a94..df07e78487d5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/slab.h>
+#include <linux/bit_spinlock.h>
#include <crypto/hash.h>
#endif
@@ -336,7 +337,45 @@ BUFFER_FNS(Freed, freed)
BUFFER_FNS(Shadow, shadow)
BUFFER_FNS(Verified, verified)
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+ return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+ return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
#define J_ASSERT(assert) BUG_ON(!(assert))
@@ -1042,8 +1081,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc53432355f..000000000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
- return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
- return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
- bit_spin_lock(BH_JournalHead, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_JournalHead, &bh->b_state);
-}
-
-#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473f226b..f1094238ab2a 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,50 @@
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
*
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
+ *
* Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be used in both static_branch_unlikely()
+ * and static_branch_likely() statements.
+ *
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line true branch.
*
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'.
*
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * Since this relies on modifying code, the branch modifying functions
* must be considered absolute slow paths (machine wide synchronization etc.).
* OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +62,10 @@
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
- *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- * if (static_key_true(&my_key)) {
- * }
+ * Lacking toolchain and or architecture support, static keys fall back to a
+ * simple conditional branch.
*
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
*/
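
A minimal usage sketch of the API described above (illustrative only, not taken
from this patch; the key name and the do_work_fast()/do_work_slow() helpers are
hypothetical):

	#include <linux/jump_label.h>

	/* Hypothetical key; the fast path starts out disabled. */
	static DEFINE_STATIC_KEY_FALSE(use_fast_path);

	static int do_work_slow(void) { return 0; }	/* placeholder */
	static int do_work_fast(void) { return 1; }	/* placeholder */

	static int do_work(void)
	{
		/* Compiles to a NOP at this site until the key is enabled. */
		if (static_branch_unlikely(&use_fast_path))
			return do_work_fast();	/* out-of-line true block */
		return do_work_slow();
	}

	static void set_fast_path(bool on)
	{
		/* Slow path: patches every branch site at runtime. */
		if (on)
			static_branch_enable(&use_fast_path);
		else
			static_branch_disable(&use_fast_path);
	}

Reference-counted users would call static_branch_inc()/static_branch_dec()
instead; see the sketch after those macros further below.
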
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +109,8 @@ struct static_key {
#ifndef __ASSEMBLY__
enum jump_label_type {
- JUMP_LABEL_DISABLE = 0,
- JUMP_LABEL_ENABLE,
+ JUMP_LABEL_NOP = 0,
+ JUMP_LABEL_JMP,
};
struct module;
@@ -101,33 +124,18 @@ static inline int static_key_count(struct static_key *key)
#ifdef HAVE_JUMP_LABEL
-#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
-#define JUMP_LABEL_TYPE_MASK 1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
- return (struct jump_entry *)((unsigned long)key->entries
- & ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
- if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
- JUMP_LABEL_TYPE_TRUE_BRANCH)
- return true;
- return false;
-}
+#define JUMP_TYPE_FALSE 0UL
+#define JUMP_TYPE_TRUE 1UL
+#define JUMP_TYPE_MASK 1UL
static __always_inline bool static_key_false(struct static_key *key)
{
- return arch_static_branch(key);
+ return arch_static_branch(key, false);
}
static __always_inline bool static_key_true(struct static_key *key)
{
- return !static_key_false(key);
+ return !arch_static_branch(key, true);
}
extern struct jump_entry __start___jump_table[];
@@ -145,12 +153,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+#define STATIC_KEY_INIT_TRUE \
{ .enabled = ATOMIC_INIT(1), \
- .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE \
{ .enabled = ATOMIC_INIT(0), \
- .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+ .entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
@@ -198,10 +206,8 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
- { .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
- { .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
#endif /* HAVE_JUMP_LABEL */
@@ -213,6 +219,157 @@ static inline bool static_key_enabled(struct static_key *key)
return static_key_count(key) > 0;
}
+static inline void static_key_enable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec(key);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+ struct static_key key;
+};
+
+struct static_key_false {
+ struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name) \
+ struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name) \
+ struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch| likely (1) | unlikely (0)
+ * -----------+-----------------------+------------------
+ * | |
+ * true (1) | ... | ...
+ * | NOP | JMP L
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ * | |
+ * false (0) | ... | ...
+ * | JMP L | NOP
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ * enabled type branch instruction
+ * -----------------------------+-----------
+ * 0 0 0 | NOP
+ * 0 0 1 | JMP
+ * 0 1 0 | NOP
+ * 0 1 1 | JMP
+ *
+ * 1 0 0 | JMP
+ * 1 0 1 | NOP
+ * 1 1 0 | JMP
+ * 1 1 1 | NOP
+ *
+ * Which gives the following functions:
+ *
+ * dynamic: instruction = enabled ^ branch
+ * static: instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
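
A worked reading of the table above (added here for illustration): an
initially-true key has type = 1; used at a static_branch_unlikely() site,
branch = 0, so the initial instruction is type ^ branch = 1, i.e. a JMP to the
out-of-line true block. A later static_key_disable() drops enabled to 0, and
enabled ^ branch = 0 patches that site back to a NOP.
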
+
+extern bool ____wrong_branch_error(void);
+
+#define static_branch_likely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = !arch_static_branch(&(x)->key, true); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = !arch_static_branch_jump(&(x)->key, true); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#define static_branch_unlikely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = arch_static_branch_jump(&(x)->key, false); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = arch_static_branch(&(x)->key, false); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage; refcount, branch is enabled when: count != 0
+ */
+
+#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+
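
As a small follow-on sketch (again hypothetical, not from the patch), the
refcounted variants suit a branch shared by several independent users:

	/* Hypothetical tracing hook wanted by zero or more users. */
	static DEFINE_STATIC_KEY_FALSE(trace_hook_wanted);

	static void register_trace_user(void)
	{
		static_branch_inc(&trace_hook_wanted);	/* "make more true" */
	}

	static void unregister_trace_user(void)
	{
		/* Branch stays enabled until the last user is gone. */
		static_branch_dec(&trace_hook_wanted);
	}
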
#endif /* _LINUX_JUMP_LABEL_H */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 123be25ea15a..5d4e9c4b821d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
+size_t kernfs_path_len(struct kernfs_node *kn);
char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
@@ -332,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }
+static inline size_t kernfs_path_len(struct kernfs_node *kn)
+{ return 0; }
+
static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen)
{ return NULL; }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index b63218f68c4b..d140b1e9faa7 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -16,7 +16,7 @@
#include <uapi/linux/kexec.h>
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -318,13 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-#else /* !CONFIG_KEXEC */
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
+int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
+int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+ unsigned long buf_len);
+int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
+ Elf_Shdr *sechdrs, unsigned int relsec);
+int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ unsigned int relsec);
+
+#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
#define kexec_in_progress false
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
#endif /* !defined(__ASSEMBLY__) */
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 0555cc66a15b..fcfd2bf14d3f 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
UMH_DISABLED,
};
-extern void usermodehelper_init(void);
-
extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 869b21dcf503..e691b6a23f72 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -11,7 +11,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
const char namefmt[], ...);
#define kthread_create(threadfn, data, namefmt, arg...) \
- kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
+ kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 81089cf1f0c1..1bef9e21e725 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -242,6 +242,7 @@ struct kvm_vcpu {
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
+ unsigned int halt_poll_ns;
#ifdef CONFIG_HAS_IOMEM
int mmio_needed;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 75e3af01ee32..3f021dc5da8c 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -31,6 +31,9 @@ enum {
ND_CMD_ARS_STATUS_MAX = SZ_4K,
ND_MAX_MAPPINGS = 32,
+ /* region flag indicating to direct-map persistent memory by default */
+ ND_REGION_PAGEMAP = 0,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -91,6 +94,7 @@ struct nd_region_desc {
void *provider_data;
int num_lanes;
int numa_node;
+ unsigned long flags;
};
struct nvdimm_bus;
diff --git a/include/linux/list.h b/include/linux/list.h
index feb773c76ee0..3e3e64a61002 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -672,6 +672,11 @@ static inline void hlist_add_fake(struct hlist_node *n)
n->pprev = &n->next;
}
+static inline bool hlist_fake(struct hlist_node *h)
+{
+ return h->pprev == &h->next;
+}
+
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0bc095..fd4ca0b4fe0f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
-#include <asm/cmpxchg.h>
struct llist_head {
struct llist_node *first;
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 1cc89e9df480..ffb9c9da4f39 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@ struct lsm_network_audit {
} fam;
};
+struct lsm_ioctlop_audit {
+ struct path path;
+ u16 cmd;
+};
+
/* Auxiliary data to use in generating the audit record. */
struct common_audit_data {
char type;
@@ -53,6 +58,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_KMOD 8
#define LSM_AUDIT_DATA_INODE 9
#define LSM_AUDIT_DATA_DENTRY 10
+#define LSM_AUDIT_DATA_IOCTL_OP 11
union {
struct path path;
struct dentry *dentry;
@@ -68,6 +74,7 @@ struct common_audit_data {
} key_struct;
#endif
char *kmod_name;
+ struct lsm_ioctlop_audit *op;
} u;
/* this union contains LSM specific data */
union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9429f054c323..ec3a6bab29de 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1881,8 +1881,10 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
extern int __init security_module_enable(const char *module);
extern void __init capability_add_hooks(void);
-#ifdef CONFIG_SECURITY_YAMA_STACKED
-void __init yama_add_hooks(void);
+#ifdef CONFIG_SECURITY_YAMA
+extern void __init yama_add_hooks(void);
+#else
+static inline void __init yama_add_hooks(void) { }
#endif
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 68c42454439b..74deadb42d76 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/types.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/completion.h>
@@ -67,7 +67,8 @@ struct mbox_chan_ops {
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for
* last TX's status after these many millisecs
* @of_xlate: Controller driver specific mapping of channel via DT
- * @poll: API private. Used to poll for TXDONE on all channels.
+ * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
+ * channels.
* @node: API private. To hook into list of controllers.
*/
struct mbox_controller {
@@ -81,7 +82,7 @@ struct mbox_controller {
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
/* Internal to API */
- struct timer_list poll;
+ struct hrtimer poll_hrt;
struct list_head node;
};
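
A sketch of how a polling-mode controller might use these fields (hypothetical
driver code, not part of this patch; example_chan_ops, example_chans and the
period value are made up): txdone_poll selects polling, txpoll_period sets the
interval, and poll_hrt is managed entirely by the mailbox core.

	static struct mbox_chan example_chans[2];
	static const struct mbox_chan_ops example_chan_ops; /* would hold send_data() etc. */

	static struct mbox_controller example_mbox = {
		.ops		= &example_chan_ops,
		.chans		= example_chans,
		.num_chans	= ARRAY_SIZE(example_chans),
		.txdone_poll	= true,		/* no TX-done interrupt... */
		.txpoll_period	= 10,		/* ...so the core polls every 10 ms via poll_hrt */
	};
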
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index cc4b01972060..c518eb589260 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -77,6 +77,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
+bool memblock_overlaps_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
@@ -323,7 +325,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
-int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
extern void __memblock_dump_all(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73b02b0a8f60..ad800e62cb7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,6 +23,11 @@
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
+#include <linux/page_counter.h>
+#include <linux/vmpressure.h>
+#include <linux/eventfd.h>
+#include <linux/mmzone.h>
+#include <linux/writeback.h>
struct mem_cgroup;
struct page;
@@ -67,12 +72,221 @@ enum mem_cgroup_events_index {
MEMCG_NR_EVENTS,
};
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg event.
+ */
+enum mem_cgroup_events_target {
+ MEM_CGROUP_TARGET_THRESH,
+ MEM_CGROUP_TARGET_SOFTLIMIT,
+ MEM_CGROUP_TARGET_NUMAINFO,
+ MEM_CGROUP_NTARGETS,
+};
+
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
+struct cg_proto {
+ struct page_counter memory_allocated; /* Current allocated memory. */
+ struct percpu_counter sockets_allocated; /* Current number of sockets. */
+ int memory_pressure;
+ long sysctl_mem[3];
+ unsigned long flags;
+ /*
+ * memcg field is used to find which memcg we belong to directly.
+ * Each memcg struct can hold more than one cg_proto, so container_of
+ * won't really cut it.
+ *
+ * The elegant solution would be having an inverse function to
+ * proto_cgroup in struct proto, but that means polluting the structure
+ * for everybody, instead of just for memcg users.
+ */
+ struct mem_cgroup *memcg;
+};
+
#ifdef CONFIG_MEMCG
+struct mem_cgroup_stat_cpu {
+ long count[MEM_CGROUP_STAT_NSTATS];
+ unsigned long events[MEMCG_NR_EVENTS];
+ unsigned long nr_page_events;
+ unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct mem_cgroup_reclaim_iter {
+ struct mem_cgroup *position;
+ /* scan generation, increased every round-trip */
+ unsigned int generation;
+};
+
+/*
+ * per-zone information in memory controller.
+ */
+struct mem_cgroup_per_zone {
+ struct lruvec lruvec;
+ unsigned long lru_size[NR_LRU_LISTS];
+
+ struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+
+ struct rb_node tree_node; /* RB tree node */
+ unsigned long usage_in_excess;/* Set to the value by which */
+ /* the soft limit is exceeded*/
+ bool on_tree;
+ struct mem_cgroup *memcg; /* Back pointer, we cannot */
+ /* use container_of */
+};
+
+struct mem_cgroup_per_node {
+ struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_threshold {
+ struct eventfd_ctx *eventfd;
+ unsigned long threshold;
+};
+
+/* For threshold */
+struct mem_cgroup_threshold_ary {
+ /* An array index points to threshold just below or equal to usage. */
+ int current_threshold;
+ /* Size of entries[] */
+ unsigned int size;
+ /* Array of thresholds */
+ struct mem_cgroup_threshold entries[0];
+};
+
+struct mem_cgroup_thresholds {
+ /* Primary thresholds array */
+ struct mem_cgroup_threshold_ary *primary;
+ /*
+ * Spare threshold array.
+ * This is needed to make mem_cgroup_unregister_event() "never fail".
+ * It must be able to store at least primary->size - 1 entries.
+ */
+ struct mem_cgroup_threshold_ary *spare;
+};
+
+/*
+ * The memory controller data structure. The memory controller controls both
+ * page cache and RSS per cgroup. We would eventually like to provide
+ * statistics based on the statistics developed by Rik Van Riel for clock-pro,
+ * to help the administrator determine what knobs to tune.
+ */
+struct mem_cgroup {
+ struct cgroup_subsys_state css;
+
+ /* Accounted resources */
+ struct page_counter memory;
+ struct page_counter memsw;
+ struct page_counter kmem;
+
+ /* Normal memory consumption range */
+ unsigned long low;
+ unsigned long high;
+
+ unsigned long soft_limit;
+
+ /* vmpressure notifications */
+ struct vmpressure vmpressure;
+
+ /* css_online() has been completed */
+ int initialized;
+
+ /*
+ * Should the accounting and control be hierarchical, per subtree?
+ */
+ bool use_hierarchy;
+
+ /* protected by memcg_oom_lock */
+ bool oom_lock;
+ int under_oom;
+
+ int swappiness;
+ /* OOM-Killer disable */
+ int oom_kill_disable;
+
+ /* protect arrays of thresholds */
+ struct mutex thresholds_lock;
+
+ /* thresholds for memory usage. RCU-protected */
+ struct mem_cgroup_thresholds thresholds;
+
+ /* thresholds for mem+swap usage. RCU-protected */
+ struct mem_cgroup_thresholds memsw_thresholds;
+
+ /* For oom notifier event fd */
+ struct list_head oom_notify;
+
+ /*
+ * Should we move charges of a task when a task is moved into this
+ * mem_cgroup ? And what type of charges should we move ?
+ */
+ unsigned long move_charge_at_immigrate;
+ /*
+ * set > 0 if pages under this cgroup are moving to other cgroup.
+ */
+ atomic_t moving_account;
+ /* taken only while moving_account > 0 */
+ spinlock_t move_lock;
+ struct task_struct *move_lock_task;
+ unsigned long move_lock_flags;
+ /*
+ * percpu counter.
+ */
+ struct mem_cgroup_stat_cpu __percpu *stat;
+ spinlock_t pcp_counter_lock;
+
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+ struct cg_proto tcp_mem;
+#endif
+#if defined(CONFIG_MEMCG_KMEM)
+ /* Index in the kmem_cache->memcg_params.memcg_caches array */
+ int kmemcg_id;
+ bool kmem_acct_activated;
+ bool kmem_acct_active;
+#endif
+
+ int last_scanned_node;
+#if MAX_NUMNODES > 1
+ nodemask_t scan_nodes;
+ atomic_t numainfo_events;
+ atomic_t numainfo_updating;
+#endif
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+ struct wb_domain cgwb_domain;
+#endif
+
+ /* List of events which userspace want to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+
+ struct mem_cgroup_per_node *nodeinfo[0];
+ /* WARNING: nodeinfo must be the last member here */
+};
extern struct cgroup_subsys_state *mem_cgroup_root_css;
-void mem_cgroup_events(struct mem_cgroup *memcg,
+/**
+ * mem_cgroup_events - count memory events against a cgroup
+ * @memcg: the memory cgroup
+ * @idx: the event index
+ * @nr: the number of events to account for
+ */
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
- unsigned int nr);
+ unsigned int nr)
+{
+ this_cpu_add(memcg->stat->events[idx], nr);
+}
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
@@ -90,15 +304,29 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
-bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
- struct mem_cgroup *root);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
-extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct mem_cgroup, css) : NULL;
+}
-extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
+static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+ struct mem_cgroup *root)
+{
+ if (root == memcg)
+ return true;
+ if (!root->use_hierarchy)
+ return false;
+ return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
+}
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
@@ -114,24 +342,68 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
-extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+ino_t page_cgroup_ino(struct page *page);
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
-void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+static inline bool mem_cgroup_disabled(void)
+{
+ if (memory_cgrp_subsys.disabled)
+ return true;
+ return false;
+}
/*
* For memory reclaim.
*/
-int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
-bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
-extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
- struct task_struct *p);
+
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int nr_pages);
+
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return true;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ memcg = mz->memcg;
+
+ return !!(memcg->css.flags & CSS_ONLINE);
+}
+
+static inline
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ return mz->lru_size[lru];
+}
+
+static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+{
+ unsigned long inactive_ratio;
+ unsigned long inactive;
+ unsigned long active;
+ unsigned long gb;
+
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
+
+ gb = (inactive + active) >> (30 - PAGE_SHIFT);
+ if (gb)
+ inactive_ratio = int_sqrt(10 * gb);
+ else
+ inactive_ratio = 1;
+
+ return inactive * inactive_ratio < active;
+}
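
For reference, a worked example of the heuristic above (added for
illustration): with 4 GB of anonymous pages on the lruvec, gb = 4 and
inactive_ratio = int_sqrt(40) = 6, so the inactive anon list is reported as low
while inactive * 6 < active, i.e. while it holds less than roughly one seventh
of the anonymous pages; below 1 GB, gb is 0, the ratio falls back to 1, and the
two list sizes are compared directly.
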
+
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ struct task_struct *p);
static inline void mem_cgroup_oom_enable(void)
{
@@ -156,18 +428,26 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-static inline bool mem_cgroup_disabled(void)
-{
- if (memory_cgrp_subsys.disabled)
- return true;
- return false;
-}
-
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx, int val);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+/**
+ * mem_cgroup_update_page_stat - update page state statistics
+ * @memcg: memcg to account against
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ *
+ * See mem_cgroup_begin_page_stat() for locking requirements.
+ */
+static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx, int val)
+{
+ VM_BUG_ON(!rcu_read_lock_held());
+
+ if (memcg)
+ this_cpu_add(memcg->stat->count[idx], val);
+}
+
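
A minimal sketch of the locking contract mentioned above (the helper name is
hypothetical; MEM_CGROUP_STAT_FILE_MAPPED is one of the existing stat indexes):
the update must be bracketed by mem_cgroup_begin_page_stat() /
mem_cgroup_end_page_stat(), which provide the RCU read-side section that the
VM_BUG_ON() above checks for.

	#include <linux/memcontrol.h>

	/* Sketch only: account one newly file-mapped page against its memcg. */
	static void example_account_mapped(struct page *page)
	{
		struct mem_cgroup *memcg;

		memcg = mem_cgroup_begin_page_stat(page);
		mem_cgroup_update_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED, 1);
		mem_cgroup_end_page_stat(memcg);
	}
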
static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
@@ -184,13 +464,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
enum vm_event_item idx)
{
+ struct mem_cgroup *memcg;
+
if (mem_cgroup_disabled())
return;
- __mem_cgroup_count_vm_event(mm, idx);
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
+ goto out;
+
+ switch (idx) {
+ case PGFAULT:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
+ break;
+ case PGMAJFAULT:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+ break;
+ default:
+ BUG();
+ }
+out:
+ rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
@@ -199,8 +497,6 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#else /* CONFIG_MEMCG */
struct mem_cgroup;
-#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
-
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
unsigned int nr)
@@ -258,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
return &zone->lruvec;
}
-static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
-{
- return NULL;
-}
-
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{
@@ -275,12 +566,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
return true;
}
-static inline struct cgroup_subsys_state
- *mem_cgroup_css(struct mem_cgroup *memcg)
-{
- return NULL;
-}
-
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -428,8 +713,8 @@ static inline void sock_release_memcg(struct sock *sk)
extern struct static_key memcg_kmem_enabled_key;
extern int memcg_nr_cache_ids;
-extern void memcg_get_cache_ids(void);
-extern void memcg_put_cache_ids(void);
+void memcg_get_cache_ids(void);
+void memcg_put_cache_ids(void);
/*
* Helper macro to loop through all memcg-specific caches. Callers must still
@@ -444,7 +729,10 @@ static inline bool memcg_kmem_enabled(void)
return static_key_false(&memcg_kmem_enabled_key);
}
-bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return memcg->kmem_acct_active;
+}
/*
* In general, we'll do everything in our power to not incur in any overhead
@@ -463,7 +751,15 @@ void __memcg_kmem_commit_charge(struct page *page,
struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);
-int memcg_cache_id(struct mem_cgroup *memcg);
+/*
+ * Helper for accessing a memcg's index. It will be used as an index in the
+ * child cache array in kmem_cache, and also to derive its name. This function
+ * will return -1 when this is not a kmem-limited memcg.
+ */
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+ return memcg ? memcg->kmemcg_id : -1;
+}
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 6ffa0ac7f7d6..8f60e899b33c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -266,8 +266,9 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
-extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
-extern int arch_add_memory(int nid, u64 start, u64 size);
+extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
+ bool for_device);
+extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283cc8e1..8fcad63fab55 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@ enum {
/* page 0 basic: slave adder 0x60 */
#define PM800_STATUS_1 (0x01)
-#define PM800_ONKEY_STS1 (1 << 0)
-#define PM800_EXTON_STS1 (1 << 1)
-#define PM800_CHG_STS1 (1 << 2)
-#define PM800_BAT_STS1 (1 << 3)
-#define PM800_VBUS_STS1 (1 << 4)
-#define PM800_LDO_PGOOD_STS1 (1 << 5)
-#define PM800_BUCK_PGOOD_STS1 (1 << 6)
+#define PM800_ONKEY_STS1 BIT(0)
+#define PM800_EXTON_STS1 BIT(1)
+#define PM800_CHG_STS1 BIT(2)
+#define PM800_BAT_STS1 BIT(3)
+#define PM800_VBUS_STS1 BIT(4)
+#define PM800_LDO_PGOOD_STS1 BIT(5)
+#define PM800_BUCK_PGOOD_STS1 BIT(6)
#define PM800_STATUS_2 (0x02)
-#define PM800_RTC_ALARM_STS2 (1 << 0)
+#define PM800_RTC_ALARM_STS2 BIT(0)
/* Wakeup Registers */
-#define PM800_WAKEUP1 (0x0D)
+#define PM800_WAKEUP1 (0x0D)
-#define PM800_WAKEUP2 (0x0E)
-#define PM800_WAKEUP2_INV_INT (1 << 0)
-#define PM800_WAKEUP2_INT_CLEAR (1 << 1)
-#define PM800_WAKEUP2_INT_MASK (1 << 2)
+#define PM800_WAKEUP2 (0x0E)
+#define PM800_WAKEUP2_INV_INT BIT(0)
+#define PM800_WAKEUP2_INT_CLEAR BIT(1)
+#define PM800_WAKEUP2_INT_MASK BIT(2)
-#define PM800_POWER_UP_LOG (0x10)
+#define PM800_POWER_UP_LOG (0x10)
/* Referance and low power registers */
#define PM800_LOW_POWER1 (0x20)
#define PM800_LOW_POWER2 (0x21)
-#define PM800_LOW_POWER_CONFIG3 (0x22)
-#define PM800_LOW_POWER_CONFIG4 (0x23)
+#define PM800_LOW_POWER_CONFIG3 (0x22)
+#define PM800_LOW_POWER_CONFIG4 (0x23)
/* GPIO register */
#define PM800_GPIO_0_1_CNTRL (0x30)
-#define PM800_GPIO0_VAL (1 << 0)
+#define PM800_GPIO0_VAL BIT(0)
#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
-#define PM800_GPIO1_VAL (1 << 4)
+#define PM800_GPIO1_VAL BIT(4)
#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
#define PM800_GPIO_2_3_CNTRL (0x31)
-#define PM800_GPIO2_VAL (1 << 0)
+#define PM800_GPIO2_VAL BIT(0)
#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
-#define PM800_GPIO3_VAL (1 << 4)
+#define PM800_GPIO3_VAL BIT(4)
#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
#define PM800_GPIO3_MODE_MASK 0x1F
#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
-#define PM800_GPIO_4_CNTRL (0x32)
-#define PM800_GPIO4_VAL (1 << 0)
+#define PM800_GPIO_4_CNTRL (0x32)
+#define PM800_GPIO4_VAL BIT(0)
#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
#define PM800_HEADSET_CNTRL (0x38)
-#define PM800_HEADSET_DET_EN (1 << 7)
-#define PM800_HSDET_SLP (1 << 1)
+#define PM800_HEADSET_DET_EN BIT(7)
+#define PM800_HSDET_SLP BIT(1)
/* PWM register */
-#define PM800_PWM1 (0x40)
-#define PM800_PWM2 (0x41)
-#define PM800_PWM3 (0x42)
-#define PM800_PWM4 (0x43)
+#define PM800_PWM1 (0x40)
+#define PM800_PWM2 (0x41)
+#define PM800_PWM3 (0x42)
+#define PM800_PWM4 (0x43)
/* RTC Registers */
#define PM800_RTC_CONTROL (0xD0)
@@ -123,55 +123,55 @@ enum {
#define PM800_RTC_MISC4 (0xE4)
#define PM800_RTC_MISC5 (0xE7)
/* bit definitions of RTC Register 1 (0xD0) */
-#define PM800_ALARM1_EN (1 << 0)
-#define PM800_ALARM_WAKEUP (1 << 4)
-#define PM800_ALARM (1 << 5)
-#define PM800_RTC1_USE_XO (1 << 7)
+#define PM800_ALARM1_EN BIT(0)
+#define PM800_ALARM_WAKEUP BIT(4)
+#define PM800_ALARM BIT(5)
+#define PM800_RTC1_USE_XO BIT(7)
/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
/* buck registers */
-#define PM800_SLEEP_BUCK1 (0x30)
+#define PM800_SLEEP_BUCK1 (0x30)
/* BUCK Sleep Mode Register 1: BUCK[1..4] */
-#define PM800_BUCK_SLP1 (0x5A)
-#define PM800_BUCK1_SLP1_SHIFT 0
-#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
+#define PM800_BUCK_SLP1 (0x5A)
+#define PM800_BUCK1_SLP1_SHIFT 0
+#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
/* page 2 GPADC: slave adder 0x02 */
#define PM800_GPADC_MEAS_EN1 (0x01)
-#define PM800_MEAS_EN1_VBAT (1 << 2)
+#define PM800_MEAS_EN1_VBAT BIT(2)
#define PM800_GPADC_MEAS_EN2 (0x02)
-#define PM800_MEAS_EN2_RFTMP (1 << 0)
-#define PM800_MEAS_GP0_EN (1 << 2)
-#define PM800_MEAS_GP1_EN (1 << 3)
-#define PM800_MEAS_GP2_EN (1 << 4)
-#define PM800_MEAS_GP3_EN (1 << 5)
-#define PM800_MEAS_GP4_EN (1 << 6)
+#define PM800_MEAS_EN2_RFTMP BIT(0)
+#define PM800_MEAS_GP0_EN BIT(2)
+#define PM800_MEAS_GP1_EN BIT(3)
+#define PM800_MEAS_GP2_EN BIT(4)
+#define PM800_MEAS_GP3_EN BIT(5)
+#define PM800_MEAS_GP4_EN BIT(6)
#define PM800_GPADC_MISC_CONFIG1 (0x05)
#define PM800_GPADC_MISC_CONFIG2 (0x06)
-#define PM800_GPADC_MISC_GPFSM_EN (1 << 0)
+#define PM800_GPADC_MISC_GPFSM_EN BIT(0)
#define PM800_GPADC_SLOW_MODE(x) (x << 3)
-#define PM800_GPADC_MISC_CONFIG3 (0x09)
-#define PM800_GPADC_MISC_CONFIG4 (0x0A)
+#define PM800_GPADC_MISC_CONFIG3 (0x09)
+#define PM800_GPADC_MISC_CONFIG4 (0x0A)
-#define PM800_GPADC_PREBIAS1 (0x0F)
+#define PM800_GPADC_PREBIAS1 (0x0F)
#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
-#define PM800_GPADC_PREBIAS2 (0x10)
+#define PM800_GPADC_PREBIAS2 (0x10)
-#define PM800_GP_BIAS_ENA1 (0x14)
-#define PM800_GPADC_GP_BIAS_EN0 (1 << 0)
-#define PM800_GPADC_GP_BIAS_EN1 (1 << 1)
-#define PM800_GPADC_GP_BIAS_EN2 (1 << 2)
-#define PM800_GPADC_GP_BIAS_EN3 (1 << 3)
+#define PM800_GP_BIAS_ENA1 (0x14)
+#define PM800_GPADC_GP_BIAS_EN0 BIT(0)
+#define PM800_GPADC_GP_BIAS_EN1 BIT(1)
+#define PM800_GPADC_GP_BIAS_EN2 BIT(2)
+#define PM800_GPADC_GP_BIAS_EN3 BIT(3)
#define PM800_GP_BIAS_OUT1 (0x15)
-#define PM800_BIAS_OUT_GP0 (1 << 0)
-#define PM800_BIAS_OUT_GP1 (1 << 1)
-#define PM800_BIAS_OUT_GP2 (1 << 2)
-#define PM800_BIAS_OUT_GP3 (1 << 3)
+#define PM800_BIAS_OUT_GP0 BIT(0)
+#define PM800_BIAS_OUT_GP1 BIT(1)
+#define PM800_BIAS_OUT_GP2 BIT(2)
+#define PM800_BIAS_OUT_GP3 BIT(3)
#define PM800_GPADC0_LOW_TH 0x20
#define PM800_GPADC1_LOW_TH 0x21
@@ -222,37 +222,37 @@ enum {
#define PM805_INT_STATUS1 (0x03)
-#define PM805_INT1_HP1_SHRT (1 << 0)
-#define PM805_INT1_HP2_SHRT (1 << 1)
-#define PM805_INT1_MIC_CONFLICT (1 << 2)
-#define PM805_INT1_CLIP_FAULT (1 << 3)
-#define PM805_INT1_LDO_OFF (1 << 4)
-#define PM805_INT1_SRC_DPLL_LOCK (1 << 5)
+#define PM805_INT1_HP1_SHRT BIT(0)
+#define PM805_INT1_HP2_SHRT BIT(1)
+#define PM805_INT1_MIC_CONFLICT BIT(2)
+#define PM805_INT1_CLIP_FAULT BIT(3)
+#define PM805_INT1_LDO_OFF BIT(4)
+#define PM805_INT1_SRC_DPLL_LOCK BIT(5)
#define PM805_INT_STATUS2 (0x04)
-#define PM805_INT2_MIC_DET (1 << 0)
-#define PM805_INT2_SHRT_BTN_DET (1 << 1)
-#define PM805_INT2_VOLM_BTN_DET (1 << 2)
-#define PM805_INT2_VOLP_BTN_DET (1 << 3)
-#define PM805_INT2_RAW_PLL_FAULT (1 << 4)
-#define PM805_INT2_FINE_PLL_FAULT (1 << 5)
+#define PM805_INT2_MIC_DET BIT(0)
+#define PM805_INT2_SHRT_BTN_DET BIT(1)
+#define PM805_INT2_VOLM_BTN_DET BIT(2)
+#define PM805_INT2_VOLP_BTN_DET BIT(3)
+#define PM805_INT2_RAW_PLL_FAULT BIT(4)
+#define PM805_INT2_FINE_PLL_FAULT BIT(5)
#define PM805_INT_MASK1 (0x05)
#define PM805_INT_MASK2 (0x06)
-#define PM805_SHRT_BTN_DET (1 << 1)
+#define PM805_SHRT_BTN_DET BIT(1)
/* number of status and int reg in a row */
#define PM805_INT_REG_NUM (2)
#define PM805_MIC_DET1 (0x07)
-#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET_EN_MIC_DET BIT(0)
#define PM805_MIC_DET2 (0x08)
-#define PM805_MIC_DET_STATUS1 (0x09)
+#define PM805_MIC_DET_STATUS1 (0x09)
-#define PM805_MIC_DET_STATUS3 (0x0A)
-#define PM805_AUTO_SEQ_STATUS1 (0x0B)
-#define PM805_AUTO_SEQ_STATUS2 (0x0C)
+#define PM805_MIC_DET_STATUS3 (0x0A)
+#define PM805_AUTO_SEQ_STATUS1 (0x0B)
+#define PM805_AUTO_SEQ_STATUS2 (0x0C)
#define PM805_ADC_SETTING1 (0x10)
#define PM805_ADC_SETTING2 (0x11)
@@ -261,7 +261,7 @@ enum {
#define PM805_ADC_GAIN2 (0x13)
#define PM805_DMIC_SETTING (0x15)
#define PM805_DWS_SETTING (0x16)
-#define PM805_MIC_CONFLICT_STS (0x17)
+#define PM805_MIC_CONFLICT_STS (0x17)
#define PM805_PDM_SETTING1 (0x20)
#define PM805_PDM_SETTING2 (0x21)
@@ -270,11 +270,11 @@ enum {
#define PM805_PDM_CONTROL2 (0x24)
#define PM805_PDM_CONTROL3 (0x25)
-#define PM805_HEADPHONE_SETTING (0x26)
-#define PM805_HEADPHONE_GAIN_A2A (0x27)
-#define PM805_HEADPHONE_SHORT_STATE (0x28)
-#define PM805_EARPHONE_SETTING (0x29)
-#define PM805_AUTO_SEQ_SETTING (0x2A)
+#define PM805_HEADPHONE_SETTING (0x26)
+#define PM805_HEADPHONE_GAIN_A2A (0x27)
+#define PM805_HEADPHONE_SHORT_STATE (0x28)
+#define PM805_EARPHONE_SETTING (0x29)
+#define PM805_AUTO_SEQ_SETTING (0x2A)
struct pm80x_rtc_pdata {
int vrtc;
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4f79a1..79e607e2f081 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@ enum arizona_type {
WM5110 = 2,
WM8997 = 3,
WM8280 = 4,
+ WM8998 = 5,
+ WM1814 = 6,
};
#define ARIZONA_IRQ_GP1 0
@@ -165,6 +167,7 @@ static inline int wm5102_patch(struct arizona *arizona)
int wm5110_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
+int wm8998_patch(struct arizona *arizona);
extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
bool mandatory);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4faad143..1dc385850ba2 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@ struct arizona_pdata {
 * useful for systems where an I2S bus with multiple data
* lines is mastered.
*/
- int max_channels_clocked[ARIZONA_MAX_AIF];
+ unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
/** GPIO5 is used for jack detection */
bool jd_gpio5;
@@ -125,22 +125,22 @@ struct arizona_pdata {
unsigned int hpdet_channel;
/** Extra debounce timeout used during initial mic detection (ms) */
- int micd_detect_debounce;
+ unsigned int micd_detect_debounce;
/** GPIO for mic detection polarity */
int micd_pol_gpio;
/** Mic detect ramp rate */
- int micd_bias_start_time;
+ unsigned int micd_bias_start_time;
/** Mic detect sample rate */
- int micd_rate;
+ unsigned int micd_rate;
/** Mic detect debounce level */
- int micd_dbtime;
+ unsigned int micd_dbtime;
/** Mic detect timeout (ms) */
- int micd_timeout;
+ unsigned int micd_timeout;
/** Force MICBIAS on for mic detect */
bool micd_force_micbias;
@@ -162,6 +162,8 @@ struct arizona_pdata {
/**
* Mode of input structures
* One of the ARIZONA_INMODE_xxx values
+ * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
+ * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
*/
int inmode[ARIZONA_MAX_INPUT];
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36e6067..fdd70b3c7418 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
#define ARIZONA_PWM_DRIVE_3 0x32
#define ARIZONA_WAKE_CONTROL 0x40
#define ARIZONA_SEQUENCE_CONTROL 0x41
+#define ARIZONA_SPARE_TRIGGERS 0x42
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
@@ -139,6 +140,7 @@
#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7
#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8
#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9
+#define ARIZONA_MIC_DETECT_4 0x2AB
#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
#define ARIZONA_ISOLATION_CONTROL 0x2CB
#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
@@ -225,14 +227,18 @@
#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
#define ARIZONA_DRE_ENABLE 0x440
+#define ARIZONA_DRE_CONTROL_1 0x441
#define ARIZONA_DRE_CONTROL_2 0x442
#define ARIZONA_DRE_CONTROL_3 0x443
+#define ARIZONA_EDRE_ENABLE 0x448
#define ARIZONA_DAC_AEC_CONTROL_1 0x450
+#define ARIZONA_DAC_AEC_CONTROL_2 0x451
#define ARIZONA_NOISE_GATE_CONTROL 0x458
#define ARIZONA_PDM_SPK1_CTRL_1 0x490
#define ARIZONA_PDM_SPK1_CTRL_2 0x491
#define ARIZONA_PDM_SPK2_CTRL_1 0x492
#define ARIZONA_PDM_SPK2_CTRL_2 0x493
+#define ARIZONA_HP_TEST_CTRL_13 0x49A
#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
@@ -310,6 +316,10 @@
#define ARIZONA_AIF3_TX_ENABLES 0x599
#define ARIZONA_AIF3_RX_ENABLES 0x59A
#define ARIZONA_AIF3_FORCE_WRITE 0x59B
+#define ARIZONA_SPD1_TX_CONTROL 0x5C2
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
#define ARIZONA_SLIMBUS_RATES_1 0x5E5
#define ARIZONA_SLIMBUS_RATES_2 0x5E6
@@ -643,6 +653,10 @@
#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE 0x800
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME 0x801
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE 0x808
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME 0x809
#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
@@ -868,6 +882,7 @@
#define ARIZONA_GPIO5_CTRL 0xC04
#define ARIZONA_IRQ_CTRL_1 0xC0F
#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
+#define ARIZONA_GP_SWITCH_1 0xC18
#define ARIZONA_MISC_PAD_CTRL_1 0xC20
#define ARIZONA_MISC_PAD_CTRL_2 0xC21
#define ARIZONA_MISC_PAD_CTRL_3 0xC22
@@ -1169,6 +1184,13 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
+#define ARIZONA_FRF_COEFF_1 0x1700
+#define ARIZONA_FRF_COEFF_2 0x1701
+#define ARIZONA_FRF_COEFF_3 0x1702
+#define ARIZONA_FRF_COEFF_4 0x1703
+#define ARIZONA_V2_DAC_COMP_1 0x1704
+#define ARIZONA_V2_DAC_COMP_2 0x1705
+
/*
* Field Definitions.
@@ -1431,6 +1453,42 @@
#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
/*
+ * R66 (0x42) - Spare Triggers
+ */
+#define ARIZONA_WS_TRG8 0x0080 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_MASK 0x0080 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_SHIFT 7 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_WIDTH 1 /* WS_TRG8 */
+#define ARIZONA_WS_TRG7 0x0040 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_MASK 0x0040 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_SHIFT 6 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_WIDTH 1 /* WS_TRG7 */
+#define ARIZONA_WS_TRG6 0x0020 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_MASK 0x0020 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_SHIFT 5 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_WIDTH 1 /* WS_TRG6 */
+#define ARIZONA_WS_TRG5 0x0010 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_MASK 0x0010 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_SHIFT 4 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_WIDTH 1 /* WS_TRG5 */
+#define ARIZONA_WS_TRG4 0x0008 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_MASK 0x0008 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_SHIFT 3 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_WIDTH 1 /* WS_TRG4 */
+#define ARIZONA_WS_TRG3 0x0004 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_MASK 0x0004 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_SHIFT 2 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_WIDTH 1 /* WS_TRG3 */
+#define ARIZONA_WS_TRG2 0x0002 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_MASK 0x0002 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_SHIFT 1 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_WIDTH 1 /* WS_TRG2 */
+#define ARIZONA_WS_TRG1 0x0001 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_MASK 0x0001 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_SHIFT 0 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_WIDTH 1 /* WS_TRG1 */
+
+/*
* R97 (0x61) - Sample Rate Sequence Select 1
*/
#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
+#define WM8998_HP_RATE_MASK 0x0006 /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_SHIFT 1 /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_WIDTH 2 /* HP_RATE - [2:1] */
#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
@@ -2413,6 +2474,16 @@
#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
/*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK 0xFF00 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK 0x007F /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT 0 /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH 7 /* MICDET_ADCVAL - [6:0] */
+
+/*
* R707 (0x2C3) - Mic noise mix control 1
*/
#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
/*
* R785 (0x311) - ADC Digital Volume 1L
*/
+#define ARIZONA_IN1L_SRC_MASK 0x4000 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SHIFT 14 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_WIDTH 1 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SE_MASK 0x2000 /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_SHIFT 13 /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_WIDTH 1 /* IN1L_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2560,6 +2637,12 @@
/*
* R789 (0x315) - ADC Digital Volume 1R
*/
+#define ARIZONA_IN1R_SRC_MASK 0x4000 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SHIFT 14 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_WIDTH 1 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SE_MASK 0x2000 /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_SHIFT 13 /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_WIDTH 1 /* IN1R_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2604,6 +2687,12 @@
/*
* R793 (0x319) - ADC Digital Volume 2L
*/
+#define ARIZONA_IN2L_SRC_MASK 0x4000 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SHIFT 14 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_WIDTH 1 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SE_MASK 0x2000 /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_SHIFT 13 /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_WIDTH 1 /* IN2L_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -3412,11 +3501,45 @@
#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
/*
+ * R1088 (0x440) - DRE Enable (WM8998)
+ */
+#define WM8998_DRE3L_ENA 0x0020 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_MASK 0x0020 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_SHIFT 5 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */
+#define WM8998_DRE2L_ENA 0x0008 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_MASK 0x0008 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_SHIFT 3 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */
+#define WM8998_DRE2R_ENA 0x0004 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_MASK 0x0004 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_SHIFT 2 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */
+#define WM8998_DRE1L_ENA 0x0002 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_MASK 0x0002 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_SHIFT 1 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
+#define WM8998_DRE1R_ENA 0x0001 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_MASK 0x0001 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_SHIFT 0 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */
+
+/*
+ * R1089 (0x441) - DRE Control 1
+ */
+#define ARIZONA_DRE_ENV_TC_FAST_MASK 0x0F00 /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_SHIFT 8 /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_WIDTH 4 /* DRE_ENV_TC_FAST - [11:8] */
+
+/*
* R1090 (0x442) - DRE Control 2
*/
#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */
#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */
#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK 0x000F /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT 0 /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH 4 /* DRE_ALOG_VOL_DELAY - [3:0] */
/*
* R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */
#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */
+/*
+ * R1096 (0x448) - EDRE Enable
+ */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA 0x0200 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK 0x0200 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT 9 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH 1 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA 0x0100 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK 0x0100 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT 8 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH 1 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA 0x0080 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK 0x0080 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT 7 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH 1 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA 0x0040 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK 0x0040 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT 6 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH 1 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA 0x0020 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK 0x0020 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT 5 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH 1 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA 0x0010 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK 0x0010 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT 4 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH 1 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA 0x0008 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK 0x0008 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT 3 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH 1 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA 0x0004 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK 0x0004 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT 2 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH 1 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA 0x0002 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK 0x0002 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT 1 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH 1 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA 0x0001 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK 0x0001 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT 0 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH 1 /* EDRE_OUT1R_THR1_ENA */
+
/*
* R1104 (0x450) - DAC AEC Control 1
*/
@@ -4308,6 +4474,86 @@
#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
/*
+ * R1474 (0x5C2) - SPD1 TX Control
+ */
+#define ARIZONA_SPD1_VAL2 0x2000 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_MASK 0x2000 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_SHIFT 13 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_WIDTH 1 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL1 0x1000 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_MASK 0x1000 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_SHIFT 12 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_WIDTH 1 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_RATE_MASK 0x00F0 /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_SHIFT 4 /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_WIDTH 4 /* SPD1_RATE */
+#define ARIZONA_SPD1_ENA 0x0001 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_MASK 0x0001 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_SHIFT 0 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_WIDTH 1 /* SPD1_ENA */
+
+/*
+ * R1475 (0x5C3) - SPD1 TX Channel Status 1
+ */
+#define ARIZONA_SPD1_CATCODE_MASK 0xFF00 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_SHIFT 8 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_WIDTH 8 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CHSTMODE_MASK 0x00C0 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_SHIFT 6 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_WIDTH 2 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_PREEMPH_MASK 0x0038 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_SHIFT 3 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_WIDTH 3 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_NOCOPY 0x0004 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_MASK 0x0004 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_SHIFT 2 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_WIDTH 1 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOAUDIO 0x0002 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_MASK 0x0002 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_SHIFT 1 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_WIDTH 1 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_PRO 0x0001 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_MASK 0x0001 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_SHIFT 0 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_WIDTH 1 /* SPD1_PRO */
+
+/*
+ * R1476 (0x5C4) - SPD1 TX Channel Status 2
+ */
+#define ARIZONA_SPD1_FREQ_MASK 0xF000 /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_SHIFT 12 /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_WIDTH 4 /* SPD1_FREQ */
+#define ARIZONA_SPD1_CHNUM2_MASK 0x0F00 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_SHIFT 8 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_WIDTH 4 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM1_MASK 0x00F0 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_SHIFT 4 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_WIDTH 4 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_SRCNUM_MASK 0x000F /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_SHIFT 0 /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_WIDTH 4 /* SPD1_SRCNUM */
+
+/*
+ * R1477 (0x5C5) - SPD1 TX Channel Status 3
+ */
+#define ARIZONA_SPD1_ORGSAMP_MASK 0x0F00 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_SHIFT 8 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_WIDTH 4 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_TXWL_MASK 0x00E0 /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_SHIFT 5 /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_WIDTH 3 /* SPD1_TXWL */
+#define ARIZONA_SPD1_MAXWL 0x0010 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_MASK 0x0010 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_SHIFT 4 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_WIDTH 1 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_CS31_30_MASK 0x000C /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_SHIFT 2 /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_WIDTH 2 /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CLKACU_MASK 0x0003 /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_SHIFT 0 /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_WIDTH 2 /* SPD1_CLKACU */
+
+/*
* R1507 (0x5E3) - SLIMbus Framer Ref Gear
*/
#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
/*
+ * R3096 (0xC18) - GP Switch 1
+ */
+#define ARIZONA_SW1_MODE_MASK 0x0003 /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_SHIFT 0 /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_WIDTH 2 /* SW1_MODE - [1:0] */
+
+/*
* R3104 (0xC20) - Misc Pad Ctrl 1
*/
#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
/*
* R3366 (0xD26) - Interrupt Raw Status 8
*/
+#define ARIZONA_SPDIF_OVERCLOCKED_STS 0x8000 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK 0x8000 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT 15 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH 1 /* SPDIF_OVERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
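As an aside, not part of the patch itself: every field added above is described by a _MASK/_SHIFT/_WIDTH triplet, and Arizona drivers consume them by masking and shifting a regmap read. A minimal sketch of pulling the new MICDET ADC value out of Mic Detect 4 (0x2AB) follows; the register index macro ARIZONA_MIC_DETECT_4 and the regmap handle are assumptions for illustration and are not defined in this diff.

#include <linux/regmap.h>
#include <linux/mfd/arizona/registers.h>

/* Hypothetical helper: return the raw MICDET_ADCVAL field (bits [6:0]) */
static int arizona_read_micdet_adcval(struct regmap *regmap)
{
	unsigned int reg;
	int ret;

	ret = regmap_read(regmap, ARIZONA_MIC_DETECT_4, &reg);
	if (ret)
		return ret;

	return (reg & ARIZONA_MICDET_ADCVAL_MASK) >> ARIZONA_MICDET_ADCVAL_SHIFT;
}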
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853fb412..cc8ad1e1a307 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
#define __LINUX_MFD_AXP20X_H
enum {
- AXP202_ID = 0,
+ AXP152_ID = 0,
+ AXP202_ID,
AXP209_ID,
AXP221_ID,
AXP288_ID,
@@ -22,6 +23,24 @@ enum {
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
+#define AXP152_PWR_OP_MODE 0x01
+#define AXP152_LDO3456_DC1234_CTRL 0x12
+#define AXP152_ALDO_OP_MODE 0x13
+#define AXP152_LDO0_CTRL 0x15
+#define AXP152_DCDC2_V_OUT 0x23
+#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC1_V_OUT 0x26
+#define AXP152_DCDC3_V_OUT 0x27
+#define AXP152_ALDO12_V_OUT 0x28
+#define AXP152_DLDO1_V_OUT 0x29
+#define AXP152_DLDO2_V_OUT 0x2a
+#define AXP152_DCDC4_V_OUT 0x2b
+#define AXP152_V_OFF 0x31
+#define AXP152_OFF_CTRL 0x32
+#define AXP152_PEK_KEY 0x36
+#define AXP152_DCDC_FREQ 0x37
+#define AXP152_DCDC_MODE 0x80
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -69,6 +88,13 @@ enum {
#define AXP22X_CHRG_CTRL3 0x35
/* Interrupt */
+#define AXP152_IRQ1_EN 0x40
+#define AXP152_IRQ2_EN 0x41
+#define AXP152_IRQ3_EN 0x42
+#define AXP152_IRQ1_STATE 0x48
+#define AXP152_IRQ2_STATE 0x49
+#define AXP152_IRQ3_STATE 0x4a
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -127,6 +153,19 @@ enum {
#define AXP22X_PWREN_CTRL2 0x8d
/* GPIO */
+#define AXP152_GPIO0_CTRL 0x90
+#define AXP152_GPIO1_CTRL 0x91
+#define AXP152_GPIO2_CTRL 0x92
+#define AXP152_GPIO3_CTRL 0x93
+#define AXP152_LDOGPIO2_V_OUT 0x96
+#define AXP152_GPIO_INPUT 0x97
+#define AXP152_PWM0_FREQ_X 0x98
+#define AXP152_PWM0_FREQ_Y 0x99
+#define AXP152_PWM0_DUTY_CYCLE 0x9a
+#define AXP152_PWM1_FREQ_X 0x9b
+#define AXP152_PWM1_FREQ_Y 0x9c
+#define AXP152_PWM1_DUTY_CYCLE 0x9d
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -151,6 +190,12 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* OCV */
+#define AXP20X_RDC_H 0xba
+#define AXP20X_RDC_L 0xbb
+#define AXP20X_OCV(m) (0xc0 + (m))
+#define AXP20X_OCV_MAX 0xf
+
/* AXP22X specific registers */
#define AXP22X_BATLOW_THRES1 0xe6
@@ -218,6 +263,26 @@ enum {
/* IRQs */
enum {
+ AXP152_IRQ_LDO0IN_CONNECT = 1,
+ AXP152_IRQ_LDO0IN_REMOVAL,
+ AXP152_IRQ_ALDO0IN_CONNECT,
+ AXP152_IRQ_ALDO0IN_REMOVAL,
+ AXP152_IRQ_DCDC1_V_LOW,
+ AXP152_IRQ_DCDC2_V_LOW,
+ AXP152_IRQ_DCDC3_V_LOW,
+ AXP152_IRQ_DCDC4_V_LOW,
+ AXP152_IRQ_PEK_SHORT,
+ AXP152_IRQ_PEK_LONG,
+ AXP152_IRQ_TIMER,
+ AXP152_IRQ_PEK_RIS_EDGE,
+ AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_GPIO3_INPUT,
+ AXP152_IRQ_GPIO2_INPUT,
+ AXP152_IRQ_GPIO1_INPUT,
+ AXP152_IRQ_GPIO0_INPUT,
+};
+
+enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
AXP20X_IRQ_ACIN_REMOVAL,
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 000000000000..376ba84366a0
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_DA9062_CORE_H__
+#define __MFD_DA9062_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9062/registers.h>
+
+/* Interrupts */
+enum da9062_irqs {
+ /* IRQ A */
+ DA9062_IRQ_ONKEY,
+ DA9062_IRQ_ALARM,
+ DA9062_IRQ_TICK,
+ DA9062_IRQ_WDG_WARN,
+ DA9062_IRQ_SEQ_RDY,
+ /* IRQ B */
+ DA9062_IRQ_TEMP,
+ DA9062_IRQ_LDO_LIM,
+ DA9062_IRQ_DVC_RDY,
+ DA9062_IRQ_VDD_WARN,
+ /* IRQ C */
+ DA9062_IRQ_GPI0,
+ DA9062_IRQ_GPI1,
+ DA9062_IRQ_GPI2,
+ DA9062_IRQ_GPI3,
+ DA9062_IRQ_GPI4,
+
+ DA9062_NUM_IRQ,
+};
+
+struct da9062 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irq;
+};
+
+#endif /* __MFD_DA9062_CORE_H__ */
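As a non-normative illustration of how the struct and IRQ enum above are meant to be consumed: a DA9062 sub-device driver would normally fetch the parent chip data and translate one of the enum da9062_irqs values into a Linux virtual IRQ via the regmap IRQ chip data. A sketch, with the child's platform device pdev assumed:

#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/da9062/core.h>

/* Hypothetical lookup of the ALARM interrupt from a child driver's probe() */
static int da9062_child_get_alarm_irq(struct platform_device *pdev)
{
	struct da9062 *chip = dev_get_drvdata(pdev->dev.parent);

	/* Translate the local IRQ index into a virtual IRQ number */
	return regmap_irq_get_virq(chip->regmap_irq, DA9062_IRQ_ALARM);
}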
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 000000000000..97790d1b02c5
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
+/*
+ * registers.h - Register definitions for DA9062
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DA9062_H__
+#define __DA9062_H__
+
+#define DA9062_PMIC_DEVICE_ID 0x62
+#define DA9062_PMIC_VARIANT_MRC_AA 0x01
+
+#define DA9062_I2C_PAGE_SEL_SHIFT 1
+
+/*
+ * Registers
+ */
+
+#define DA9062AA_PAGE_CON 0x000
+#define DA9062AA_STATUS_A 0x001
+#define DA9062AA_STATUS_B 0x002
+#define DA9062AA_STATUS_D 0x004
+#define DA9062AA_FAULT_LOG 0x005
+#define DA9062AA_EVENT_A 0x006
+#define DA9062AA_EVENT_B 0x007
+#define DA9062AA_EVENT_C 0x008
+#define DA9062AA_IRQ_MASK_A 0x00A
+#define DA9062AA_IRQ_MASK_B 0x00B
+#define DA9062AA_IRQ_MASK_C 0x00C
+#define DA9062AA_CONTROL_A 0x00E
+#define DA9062AA_CONTROL_B 0x00F
+#define DA9062AA_CONTROL_C 0x010
+#define DA9062AA_CONTROL_D 0x011
+#define DA9062AA_CONTROL_E 0x012
+#define DA9062AA_CONTROL_F 0x013
+#define DA9062AA_PD_DIS 0x014
+#define DA9062AA_GPIO_0_1 0x015
+#define DA9062AA_GPIO_2_3 0x016
+#define DA9062AA_GPIO_4 0x017
+#define DA9062AA_GPIO_WKUP_MODE 0x01C
+#define DA9062AA_GPIO_MODE0_4 0x01D
+#define DA9062AA_GPIO_OUT0_2 0x01E
+#define DA9062AA_GPIO_OUT3_4 0x01F
+#define DA9062AA_BUCK2_CONT 0x020
+#define DA9062AA_BUCK1_CONT 0x021
+#define DA9062AA_BUCK4_CONT 0x022
+#define DA9062AA_BUCK3_CONT 0x024
+#define DA9062AA_LDO1_CONT 0x026
+#define DA9062AA_LDO2_CONT 0x027
+#define DA9062AA_LDO3_CONT 0x028
+#define DA9062AA_LDO4_CONT 0x029
+#define DA9062AA_DVC_1 0x032
+#define DA9062AA_COUNT_S 0x040
+#define DA9062AA_COUNT_MI 0x041
+#define DA9062AA_COUNT_H 0x042
+#define DA9062AA_COUNT_D 0x043
+#define DA9062AA_COUNT_MO 0x044
+#define DA9062AA_COUNT_Y 0x045
+#define DA9062AA_ALARM_S 0x046
+#define DA9062AA_ALARM_MI 0x047
+#define DA9062AA_ALARM_H 0x048
+#define DA9062AA_ALARM_D 0x049
+#define DA9062AA_ALARM_MO 0x04A
+#define DA9062AA_ALARM_Y 0x04B
+#define DA9062AA_SECOND_A 0x04C
+#define DA9062AA_SECOND_B 0x04D
+#define DA9062AA_SECOND_C 0x04E
+#define DA9062AA_SECOND_D 0x04F
+#define DA9062AA_SEQ 0x081
+#define DA9062AA_SEQ_TIMER 0x082
+#define DA9062AA_ID_2_1 0x083
+#define DA9062AA_ID_4_3 0x084
+#define DA9062AA_ID_12_11 0x088
+#define DA9062AA_ID_14_13 0x089
+#define DA9062AA_ID_16_15 0x08A
+#define DA9062AA_ID_22_21 0x08D
+#define DA9062AA_ID_24_23 0x08E
+#define DA9062AA_ID_26_25 0x08F
+#define DA9062AA_ID_28_27 0x090
+#define DA9062AA_ID_30_29 0x091
+#define DA9062AA_ID_32_31 0x092
+#define DA9062AA_SEQ_A 0x095
+#define DA9062AA_SEQ_B 0x096
+#define DA9062AA_WAIT 0x097
+#define DA9062AA_EN_32K 0x098
+#define DA9062AA_RESET 0x099
+#define DA9062AA_BUCK_ILIM_A 0x09A
+#define DA9062AA_BUCK_ILIM_B 0x09B
+#define DA9062AA_BUCK_ILIM_C 0x09C
+#define DA9062AA_BUCK2_CFG 0x09D
+#define DA9062AA_BUCK1_CFG 0x09E
+#define DA9062AA_BUCK4_CFG 0x09F
+#define DA9062AA_BUCK3_CFG 0x0A0
+#define DA9062AA_VBUCK2_A 0x0A3
+#define DA9062AA_VBUCK1_A 0x0A4
+#define DA9062AA_VBUCK4_A 0x0A5
+#define DA9062AA_VBUCK3_A 0x0A7
+#define DA9062AA_VLDO1_A 0x0A9
+#define DA9062AA_VLDO2_A 0x0AA
+#define DA9062AA_VLDO3_A 0x0AB
+#define DA9062AA_VLDO4_A 0x0AC
+#define DA9062AA_VBUCK2_B 0x0B4
+#define DA9062AA_VBUCK1_B 0x0B5
+#define DA9062AA_VBUCK4_B 0x0B6
+#define DA9062AA_VBUCK3_B 0x0B8
+#define DA9062AA_VLDO1_B 0x0BA
+#define DA9062AA_VLDO2_B 0x0BB
+#define DA9062AA_VLDO3_B 0x0BC
+#define DA9062AA_VLDO4_B 0x0BD
+#define DA9062AA_BBAT_CONT 0x0C5
+#define DA9062AA_INTERFACE 0x105
+#define DA9062AA_CONFIG_A 0x106
+#define DA9062AA_CONFIG_B 0x107
+#define DA9062AA_CONFIG_C 0x108
+#define DA9062AA_CONFIG_D 0x109
+#define DA9062AA_CONFIG_E 0x10A
+#define DA9062AA_CONFIG_G 0x10C
+#define DA9062AA_CONFIG_H 0x10D
+#define DA9062AA_CONFIG_I 0x10E
+#define DA9062AA_CONFIG_J 0x10F
+#define DA9062AA_CONFIG_K 0x110
+#define DA9062AA_CONFIG_M 0x112
+#define DA9062AA_TRIM_CLDR 0x120
+#define DA9062AA_GP_ID_0 0x121
+#define DA9062AA_GP_ID_1 0x122
+#define DA9062AA_GP_ID_2 0x123
+#define DA9062AA_GP_ID_3 0x124
+#define DA9062AA_GP_ID_4 0x125
+#define DA9062AA_GP_ID_5 0x126
+#define DA9062AA_GP_ID_6 0x127
+#define DA9062AA_GP_ID_7 0x128
+#define DA9062AA_GP_ID_8 0x129
+#define DA9062AA_GP_ID_9 0x12A
+#define DA9062AA_GP_ID_10 0x12B
+#define DA9062AA_GP_ID_11 0x12C
+#define DA9062AA_GP_ID_12 0x12D
+#define DA9062AA_GP_ID_13 0x12E
+#define DA9062AA_GP_ID_14 0x12F
+#define DA9062AA_GP_ID_15 0x130
+#define DA9062AA_GP_ID_16 0x131
+#define DA9062AA_GP_ID_17 0x132
+#define DA9062AA_GP_ID_18 0x133
+#define DA9062AA_GP_ID_19 0x134
+#define DA9062AA_DEVICE_ID 0x181
+#define DA9062AA_VARIANT_ID 0x182
+#define DA9062AA_CUSTOMER_ID 0x183
+#define DA9062AA_CONFIG_ID 0x184
+
+/*
+ * Bit fields
+ */
+
+/* DA9062AA_PAGE_CON = 0x000 */
+#define DA9062AA_PAGE_SHIFT 0
+#define DA9062AA_PAGE_MASK 0x3f
+#define DA9062AA_WRITE_MODE_SHIFT 6
+#define DA9062AA_WRITE_MODE_MASK BIT(6)
+#define DA9062AA_REVERT_SHIFT 7
+#define DA9062AA_REVERT_MASK BIT(7)
+
+/* DA9062AA_STATUS_A = 0x001 */
+#define DA9062AA_NONKEY_SHIFT 0
+#define DA9062AA_NONKEY_MASK 0x01
+#define DA9062AA_DVC_BUSY_SHIFT 2
+#define DA9062AA_DVC_BUSY_MASK BIT(2)
+
+/* DA9062AA_STATUS_B = 0x002 */
+#define DA9062AA_GPI0_SHIFT 0
+#define DA9062AA_GPI0_MASK 0x01
+#define DA9062AA_GPI1_SHIFT 1
+#define DA9062AA_GPI1_MASK BIT(1)
+#define DA9062AA_GPI2_SHIFT 2
+#define DA9062AA_GPI2_MASK BIT(2)
+#define DA9062AA_GPI3_SHIFT 3
+#define DA9062AA_GPI3_MASK BIT(3)
+#define DA9062AA_GPI4_SHIFT 4
+#define DA9062AA_GPI4_MASK BIT(4)
+
+/* DA9062AA_STATUS_D = 0x004 */
+#define DA9062AA_LDO1_ILIM_SHIFT 0
+#define DA9062AA_LDO1_ILIM_MASK 0x01
+#define DA9062AA_LDO2_ILIM_SHIFT 1
+#define DA9062AA_LDO2_ILIM_MASK BIT(1)
+#define DA9062AA_LDO3_ILIM_SHIFT 2
+#define DA9062AA_LDO3_ILIM_MASK BIT(2)
+#define DA9062AA_LDO4_ILIM_SHIFT 3
+#define DA9062AA_LDO4_ILIM_MASK BIT(3)
+
+/* DA9062AA_FAULT_LOG = 0x005 */
+#define DA9062AA_TWD_ERROR_SHIFT 0
+#define DA9062AA_TWD_ERROR_MASK 0x01
+#define DA9062AA_POR_SHIFT 1
+#define DA9062AA_POR_MASK BIT(1)
+#define DA9062AA_VDD_FAULT_SHIFT 2
+#define DA9062AA_VDD_FAULT_MASK BIT(2)
+#define DA9062AA_VDD_START_SHIFT 3
+#define DA9062AA_VDD_START_MASK BIT(3)
+#define DA9062AA_TEMP_CRIT_SHIFT 4
+#define DA9062AA_TEMP_CRIT_MASK BIT(4)
+#define DA9062AA_KEY_RESET_SHIFT 5
+#define DA9062AA_KEY_RESET_MASK BIT(5)
+#define DA9062AA_NSHUTDOWN_SHIFT 6
+#define DA9062AA_NSHUTDOWN_MASK BIT(6)
+#define DA9062AA_WAIT_SHUT_SHIFT 7
+#define DA9062AA_WAIT_SHUT_MASK BIT(7)
+
+/* DA9062AA_EVENT_A = 0x006 */
+#define DA9062AA_E_NONKEY_SHIFT 0
+#define DA9062AA_E_NONKEY_MASK 0x01
+#define DA9062AA_E_ALARM_SHIFT 1
+#define DA9062AA_E_ALARM_MASK BIT(1)
+#define DA9062AA_E_TICK_SHIFT 2
+#define DA9062AA_E_TICK_MASK BIT(2)
+#define DA9062AA_E_WDG_WARN_SHIFT 3
+#define DA9062AA_E_WDG_WARN_MASK BIT(3)
+#define DA9062AA_E_SEQ_RDY_SHIFT 4
+#define DA9062AA_E_SEQ_RDY_MASK BIT(4)
+#define DA9062AA_EVENTS_B_SHIFT 5
+#define DA9062AA_EVENTS_B_MASK BIT(5)
+#define DA9062AA_EVENTS_C_SHIFT 6
+#define DA9062AA_EVENTS_C_MASK BIT(6)
+
+/* DA9062AA_EVENT_B = 0x007 */
+#define DA9062AA_E_TEMP_SHIFT 1
+#define DA9062AA_E_TEMP_MASK BIT(1)
+#define DA9062AA_E_LDO_LIM_SHIFT 3
+#define DA9062AA_E_LDO_LIM_MASK BIT(3)
+#define DA9062AA_E_DVC_RDY_SHIFT 5
+#define DA9062AA_E_DVC_RDY_MASK BIT(5)
+#define DA9062AA_E_VDD_WARN_SHIFT 7
+#define DA9062AA_E_VDD_WARN_MASK BIT(7)
+
+/* DA9062AA_EVENT_C = 0x008 */
+#define DA9062AA_E_GPI0_SHIFT 0
+#define DA9062AA_E_GPI0_MASK 0x01
+#define DA9062AA_E_GPI1_SHIFT 1
+#define DA9062AA_E_GPI1_MASK BIT(1)
+#define DA9062AA_E_GPI2_SHIFT 2
+#define DA9062AA_E_GPI2_MASK BIT(2)
+#define DA9062AA_E_GPI3_SHIFT 3
+#define DA9062AA_E_GPI3_MASK BIT(3)
+#define DA9062AA_E_GPI4_SHIFT 4
+#define DA9062AA_E_GPI4_MASK BIT(4)
+
+/* DA9062AA_IRQ_MASK_A = 0x00A */
+#define DA9062AA_M_NONKEY_SHIFT 0
+#define DA9062AA_M_NONKEY_MASK 0x01
+#define DA9062AA_M_ALARM_SHIFT 1
+#define DA9062AA_M_ALARM_MASK BIT(1)
+#define DA9062AA_M_TICK_SHIFT 2
+#define DA9062AA_M_TICK_MASK BIT(2)
+#define DA9062AA_M_WDG_WARN_SHIFT 3
+#define DA9062AA_M_WDG_WARN_MASK BIT(3)
+#define DA9062AA_M_SEQ_RDY_SHIFT 4
+#define DA9062AA_M_SEQ_RDY_MASK BIT(4)
+
+/* DA9062AA_IRQ_MASK_B = 0x00B */
+#define DA9062AA_M_TEMP_SHIFT 1
+#define DA9062AA_M_TEMP_MASK BIT(1)
+#define DA9062AA_M_LDO_LIM_SHIFT 3
+#define DA9062AA_M_LDO_LIM_MASK BIT(3)
+#define DA9062AA_M_DVC_RDY_SHIFT 5
+#define DA9062AA_M_DVC_RDY_MASK BIT(5)
+#define DA9062AA_M_VDD_WARN_SHIFT 7
+#define DA9062AA_M_VDD_WARN_MASK BIT(7)
+
+/* DA9062AA_IRQ_MASK_C = 0x00C */
+#define DA9062AA_M_GPI0_SHIFT 0
+#define DA9062AA_M_GPI0_MASK 0x01
+#define DA9062AA_M_GPI1_SHIFT 1
+#define DA9062AA_M_GPI1_MASK BIT(1)
+#define DA9062AA_M_GPI2_SHIFT 2
+#define DA9062AA_M_GPI2_MASK BIT(2)
+#define DA9062AA_M_GPI3_SHIFT 3
+#define DA9062AA_M_GPI3_MASK BIT(3)
+#define DA9062AA_M_GPI4_SHIFT 4
+#define DA9062AA_M_GPI4_MASK BIT(4)
+
+/* DA9062AA_CONTROL_A = 0x00E */
+#define DA9062AA_SYSTEM_EN_SHIFT 0
+#define DA9062AA_SYSTEM_EN_MASK 0x01
+#define DA9062AA_POWER_EN_SHIFT 1
+#define DA9062AA_POWER_EN_MASK BIT(1)
+#define DA9062AA_POWER1_EN_SHIFT 2
+#define DA9062AA_POWER1_EN_MASK BIT(2)
+#define DA9062AA_STANDBY_SHIFT 3
+#define DA9062AA_STANDBY_MASK BIT(3)
+#define DA9062AA_M_SYSTEM_EN_SHIFT 4
+#define DA9062AA_M_SYSTEM_EN_MASK BIT(4)
+#define DA9062AA_M_POWER_EN_SHIFT 5
+#define DA9062AA_M_POWER_EN_MASK BIT(5)
+#define DA9062AA_M_POWER1_EN_SHIFT 6
+#define DA9062AA_M_POWER1_EN_MASK BIT(6)
+
+/* DA9062AA_CONTROL_B = 0x00F */
+#define DA9062AA_WATCHDOG_PD_SHIFT 1
+#define DA9062AA_WATCHDOG_PD_MASK BIT(1)
+#define DA9062AA_FREEZE_EN_SHIFT 2
+#define DA9062AA_FREEZE_EN_MASK BIT(2)
+#define DA9062AA_NRES_MODE_SHIFT 3
+#define DA9062AA_NRES_MODE_MASK BIT(3)
+#define DA9062AA_NONKEY_LOCK_SHIFT 4
+#define DA9062AA_NONKEY_LOCK_MASK BIT(4)
+#define DA9062AA_NFREEZE_SHIFT 5
+#define DA9062AA_NFREEZE_MASK (0x03 << 5)
+#define DA9062AA_BUCK_SLOWSTART_SHIFT 7
+#define DA9062AA_BUCK_SLOWSTART_MASK BIT(7)
+
+/* DA9062AA_CONTROL_C = 0x010 */
+#define DA9062AA_DEBOUNCING_SHIFT 0
+#define DA9062AA_DEBOUNCING_MASK 0x07
+#define DA9062AA_AUTO_BOOT_SHIFT 3
+#define DA9062AA_AUTO_BOOT_MASK BIT(3)
+#define DA9062AA_OTPREAD_EN_SHIFT 4
+#define DA9062AA_OTPREAD_EN_MASK BIT(4)
+#define DA9062AA_SLEW_RATE_SHIFT 5
+#define DA9062AA_SLEW_RATE_MASK (0x03 << 5)
+#define DA9062AA_DEF_SUPPLY_SHIFT 7
+#define DA9062AA_DEF_SUPPLY_MASK BIT(7)
+
+/* DA9062AA_CONTROL_D = 0x011 */
+#define DA9062AA_TWDSCALE_SHIFT 0
+#define DA9062AA_TWDSCALE_MASK 0x07
+
+/* DA9062AA_CONTROL_E = 0x012 */
+#define DA9062AA_RTC_MODE_PD_SHIFT 0
+#define DA9062AA_RTC_MODE_PD_MASK 0x01
+#define DA9062AA_RTC_MODE_SD_SHIFT 1
+#define DA9062AA_RTC_MODE_SD_MASK BIT(1)
+#define DA9062AA_RTC_EN_SHIFT 2
+#define DA9062AA_RTC_EN_MASK BIT(2)
+#define DA9062AA_V_LOCK_SHIFT 7
+#define DA9062AA_V_LOCK_MASK BIT(7)
+
+/* DA9062AA_CONTROL_F = 0x013 */
+#define DA9062AA_WATCHDOG_SHIFT 0
+#define DA9062AA_WATCHDOG_MASK 0x01
+#define DA9062AA_SHUTDOWN_SHIFT 1
+#define DA9062AA_SHUTDOWN_MASK BIT(1)
+#define DA9062AA_WAKE_UP_SHIFT 2
+#define DA9062AA_WAKE_UP_MASK BIT(2)
+
+/* DA9062AA_PD_DIS = 0x014 */
+#define DA9062AA_GPI_DIS_SHIFT 0
+#define DA9062AA_GPI_DIS_MASK 0x01
+#define DA9062AA_PMIF_DIS_SHIFT 2
+#define DA9062AA_PMIF_DIS_MASK BIT(2)
+#define DA9062AA_CLDR_PAUSE_SHIFT 4
+#define DA9062AA_CLDR_PAUSE_MASK BIT(4)
+#define DA9062AA_BBAT_DIS_SHIFT 5
+#define DA9062AA_BBAT_DIS_MASK BIT(5)
+#define DA9062AA_OUT32K_PAUSE_SHIFT 6
+#define DA9062AA_OUT32K_PAUSE_MASK BIT(6)
+#define DA9062AA_PMCONT_DIS_SHIFT 7
+#define DA9062AA_PMCONT_DIS_MASK BIT(7)
+
+/* DA9062AA_GPIO_0_1 = 0x015 */
+#define DA9062AA_GPIO0_PIN_SHIFT 0
+#define DA9062AA_GPIO0_PIN_MASK 0x03
+#define DA9062AA_GPIO0_TYPE_SHIFT 2
+#define DA9062AA_GPIO0_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO0_WEN_SHIFT 3
+#define DA9062AA_GPIO0_WEN_MASK BIT(3)
+#define DA9062AA_GPIO1_PIN_SHIFT 4
+#define DA9062AA_GPIO1_PIN_MASK (0x03 << 4)
+#define DA9062AA_GPIO1_TYPE_SHIFT 6
+#define DA9062AA_GPIO1_TYPE_MASK BIT(6)
+#define DA9062AA_GPIO1_WEN_SHIFT 7
+#define DA9062AA_GPIO1_WEN_MASK BIT(7)
+
+/* DA9062AA_GPIO_2_3 = 0x016 */
+#define DA9062AA_GPIO2_PIN_SHIFT 0
+#define DA9062AA_GPIO2_PIN_MASK 0x03
+#define DA9062AA_GPIO2_TYPE_SHIFT 2
+#define DA9062AA_GPIO2_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO2_WEN_SHIFT 3
+#define DA9062AA_GPIO2_WEN_MASK BIT(3)
+#define DA9062AA_GPIO3_PIN_SHIFT 4
+#define DA9062AA_GPIO3_PIN_MASK (0x03 << 4)
+#define DA9062AA_GPIO3_TYPE_SHIFT 6
+#define DA9062AA_GPIO3_TYPE_MASK BIT(6)
+#define DA9062AA_GPIO3_WEN_SHIFT 7
+#define DA9062AA_GPIO3_WEN_MASK BIT(7)
+
+/* DA9062AA_GPIO_4 = 0x017 */
+#define DA9062AA_GPIO4_PIN_SHIFT 0
+#define DA9062AA_GPIO4_PIN_MASK 0x03
+#define DA9062AA_GPIO4_TYPE_SHIFT 2
+#define DA9062AA_GPIO4_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO4_WEN_SHIFT 3
+#define DA9062AA_GPIO4_WEN_MASK BIT(3)
+
+/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
+#define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0
+#define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01
+#define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1
+#define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1)
+#define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2
+#define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2)
+#define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3
+#define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3)
+#define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4
+#define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4)
+
+/* DA9062AA_GPIO_MODE0_4 = 0x01D */
+#define DA9062AA_GPIO0_MODE_SHIFT 0
+#define DA9062AA_GPIO0_MODE_MASK 0x01
+#define DA9062AA_GPIO1_MODE_SHIFT 1
+#define DA9062AA_GPIO1_MODE_MASK BIT(1)
+#define DA9062AA_GPIO2_MODE_SHIFT 2
+#define DA9062AA_GPIO2_MODE_MASK BIT(2)
+#define DA9062AA_GPIO3_MODE_SHIFT 3
+#define DA9062AA_GPIO3_MODE_MASK BIT(3)
+#define DA9062AA_GPIO4_MODE_SHIFT 4
+#define DA9062AA_GPIO4_MODE_MASK BIT(4)
+
+/* DA9062AA_GPIO_OUT0_2 = 0x01E */
+#define DA9062AA_GPIO0_OUT_SHIFT 0
+#define DA9062AA_GPIO0_OUT_MASK 0x07
+#define DA9062AA_GPIO1_OUT_SHIFT 3
+#define DA9062AA_GPIO1_OUT_MASK (0x07 << 3)
+#define DA9062AA_GPIO2_OUT_SHIFT 6
+#define DA9062AA_GPIO2_OUT_MASK (0x03 << 6)
+
+/* DA9062AA_GPIO_OUT3_4 = 0x01F */
+#define DA9062AA_GPIO3_OUT_SHIFT 0
+#define DA9062AA_GPIO3_OUT_MASK 0x07
+#define DA9062AA_GPIO4_OUT_SHIFT 3
+#define DA9062AA_GPIO4_OUT_MASK (0x03 << 3)
+
+/* DA9062AA_BUCK2_CONT = 0x020 */
+#define DA9062AA_BUCK2_EN_SHIFT 0
+#define DA9062AA_BUCK2_EN_MASK 0x01
+#define DA9062AA_BUCK2_GPI_SHIFT 1
+#define DA9062AA_BUCK2_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK2_CONF_SHIFT 3
+#define DA9062AA_BUCK2_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK2_GPI_SHIFT 5
+#define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK1_CONT = 0x021 */
+#define DA9062AA_BUCK1_EN_SHIFT 0
+#define DA9062AA_BUCK1_EN_MASK 0x01
+#define DA9062AA_BUCK1_GPI_SHIFT 1
+#define DA9062AA_BUCK1_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK1_CONF_SHIFT 3
+#define DA9062AA_BUCK1_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK1_GPI_SHIFT 5
+#define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK4_CONT = 0x022 */
+#define DA9062AA_BUCK4_EN_SHIFT 0
+#define DA9062AA_BUCK4_EN_MASK 0x01
+#define DA9062AA_BUCK4_GPI_SHIFT 1
+#define DA9062AA_BUCK4_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK4_CONF_SHIFT 3
+#define DA9062AA_BUCK4_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK4_GPI_SHIFT 5
+#define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK3_CONT = 0x024 */
+#define DA9062AA_BUCK3_EN_SHIFT 0
+#define DA9062AA_BUCK3_EN_MASK 0x01
+#define DA9062AA_BUCK3_GPI_SHIFT 1
+#define DA9062AA_BUCK3_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK3_CONF_SHIFT 3
+#define DA9062AA_BUCK3_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK3_GPI_SHIFT 5
+#define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_LDO1_CONT = 0x026 */
+#define DA9062AA_LDO1_EN_SHIFT 0
+#define DA9062AA_LDO1_EN_MASK 0x01
+#define DA9062AA_LDO1_GPI_SHIFT 1
+#define DA9062AA_LDO1_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO1_PD_DIS_SHIFT 3
+#define DA9062AA_LDO1_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO1_GPI_SHIFT 5
+#define DA9062AA_VLDO1_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO1_CONF_SHIFT 7
+#define DA9062AA_LDO1_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO2_CONT = 0x027 */
+#define DA9062AA_LDO2_EN_SHIFT 0
+#define DA9062AA_LDO2_EN_MASK 0x01
+#define DA9062AA_LDO2_GPI_SHIFT 1
+#define DA9062AA_LDO2_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO2_PD_DIS_SHIFT 3
+#define DA9062AA_LDO2_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO2_GPI_SHIFT 5
+#define DA9062AA_VLDO2_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO2_CONF_SHIFT 7
+#define DA9062AA_LDO2_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO3_CONT = 0x028 */
+#define DA9062AA_LDO3_EN_SHIFT 0
+#define DA9062AA_LDO3_EN_MASK 0x01
+#define DA9062AA_LDO3_GPI_SHIFT 1
+#define DA9062AA_LDO3_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO3_PD_DIS_SHIFT 3
+#define DA9062AA_LDO3_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO3_GPI_SHIFT 5
+#define DA9062AA_VLDO3_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO3_CONF_SHIFT 7
+#define DA9062AA_LDO3_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO4_CONT = 0x029 */
+#define DA9062AA_LDO4_EN_SHIFT 0
+#define DA9062AA_LDO4_EN_MASK 0x01
+#define DA9062AA_LDO4_GPI_SHIFT 1
+#define DA9062AA_LDO4_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO4_PD_DIS_SHIFT 3
+#define DA9062AA_LDO4_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO4_GPI_SHIFT 5
+#define DA9062AA_VLDO4_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO4_CONF_SHIFT 7
+#define DA9062AA_LDO4_CONF_MASK BIT(7)
+
+/* DA9062AA_DVC_1 = 0x032 */
+#define DA9062AA_VBUCK1_SEL_SHIFT 0
+#define DA9062AA_VBUCK1_SEL_MASK 0x01
+#define DA9062AA_VBUCK2_SEL_SHIFT 1
+#define DA9062AA_VBUCK2_SEL_MASK BIT(1)
+#define DA9062AA_VBUCK4_SEL_SHIFT 2
+#define DA9062AA_VBUCK4_SEL_MASK BIT(2)
+#define DA9062AA_VBUCK3_SEL_SHIFT 3
+#define DA9062AA_VBUCK3_SEL_MASK BIT(3)
+#define DA9062AA_VLDO1_SEL_SHIFT 4
+#define DA9062AA_VLDO1_SEL_MASK BIT(4)
+#define DA9062AA_VLDO2_SEL_SHIFT 5
+#define DA9062AA_VLDO2_SEL_MASK BIT(5)
+#define DA9062AA_VLDO3_SEL_SHIFT 6
+#define DA9062AA_VLDO3_SEL_MASK BIT(6)
+#define DA9062AA_VLDO4_SEL_SHIFT 7
+#define DA9062AA_VLDO4_SEL_MASK BIT(7)
+
+/* DA9062AA_COUNT_S = 0x040 */
+#define DA9062AA_COUNT_SEC_SHIFT 0
+#define DA9062AA_COUNT_SEC_MASK 0x3f
+#define DA9062AA_RTC_READ_SHIFT 7
+#define DA9062AA_RTC_READ_MASK BIT(7)
+
+/* DA9062AA_COUNT_MI = 0x041 */
+#define DA9062AA_COUNT_MIN_SHIFT 0
+#define DA9062AA_COUNT_MIN_MASK 0x3f
+
+/* DA9062AA_COUNT_H = 0x042 */
+#define DA9062AA_COUNT_HOUR_SHIFT 0
+#define DA9062AA_COUNT_HOUR_MASK 0x1f
+
+/* DA9062AA_COUNT_D = 0x043 */
+#define DA9062AA_COUNT_DAY_SHIFT 0
+#define DA9062AA_COUNT_DAY_MASK 0x1f
+
+/* DA9062AA_COUNT_MO = 0x044 */
+#define DA9062AA_COUNT_MONTH_SHIFT 0
+#define DA9062AA_COUNT_MONTH_MASK 0x0f
+
+/* DA9062AA_COUNT_Y = 0x045 */
+#define DA9062AA_COUNT_YEAR_SHIFT 0
+#define DA9062AA_COUNT_YEAR_MASK 0x3f
+#define DA9062AA_MONITOR_SHIFT 6
+#define DA9062AA_MONITOR_MASK BIT(6)
+
+/* DA9062AA_ALARM_S = 0x046 */
+#define DA9062AA_ALARM_SEC_SHIFT 0
+#define DA9062AA_ALARM_SEC_MASK 0x3f
+#define DA9062AA_ALARM_STATUS_SHIFT 6
+#define DA9062AA_ALARM_STATUS_MASK (0x03 << 6)
+
+/* DA9062AA_ALARM_MI = 0x047 */
+#define DA9062AA_ALARM_MIN_SHIFT 0
+#define DA9062AA_ALARM_MIN_MASK 0x3f
+
+/* DA9062AA_ALARM_H = 0x048 */
+#define DA9062AA_ALARM_HOUR_SHIFT 0
+#define DA9062AA_ALARM_HOUR_MASK 0x1f
+
+/* DA9062AA_ALARM_D = 0x049 */
+#define DA9062AA_ALARM_DAY_SHIFT 0
+#define DA9062AA_ALARM_DAY_MASK 0x1f
+
+/* DA9062AA_ALARM_MO = 0x04A */
+#define DA9062AA_ALARM_MONTH_SHIFT 0
+#define DA9062AA_ALARM_MONTH_MASK 0x0f
+#define DA9062AA_TICK_TYPE_SHIFT 4
+#define DA9062AA_TICK_TYPE_MASK BIT(4)
+#define DA9062AA_TICK_WAKE_SHIFT 5
+#define DA9062AA_TICK_WAKE_MASK BIT(5)
+
+/* DA9062AA_ALARM_Y = 0x04B */
+#define DA9062AA_ALARM_YEAR_SHIFT 0
+#define DA9062AA_ALARM_YEAR_MASK 0x3f
+#define DA9062AA_ALARM_ON_SHIFT 6
+#define DA9062AA_ALARM_ON_MASK BIT(6)
+#define DA9062AA_TICK_ON_SHIFT 7
+#define DA9062AA_TICK_ON_MASK BIT(7)
+
+/* DA9062AA_SECOND_A = 0x04C */
+#define DA9062AA_SECONDS_A_SHIFT 0
+#define DA9062AA_SECONDS_A_MASK 0xff
+
+/* DA9062AA_SECOND_B = 0x04D */
+#define DA9062AA_SECONDS_B_SHIFT 0
+#define DA9062AA_SECONDS_B_MASK 0xff
+
+/* DA9062AA_SECOND_C = 0x04E */
+#define DA9062AA_SECONDS_C_SHIFT 0
+#define DA9062AA_SECONDS_C_MASK 0xff
+
+/* DA9062AA_SECOND_D = 0x04F */
+#define DA9062AA_SECONDS_D_SHIFT 0
+#define DA9062AA_SECONDS_D_MASK 0xff
+
+/* DA9062AA_SEQ = 0x081 */
+#define DA9062AA_SEQ_POINTER_SHIFT 0
+#define DA9062AA_SEQ_POINTER_MASK 0x0f
+#define DA9062AA_NXT_SEQ_START_SHIFT 4
+#define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_TIMER = 0x082 */
+#define DA9062AA_SEQ_TIME_SHIFT 0
+#define DA9062AA_SEQ_TIME_MASK 0x0f
+#define DA9062AA_SEQ_DUMMY_SHIFT 4
+#define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4)
+
+/* DA9062AA_ID_2_1 = 0x083 */
+#define DA9062AA_LDO1_STEP_SHIFT 0
+#define DA9062AA_LDO1_STEP_MASK 0x0f
+#define DA9062AA_LDO2_STEP_SHIFT 4
+#define DA9062AA_LDO2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_4_3 = 0x084 */
+#define DA9062AA_LDO3_STEP_SHIFT 0
+#define DA9062AA_LDO3_STEP_MASK 0x0f
+#define DA9062AA_LDO4_STEP_SHIFT 4
+#define DA9062AA_LDO4_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_12_11 = 0x088 */
+#define DA9062AA_PD_DIS_STEP_SHIFT 4
+#define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_14_13 = 0x089 */
+#define DA9062AA_BUCK1_STEP_SHIFT 0
+#define DA9062AA_BUCK1_STEP_MASK 0x0f
+#define DA9062AA_BUCK2_STEP_SHIFT 4
+#define DA9062AA_BUCK2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_16_15 = 0x08A */
+#define DA9062AA_BUCK4_STEP_SHIFT 0
+#define DA9062AA_BUCK4_STEP_MASK 0x0f
+#define DA9062AA_BUCK3_STEP_SHIFT 4
+#define DA9062AA_BUCK3_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_22_21 = 0x08D */
+#define DA9062AA_GP_RISE1_STEP_SHIFT 0
+#define DA9062AA_GP_RISE1_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL1_STEP_SHIFT 4
+#define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_24_23 = 0x08E */
+#define DA9062AA_GP_RISE2_STEP_SHIFT 0
+#define DA9062AA_GP_RISE2_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL2_STEP_SHIFT 4
+#define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_26_25 = 0x08F */
+#define DA9062AA_GP_RISE3_STEP_SHIFT 0
+#define DA9062AA_GP_RISE3_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL3_STEP_SHIFT 4
+#define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_28_27 = 0x090 */
+#define DA9062AA_GP_RISE4_STEP_SHIFT 0
+#define DA9062AA_GP_RISE4_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL4_STEP_SHIFT 4
+#define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_30_29 = 0x091 */
+#define DA9062AA_GP_RISE5_STEP_SHIFT 0
+#define DA9062AA_GP_RISE5_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL5_STEP_SHIFT 4
+#define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_32_31 = 0x092 */
+#define DA9062AA_WAIT_STEP_SHIFT 0
+#define DA9062AA_WAIT_STEP_MASK 0x0f
+#define DA9062AA_EN32K_STEP_SHIFT 4
+#define DA9062AA_EN32K_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_A = 0x095 */
+#define DA9062AA_SYSTEM_END_SHIFT 0
+#define DA9062AA_SYSTEM_END_MASK 0x0f
+#define DA9062AA_POWER_END_SHIFT 4
+#define DA9062AA_POWER_END_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_B = 0x096 */
+#define DA9062AA_MAX_COUNT_SHIFT 0
+#define DA9062AA_MAX_COUNT_MASK 0x0f
+#define DA9062AA_PART_DOWN_SHIFT 4
+#define DA9062AA_PART_DOWN_MASK (0x0f << 4)
+
+/* DA9062AA_WAIT = 0x097 */
+#define DA9062AA_WAIT_TIME_SHIFT 0
+#define DA9062AA_WAIT_TIME_MASK 0x0f
+#define DA9062AA_WAIT_MODE_SHIFT 4
+#define DA9062AA_WAIT_MODE_MASK BIT(4)
+#define DA9062AA_TIME_OUT_SHIFT 5
+#define DA9062AA_TIME_OUT_MASK BIT(5)
+#define DA9062AA_WAIT_DIR_SHIFT 6
+#define DA9062AA_WAIT_DIR_MASK (0x03 << 6)
+
+/* DA9062AA_EN_32K = 0x098 */
+#define DA9062AA_STABILISATION_TIME_SHIFT 0
+#define DA9062AA_STABILISATION_TIME_MASK 0x07
+#define DA9062AA_CRYSTAL_SHIFT 3
+#define DA9062AA_CRYSTAL_MASK BIT(3)
+#define DA9062AA_DELAY_MODE_SHIFT 4
+#define DA9062AA_DELAY_MODE_MASK BIT(4)
+#define DA9062AA_OUT_CLOCK_SHIFT 5
+#define DA9062AA_OUT_CLOCK_MASK BIT(5)
+#define DA9062AA_RTC_CLOCK_SHIFT 6
+#define DA9062AA_RTC_CLOCK_MASK BIT(6)
+#define DA9062AA_EN_32KOUT_SHIFT 7
+#define DA9062AA_EN_32KOUT_MASK BIT(7)
+
+/* DA9062AA_RESET = 0x099 */
+#define DA9062AA_RESET_TIMER_SHIFT 0
+#define DA9062AA_RESET_TIMER_MASK 0x3f
+#define DA9062AA_RESET_EVENT_SHIFT 6
+#define DA9062AA_RESET_EVENT_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK_ILIM_A = 0x09A */
+#define DA9062AA_BUCK3_ILIM_SHIFT 0
+#define DA9062AA_BUCK3_ILIM_MASK 0x0f
+
+/* DA9062AA_BUCK_ILIM_B = 0x09B */
+#define DA9062AA_BUCK4_ILIM_SHIFT 0
+#define DA9062AA_BUCK4_ILIM_MASK 0x0f
+
+/* DA9062AA_BUCK_ILIM_C = 0x09C */
+#define DA9062AA_BUCK1_ILIM_SHIFT 0
+#define DA9062AA_BUCK1_ILIM_MASK 0x0f
+#define DA9062AA_BUCK2_ILIM_SHIFT 4
+#define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4)
+
+/* DA9062AA_BUCK2_CFG = 0x09D */
+#define DA9062AA_BUCK2_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK2_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK2_MODE_SHIFT 6
+#define DA9062AA_BUCK2_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK1_CFG = 0x09E */
+#define DA9062AA_BUCK1_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK1_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK1_MODE_SHIFT 6
+#define DA9062AA_BUCK1_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK4_CFG = 0x09F */
+#define DA9062AA_BUCK4_VTTR_EN_SHIFT 3
+#define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3)
+#define DA9062AA_BUCK4_VTT_EN_SHIFT 4
+#define DA9062AA_BUCK4_VTT_EN_MASK BIT(4)
+#define DA9062AA_BUCK4_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK4_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK4_MODE_SHIFT 6
+#define DA9062AA_BUCK4_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK3_CFG = 0x0A0 */
+#define DA9062AA_BUCK3_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK3_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK3_MODE_SHIFT 6
+#define DA9062AA_BUCK3_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_VBUCK2_A = 0x0A3 */
+#define DA9062AA_VBUCK2_A_SHIFT 0
+#define DA9062AA_VBUCK2_A_MASK 0x7f
+#define DA9062AA_BUCK2_SL_A_SHIFT 7
+#define DA9062AA_BUCK2_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK1_A = 0x0A4 */
+#define DA9062AA_VBUCK1_A_SHIFT 0
+#define DA9062AA_VBUCK1_A_MASK 0x7f
+#define DA9062AA_BUCK1_SL_A_SHIFT 7
+#define DA9062AA_BUCK1_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK4_A = 0x0A5 */
+#define DA9062AA_VBUCK4_A_SHIFT 0
+#define DA9062AA_VBUCK4_A_MASK 0x7f
+#define DA9062AA_BUCK4_SL_A_SHIFT 7
+#define DA9062AA_BUCK4_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK3_A = 0x0A7 */
+#define DA9062AA_VBUCK3_A_SHIFT 0
+#define DA9062AA_VBUCK3_A_MASK 0x7f
+#define DA9062AA_BUCK3_SL_A_SHIFT 7
+#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO1_A = 0x0A9 */
+#define DA9062AA_VLDO1_A_SHIFT 0
+#define DA9062AA_VLDO1_A_MASK 0x3f
+#define DA9062AA_LDO1_SL_A_SHIFT 7
+#define DA9062AA_LDO1_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO2_A = 0x0AA */
+#define DA9062AA_VLDO2_A_SHIFT 0
+#define DA9062AA_VLDO2_A_MASK 0x3f
+#define DA9062AA_LDO2_SL_A_SHIFT 7
+#define DA9062AA_LDO2_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO3_A = 0x0AB */
+#define DA9062AA_VLDO3_A_SHIFT 0
+#define DA9062AA_VLDO3_A_MASK 0x3f
+#define DA9062AA_LDO3_SL_A_SHIFT 7
+#define DA9062AA_LDO3_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO4_A = 0x0AC */
+#define DA9062AA_VLDO4_A_SHIFT 0
+#define DA9062AA_VLDO4_A_MASK 0x3f
+#define DA9062AA_LDO4_SL_A_SHIFT 7
+#define DA9062AA_LDO4_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK2_B = 0x0B4 */
+#define DA9062AA_VBUCK2_B_SHIFT 0
+#define DA9062AA_VBUCK2_B_MASK 0x7f
+#define DA9062AA_BUCK2_SL_B_SHIFT 7
+#define DA9062AA_BUCK2_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK1_B = 0x0B5 */
+#define DA9062AA_VBUCK1_B_SHIFT 0
+#define DA9062AA_VBUCK1_B_MASK 0x7f
+#define DA9062AA_BUCK1_SL_B_SHIFT 7
+#define DA9062AA_BUCK1_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK4_B = 0x0B6 */
+#define DA9062AA_VBUCK4_B_SHIFT 0
+#define DA9062AA_VBUCK4_B_MASK 0x7f
+#define DA9062AA_BUCK4_SL_B_SHIFT 7
+#define DA9062AA_BUCK4_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK3_B = 0x0B8 */
+#define DA9062AA_VBUCK3_B_SHIFT 0
+#define DA9062AA_VBUCK3_B_MASK 0x7f
+#define DA9062AA_BUCK3_SL_B_SHIFT 7
+#define DA9062AA_BUCK3_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO1_B = 0x0BA */
+#define DA9062AA_VLDO1_B_SHIFT 0
+#define DA9062AA_VLDO1_B_MASK 0x3f
+#define DA9062AA_LDO1_SL_B_SHIFT 7
+#define DA9062AA_LDO1_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO2_B = 0x0BB */
+#define DA9062AA_VLDO2_B_SHIFT 0
+#define DA9062AA_VLDO2_B_MASK 0x3f
+#define DA9062AA_LDO2_SL_B_SHIFT 7
+#define DA9062AA_LDO2_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO3_B = 0x0BC */
+#define DA9062AA_VLDO3_B_SHIFT 0
+#define DA9062AA_VLDO3_B_MASK 0x3f
+#define DA9062AA_LDO3_SL_B_SHIFT 7
+#define DA9062AA_LDO3_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO4_B = 0x0BD */
+#define DA9062AA_VLDO4_B_SHIFT 0
+#define DA9062AA_VLDO4_B_MASK 0x3f
+#define DA9062AA_LDO4_SL_B_SHIFT 7
+#define DA9062AA_LDO4_SL_B_MASK BIT(7)
+
+/* DA9062AA_BBAT_CONT = 0x0C5 */
+#define DA9062AA_BCHG_VSET_SHIFT 0
+#define DA9062AA_BCHG_VSET_MASK 0x0f
+#define DA9062AA_BCHG_ISET_SHIFT 4
+#define DA9062AA_BCHG_ISET_MASK (0x0f << 4)
+
+/* DA9062AA_INTERFACE = 0x105 */
+#define DA9062AA_IF_BASE_ADDR_SHIFT 4
+#define DA9062AA_IF_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9062AA_CONFIG_A = 0x106 */
+#define DA9062AA_PM_I_V_SHIFT 0
+#define DA9062AA_PM_I_V_MASK 0x01
+#define DA9062AA_PM_O_TYPE_SHIFT 2
+#define DA9062AA_PM_O_TYPE_MASK BIT(2)
+#define DA9062AA_IRQ_TYPE_SHIFT 3
+#define DA9062AA_IRQ_TYPE_MASK BIT(3)
+#define DA9062AA_PM_IF_V_SHIFT 4
+#define DA9062AA_PM_IF_V_MASK BIT(4)
+#define DA9062AA_PM_IF_FMP_SHIFT 5
+#define DA9062AA_PM_IF_FMP_MASK BIT(5)
+#define DA9062AA_PM_IF_HSM_SHIFT 6
+#define DA9062AA_PM_IF_HSM_MASK BIT(6)
+
+/* DA9062AA_CONFIG_B = 0x107 */
+#define DA9062AA_VDD_FAULT_ADJ_SHIFT 0
+#define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f
+#define DA9062AA_VDD_HYST_ADJ_SHIFT 4
+#define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4)
+
+/* DA9062AA_CONFIG_C = 0x108 */
+#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2
+#define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2)
+#define DA9062AA_BUCK1_CLK_INV_SHIFT 3
+#define DA9062AA_BUCK1_CLK_INV_MASK BIT(3)
+#define DA9062AA_BUCK4_CLK_INV_SHIFT 4
+#define DA9062AA_BUCK4_CLK_INV_MASK BIT(4)
+#define DA9062AA_BUCK3_CLK_INV_SHIFT 6
+#define DA9062AA_BUCK3_CLK_INV_MASK BIT(6)
+
+/* DA9062AA_CONFIG_D = 0x109 */
+#define DA9062AA_GPI_V_SHIFT 0
+#define DA9062AA_GPI_V_MASK 0x01
+#define DA9062AA_NIRQ_MODE_SHIFT 1
+#define DA9062AA_NIRQ_MODE_MASK BIT(1)
+#define DA9062AA_SYSTEM_EN_RD_SHIFT 2
+#define DA9062AA_SYSTEM_EN_RD_MASK BIT(2)
+#define DA9062AA_FORCE_RESET_SHIFT 5
+#define DA9062AA_FORCE_RESET_MASK BIT(5)
+
+/* DA9062AA_CONFIG_E = 0x10A */
+#define DA9062AA_BUCK1_AUTO_SHIFT 0
+#define DA9062AA_BUCK1_AUTO_MASK 0x01
+#define DA9062AA_BUCK2_AUTO_SHIFT 1
+#define DA9062AA_BUCK2_AUTO_MASK BIT(1)
+#define DA9062AA_BUCK4_AUTO_SHIFT 2
+#define DA9062AA_BUCK4_AUTO_MASK BIT(2)
+#define DA9062AA_BUCK3_AUTO_SHIFT 4
+#define DA9062AA_BUCK3_AUTO_MASK BIT(4)
+
+/* DA9062AA_CONFIG_G = 0x10C */
+#define DA9062AA_LDO1_AUTO_SHIFT 0
+#define DA9062AA_LDO1_AUTO_MASK 0x01
+#define DA9062AA_LDO2_AUTO_SHIFT 1
+#define DA9062AA_LDO2_AUTO_MASK BIT(1)
+#define DA9062AA_LDO3_AUTO_SHIFT 2
+#define DA9062AA_LDO3_AUTO_MASK BIT(2)
+#define DA9062AA_LDO4_AUTO_SHIFT 3
+#define DA9062AA_LDO4_AUTO_MASK BIT(3)
+
+/* DA9062AA_CONFIG_H = 0x10D */
+#define DA9062AA_BUCK1_2_MERGE_SHIFT 3
+#define DA9062AA_BUCK1_2_MERGE_MASK BIT(3)
+#define DA9062AA_BUCK2_OD_SHIFT 5
+#define DA9062AA_BUCK2_OD_MASK BIT(5)
+#define DA9062AA_BUCK1_OD_SHIFT 6
+#define DA9062AA_BUCK1_OD_MASK BIT(6)
+
+/* DA9062AA_CONFIG_I = 0x10E */
+#define DA9062AA_NONKEY_PIN_SHIFT 0
+#define DA9062AA_NONKEY_PIN_MASK 0x03
+#define DA9062AA_nONKEY_SD_SHIFT 2
+#define DA9062AA_nONKEY_SD_MASK BIT(2)
+#define DA9062AA_WATCHDOG_SD_SHIFT 3
+#define DA9062AA_WATCHDOG_SD_MASK BIT(3)
+#define DA9062AA_KEY_SD_MODE_SHIFT 4
+#define DA9062AA_KEY_SD_MODE_MASK BIT(4)
+#define DA9062AA_HOST_SD_MODE_SHIFT 5
+#define DA9062AA_HOST_SD_MODE_MASK BIT(5)
+#define DA9062AA_INT_SD_MODE_SHIFT 6
+#define DA9062AA_INT_SD_MODE_MASK BIT(6)
+#define DA9062AA_LDO_SD_SHIFT 7
+#define DA9062AA_LDO_SD_MASK BIT(7)
+
+/* DA9062AA_CONFIG_J = 0x10F */
+#define DA9062AA_KEY_DELAY_SHIFT 0
+#define DA9062AA_KEY_DELAY_MASK 0x03
+#define DA9062AA_SHUT_DELAY_SHIFT 2
+#define DA9062AA_SHUT_DELAY_MASK (0x03 << 2)
+#define DA9062AA_RESET_DURATION_SHIFT 4
+#define DA9062AA_RESET_DURATION_MASK (0x03 << 4)
+#define DA9062AA_TWOWIRE_TO_SHIFT 6
+#define DA9062AA_TWOWIRE_TO_MASK BIT(6)
+#define DA9062AA_IF_RESET_SHIFT 7
+#define DA9062AA_IF_RESET_MASK BIT(7)
+
+/* DA9062AA_CONFIG_K = 0x110 */
+#define DA9062AA_GPIO0_PUPD_SHIFT 0
+#define DA9062AA_GPIO0_PUPD_MASK 0x01
+#define DA9062AA_GPIO1_PUPD_SHIFT 1
+#define DA9062AA_GPIO1_PUPD_MASK BIT(1)
+#define DA9062AA_GPIO2_PUPD_SHIFT 2
+#define DA9062AA_GPIO2_PUPD_MASK BIT(2)
+#define DA9062AA_GPIO3_PUPD_SHIFT 3
+#define DA9062AA_GPIO3_PUPD_MASK BIT(3)
+#define DA9062AA_GPIO4_PUPD_SHIFT 4
+#define DA9062AA_GPIO4_PUPD_MASK BIT(4)
+
+/* DA9062AA_CONFIG_M = 0x112 */
+#define DA9062AA_NSHUTDOWN_PU_SHIFT 1
+#define DA9062AA_NSHUTDOWN_PU_MASK BIT(1)
+#define DA9062AA_WDG_MODE_SHIFT 3
+#define DA9062AA_WDG_MODE_MASK BIT(3)
+#define DA9062AA_OSC_FRQ_SHIFT 4
+#define DA9062AA_OSC_FRQ_MASK (0x0f << 4)
+
+/* DA9062AA_TRIM_CLDR = 0x120 */
+#define DA9062AA_TRIM_CLDR_SHIFT 0
+#define DA9062AA_TRIM_CLDR_MASK 0xff
+
+/* DA9062AA_GP_ID_0 = 0x121 */
+#define DA9062AA_GP_0_SHIFT 0
+#define DA9062AA_GP_0_MASK 0xff
+
+/* DA9062AA_GP_ID_1 = 0x122 */
+#define DA9062AA_GP_1_SHIFT 0
+#define DA9062AA_GP_1_MASK 0xff
+
+/* DA9062AA_GP_ID_2 = 0x123 */
+#define DA9062AA_GP_2_SHIFT 0
+#define DA9062AA_GP_2_MASK 0xff
+
+/* DA9062AA_GP_ID_3 = 0x124 */
+#define DA9062AA_GP_3_SHIFT 0
+#define DA9062AA_GP_3_MASK 0xff
+
+/* DA9062AA_GP_ID_4 = 0x125 */
+#define DA9062AA_GP_4_SHIFT 0
+#define DA9062AA_GP_4_MASK 0xff
+
+/* DA9062AA_GP_ID_5 = 0x126 */
+#define DA9062AA_GP_5_SHIFT 0
+#define DA9062AA_GP_5_MASK 0xff
+
+/* DA9062AA_GP_ID_6 = 0x127 */
+#define DA9062AA_GP_6_SHIFT 0
+#define DA9062AA_GP_6_MASK 0xff
+
+/* DA9062AA_GP_ID_7 = 0x128 */
+#define DA9062AA_GP_7_SHIFT 0
+#define DA9062AA_GP_7_MASK 0xff
+
+/* DA9062AA_GP_ID_8 = 0x129 */
+#define DA9062AA_GP_8_SHIFT 0
+#define DA9062AA_GP_8_MASK 0xff
+
+/* DA9062AA_GP_ID_9 = 0x12A */
+#define DA9062AA_GP_9_SHIFT 0
+#define DA9062AA_GP_9_MASK 0xff
+
+/* DA9062AA_GP_ID_10 = 0x12B */
+#define DA9062AA_GP_10_SHIFT 0
+#define DA9062AA_GP_10_MASK 0xff
+
+/* DA9062AA_GP_ID_11 = 0x12C */
+#define DA9062AA_GP_11_SHIFT 0
+#define DA9062AA_GP_11_MASK 0xff
+
+/* DA9062AA_GP_ID_12 = 0x12D */
+#define DA9062AA_GP_12_SHIFT 0
+#define DA9062AA_GP_12_MASK 0xff
+
+/* DA9062AA_GP_ID_13 = 0x12E */
+#define DA9062AA_GP_13_SHIFT 0
+#define DA9062AA_GP_13_MASK 0xff
+
+/* DA9062AA_GP_ID_14 = 0x12F */
+#define DA9062AA_GP_14_SHIFT 0
+#define DA9062AA_GP_14_MASK 0xff
+
+/* DA9062AA_GP_ID_15 = 0x130 */
+#define DA9062AA_GP_15_SHIFT 0
+#define DA9062AA_GP_15_MASK 0xff
+
+/* DA9062AA_GP_ID_16 = 0x131 */
+#define DA9062AA_GP_16_SHIFT 0
+#define DA9062AA_GP_16_MASK 0xff
+
+/* DA9062AA_GP_ID_17 = 0x132 */
+#define DA9062AA_GP_17_SHIFT 0
+#define DA9062AA_GP_17_MASK 0xff
+
+/* DA9062AA_GP_ID_18 = 0x133 */
+#define DA9062AA_GP_18_SHIFT 0
+#define DA9062AA_GP_18_MASK 0xff
+
+/* DA9062AA_GP_ID_19 = 0x134 */
+#define DA9062AA_GP_19_SHIFT 0
+#define DA9062AA_GP_19_MASK 0xff
+
+/* DA9062AA_DEVICE_ID = 0x181 */
+#define DA9062AA_DEV_ID_SHIFT 0
+#define DA9062AA_DEV_ID_MASK 0xff
+
+/* DA9062AA_VARIANT_ID = 0x182 */
+#define DA9062AA_VRC_SHIFT 0
+#define DA9062AA_VRC_MASK 0x0f
+#define DA9062AA_MRC_SHIFT 4
+#define DA9062AA_MRC_MASK (0x0f << 4)
+
+/* DA9062AA_CUSTOMER_ID = 0x183 */
+#define DA9062AA_CUST_ID_SHIFT 0
+#define DA9062AA_CUST_ID_MASK 0xff
+
+/* DA9062AA_CONFIG_ID = 0x184 */
+#define DA9062AA_CONFIG_REV_SHIFT 0
+#define DA9062AA_CONFIG_REV_MASK 0xff
+
+#endif /* __DA9062_H__ */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d822ba13..621af82123c6 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@ enum da9063_irqs {
DA9063_IRQ_COMP_1V2,
DA9063_IRQ_LDO_LIM,
DA9063_IRQ_REG_UVOV,
+ DA9063_IRQ_DVC_RDY,
DA9063_IRQ_VDD_MON,
DA9063_IRQ_WARN,
DA9063_IRQ_GPI0,
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac782fa83..2b300b44f994 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
-/* Watchdog resources */
-#define ICH_RES_IO_TCO 0
-#define ICH_RES_IO_SMI 1
-#define ICH_RES_MEM_OFF 2
-#define ICH_RES_MEM_GCS_PMC 0
-
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b0d1c1..45b8e8aa1fbf 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@ struct mt6397_chip {
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
+ u16 wake_mask[2];
u16 irq_masks_cur[2];
u16 irq_masks_cache[2];
};
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
new file mode 100644
index 000000000000..eb492d47f717
--- /dev/null
+++ b/include/linux/microchipphy.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MICROCHIPPHY_H
+#define _MICROCHIPPHY_H
+
+#define LAN88XX_INT_MASK (0x19)
+#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
+#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
+#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_MASK_RX__ER_ (0x0001)
+
+#define LAN88XX_INT_STS (0x1A)
+#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
+#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_STS_RESERVED_ (0x0010)
+#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_STS_RX_ER_ (0x0001)
+
+#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
+#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
+#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
+#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
+
+/* Extended Register Page 1 space */
+#define LAN88XX_EXT_MODE_CTRL (0x13)
+#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
+#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
+#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
+#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
+
+/* MMD 3 Registers */
+#define LAN88XX_MMD3_CHIP_ID (32877)
+#define LAN88XX_MMD3_CHIP_REV (32878)
+
+#endif /* _MICROCHIPPHY_H */
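A brief illustration, not part of the patch: registers in extended page space 1 such as LAN88XX_EXT_MODE_CTRL are only reachable after selecting the page through LAN88XX_EXT_PAGE_ACCESS, using the generic phylib accessors. A sketch forcing MDI-X, with error handling omitted for brevity:

#include <linux/phy.h>
#include <linux/microchipphy.h>

/* Sketch: force MDI-X on a LAN88xx PHY via extended register page 1 */
static void lan88xx_force_mdix(struct phy_device *phydev)
{
	int val;

	/* Switch to extended page 1, update the MDI/MDI-X control field */
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);

	val = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	val &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	val |= LAN88XX_EXT_MODE_CTRL_MDI_X_;
	phy_write(phydev, LAN88XX_EXT_MODE_CTRL, val);

	/* Return to the default register page */
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
}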
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12a1163..09cebe528488 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
enum {
MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
- MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
MLX4_CQE_L2_TUNNEL = 1 << 27,
MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3b4a..baad4cb8e9b0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -79,7 +79,8 @@ enum {
enum {
MLX4_MAX_PORTS = 2,
- MLX4_MAX_PORT_PKEYS = 128
+ MLX4_MAX_PORT_PKEYS = 128,
+ MLX4_MAX_PORT_GIDS = 128
};
/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
@@ -211,6 +212,8 @@ enum {
MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
+ MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
};
enum {
@@ -581,6 +584,7 @@ struct mlx4_caps {
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 phv_bit[MLX4_MAX_PORTS + 1];
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
@@ -1332,6 +1336,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 9553a73d2049..5a06d969338e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -59,6 +59,7 @@ struct mlx4_interface {
void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, unsigned long param);
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
+ void (*activate)(struct mlx4_dev *dev, void *context);
struct list_head list;
enum mlx4_protocol protocol;
int flags;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539e5456..de45a51b3f04 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@ enum {
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
- MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2097..8eb3b19af2a4 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,6 +402,17 @@ struct mlx5_cmd_teardown_hca_mbox_out {
u8 rsvd[8];
};
+struct mlx5_cmd_query_special_contexts_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cmd_query_special_contexts_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 dump_fill_mkey;
+ __be32 resd_lkey;
+};
+
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@@ -1182,6 +1193,16 @@ enum {
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};
+enum {
+ MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
+ MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
+ MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
+ MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
+ MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+ MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88c2429..27b53f9a24ad 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@ enum {
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PFCC = 0x5007,
+ MLX5_REG_PPCNT = 0x5008,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
@@ -151,8 +153,8 @@ enum mlx5_dev_event {
};
enum mlx5_port_status {
- MLX5_PORT_UP = 1 << 1,
- MLX5_PORT_DOWN = 1 << 2,
+ MLX5_PORT_UP = 1,
+ MLX5_PORT_DOWN = 2,
};
struct mlx5_uuar_info {
@@ -380,7 +382,7 @@ struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
- void __iomem *wc_map;
+ void __iomem *bf_map;
void __iomem *map;
};
@@ -435,6 +437,8 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+ struct io_mapping *bf_mapping;
+
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -463,6 +467,10 @@ struct mlx5_priv {
/* end: mr staff */
/* start: alloc staff */
+ /* protect buffer allocation according to numa node */
+ struct mutex alloc_mutex;
+ int numa_node;
+
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc staff */
@@ -672,6 +680,8 @@ void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -764,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+ int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
@@ -828,6 +845,7 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
+int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
struct mlx5_profile {
u64 mask;
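The renamed mlx5_set_port_admin_status()/mlx5_query_port_admin_status() pair and the new mlx5_set_port_pause()/mlx5_query_port_pause() helpers declared above operate on the PAOS register and the newly listed PFCC register. A minimal usage sketch, with a hypothetical caller and trimmed error handling:

static int example_port_up_with_pause(struct mlx5_core_dev *dev)
{
	enum mlx5_port_status status;
	int err;

	/* Administratively enable the port (PAOS). */
	err = mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
	if (err)
		return err;

	/* Enable symmetric rx/tx pause frames (PFCC). */
	err = mlx5_set_port_pause(dev, 1, 1);
	if (err)
		return err;

	/* Read back the administrative state. */
	return mlx5_query_port_admin_status(dev, &status);
}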
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fee041c..dd2097455a2e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
};
enum {
- MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
- MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
};
enum {
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
struct mlx5_ifc_tisc_bits ctx;
};
+struct mlx5_ifc_modify_tir_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lro[0x1];
+};
+
struct mlx5_ifc_modify_tir_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
u8 reserved_4[0x40];
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_rqt_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 rqn_list[0x1];
+};
+
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_rqt_bitmask_bits bitmask;
u8 reserved_4[0x40];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf6f117fcf4d..91c08f6f0dc9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
+#include <linux/err.h>
struct mempolicy;
struct anon_vma;
@@ -124,8 +125,10 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
@@ -245,7 +248,10 @@ struct vm_fault {
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
+ int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+ pmd_t *, unsigned int flags);
void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
/* notification that a previously read-only page is about to become
@@ -304,18 +310,6 @@ struct inode;
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
-/* It's valid only if the page is free path or free_list */
-static inline void set_freepage_migratetype(struct page *page, int migratetype)
-{
- page->index = migratetype;
-}
-
-/* It's valid only if the page is free path or free_list */
-static inline int get_freepage_migratetype(struct page *page)
-{
- return page->index;
-}
-
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)
@@ -356,20 +350,15 @@ static inline int get_page_unless_zero(struct page *page)
return atomic_inc_not_zero(&page->_count);
}
-/*
- * Try to drop a ref unless the page has a refcount of one, return false if
- * that is the case.
- * This is to make sure that the refcount won't become zero after this drop.
- * This can be called when MMU is off so it must not access
- * any of the virtual mappings.
- */
-static inline int put_page_unless_one(struct page *page)
-{
- return atomic_add_unless(&page->_count, -1, 1);
-}
-
extern int page_is_ram(unsigned long pfn);
-extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
+
+enum {
+ REGION_INTERSECTS,
+ REGION_DISJOINT,
+ REGION_MIXED,
+};
+
+int region_intersects(resource_size_t offset, size_t size, const char *type);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
@@ -1226,6 +1215,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+ unsigned int nr_allocated; /* Number of frames we have space for */
+ unsigned int nr_frames; /* Number of frames stored in ptrs array */
+ bool got_ref; /* Did we pin pages by getting page ref? */
+ bool is_pfns; /* Does array contain pages or pfns? */
+ void *ptrs[0]; /* Array of pinned pfns / pages. Use
+ * frame_vector_pages() or frame_vector_pfns()
+ * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+ bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+ return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+ if (vec->is_pfns) {
+ int err = frame_vector_to_pages(vec);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+ return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+ if (!vec->is_pfns)
+ frame_vector_to_pfns(vec);
+ return (unsigned long *)(vec->ptrs);
+}
+
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
@@ -1257,6 +1289,11 @@ static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops;
+}
+
static inline int stack_guard_page_start(struct vm_area_struct *vma,
unsigned long addr)
{
@@ -1833,7 +1870,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *);
+ struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1880,11 +1917,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+static inline unsigned long
+do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot, unsigned long flags,
+ unsigned long pgoff, unsigned long *populate)
+{
+ return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
+}
+
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
@@ -2183,6 +2228,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
+extern void put_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
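The frame_vector API introduced above follows a create/pin/convert/unpin/destroy pattern. The sketch below is illustrative only: the buffer address, frame count and logging are hypothetical, it assumes the declarations above are in scope, and real callers add their own locking around the pinned range.

static int example_pin_user_buffer(unsigned long start, unsigned int nr_frames)
{
	struct frame_vector *vec;
	struct page **pages;
	unsigned int i;
	int ret;

	vec = frame_vector_create(nr_frames);
	if (!vec)
		return -ENOMEM;

	/* Pin the user pages backing [start, start + nr_frames * PAGE_SIZE). */
	ret = get_vaddr_frames(start, nr_frames, true, false, vec);
	if (ret < 0)
		goto out_destroy;

	/* View the pinned entries as struct page pointers. */
	pages = frame_vector_pages(vec);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put;
	}

	for (i = 0; i < frame_vector_count(vec); i++)
		pr_debug("pinned pfn %lx\n", page_to_pfn(pages[i]));

	ret = 0;
out_put:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}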
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 15549578d559..3d6baa7d4534 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -235,7 +235,7 @@ struct page_frag_cache {
bool pfmemalloc;
};
-typedef unsigned long __nocast vm_flags_t;
+typedef unsigned long vm_flags_t;
/*
* A region containing a mapping of a non-memory backed file under NOMMU
@@ -256,6 +256,16 @@ struct vm_region {
* this region */
};
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+ struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
+
/*
* This struct defines a memory VMM memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
@@ -322,6 +332,7 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
struct core_thread {
@@ -543,6 +554,7 @@ enum tlb_flush_reason {
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
+ TLB_REMOTE_SEND_IPI,
NR_TLB_FLUSH_REASONS,
};
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d3776d25925..fdd0779ccdfa 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -279,10 +279,13 @@ struct mmc_card {
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
+#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
+ unsigned int eg_boundary; /* don't cross erase-group boundaries */
u8 erased_byte; /* value of erased bytes */
u32 raw_cid[4]; /* raw card CID */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 5be97676f1fa..134c57422740 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -98,6 +98,7 @@ struct mmc_data;
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
* @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @dto_timer: Timer used when the data transfer over (DTO) interrupt scheme is broken.
*
* Locking
* =======
@@ -153,11 +154,7 @@ struct dw_mci {
dma_addr_t sg_dma;
void *sg_cpu;
const struct dw_mci_dma_ops *dma_ops;
-#ifdef CONFIG_MMC_DW_IDMAC
unsigned int ring_size;
-#else
- struct dw_mci_dma_data *dma_data;
-#endif
u32 cmd_status;
u32 data_status;
u32 stop_cmdr;
@@ -204,6 +201,7 @@ struct dw_mci {
int sdio_id0;
struct timer_list cmd11_timer;
+ struct timer_list dto_timer;
};
/* DMA ops for Internal/External DMAC interface */
@@ -226,6 +224,8 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
+/* Use a timer when the data transfer over (DTO) interrupt scheme is broken */
+#define DW_MCI_QUIRK_BROKEN_DTO BIT(4)
struct dma_pdata;
@@ -259,7 +259,6 @@ struct dw_mci_board {
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
- struct block_settings *blk_settings;
};
#endif /* LINUX_MMC_DW_MMC_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1369e54faeb7..83b81fd865f3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -412,7 +412,8 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
host->sdio_irq_pending = true;
- wake_up_process(host->sdio_irq_thread);
+ if (host->sdio_irq_thread)
+ wake_up_process(host->sdio_irq_thread);
}
void sdio_run_irqs(struct mmc_host *host);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 61cd67f4d788..a1a210d59961 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -66,6 +66,16 @@ struct mmu_notifier_ops {
unsigned long end);
/*
+ * clear_young is a lightweight version of clear_flush_young. Like the
+ * latter, it is supposed to test-and-clear the young/accessed bitflag
+ * in the secondary pte, but it may omit flushing the secondary tlb.
+ */
+ int (*clear_young)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+
+ /*
* test_young is called to check the young/accessed bitflag in
* the secondary pte. This is used to know if the page is
* frequently used without actually clearing the flag or tearing
@@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end);
+extern int __mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
+static inline int mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_clear_young(mm, start, end);
+ return 0;
+}
+
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
@@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__young; \
})
+#define ptep_clear_young_notify(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
+ __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
+ ___address + PAGE_SIZE); \
+ __young; \
+})
+
+#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
+ __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
+ ___address + PMD_SIZE); \
+ __young; \
+})
+
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
unsigned long ___addr = __address & PAGE_MASK; \
@@ -427,6 +471,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_young_notify ptep_test_and_clear_young
+#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
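The new clear_young callback is a lighter-weight sibling of clear_flush_young: it tests and clears the accessed bit in the secondary page tables but may skip the secondary TLB flush. A hypothetical secondary-MMU driver would wire it up roughly as follows; my_shadow_test_and_clear_young() stands in for the driver's own page-table walk and is not a real kernel function.

static int my_clear_young(struct mmu_notifier *mn, struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Test and clear accessed bits in the shadow page tables for
	 * [start, end) without flushing the secondary TLB. */
	return my_shadow_test_and_clear_young(mn, start, end);
}

static const struct mmu_notifier_ops my_mmu_notifier_ops = {
	.clear_young		= my_clear_young,
	/* .clear_flush_young, .test_young, ... as before */
};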
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 754c25966a0a..d94347737292 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -319,7 +319,11 @@ enum zone_type {
ZONE_HIGHMEM,
#endif
ZONE_MOVABLE,
+#ifdef CONFIG_ZONE_DEVICE
+ ZONE_DEVICE,
+#endif
__MAX_NR_ZONES
+
};
#ifndef __GENERATING_BOUNDS_H
@@ -690,14 +694,6 @@ struct zonelist {
#endif
};
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-struct node_active_region {
- unsigned long start_pfn;
- unsigned long end_pfn;
- int nid;
-};
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
@@ -794,6 +790,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
+static inline int zone_id(const struct zone *zone)
+{
+ struct pglist_data *pgdat = zone->zone_pgdat;
+
+ return zone - pgdat->node_zones;
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return zone_id(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return false;
+}
+#endif
+
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..ef29eb2d6dfd
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 29975c73a953..366cf77953b5 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -27,9 +27,9 @@
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
-#include <asm/io.h>
#include <asm/barrier.h>
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa06852771..049d4b03c4c4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...) \
+ do { \
+ if (0) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+#endif
bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *done_key);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979dfd6a9..88a00694eda5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
unsigned char id_len;
};
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+ struct netdev_phys_item_id *b)
+{
+ return a->id_len == b->id_len &&
+ memcmp(a->id, b->id, a->id_len) == 0;
+}
+
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ * bool proto_down);
+ * This function is used to pass protocol port error state information
+ * to the switch driver. The switch driver can react to the proto_down
+ * by doing a phys down on the associated switch port.
+ *
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
+ int (*ndo_change_proto_down)(struct net_device *dev,
+ bool proto_down);
};
/**
@@ -1225,13 +1240,8 @@ struct net_device_ops {
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
- * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
- * @IFF_MASTER_8023AD: bonding master, 802.3ad
- * @IFF_MASTER_ALB: bonding master, balance-alb
* @IFF_BONDING: bonding master or slave
- * @IFF_SLAVE_NEEDARP: need ARPs for validation
* @IFF_ISATAP: ISATAP interface (RFC4214)
- * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
* @IFF_WAN_HDLC: WAN HDLC device
* @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
* release skb->dst
@@ -1247,44 +1257,40 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
+ * @IFF_OPENVSWITCH: device is an Open vSwitch master
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_EBRIDGE = 1<<1,
- IFF_SLAVE_INACTIVE = 1<<2,
- IFF_MASTER_8023AD = 1<<3,
- IFF_MASTER_ALB = 1<<4,
- IFF_BONDING = 1<<5,
- IFF_SLAVE_NEEDARP = 1<<6,
- IFF_ISATAP = 1<<7,
- IFF_MASTER_ARPMON = 1<<8,
- IFF_WAN_HDLC = 1<<9,
- IFF_XMIT_DST_RELEASE = 1<<10,
- IFF_DONT_BRIDGE = 1<<11,
- IFF_DISABLE_NETPOLL = 1<<12,
- IFF_MACVLAN_PORT = 1<<13,
- IFF_BRIDGE_PORT = 1<<14,
- IFF_OVS_DATAPATH = 1<<15,
- IFF_TX_SKB_SHARING = 1<<16,
- IFF_UNICAST_FLT = 1<<17,
- IFF_TEAM_PORT = 1<<18,
- IFF_SUPP_NOFCS = 1<<19,
- IFF_LIVE_ADDR_CHANGE = 1<<20,
- IFF_MACVLAN = 1<<21,
- IFF_XMIT_DST_RELEASE_PERM = 1<<22,
- IFF_IPVLAN_MASTER = 1<<23,
- IFF_IPVLAN_SLAVE = 1<<24,
+ IFF_BONDING = 1<<2,
+ IFF_ISATAP = 1<<3,
+ IFF_WAN_HDLC = 1<<4,
+ IFF_XMIT_DST_RELEASE = 1<<5,
+ IFF_DONT_BRIDGE = 1<<6,
+ IFF_DISABLE_NETPOLL = 1<<7,
+ IFF_MACVLAN_PORT = 1<<8,
+ IFF_BRIDGE_PORT = 1<<9,
+ IFF_OVS_DATAPATH = 1<<10,
+ IFF_TX_SKB_SHARING = 1<<11,
+ IFF_UNICAST_FLT = 1<<12,
+ IFF_TEAM_PORT = 1<<13,
+ IFF_SUPP_NOFCS = 1<<14,
+ IFF_LIVE_ADDR_CHANGE = 1<<15,
+ IFF_MACVLAN = 1<<16,
+ IFF_XMIT_DST_RELEASE_PERM = 1<<17,
+ IFF_IPVLAN_MASTER = 1<<18,
+ IFF_IPVLAN_SLAVE = 1<<19,
+ IFF_VRF_MASTER = 1<<20,
+ IFF_NO_QUEUE = 1<<21,
+ IFF_OPENVSWITCH = 1<<22,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
-#define IFF_MASTER_8023AD IFF_MASTER_8023AD
-#define IFF_MASTER_ALB IFF_MASTER_ALB
#define IFF_BONDING IFF_BONDING
-#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
#define IFF_ISATAP IFF_ISATAP
-#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
#define IFF_WAN_HDLC IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
@@ -1301,6 +1307,9 @@ enum netdev_priv_flags {
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
+#define IFF_VRF_MASTER IFF_VRF_MASTER
+#define IFF_NO_QUEUE IFF_NO_QUEUE
+#define IFF_OPENVSWITCH IFF_OPENVSWITCH
/**
* struct net_device - The DEVICE structure.
@@ -1417,6 +1426,7 @@ enum netdev_priv_flags {
* @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
+ * @vrf_ptr: VRF specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
*
* @last_rx: Time of last Rx
@@ -1448,6 +1458,8 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
+ * @offload_fwd_mark: Offload device fwding mark
+ *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1502,6 +1514,10 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
+ * @proto_down: protocol port state information can be sent to the
+ * switch driver and used to set the phys state of the
+ * switch port.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1629,6 +1645,7 @@ struct net_device {
struct dn_dev __rcu *dn_ptr;
struct inet6_dev __rcu *ip6_ptr;
void *ax25_ptr;
+ struct net_vrf_dev __rcu *vrf_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1702,10 @@ struct net_device {
struct xps_dev_maps __rcu *xps_maps;
#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ u32 offload_fwd_mark;
+#endif
+
/* These may be needed for future network-power-down code. */
/*
@@ -1762,6 +1783,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2093,6 +2115,13 @@ struct netdev_notifier_change_info {
unsigned int flags_changed;
};
+struct netdev_notifier_changeupper_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct net_device *upper_dev; /* new upper dev */
+ bool master; /* is upper dev master */
+ bool linking; /* is the notification for link or unlink */
+};
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2277,8 +2306,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
- skb_gro_offset(skb));
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2402,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
grc->delta = 0;
}
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
{
__wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start =
- ((unsigned char *)ptr + start) - skb->head;
- return;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, off + plen)) {
+ ptr = skb_gro_header_slow(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
}
- delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
/* Adjust skb->csum since we changed the packet */
NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
- grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->offset = off + hdrlen + offset;
grc->delta = delta;
+
+ return ptr;
}
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
struct gro_remcsum *grc)
{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
if (!grc->delta)
return;
- remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+ ptr = skb_gro_header_fast(skb, grc->offset);
+ if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+ ptr = skb_gro_header_slow(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+ }
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2982,6 +3031,7 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3781,6 +3831,42 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+static inline bool netif_is_vrf(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_VRF_MASTER;
+}
+
+static inline bool netif_is_bridge_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_EBRIDGE;
+}
+
+static inline bool netif_is_ovs_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OPENVSWITCH;
+}
+
+static inline bool netif_index_is_vrf(struct net *net, int ifindex)
+{
+ bool rc = false;
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+ struct net_device *dev;
+
+ if (ifindex == 0)
+ return false;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ rc = netif_is_vrf(dev);
+
+ rcu_read_unlock();
+#endif
+ return rc;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
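The ndo_change_proto_down hook and dev_change_proto_down() added above let a protocol report a port error state down to a switch driver, which can respond by taking the physical switch port down. A sketch of a driver-side implementation, where struct my_port and my_port_set_phys_state() are hypothetical placeholders for the driver's own port representation and hardware call:

static int my_ndo_change_proto_down(struct net_device *dev, bool proto_down)
{
	struct my_port *port = netdev_priv(dev);

	/* Mirror the protocol error state onto the physical port. */
	return my_port_set_phys_state(port, !proto_down);
}

static const struct net_device_ops my_netdev_ops = {
	/* ... other ops ... */
	.ndo_change_proto_down	= my_ndo_change_proto_down,
};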
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050dfd9f23..36a652531791 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
};
/* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#endif
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
- return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct sock *, struct sk_buff *),
int thresh)
{
- if (nf_hooks_active(pf, hook)) {
+ struct net *net = dev_net(indev ? indev : outdev);
+ struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+ if (nf_hook_list_active(hook_list, pf, hook)) {
struct nf_hook_state state;
- nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ nf_hook_state_init(&state, hook_list, hook, thresh,
pf, indev, outdev, sk, okfn);
return nf_hook_slow(skb, &state);
}
@@ -363,6 +368,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by the xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
#endif /*__LINUX_NETFILTER_H*/
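nf_register_net_hook() and friends register hooks against a specific struct net rather than the former global nf_hooks[] array (removed above). Below is a sketch of per-namespace registration via pernet_operations; my_hook_fn is a hypothetical nf_hookfn and is not shown, and the IPv4 filter-priority constant is assumed to come from <linux/netfilter_ipv4.h>.

static struct nf_hook_ops my_hook_ops = {
	.hook		= my_hook_fn,	/* hypothetical nf_hookfn */
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FILTER,
};

static int __net_init my_net_init(struct net *net)
{
	return nf_register_net_hook(net, &my_hook_ops);
}

static void __net_exit my_net_exit(struct net *net)
{
	nf_unregister_net_hook(net, &my_hook_ops);
}

static struct pernet_operations my_net_ops = {
	.init	= my_net_init,
	.exit	= my_net_exit,
};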
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 000000000000..5d7cf36d4766
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
+#ifndef _NF_CONNTRACK_ZONES_COMMON_H
+#define _NF_CONNTRACK_ZONES_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_CT_DEFAULT_ZONE_ID 0
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK 1
+
+struct nf_conntrack_zone {
+ u16 id;
+ u8 flags;
+ u8 dir;
+};
+
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec975748742..80ca889b164e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
#define _NFNL_ACCT_H_
#include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
enum {
NFACCT_NO_QUOTA = -1,
@@ -11,7 +12,7 @@ enum {
struct nf_acct;
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a5667f..b006b719183f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -222,7 +223,6 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int __percpu *stackptr;
void ***jumpstack;
unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
/**
* xt_write_recseq_begin - start of a write section
*
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc686323..2437b8a5d7a9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_NF_BRIDGE_PREROUTING 0x08
-
int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return false;
+}
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f3aada..771574677e83 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
#include <uapi/linux/netfilter_ipv6.h>
-
-#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
@@ -30,6 +21,14 @@ struct nf_ipv6_ops {
int (*output)(struct sock *, struct sk_buff *));
};
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
{
@@ -39,6 +38,7 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
#endif /* CONFIG_NETFILTER */
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9120edb650a0..639e9b8b0e4d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
+
+extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask);
+static inline struct sk_buff *
+netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
+ gfp_t gfp_mask)
+{
+ return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
+}
+
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b8e72aad919c..00121f298269 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -547,6 +547,24 @@ enum pnfs_notify_deviceid_type4 {
NOTIFY_DEVICEID4_DELETE = 1 << 2,
};
+enum pnfs_block_volume_type {
+ PNFS_BLOCK_VOLUME_SIMPLE = 0,
+ PNFS_BLOCK_VOLUME_SLICE = 1,
+ PNFS_BLOCK_VOLUME_CONCAT = 2,
+ PNFS_BLOCK_VOLUME_STRIPE = 3,
+};
+
+enum pnfs_block_extent_state {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2,
+ PNFS_BLOCK_NONE_DATA = 3,
+};
+
+/* on the wire size of a block layout extent */
+#define PNFS_BLOCK_EXTENT_SIZE \
+ (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
+
#define NFL4_UFLG_MASK 0x0000003F
#define NFL4_UFLG_DENSE 0x00000001
#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 874b77228fb9..c0e961474a52 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -353,7 +353,6 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
extern int nfs_permission(struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
-extern int nfs_release(struct inode *, struct file *);
extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
@@ -371,6 +370,7 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
+extern void nfs_file_clear_open_context(struct file *flip);
extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
extern u64 nfs_compat_user_ino64(u64 fileid);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20bc8e51b161..570a7df2775b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -173,6 +173,11 @@ struct nfs_server {
set of attributes supported
on this filesystem excluding
the label support bit. */
+ u32 exclcreat_bitmask[3];
+ /* V4 bitmask representing the
+ set of attributes supported
+ on this filesystem for the
+ exclusive create. */
u32 cache_consistency_bitmask[3];
/* V4 bitmask representing the subset
of change attribute, size, ctime
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7bbe50504211..52faf7e96c65 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,7 +379,7 @@ struct nfs_openargs {
struct stateowner_id id;
union {
struct {
- struct iattr * attrs; /* UNCHECKED, GUARDED */
+ struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */
nfs4_verifier verifier; /* EXCLUSIVE */
};
nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
@@ -389,7 +389,7 @@ struct nfs_openargs {
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
const u32 * open_bitmap;
- __u32 claim;
+ enum open_claim_type4 claim;
enum createmode4 createmode;
const struct nfs4_label *label;
};
@@ -406,8 +406,8 @@ struct nfs_openres {
const struct nfs_server *server;
fmode_t delegation_type;
nfs4_stateid delegation;
+ unsigned long pagemod_limit;
__u32 do_recall;
- __u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
@@ -1057,11 +1057,13 @@ struct nfs4_statfs_res {
struct nfs4_server_caps_arg {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fhandle;
+ const u32 * bitmask;
};
struct nfs4_server_caps_res {
struct nfs4_sequence_res seq_res;
u32 attr_bitmask[3];
+ u32 exclcreat_bitmask[3];
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e65dea..78488e099ce7 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,9 +27,7 @@ static inline void touch_nmi_watchdog(void)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
-static inline void hardlockup_detector_disable(void)
-{
-}
+static inline void hardlockup_detector_disable(void) {}
#endif
/*
@@ -49,6 +47,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
arch_trigger_all_cpu_backtrace(false);
return true;
}
+
+/* generic implementation */
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+ void (*raise)(cpumask_t *mask));
+bool nmi_cpu_backtrace(struct pt_regs *regs);
+
#else
static inline bool trigger_all_cpu_backtrace(void)
{
@@ -80,6 +84,17 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int lockup_detector_suspend(void);
+extern void lockup_detector_resume(void);
+#else
+static inline int lockup_detector_suspend(void)
+{
+ return 0;
+}
+
+static inline void lockup_detector_resume(void)
+{
+}
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index b02f72bb8e32..f798e2afba88 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -522,10 +522,9 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
* @speed: OUT - The link speed expressed as PCIe generation number.
* @width: OUT - The link width expressed as the number of PCIe lanes.
*
- * Set the translation of a memory window. The peer may access local memory
- * through the window starting at the address, up to the size. The address
- * must be aligned to the alignment specified by ntb_mw_get_range(). The size
- * must be aligned to the size alignment specified by ntb_mw_get_range().
+ * Get the current state of the ntb link. It is recommended to query the link
+ * state once after every link event. It is safe to query the link state in
+ * the context of the link event callback.
*
* Return: One if the link is up, zero if the link is down, otherwise a
* negative value indicating the error number.
@@ -795,7 +794,7 @@ static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
}
/**
- * ntb_peer_db_clear() - clear bits in the local doorbell register
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
* @ntb: NTB device context.
* @db_bits: Doorbell bits to clear.
*
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
index 2862861366a5..7243eb98a722 100644
--- a/include/linux/ntb_transport.h
+++ b/include/linux/ntb_transport.h
@@ -83,3 +83,4 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
void ntb_transport_link_up(struct ntb_transport_qp *qp);
void ntb_transport_link_down(struct ntb_transport_qp *qp);
bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe312b11b..f3191828f037 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@ extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-extern void of_gpiochip_add(struct gpio_chip *gc);
+extern int of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
extern int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
return -ENOSYS;
}
-static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
#endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index c2bbf672b84e..d2fa9ca42e9a 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -41,7 +41,7 @@ enum OID {
OID_signed_data, /* 1.2.840.113549.1.7.2 */
/* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
OID_email_address, /* 1.2.840.113549.1.9.1 */
- OID_content_type, /* 1.2.840.113549.1.9.3 */
+ OID_contentType, /* 1.2.840.113549.1.9.3 */
OID_messageDigest, /* 1.2.840.113549.1.9.4 */
OID_signingTime, /* 1.2.840.113549.1.9.5 */
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
@@ -54,6 +54,8 @@ enum OID {
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
+ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
+ OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */
OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
@@ -61,6 +63,9 @@ enum OID {
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
+ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
+ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
+ OID_sha224, /* 2.16.840.1.101.3.4.2.4 */
/* Distinguished Name attribute IDs [RFC 2256] */
OID_commonName, /* 2.5.4.3 */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7deecb7bca5e..03e6257321f0 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -13,6 +13,27 @@ struct mem_cgroup;
struct task_struct;
/*
+ * Details of the page allocation that triggered the oom killer that are used to
+ * determine what should be killed.
+ */
+struct oom_control {
+ /* Used to determine cpuset */
+ struct zonelist *zonelist;
+
+ /* Used to determine mempolicy */
+ nodemask_t *nodemask;
+
+ /* Used to determine cpuset and node locality requirement */
+ const gfp_t gfp_mask;
+
+ /*
+ * order == -1 means the oom kill is required by sysrq; other values
+ * are used only for display purposes.
+ */
+ const int order;
+};
+
+/*
* Types of limitations to the nodes from which allocations may occur
*/
enum oom_constraint {
@@ -57,21 +78,18 @@ extern unsigned long oom_badness(struct task_struct *p,
extern int oom_kills_count(void);
extern void note_oom_kill(void);
-extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
unsigned int points, unsigned long totalpages,
- struct mem_cgroup *memcg, nodemask_t *nodemask,
- const char *message);
+ struct mem_cgroup *memcg, const char *message);
-extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask,
+extern void check_panic_on_oom(struct oom_control *oc,
+ enum oom_constraint constraint,
struct mem_cgroup *memcg);
-extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
- unsigned long totalpages, const nodemask_t *nodemask,
- bool force_kill);
+extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
+ struct task_struct *task, unsigned long totalpages);
-extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
- int order, nodemask_t *mask, bool force_kill);
+extern bool out_of_memory(struct oom_control *oc);
extern void exit_oom_victim(void);
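struct oom_control gathers the zonelist, nodemask, gfp mask and order that were previously passed to out_of_memory() and its helpers as separate arguments. A minimal sketch of how an allocator-side caller might package its context, assuming the surrounding variables already exist in that caller:

static bool example_invoke_oom(struct zonelist *zonelist, nodemask_t *nodemask,
			       gfp_t gfp_mask, int order)
{
	struct oom_control oc = {
		.zonelist	= zonelist,
		.nodemask	= nodemask,
		.gfp_mask	= gfp_mask,
		.order		= order,
	};

	return out_of_memory(&oc);
}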
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 41c93844fb1d..416509e26d6d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -109,6 +109,10 @@ enum pageflags {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
PG_compound_lock,
#endif
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+ PG_young,
+ PG_idle,
+#endif
__NR_PAGEFLAGS,
/* Filesystems */
@@ -289,6 +293,13 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+TESTPAGEFLAG(Young, young)
+SETPAGEFLAG(Young, young)
+TESTCLEARFLAG(Young, young)
+PAGEFLAG(Idle, idle)
+#endif
+
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 2dc1e1697b45..047d64706f2a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages);
-/*
- * Internal functions. Changes pageblock's migrate type.
- */
-int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
-void unset_migratetype_isolate(struct page *page, unsigned migratetype);
struct page *alloc_migrate_target(struct page *page, unsigned long private,
int **resultp);
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index c42981cd99aa..17f118a82854 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -26,6 +26,10 @@ enum page_ext_flags {
PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+ PAGE_EXT_YOUNG,
+ PAGE_EXT_IDLE,
+#endif
};
/*
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
new file mode 100644
index 000000000000..bf268fa92c5b
--- /dev/null
+++ b/include/linux/page_idle.h
@@ -0,0 +1,110 @@
+#ifndef _LINUX_MM_PAGE_IDLE_H
+#define _LINUX_MM_PAGE_IDLE_H
+
+#include <linux/bitops.h>
+#include <linux/page-flags.h>
+#include <linux/page_ext.h>
+
+#ifdef CONFIG_IDLE_PAGE_TRACKING
+
+#ifdef CONFIG_64BIT
+static inline bool page_is_young(struct page *page)
+{
+ return PageYoung(page);
+}
+
+static inline void set_page_young(struct page *page)
+{
+ SetPageYoung(page);
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return TestClearPageYoung(page);
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return PageIdle(page);
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ SetPageIdle(page);
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ ClearPageIdle(page);
+}
+#else /* !CONFIG_64BIT */
+/*
+ * If there is not enough space to store Idle and Young bits in page flags, use
+ * page ext flags instead.
+ */
+extern struct page_ext_operations page_idle_ops;
+
+static inline bool page_is_young(struct page *page)
+{
+ return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+}
+
+static inline void set_page_young(struct page *page)
+{
+ set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return test_and_clear_bit(PAGE_EXT_YOUNG,
+ &lookup_page_ext(page)->flags);
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+#endif /* CONFIG_64BIT */
+
+#else /* !CONFIG_IDLE_PAGE_TRACKING */
+
+static inline bool page_is_young(struct page *page)
+{
+ return false;
+}
+
+static inline void set_page_young(struct page *page)
+{
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return false;
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return false;
+}
+
+static inline void set_page_idle(struct page *page)
+{
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+}
+
+#endif /* CONFIG_IDLE_PAGE_TRACKING */
+
+#endif /* _LINUX_MM_PAGE_IDLE_H */
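A minimal sketch, not part of this diff, of how a scanner might consume the new helpers; the example_* names are hypothetical, and on !CONFIG_64BIT builds the same calls transparently fall back to the PAGE_EXT_YOUNG/PAGE_EXT_IDLE bits added above.

#include <linux/page_idle.h>

/* Hypothetical: mark a page idle at the start of a sampling interval. */
static void example_mark_idle(struct page *page)
{
        set_page_idle(page);
}

/*
 * Hypothetical: after the interval, report whether the page was touched.
 * Access is expected to clear the Idle bit; the Young bit is meant to keep
 * references seen by reverse-map scans from being lost in the meantime.
 */
static bool example_page_was_accessed(struct page *page)
{
        return !page_is_idle(page) || test_and_clear_page_young(page);
}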
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1a64733c48c7..e90eb22de628 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1227,6 +1227,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
dma_pool_create(name, &pdev->dev, size, align, allocation)
#define pci_pool_destroy(pool) dma_pool_destroy(pool)
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define pci_pool_zalloc(pool, flags, handle) \
+ dma_pool_zalloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
struct msix_entry {
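A hedged illustration, not part of this diff, of the new pci_pool_zalloc() wrapper in a driver probe path; the pool name, sizes and example_* names are placeholders, and the call simply forwards to dma_pool_zalloc().

#include <linux/pci.h>
#include <linux/dmapool.h>

static void *example_alloc_desc(struct pci_dev *pdev, struct pci_pool **poolp,
                                dma_addr_t *dma)
{
        struct pci_pool *pool;
        void *desc;

        pool = pci_pool_create("example-desc", pdev, 64, 64, 0);
        if (!pool)
                return NULL;

        /* Returned memory is already zeroed, so no memset() is needed. */
        desc = pci_pool_zalloc(pool, GFP_KERNEL, dma);
        if (!desc) {
                pci_pool_destroy(pool);
                return NULL;
        }

        *poolp = pool;
        return desc;
}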
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fcff8f865341..d9ba49cedc5d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2332,6 +2332,15 @@
#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define PCI_VENDOR_ID_TECHWELL 0x1797
+#define PCI_DEVICE_ID_TECHWELL_6800 0x6800
+#define PCI_DEVICE_ID_TECHWELL_6801 0x6801
+#define PCI_DEVICE_ID_TECHWELL_6804 0x6804
+#define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810
+#define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811
+#define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812
+#define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813
+
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3e88c9a7d57f..834c4e52cb2d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -16,6 +16,7 @@ struct percpu_rw_semaphore {
};
extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
extern void percpu_up_read(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -31,4 +32,23 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(brw, #brw, &rwsem_key); \
})
+
+#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
+
+static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
+ bool read, unsigned long ip)
+{
+ lock_release(&sem->rw_sem.dep_map, 1, ip);
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ if (!read)
+ sem->rw_sem.owner = NULL;
+#endif
+}
+
+static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
+ bool read, unsigned long ip)
+{
+ lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+}
+
#endif
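A minimal sketch, not from this patch, of the new reader-side trylock; the semaphore and function names are hypothetical and the rwsem must have been set up with percpu_init_rwsem() elsewhere. The release/acquire helpers appear intended for handing the lockdep-tracked hold to another context (freeze/thaw style code), so they are not shown here.

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore example_sem;  /* percpu_init_rwsem()'d elsewhere */

/* Enter the read side only if it is immediately available. */
static bool example_try_enter(void)
{
        if (!percpu_down_read_trylock(&example_sem))
                return false;

        /* ... read-side critical section ... */

        percpu_up_read(&example_sem);
        return true;
}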
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000000..bfa673bb822d
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/include/asm/pmu.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ * interrupt and passed the address of the low level handler,
+ * and can be used to implement any platform specific handling
+ * before or after calling it.
+ */
+struct arm_pmu_platdata {
+ irqreturn_t (*handle_irq)(int irq, void *dev,
+ irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+ }, \
+}
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+ /*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+ raw_spinlock_t pmu_lock;
+
+ /*
+ * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+ * already have to allocate this struct per cpu.
+ */
+ struct arm_pmu *percpu_pmu;
+};
+
+struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
+ cpumask_t supported_cpus;
+ int *irq_affinity;
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct perf_event *event);
+ void (*disable)(struct perf_event *event);
+ int (*get_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ void (*clear_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ int (*set_event_filter)(struct hw_perf_event *evt,
+ struct perf_event_attr *attr);
+ u32 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u32 val);
+ void (*start)(struct arm_pmu *);
+ void (*stop)(struct arm_pmu *);
+ void (*reset)(void *);
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
+ int (*map_event)(struct perf_event *event);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+ u64 max_period;
+ struct platform_device *plat_device;
+ struct pmu_hw_events __percpu *hw_events;
+ struct notifier_block hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
+
+struct pmu_probe_info {
+ unsigned int cpuid;
+ unsigned int mask;
+ int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn) \
+{ \
+ .cpuid = (_cpuid), \
+ .mask = (_mask), \
+ .init = (_fn), \
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+ PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+ PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table);
+
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
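A hedged sketch, not part of this header, of a PMU driver wiring up the two probe tables arm_pmu_device_probe() expects; the example_* names are invented, and ARM_CPU_PART_CORTEX_A9 is assumed to come from the arch's <asm/cputype.h> pulled in above.

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/perf/arm_pmu.h>

static int example_a9_pmu_init(struct arm_pmu *pmu)
{
        /* Fill in pmu->handle_irq, ->enable, ->map_event, etc. here. */
        return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
        { .compatible = "arm,cortex-a9-pmu", .data = example_a9_pmu_init },
        { /* sentinel */ }
};

static const struct pmu_probe_info example_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, example_a9_pmu_init),
        { /* sentinel */ }
};

static int example_pmu_probe(struct platform_device *pdev)
{
        /* Match by DT compatible first, then by the CPUID probe table. */
        return arm_pmu_device_probe(pdev, example_pmu_of_ids,
                                    example_pmu_probe_table);
}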
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809433b3..092a0e8a479a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
+static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
+static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
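A hedged sketch of the pattern these two new exports seem intended for: kernel code (a BPF helper, for instance) resolving a perf event from a user-supplied fd and sampling it without cross-CPU IPIs. The example_* wrapper is hypothetical.

#include <linux/perf_event.h>
#include <linux/err.h>

static u64 example_read_counter(unsigned int fd)
{
        struct perf_event *event;
        u64 value;

        event = perf_event_get(fd);     /* takes a reference on success */
        if (IS_ERR(event))
                return 0;

        /* Only meaningful for an event counting on the local CPU/task. */
        value = perf_event_read_local(event);

        perf_event_release_kernel(event);
        return value;
}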
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f84b8dd..962387a192f1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -330,6 +330,7 @@ struct phy_c45_device_ids {
* c45_ids: 802.3-c45 Device Identifers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
@@ -368,6 +369,7 @@ struct phy_device {
struct phy_c45_device_ids c45_ids;
bool is_c45;
bool is_internal;
+ bool is_pseudo_fixed_link;
bool has_fixups;
bool suspended;
@@ -424,6 +426,8 @@ struct phy_device {
struct net_device *attached_dev;
+ u8 mdix;
+
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +690,16 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
{
return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+};
+
+/*
+ * phy_is_pseudo_fixed_link - Convenience function for testing if this
+ * PHY is the CPU port facing side of an Ethernet switch, or similar.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
+{
+ return phydev->is_pseudo_fixed_link;
}
/**
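A minimal, hypothetical adjust_link() fragment showing how a MAC driver might use the new predicate to special-case the CPU-facing port of a switch; the example_ name and the early-return policy are illustrative only.

#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *dev)
{
        struct phy_device *phydev = dev->phydev;

        if (phy_is_pseudo_fixed_link(phydev)) {
                /* Fixed link to a switch: the state never renegotiates. */
                return;
        }

        /* ... normal PHY-driven MAC reconfiguration ... */
}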
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d53eda..2400d2ea4f34 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@ struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
+ struct fixed_phy_status *status,
+ int link_gpio);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int link_gpio,
struct device_node *np);
extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@ extern int fixed_phy_update_state(struct phy_device *phydev,
const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
+ struct fixed_phy_status *status,
+ int link_gpio)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
index 02bf6ea31701..695035a8d7fb 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -10,16 +10,22 @@
* option) any later version.
*/
-#ifndef __LINUX_ATMEL_MXT_TS_H
-#define __LINUX_ATMEL_MXT_TS_H
+#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
#include <linux/types.h>
+enum mxt_suspend_mode {
+ MXT_SUSPEND_DEEP_SLEEP = 0,
+ MXT_SUSPEND_T9_CTRL = 1,
+};
+
/* The platform data for the Atmel maXTouch touchscreen driver */
struct mxt_platform_data {
unsigned long irqflags;
u8 t19_num_keys;
const unsigned int *t19_keymap;
+ enum mxt_suspend_mode suspend_mode;
};
-#endif /* __LINUX_ATMEL_MXT_TS_H */
+#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519d2dcd..000000000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __GPIO_EM_H__
-#define __GPIO_EM_H__
-
-struct gpio_em_config {
- unsigned int gpio_base;
- unsigned int irq_base;
- unsigned int number_of_pins;
- const char *pctl_name;
-};
-
-#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
new file mode 100644
index 000000000000..c68712aadf43
--- /dev/null
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -0,0 +1,44 @@
+/*
+ * I2C multiplexer using a single register
+ *
+ * Copyright 2015 Freescale Semiconductor
+ * York Sun <yorksun@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+
+/**
+ * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of value for each channel
+ * @n_values: Number of multiplexer channels
+ * @little_endian: Indicating if the register is in little endian
+ * @write_only: Reading the register is not allowed by hardware
+ * @classes: Optional I2C auto-detection classes
+ * @idle: Value to write to mux when idle
+ * @idle_in_use: indicate if idle value is in use
+ * @reg: Virtual address of the register to switch channel
+ * @reg_size: register size in bytes
+ */
+struct i2c_mux_reg_platform_data {
+ int parent;
+ int base_nr;
+ const unsigned int *values;
+ int n_values;
+ bool little_endian;
+ bool write_only;
+ const unsigned int *classes;
+ u32 idle;
+ bool idle_in_use;
+ void __iomem *reg;
+ resource_size_t reg_size;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */
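A hedged board-file sketch of filling in this platform data; every value below is a placeholder, and "i2c-mux-reg" is assumed to be the matching platform driver name.

#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>

/* One write-only 8-bit register selects among four downstream channels. */
static const unsigned int example_mux_values[] = { 0, 1, 2, 3 };

static struct i2c_mux_reg_platform_data example_mux_pdata = {
        .parent     = 0,        /* adapter number of the parent bus */
        .base_nr    = 0,        /* 0 = number child adapters dynamically */
        .values     = example_mux_values,
        .n_values   = ARRAY_SIZE(example_mux_values),
        .write_only = true,
        .reg        = NULL,     /* left NULL; mapped from a resource instead */
        .reg_size   = 1,
};

static struct platform_device example_mux_dev = {
        .name = "i2c-mux-reg",
        .id   = -1,
        .dev  = { .platform_data = &example_mux_pdata },
};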
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 000000000000..f16542c77ff7
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for the Intel TCO Watchdog
+ */
+
+#ifndef _ITCO_WDT_H_
+#define _ITCO_WDT_H_
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF 2
+#define ICH_RES_MEM_GCS_PMC 0
+
+struct itco_wdt_platform_data {
+ char name[32];
+ unsigned int version;
+};
+
+#endif /* _ITCO_WDT_H_ */
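A short, hypothetical example of the data a parent MFD driver might hand to the iTCO watchdog cell; the name string and version number are placeholders.

#include <linux/platform_data/itco_wdt.h>

static struct itco_wdt_platform_data example_itco_pdata = {
        .name    = "Example Chipset TCO",
        .version = 4,
};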
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1efe495..1b2ba24e4e03 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@ struct lp855x_rom_data {
Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
- * @supply : regulator that supplies 3V input
*/
struct lp855x_platform_data {
const char *name;
@@ -145,7 +144,6 @@ struct lp855x_platform_data {
unsigned int period_ns;
int size_program;
struct lp855x_rom_data *rom_data;
- struct regulator *supply;
};
#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index e1571efa3f2b..95ccab3f454a 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -45,5 +45,6 @@ struct esdhc_platform_data {
int max_bus_width;
bool support_vsel;
unsigned int delay_line;
+ unsigned int tuning_step; /* The delay cell steps in tuning procedure */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
index 7bae83b7c396..646af6f8b838 100644
--- a/include/linux/input/pixcir_ts.h
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -57,7 +57,6 @@ struct pixcir_i2c_chip_data {
struct pixcir_ts_platform_data {
int x_max;
int y_max;
- int gpio_attb; /* GPIO connected to ATTB line */
struct pixcir_i2c_chip_data chip;
};
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644
index d9d400a297bd..000000000000
--- a/include/linux/platform_data/st_nci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Driver include for ST NCI NFC chip family.
- *
- * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST_NCI_H_
-#define _ST_NCI_H_
-
-#define ST_NCI_DRIVER_NAME "st_nci"
-
-struct st_nci_nfc_platform_data {
- unsigned int gpio_reset;
- unsigned int irq_polarity;
-};
-
-#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2f6ede..7bdece8ef33e 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
#define _LINUX_INPUT_ZFORCE_TS_H
struct zforce_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
unsigned int x_max;
unsigned int y_max;
};
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cab7ba55bedb..e817722ee3f0 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -80,6 +81,11 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
return 0;
}
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ return NULL;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d2114045a6c4..85f810b33917 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,28 +14,42 @@
#define __PMEM_H__
#include <linux/io.h>
+#include <linux/uio.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
-#include <asm/cacheflush.h>
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+#include <asm/pmem.h>
#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation, all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
+static inline bool __arch_has_wmb_pmem(void)
+{
+ return false;
+}
+
static inline void arch_wmb_pmem(void)
{
BUG();
}
-static inline bool __arch_has_wmb_pmem(void)
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+ size_t n)
{
- return false;
+ BUG();
}
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
- unsigned long size)
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ struct iov_iter *i)
{
- return NULL;
+ BUG();
+ return 0;
}
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
- size_t n)
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
BUG();
}
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
/*
* Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
- * arch_wmb_pmem(), and __arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
*/
-
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
memcpy(dst, (void __force const *) src, size);
}
-static inline void memunmap_pmem(void __pmem *addr)
+static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
+{
+ devm_memunmap(dev, (void __force *) addr);
+}
+
+static inline bool arch_has_pmem_api(void)
{
- iounmap((void __force __iomem *) addr);
+ return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
/**
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr)
*/
static inline bool arch_has_wmb_pmem(void)
{
- if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
- return __arch_has_wmb_pmem();
- return false;
-}
-
-static inline bool arch_has_pmem_api(void)
-{
- return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+ return arch_has_pmem_api() && __arch_has_wmb_pmem();
}
/*
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void)
* default_memremap_pmem + default_memcpy_to_pmem is sufficient for
* making data durable relative to i/o completion.
*/
-static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t size)
{
memcpy((void __force *) dst, src, size);
}
-static void __pmem *default_memremap_pmem(resource_size_t offset,
- unsigned long size)
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
{
- return (void __pmem __force *)ioremap_wt(offset, size);
+ if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+ clear_page((void __force *)addr);
+ else
+ memset((void __force *)addr, 0, size);
}
/**
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
* wmb_pmem() arrange for the data to be written through the
* cache to persistent media.
*/
-static inline void __pmem *memremap_pmem(resource_size_t offset,
- unsigned long size)
+static inline void __pmem *memremap_pmem(struct device *dev,
+ resource_size_t offset, unsigned long size)
{
- if (arch_has_pmem_api())
- return arch_memremap_pmem(offset, size);
- return default_memremap_pmem(offset, size);
+ return (void __pmem *) devm_memremap(dev, offset, size,
+ ARCH_MEMREMAP_PMEM);
}
/**
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
*/
static inline void wmb_pmem(void)
{
- if (arch_has_pmem_api())
+ if (arch_has_wmb_pmem())
arch_wmb_pmem();
+ else
+ wmb();
+}
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+ * @bytes: number of bytes to copy
+ * @i: iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ if (arch_has_pmem_api())
+ return arch_copy_from_iter_pmem(addr, bytes, i);
+ return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr: virtual start address
+ * @size: number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+ if (arch_has_pmem_api())
+ arch_clear_pmem(addr, size);
+ else
+ default_clear_pmem(addr, size);
}
#endif /* __PMEM_H__ */
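A minimal sketch of the calling convention the comments above describe: write or clear the range, then fence with wmb_pmem() so the stores are durable. The example_ function is hypothetical and assumes len does not exceed PAGE_SIZE.

#include <linux/pmem.h>

static void example_pmem_write(void __pmem *dst, const void *src, size_t len)
{
        clear_pmem(dst, PAGE_SIZE);     /* zero the whole destination page */
        memcpy_to_pmem(dst, src, len);  /* copy in the new payload */
        wmb_pmem();                     /* order the writes to media */
}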
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 2110a81c5e2a..317e16de09e5 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -19,8 +19,8 @@
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
@@ -69,10 +69,6 @@
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
-/********** net/ **********/
-#define NEIGHBOR_DEAD 0xdeadbeef
-#define NETFILTER_LINK_POISON 0xdead57ac
-
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
@@ -83,7 +79,4 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
-/********** sound/oss/ **********/
-#define OSS_POISON_FREE 0xAB
-
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index a6298b27ac99..9729565c25ff 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -404,10 +404,10 @@ do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
- __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
@@ -456,11 +456,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#else
+#elif defined(DEBUG)
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+#else
+static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+}
+#endif
#endif
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9c11d4..a59c6ee566c2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset);
bool device_dma_is_coherent(struct device *dev);
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
#endif /* _LINUX_PROPERTY_H_ */
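A hedged sketch of a network driver probing through the unified property API so the same code covers DT- and ACPI-described hardware; the example_ helper is invented, and a NULL return from device_get_mac_address() is assumed to mean no usable address was described.

#include <linux/property.h>
#include <linux/etherdevice.h>

static int example_get_link_config(struct device *dev, char *mac, int *phy_mode)
{
        *phy_mode = device_get_phy_mode(dev);
        if (*phy_mode < 0)
                return *phy_mode;       /* negative errno on failure */

        if (!device_get_mac_address(dev, mac, ETH_ALEN))
                return -ENODEV;

        return 0;
}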
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 000000000000..a682fcc91c33
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+ int (*cpu_off)(u32 state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
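A hedged illustration of how arch SMP code might consult psci_ops once psci_dt_init() or psci_acpi_init() has populated it; the wrapper below is hypothetical, and the target MPIDR and entry-point physical address are supplied by the caller.

#include <linux/errno.h>
#include <linux/psci.h>

static int example_cpu_boot(unsigned long target_mpidr,
                            unsigned long entry_point_pa)
{
        if (!psci_ops.cpu_on)
                return -EOPNOTSUPP;

        /* Ask firmware to power the core on and start it at the entry PA. */
        return psci_ops.cpu_on(target_mpidr, entry_point_pa);
}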
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 987a73a40ef8..061265f92876 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -34,6 +34,7 @@
#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
+#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT 31
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 36262d08a9da..d681f6875aef 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -79,26 +79,43 @@ enum {
PWMF_EXPORTED = 1 << 2,
};
+/**
+ * struct pwm_device - PWM channel object
+ * @label: name of the PWM device
+ * @flags: flags associated with the PWM device
+ * @hwpwm: per-chip relative index of the PWM device
+ * @pwm: global index of the PWM device
+ * @chip: PWM chip providing this PWM device
+ * @chip_data: chip-private data associated with the PWM device
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ * @polarity: polarity of the PWM signal
+ */
struct pwm_device {
- const char *label;
- unsigned long flags;
- unsigned int hwpwm;
- unsigned int pwm;
- struct pwm_chip *chip;
- void *chip_data;
-
- unsigned int period; /* in nanoseconds */
- unsigned int duty_cycle; /* in nanoseconds */
- enum pwm_polarity polarity;
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+
+ unsigned int period;
+ unsigned int duty_cycle;
+ enum pwm_polarity polarity;
};
+static inline bool pwm_is_enabled(const struct pwm_device *pwm)
+{
+ return test_bit(PWMF_ENABLED, &pwm->flags);
+}
+
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
pwm->period = period;
}
-static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
return pwm ? pwm->period : 0;
}
@@ -109,7 +126,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
pwm->duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
return pwm ? pwm->duty_cycle : 0;
}
@@ -119,6 +136,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
+{
+ return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+}
+
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
@@ -131,25 +153,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
- int (*request)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*free)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- int (*config)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
+ int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
#ifdef CONFIG_DEBUG_FS
- void (*dbg_show)(struct pwm_chip *chip,
- struct seq_file *s);
+ void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
- struct module *owner;
+ struct module *owner;
};
/**
@@ -160,22 +175,24 @@ struct pwm_ops {
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
* @pwms: array of PWM devices allocated by the framework
+ * @of_xlate: request a PWM device given a device tree PWM specifier
+ * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
* @can_sleep: must be true if the .config(), .enable() or .disable()
* operations may sleep
*/
struct pwm_chip {
- struct device *dev;
- struct list_head list;
- const struct pwm_ops *ops;
- int base;
- unsigned int npwm;
-
- struct pwm_device *pwms;
-
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
- const struct of_phandle_args *args);
- unsigned int of_pwm_n_cells;
- bool can_sleep;
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+ unsigned int of_pwm_n_cells;
+ bool can_sleep;
};
#if IS_ENABLED(CONFIG_PWM)
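A consumer-side sketch, not from this patch, combining the new pwm_is_enabled() accessor with the existing pwm_config()/pwm_enable() calls; the brightness math and example_ name are illustrative only.

#include <linux/pwm.h>

static int example_set_brightness(struct pwm_device *pwm, unsigned int percent)
{
        unsigned int period = pwm_get_period(pwm);
        int ret;

        ret = pwm_config(pwm, period / 100 * percent, period);
        if (ret)
                return ret;

        /* pwm_is_enabled() replaces open-coded tests of PWMF_ENABLED. */
        if (!pwm_is_enabled(pwm))
                ret = pwm_enable(pwm);

        return ret;
}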
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca6601ff25..7a57c28eb5e7 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number);
void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
return 0;
}
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
{
+ return 0;
}
static inline void dquot_drop(struct inode *inode)
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea0f0b5..8fc0bfd8edc4 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/bug.h>
+#include <linux/lockdep.h>
struct module;
struct device;
@@ -50,6 +51,20 @@ struct reg_default {
unsigned int def;
};
+/**
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ * @delay_us: Delay to be applied after the register write in microseconds
+ */
+struct reg_sequence {
+ unsigned int reg;
+ unsigned int def;
+ unsigned int delay_us;
+};
+
#ifdef CONFIG_REGMAP
enum regmap_endian {
@@ -296,8 +311,12 @@ typedef void (*regmap_hw_free_context)(void *context);
* if not implemented on a given device.
* @async_write: Write operation which completes asynchronously, optional and
* must serialise with respect to non-async I/O.
+ * @reg_write: Write a single register value to the given register address. This
+ * write operation has to complete when returning from the function.
* @read: Read operation. Data is returned in the buffer used to transmit
* data.
+ * @reg_read: Read a single register value from a given register address.
+ * @free_context: Free context.
* @async_alloc: Allocate a regmap_async() structure.
* @read_flag_mask: Mask to be set in the top byte of the register when doing
* a read.
@@ -307,7 +326,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* @val_format_endian_default: Default endianness for formatted register
* values. Used when the regmap_config specifies DEFAULT. If this is
* DEFAULT, BIG is assumed.
- * @async_size: Size of struct used for async work.
+ * @max_raw_read: Max raw read size that can be used on the bus.
+ * @max_raw_write: Max raw write size that can be used on the bus.
*/
struct regmap_bus {
bool fast_io;
@@ -322,47 +342,186 @@ struct regmap_bus {
u8 read_flag_mask;
enum regmap_endian reg_format_endian_default;
enum regmap_endian val_format_endian_default;
+ size_t max_raw_read;
+ size_t max_raw_write;
};
-struct regmap *regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config);
+/*
+ * __regmap_init functions.
+ *
+ * These functions take a lock key and name parameter, and should not be called
+ * directly. Instead, use the regmap_init macros that generate a key and name
+ * for each call.
+ */
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+/*
+ * Wrapper for regmap_init macros to include a unique lockdep key and name
+ * for each call. No-op if CONFIG_LOCKDEP is not set.
+ *
+ * @fn: Real function to call (in the form __[*_]regmap_init[_*])
+ * @name: Config variable name (#config in the calling macro)
+ **/
+#ifdef CONFIG_LOCKDEP
+#define __regmap_lockdep_wrapper(fn, name, ...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ fn(__VA_ARGS__, &_key, \
+ KBUILD_BASENAME ":" \
+ __stringify(__LINE__) ":" \
+ "(" name ")->lock"); \
+ }) \
+)
+#else
+#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
+#endif
+
+/**
+ * regmap_init(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions.
+ */
+#define regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__regmap_init, #config, \
+ dev, bus, bus_context, config)
int regmap_attach_dev(struct device *dev, struct regmap *map,
- const struct regmap_config *config);
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config);
-struct regmap *regmap_init_spi(struct spi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config);
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config);
-
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spi(struct spi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config);
+ const struct regmap_config *config);
-bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+/**
+ * regmap_init_i2c(): Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * regmap_init_mmio_clk(): Initialise register map with register clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
/**
* regmap_init_mmio(): Initialise register map
@@ -374,12 +533,109 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-static inline struct regmap *regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
-{
- return regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define regmap_init_mmio(dev, regs, config) \
+ regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
+ ac97, config)
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * devm_regmap_init(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions. The
+ * map will be automatically freed by the device management code.
+ */
+#define devm_regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
+ dev, bus, bus_context, config)
+
+/**
+ * devm_regmap_init_i2c(): Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * devm_regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
/**
* devm_regmap_init_mmio(): Initialise managed register map
@@ -392,12 +648,22 @@ static inline struct regmap *regmap_init_mmio(struct device *dev,
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
-{
- return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define devm_regmap_init_mmio(dev, regs, config) \
+ devm_regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * devm_regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
+ ac97, config)
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
@@ -410,10 +676,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
int num_regs);
int regmap_multi_reg_write_bypassed(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
int num_regs);
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
@@ -424,6 +690,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -437,6 +705,8 @@ int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
+size_t regmap_get_raw_read_max(struct regmap *map);
+size_t regmap_get_raw_write_max(struct regmap *map);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -450,7 +720,7 @@ void regcache_mark_dirty(struct regmap *map);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
int num_regs);
int regmap_parse_val(struct regmap *map, const void *buf,
unsigned int *val);
@@ -503,6 +773,8 @@ int regmap_field_update_bits(struct regmap_field *field,
int regmap_fields_write(struct regmap_field *field, unsigned int id,
unsigned int val);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+ unsigned int val);
int regmap_fields_read(struct regmap_field *field, unsigned int id,
unsigned int *val);
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
@@ -645,6 +917,13 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_update_bits_async(struct regmap *map,
unsigned int reg,
unsigned int mask, unsigned int val)
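A hedged sketch tying the regmap changes together: call sites keep using the regmap_init_*() names (now macros that feed a per-call lockdep class key to the new __regmap_init_*() functions), and multi-register writes move from reg_default to reg_sequence so each step can carry a settle delay. All registers, values and names below are placeholders.

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct reg_sequence example_init_seq[] = {
        { .reg = 0x00, .def = 0x01, .delay_us = 100 },  /* reset, then settle */
        { .reg = 0x10, .def = 0x80 },                   /* enable block */
};

static const struct regmap_config example_regmap_cfg = {
        .reg_bits = 8,
        .val_bits = 8,
};

static int example_setup(struct i2c_client *i2c)
{
        struct regmap *map;

        map = devm_regmap_init_i2c(i2c, &example_regmap_cfg);
        if (IS_ERR(map))
                return PTR_ERR(map);

        return regmap_multi_reg_write(map, example_init_seq,
                                      ARRAY_SIZE(example_init_seq));
}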
diff --git a/include/linux/reset.h b/include/linux/reset.h
index da5602bd77d7..7f65f9cff951 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev)
return -ENOSYS;
}
+static inline struct reset_control *__must_check reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct reset_control *__must_check devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a113a8..29446aeef36e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@ enum ttu_flags {
TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
+ * and caller guarantees they will
+ * do a final flush if necessary */
};
#ifdef CONFIG_MMU
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 119823decc46..a4ab9daa387c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1344,6 +1344,25 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+ */
+ struct cpumask cpumask;
+
+ /* True if any bit in cpumask is set */
+ bool flush_required;
+
+ /*
+ * If true then the PTE was dirty when unmapped. The entry must be
+ * flushed before IO is initiated or a stale TLB entry potentially
+ * allows an update without redirtying the page.
+ */
+ bool writable;
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1700,6 +1719,10 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ struct tlbflush_unmap_batch tlb_ubc;
+#endif
+
struct rcu_head rcu;
/*
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index a19ddacdac30..f4265039a94c 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -78,7 +78,7 @@ static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
static inline int seccomp_mode(struct seccomp *s)
{
- return 0;
+ return SECCOMP_MODE_DISABLED;
}
#endif /* CONFIG_SECCOMP */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 912a7c482649..dde00defbaa5 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -114,13 +114,22 @@ int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
-int seq_escape(struct seq_file *, const char *, const char *);
-int seq_putc(struct seq_file *m, char c);
-int seq_puts(struct seq_file *m, const char *s);
int seq_write(struct seq_file *seq, const void *data, size_t len);
-__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
-__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
+__printf(2, 0)
+void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
+__printf(2, 3)
+void seq_printf(struct seq_file *m, const char *fmt, ...);
+void seq_putc(struct seq_file *m, char c);
+void seq_puts(struct seq_file *m, const char *s);
+void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+ unsigned long long num);
+void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
+void seq_escape(struct seq_file *m, const char *s, const char *esc);
+
+void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, const void *buf, size_t len,
+ bool ascii);
int seq_path(struct seq_file *, const struct path *, const char *);
int seq_file_path(struct seq_file *, struct file *, const char *);
@@ -134,10 +143,6 @@ int single_release(struct inode *, struct file *);
void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
-int seq_put_decimal_ull(struct seq_file *m, char delimiter,
- unsigned long long num);
-int seq_put_decimal_ll(struct seq_file *m, char delimiter,
- long long num);
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
@@ -149,6 +154,41 @@ static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
#endif
}
+/**
+ * seq_show_option - display mount options with appropriate escapes.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, can be NULL
+ */
+static inline void seq_show_option(struct seq_file *m, const char *name,
+ const char *value)
+{
+ seq_putc(m, ',');
+ seq_escape(m, name, ",= \t\n\\");
+ if (value) {
+ seq_putc(m, '=');
+ seq_escape(m, value, ", \t\n\\");
+ }
+}
+
+/**
+ * seq_show_option_n - display mount options with appropriate escapes
+ * where @value must be a specific length.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, cannot be NULL
+ * @length: the length of @value to display
+ *
+ * This is a macro since it uses "length" to define the size of the
+ * stack buffer.
+ */
+#define seq_show_option_n(m, name, value, length) { \
+ char val_buf[length + 1]; \
+ strncpy(val_buf, value, length); \
+ val_buf[length] = '\0'; \
+ seq_show_option(m, name, val_buf); \
+}
+
#define SEQ_START_TOKEN ((void *)1)
/*
* Helpers for iteration over list_head-s in seq_files
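A small sketch of the intended consumer: a filesystem's ->show_options callback emitting escaped mount options. The foo_fs_info structure and its fields are invented for the example; only seq_show_option() and seq_printf() come from this header.

struct foo_fs_info {
	const char	*uid_name;
	unsigned int	blocksize;
};

static int foo_show_options(struct seq_file *m, struct dentry *root)
{
	struct foo_fs_info *fsi = root->d_sb->s_fs_info;

	if (fsi->uid_name)
		seq_show_option(m, "uid", fsi->uid_name);	/* ",uid=<escaped>" */
	seq_printf(m, ",blocksize=%u", fsi->blocksize);
	return 0;
}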
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7a2da4..df4ab5de1586 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
#include <linux/mod_devicetable.h>
#include <uapi/linux/serio.h>
+extern struct bus_type serio_bus;
+
struct serio {
void *port_data;
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba502ccb3..d927647e6350 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@ void shdma_cleanup(struct shdma_dev *sdev);
#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
-#define shdma_chan_filter NULL
+static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536487e6..2738d355cdf9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
+#include <net/flow.h>
/* A. Checksumming of received packets by device.
*
@@ -173,17 +174,24 @@ struct nf_bridge_info {
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
} orig_proto:8;
- bool pkt_otherhost;
+ u8 pkt_otherhost:1;
+ u8 in_prerouting:1;
+ u8 bridged_dnat:1;
__u16 frag_max_size;
- unsigned int mask;
struct net_device *physindev;
union {
- struct net_device *physoutdev;
- char neigh_header[8];
- };
- union {
+ /* prerouting: detect dnat in orig/reply direction */
__be32 ipv4_daddr;
struct in6_addr ipv6_daddr;
+
+ /* after prerouting + nat detected: store original source
+ * mac since neigh resolution overwrites it, only used while
+ * skb is out in neigh layer.
+ */
+ char neigh_header[8];
+
+ /* always valid & non-NULL from FORWARD on, for physdev match */
+ struct net_device *physoutdev;
};
};
#endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
+ * @offload_fwd_mark: forwarding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
+ __u32 secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ __u32 offload_fwd_mark;
#endif
+ };
+
union {
__u32 mark;
__u32 reserved_tailroom;
@@ -922,14 +937,90 @@ enum pkt_hash_types {
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+static inline void skb_clear_hash(struct sk_buff *skb)
{
- skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+ skb->hash = 0;
skb->sw_hash = 0;
+ skb->l4_hash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+ if (!skb->l4_hash)
+ skb_clear_hash(skb);
+}
+
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+ skb->l4_hash = is_l4;
+ skb->sw_hash = is_sw;
skb->hash = hash;
}
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+ /* Used by drivers to set hash from HW */
+ __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+ __skb_set_hash(skb, hash, true, is_l4);
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen,
+ unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, unsigned int flags)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen, flags);
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -938,24 +1029,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
return skb->hash;
}
-static inline void skb_clear_hash(struct sk_buff *skb)
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
- skb->hash = 0;
- skb->sw_hash = 0;
- skb->l4_hash = 0;
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
+ return skb->hash;
}
-static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
- if (!skb->l4_hash)
- skb_clear_hash(skb);
+ return skb->hash;
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
@@ -1943,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect_flow_keys(skb, &keys))
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -2667,12 +2773,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
skb_shinfo(skb)->frag_list = NULL;
}
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
- frag->next = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = frag;
-}
-
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
@@ -3464,5 +3564,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
skb_network_header(skb);
return hdr_len + skb_gso_transport_seglen(skb);
}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
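A hedged receive-path sketch using the hash setters and flow-dissector entry points declared above: prefer the hardware-reported hash, fall back to software dissection otherwise. The driver function and the hw_hash/hash_is_l4 inputs are hypothetical; flow_hash_from_keys() is the existing flow-dissector hash helper.

static void foo_receive_skb(struct sk_buff *skb, u32 hw_hash, bool hash_is_l4)
{
	struct flow_keys keys;

	if (hw_hash)
		skb_set_hash(skb, hw_hash,
			     hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
				  flow_keys_have_l4(&keys));

	netif_receive_skb(skb);
}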
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a99f0e5243e1..7e37d448ed91 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -290,6 +290,16 @@ void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
void kmem_cache_free(struct kmem_cache *, void *);
+/*
+ * Bulk allocation and freeing operations. These are accelerated in an
+ * allocator-specific way to avoid taking locks repeatedly or building
+ * metadata structures unnecessarily.
+ *
+ * Note that interrupts must be enabled when calling these functions.
+ */
+void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
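A short usage sketch for the new bulk API, assuming a caller that wants a fixed batch of objects; interrupts must be enabled, per the comment above, and the batch size of 16 is arbitrary.

static int foo_fill_batch(struct kmem_cache *cache)
{
	void *objs[16];

	/* Returns false if the whole batch could not be allocated. */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	return 0;
}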
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index da3c593f9845..e6109a6cd8f6 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -48,7 +48,16 @@ struct smp_hotplug_thread {
const char *thread_comm;
};
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *cpumask);
+
+static inline int
+smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+ return smpboot_register_percpu_thread_cpumask(plug_thread,
+ cpu_possible_mask);
+}
+
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
const struct cpumask *);
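A hedged registration sketch: the wrapper above simply forwards cpu_possible_mask to the new _cpumask variant. The callbacks, the per-CPU task pointer and the "foo/%u" comm string are illustrative; the smp_hotplug_thread field names are assumed from this header.

static DEFINE_PER_CPU(struct task_struct *, foo_task);

static int foo_should_run(unsigned int cpu)
{
	return 0;		/* nothing to do in this sketch */
}

static void foo_thread_fn(unsigned int cpu)
{
}

static struct smp_hotplug_thread foo_threads = {
	.store			= &foo_task,
	.thread_should_run	= foo_should_run,
	.thread_fn		= foo_thread_fn,
	.thread_comm		= "foo/%u",
};

static int __init foo_smpboot_init(void)
{
	return smpboot_register_percpu_thread(&foo_threads);
}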
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c91eea..eead8ab93c0a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void *custom_cfg;
- void *custom_data;
void *bsp_priv;
};
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
- int has_gmac;
- int enh_desc;
- int tx_coe;
- int rx_coe;
- int bugged_jumbo;
- int pmt;
- int riwt_off;
- void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
-};
#endif
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 71f711db4500..dabe643eb5fa 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_HEX 0x20
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
- unsigned int flags, const char *esc);
+ unsigned int flags, const char *only);
static inline int string_escape_mem_any_np(const char *src, size_t isz,
- char *dst, size_t osz, const char *esc)
+ char *dst, size_t osz, const char *only)
{
- return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
+ return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only);
}
static inline int string_escape_str(const char *src, char *dst, size_t sz,
- unsigned int flags, const char *esc)
+ unsigned int flags, const char *only)
{
- return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
+ return string_escape_mem(src, strlen(src), dst, sz, flags, only);
}
static inline int string_escape_str_any_np(const char *src, char *dst,
- size_t sz, const char *esc)
+ size_t sz, const char *only)
{
- return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
+ return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
#endif
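The parameter rename from @esc to @only reflects its meaning: when non-NULL it restricts which characters the chosen ESCAPE_* flags may escape. A minimal sketch, with the exact flag/@only interaction left to lib/string_helpers.c:

static int foo_escape_label(const char *src, char *dst, size_t len)
{
	/* Only newline and tab are candidates for escaping here; every
	 * other character is passed through untouched. */
	return string_escape_str(src, dst, len, ESCAPE_ANY_NP, "\n\t");
}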
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53bedfc..5c9c6cd08d3b 100644
--- a/include/linux/sunrpc/addr.h
+++ b/include/linux/sunrpc/addr.h
@@ -46,8 +46,8 @@ static inline void rpc_set_port(struct sockaddr *sap,
#define IPV6_SCOPE_DELIMITER '%'
#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
-static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
{
const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
@@ -67,8 +67,8 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
{
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
@@ -93,7 +93,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
return true;
}
#else /* !(IS_ENABLED(CONFIG_IPV6) */
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
const struct sockaddr *sap2)
{
return false;
@@ -122,15 +122,28 @@ static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
if (sap1->sa_family == sap2->sa_family) {
switch (sap1->sa_family) {
case AF_INET:
- return __rpc_cmp_addr4(sap1, sap2);
+ return rpc_cmp_addr4(sap1, sap2);
case AF_INET6:
- return __rpc_cmp_addr6(sap1, sap2);
+ return rpc_cmp_addr6(sap1, sap2);
}
}
return false;
}
/**
+ * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ */
+static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ if (!rpc_cmp_addr(sap1, sap2))
+ return false;
+ return rpc_get_port(sap1) == rpc_get_port(sap2);
+}
+
+/**
* rpc_copy_addr - copy the address portion of one sockaddr to another
* @dst: destination sockaddr
* @src: source sockaddr
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index a7cbb570cc5c..1ecf13e148b8 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -18,9 +18,13 @@
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
+#include <linux/utsname.h>
-/* size of the nodename buffer */
-#define UNX_MAXNODENAME 32
+/*
+ * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
+ * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
+ */
+#define UNX_MAXNODENAME __NEW_UTS_LEN
struct rpcsec_gss_info;
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 437ddb6c4aef..03d3b4c92d9f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -46,7 +46,7 @@
*
*/
struct cache_head {
- struct cache_head * next;
+ struct hlist_node cache_list;
 time_t expiry_time; /* After this time, don't use the data */
time_t last_refresh; /* If CACHE_PENDING, this is when upcall
* was sent, else this is when update was received
@@ -73,7 +73,7 @@ struct cache_detail_pipefs {
struct cache_detail {
struct module * owner;
int hash_size;
- struct cache_head ** hash_table;
+ struct hlist_head * hash_table;
rwlock_t hash_lock;
atomic_t inuse; /* active user-space update or lookup */
@@ -224,6 +224,11 @@ extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
umode_t, struct cache_detail *);
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+/* Must store cache_detail in seq_file->private if using next three functions */
+extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop(struct seq_file *file, void *p);
+
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
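A brief sketch of the intended use of the three exported iterators: they slot straight into a seq_operations table, with the caller supplying ->show and stashing its cache_detail in seq_file->private as the comment above requires. foo_cache_show() is a placeholder.

static int foo_cache_show(struct seq_file *m, void *p)
{
	return 0;		/* format one cache_head here */
}

static const struct seq_operations foo_cache_seq_ops = {
	.start	= cache_seq_start,
	.next	= cache_seq_next,
	.stop	= cache_seq_stop,
	.show	= foo_cache_show,
};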
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fae6fb947fc8..cc0fc712bb82 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
#include <linux/wait.h>
#include <linux/mm.h>
-/*
- * This is the RPC server thread function prototype
- */
-typedef int (*svc_thread_fn)(void *);
-
/* statistics for svc_pool structures */
struct svc_pool_stats {
atomic_long_t packets;
@@ -54,6 +49,25 @@ struct svc_pool {
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
+struct svc_serv;
+
+struct svc_serv_ops {
+ /* Callback to use when last thread exits. */
+ void (*svo_shutdown)(struct svc_serv *, struct net *);
+
+ /* function for service threads to run */
+ int (*svo_function)(void *);
+
+ /* queue up a transport for servicing */
+ void (*svo_enqueue_xprt)(struct svc_xprt *);
+
+ /* set up thread (or whatever) execution context */
+ int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+
+ /* optional module to count when adding threads (pooled svcs only) */
+ struct module *svo_module;
+};
+
/*
* RPC service.
*
@@ -85,16 +99,7 @@ struct svc_serv {
unsigned int sv_nrpools; /* number of thread pools */
struct svc_pool * sv_pools; /* array of thread pools */
-
- void (*sv_shutdown)(struct svc_serv *serv,
- struct net *net);
- /* Callback to use when last thread
- * exits.
- */
-
- struct module * sv_module; /* optional module to count when
- * adding threads */
- svc_thread_fn sv_function; /* main function for threads */
+ struct svc_serv_ops *sv_ops; /* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
@@ -423,19 +428,46 @@ struct svc_procedure {
};
/*
+ * Mode for mapping cpus to pools.
+ */
+enum {
+ SVC_POOL_AUTO = -1, /* choose one of the others */
+ SVC_POOL_GLOBAL, /* no mapping, just a single global pool
+ * (legacy & UP mode) */
+ SVC_POOL_PERCPU, /* one pool per cpu */
+ SVC_POOL_PERNODE /* one pool per numa node */
+};
+
+struct svc_pool_map {
+ int count; /* How many svc_servs use us */
+ int mode; /* Note: int not enum to avoid
+ * warnings about "enumeration value
+ * not handled in switch" */
+ unsigned int npools;
+ unsigned int *pool_to; /* maps pool id to cpu or node */
+ unsigned int *to_pool; /* maps cpu or node to pool id */
+};
+
+extern struct svc_pool_map svc_pool_map;
+
+/*
* Function prototypes.
*/
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv *, struct net *net));
+ struct svc_serv_ops *);
+struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
+ struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
struct svc_pool *pool, int node);
+void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
+unsigned int svc_pool_map_get(void);
+void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv *, struct net *net),
- svc_thread_fn, struct module *);
+ struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
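A hedged sketch of the new registration shape: instead of passing shutdown/function/module separately, a server fills one svc_serv_ops and hands it to svc_create() or svc_create_pooled(). The foo_* names and foo_program are placeholders; the field names come from the struct above.

static void foo_shutdown(struct svc_serv *serv, struct net *net)
{
}

static int foo_svc_thread(void *data)
{
	return 0;
}

static struct svc_serv_ops foo_sv_ops = {
	.svo_shutdown	= foo_shutdown,
	.svo_function	= foo_svc_thread,
	.svo_module	= THIS_MODULE,
};

/* serv = svc_create_pooled(&foo_program, bufsize, &foo_sv_ops); */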
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cb94ee4181d4..7ccc961f33e9 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -132,6 +132,7 @@ struct svcxprt_rdma {
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
int sc_max_sge;
+ int sc_max_sge_rd; /* max sge for read target */
int sc_sq_depth; /* Depth of SQ */
atomic_t sc_sq_count; /* Number of SQ WR on queue */
@@ -172,13 +173,6 @@ struct svcxprt_rdma {
#define RDMAXPRT_SQ_PENDING 2
#define RDMAXPRT_CONN_PENDING 3
-#define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */
-#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
-#define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD
-#else
-#define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
-#endif
-
#define RPCRDMA_LISTEN_BACKLOG 10
/* The default ORD value is based on two outstanding full-size writes with a
* page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
@@ -187,6 +181,8 @@ struct svcxprt_rdma {
#define RPCRDMA_MAX_REQUESTS 32
#define RPCRDMA_MAX_REQ_SIZE 4096
+#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+
/* svc_rdma_marshal.c */
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -213,6 +209,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);
+extern struct rpcrdma_read_chunk *
+ svc_rdma_get_read_chunk(struct rpcrdma_msg *);
/* svc_rdma_transport.c */
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
@@ -225,7 +223,6 @@ extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
-extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
struct svc_rdma_fastreg_mr *);
@@ -238,83 +235,4 @@ extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
-/*
- * Returns the address of the first read chunk or <nul> if no read chunk is
- * present
- */
-static inline struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
-{
- struct rpcrdma_read_chunk *ch =
- (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-
- if (ch->rc_discrim == 0)
- return NULL;
-
- return ch;
-}
-
-/*
- * Returns the address of the first read write array element or <nul> if no
- * write array list is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
-{
- if (rmsgp->rm_body.rm_chunks[0] != 0
- || rmsgp->rm_body.rm_chunks[1] == 0)
- return NULL;
-
- return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
-}
-
-/*
- * Returns the address of the first reply array element or <nul> if no
- * reply array is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
-{
- struct rpcrdma_read_chunk *rch;
- struct rpcrdma_write_array *wr_ary;
- struct rpcrdma_write_array *rp_ary;
-
- /* XXX: Need to fix when reply list may occur with read-list and/or
- * write list */
- if (rmsgp->rm_body.rm_chunks[0] != 0 ||
- rmsgp->rm_body.rm_chunks[1] != 0)
- return NULL;
-
- rch = svc_rdma_get_read_chunk(rmsgp);
- if (rch) {
- while (rch->rc_discrim)
- rch++;
-
- /* The reply list follows an empty write array located
- * at 'rc_position' here. The reply array is at rc_target.
- */
- rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
-
- goto found_it;
- }
-
- wr_ary = svc_rdma_get_write_array(rmsgp);
- if (wr_ary) {
- rp_ary = (struct rpcrdma_write_array *)
- &wr_ary->
- wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
-
- goto found_it;
- }
-
- /* No read list, no write list */
- rp_ary = (struct rpcrdma_write_array *)
- &rmsgp->rm_body.rm_chunks[2];
-
- found_it:
- if (rp_ary->wc_discrim == 0)
- return NULL;
-
- return rp_ary;
-}
#endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79f6f8f3dc0a..78512cfe1fe6 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@ void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
int svc_create_xprt(struct svc_serv *, const char *, struct net *,
const int, const unsigned short, int);
+void svc_xprt_do_enqueue(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b17613052cc3..b7b279b54504 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,7 +49,7 @@
* a single chunk type per message is supported currently.
*/
#define RPCRDMA_MIN_SLOT_TABLE (2U)
-#define RPCRDMA_DEF_SLOT_TABLE (32U)
+#define RPCRDMA_DEF_SLOT_TABLE (128U)
#define RPCRDMA_MAX_SLOT_TABLE (256U)
#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 31496d201fdc..7ba7dccaf0e7 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,7 +351,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
-extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
+static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+{
+ /* root ? */
+ if (mem_cgroup_disabled() || !memcg->css.parent)
+ return vm_swappiness;
+
+ return memcg->swappiness;
+}
+
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
@@ -398,6 +406,9 @@ extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
+extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr,
+ bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
@@ -431,6 +442,7 @@ extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
+extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
@@ -522,6 +534,11 @@ static inline int page_swapcount(struct page *page)
return 0;
}
+static inline int swp_swapcount(swp_entry_t entry)
+{
+ return 0;
+}
+
#define reuse_swap_page(page) (page_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cedf3d3c373f..5c3a5f3e7eec 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry)
#endif
#ifdef CONFIG_MEMORY_FAILURE
+
+extern atomic_long_t num_poisoned_pages __read_mostly;
+
/*
* Support for hardware poisoned pages
*/
@@ -177,6 +180,31 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
{
return swp_type(entry) == SWP_HWPOISON;
}
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+ return TestSetPageHWPoison(page);
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+ atomic_long_inc(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_dec(void)
+{
+ atomic_long_dec(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_add(long num)
+{
+ atomic_long_add(num, &num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_sub(long num)
+{
+ atomic_long_sub(num, &num_poisoned_pages);
+}
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -188,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
{
return 0;
}
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+ return false;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+}
#endif
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b45c45b8c829..a460e2ef2843 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -810,6 +810,7 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
asmlinkage long sys_eventfd(unsigned int count);
asmlinkage long sys_eventfd2(unsigned int count, int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
+asmlinkage long sys_userfaultfd(int flags);
asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
@@ -884,4 +885,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
+asmlinkage long sys_membarrier(int cmd, int flags);
+
#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 037e9df2f610..17292fee8686 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -92,23 +92,19 @@ struct thermal_zone_device_ops {
struct thermal_cooling_device *);
int (*unbind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
- int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_temp) (struct thermal_zone_device *, int *);
int (*get_mode) (struct thermal_zone_device *,
enum thermal_device_mode *);
int (*set_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
int (*get_trip_type) (struct thermal_zone_device *, int,
enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_temp) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
- int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
+ int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
+ int (*set_trip_temp) (struct thermal_zone_device *, int, int);
+ int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
+ int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
+ int (*get_crit_temp) (struct thermal_zone_device *, int *);
+ int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
int (*notify) (struct thermal_zone_device *, int,
@@ -332,9 +328,9 @@ struct thermal_genl_event {
* temperature.
*/
struct thermal_zone_of_device_ops {
- int (*get_temp)(void *, long *);
+ int (*get_temp)(void *, int *);
int (*get_trend)(void *, long *);
- int (*set_emul_temp)(void *, unsigned long);
+ int (*set_emul_temp)(void *, int);
};
/**
@@ -406,7 +402,7 @@ thermal_of_cooling_device_register(struct device_node *np, char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
-int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int get_tz_trend(struct thermal_zone_device *, int);
struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -457,7 +453,7 @@ static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
const char *name)
{ return ERR_PTR(-ENODEV); }
static inline int thermal_zone_get_temp(
- struct thermal_zone_device *tz, unsigned long *temp)
+ struct thermal_zone_device *tz, int *temp)
{ return -ENODEV; }
static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
{ return -ENODEV; }
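With temperatures now plain int millidegrees Celsius, a sensor callback under the OF ops looks like the hedged sketch below; the fixed reading stands in for a real register read.

static int foo_get_temp(void *data, int *temp)
{
	/* "data" is the driver's private sensor state; report 45.000 C. */
	*temp = 45000;
	return 0;
}

static const struct thermal_zone_of_device_ops foo_tz_ops = {
	.get_temp = foo_get_temp,
};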
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 48d901f83f92..e312219ff823 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask);
}
+static inline int housekeeping_any_cpu(void)
+{
+ return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+}
+
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(void);
#else
+static inline int housekeeping_any_cpu(void)
+{
+ return smp_processor_id();
+}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c138607..d6f2c2c5b043 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+
#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
new file mode 100644
index 000000000000..587480ad41b7
--- /dev/null
+++ b/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
+/*
+ * include/linux/userfaultfd_k.h
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_K_H
+#define _LINUX_USERFAULTFD_K_H
+
+#ifdef CONFIG_USERFAULTFD
+
+#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
+
+#include <linux/fcntl.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from userfaultfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define UFFD_CLOEXEC O_CLOEXEC
+#define UFFD_NONBLOCK O_NONBLOCK
+
+#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+
+extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long reason);
+
+extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+ unsigned long src_start, unsigned long len);
+extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
+ unsigned long dst_start,
+ unsigned long len);
+
+/* mm helpers */
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_MISSING;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+}
+
+#else /* CONFIG_USERFAULTFD */
+
+/* mm helpers */
+static inline int handle_userfault(struct vm_area_struct *vma,
+ unsigned long address,
+ unsigned int flags,
+ unsigned long reason)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return true;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+#endif /* CONFIG_USERFAULTFD */
+
+#endif /* _LINUX_USERFAULTFD_K_H */
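A heavily simplified, hypothetical fault-path sketch: if the VMA was registered for missing-page tracking, the fault is handed to the userfaultfd reader; otherwise this placeholder falls back to SIGBUS. The real call sites are in the core mm fault handlers.

static int foo_handle_missing_fault(struct vm_area_struct *vma,
				    unsigned long address, unsigned int flags)
{
	if (userfaultfd_missing(vma))
		return handle_userfault(vma, address, flags, VM_UFFD_MISSING);

	return VM_FAULT_SIGBUS;		/* placeholder fallback */
}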
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
index ac34819214f9..da2049b5161c 100644
--- a/include/linux/verify_pefile.h
+++ b/include/linux/verify_pefile.h
@@ -12,7 +12,11 @@
#ifndef _LINUX_VERIFY_PEFILE_H
#define _LINUX_VERIFY_PEFILE_H
+#include <crypto/public_key.h>
+
extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
- struct key *trusted_keyring, bool *_trusted);
+ struct key *trusted_keyring,
+ enum key_being_used_for usage,
+ bool *_trusted);
#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f963a9..d3d077228d4c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,7 +147,8 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
+ void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -179,7 +180,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
- __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+ __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index f47feada5b42..d74a0e907b9e 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -140,12 +140,4 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void watchdog_nmi_disable_all(void);
-void watchdog_nmi_enable_all(void);
-#else
-static inline void watchdog_nmi_disable_all(void) {}
-static inline void watchdog_nmi_enable_all(void) {}
-#endif
-
#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
index f9d41a6e361f..e183a0a65ac1 100644
--- a/include/linux/zbud.h
+++ b/include/linux/zbud.h
@@ -9,7 +9,7 @@ struct zbud_ops {
int (*evict)(struct zbud_pool *pool, unsigned long handle);
};
-struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
void zbud_destroy_pool(struct zbud_pool *pool);
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index d30eff3d84d5..42f8ec992452 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,8 +36,10 @@ enum zpool_mapmode {
ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};
+bool zpool_has_pool(char *type);
+
struct zpool *zpool_create_pool(char *type, char *name,
- gfp_t gfp, struct zpool_ops *ops);
+ gfp_t gfp, const struct zpool_ops *ops);
char *zpool_get_type(struct zpool *pool);
@@ -81,7 +83,7 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops,
+ void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops,
struct zpool *zpool);
void (*destroy)(void *pool);
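A small sketch of creating a pool against the now-const ops table; the "zbud" type, the pool name and the always-failing evict callback are examples, and the single-member zpool_ops layout is assumed from this header.

static int foo_evict(struct zpool *pool, unsigned long handle)
{
	return -EINVAL;		/* this sketch never writes entries back */
}

static const struct zpool_ops foo_zpool_ops = {
	.evict = foo_evict,
};

/*
 * if (zpool_has_pool("zbud"))
 *	pool = zpool_create_pool("zbud", "foo", GFP_KERNEL, &foo_zpool_ops);
 */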
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 1338190b5478..6398dfae53f1 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -34,6 +34,11 @@ enum zs_mapmode {
*/
};
+struct zs_pool_stats {
+ /* How many pages were migrated (freed) */
+ unsigned long pages_compacted;
+};
+
struct zs_pool;
struct zs_pool *zs_create_pool(char *name, gfp_t flags);
@@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
#endif
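A short usage sketch for the new stats hook: run compaction, then report how many pages it freed. The pr_info() wording is arbitrary.

static void foo_report_compaction(struct zs_pool *pool)
{
	struct zs_pool_stats stats;

	zs_compact(pool);
	zs_pool_stats(pool, &stats);
	pr_info("zsmalloc: %lu pages compacted\n", stats.pages_compacted);
}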
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 0dc7060f9625..17ddae32060d 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -53,9 +53,13 @@ struct media_file_operations {
/**
* struct media_devnode - Media device node
+ * @fops: pointer to struct media_file_operations with media device ops
+ * @dev: struct device pointer for the media controller device
+ * @cdev: struct cdev pointer for the character device
* @parent: parent device
* @minor: device node minor number
* @flags: flags, combination of the MEDIA_FLAG_* constants
+ * @release: release callback called at the end of media_devnode_release()
*
* This structure represents a media-related device node.
*
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
deleted file mode 100644
index 048f8f9117ef..000000000000
--- a/include/media/omap3isp.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * omap3isp.h
- *
- * TI OMAP3 ISP - Platform data
- *
- * Copyright (C) 2011 Nokia Corporation
- *
- * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- * Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __MEDIA_OMAP3ISP_H__
-#define __MEDIA_OMAP3ISP_H__
-
-struct i2c_board_info;
-struct isp_device;
-
-enum isp_interface_type {
- ISP_INTERFACE_PARALLEL,
- ISP_INTERFACE_CSI2A_PHY2,
- ISP_INTERFACE_CCP2B_PHY1,
- ISP_INTERFACE_CCP2B_PHY2,
- ISP_INTERFACE_CSI2C_PHY1,
-};
-
-enum {
- ISP_LANE_SHIFT_0 = 0,
- ISP_LANE_SHIFT_2 = 1,
- ISP_LANE_SHIFT_4 = 2,
- ISP_LANE_SHIFT_6 = 3,
-};
-
-/**
- * struct isp_parallel_cfg - Parallel interface configuration
- * @data_lane_shift: Data lane shifter
- * ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
- * ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
- * ISP_LANE_SHIFT_4 - CAMEXT[13:4] -> CAM[9:0]
- * ISP_LANE_SHIFT_6 - CAMEXT[13:6] -> CAM[7:0]
- * @clk_pol: Pixel clock polarity
- * 0 - Sample on rising edge, 1 - Sample on falling edge
- * @hs_pol: Horizontal synchronization polarity
- * 0 - Active high, 1 - Active low
- * @vs_pol: Vertical synchronization polarity
- * 0 - Active high, 1 - Active low
- * @fld_pol: Field signal polarity
- * 0 - Positive, 1 - Negative
- * @data_pol: Data polarity
- * 0 - Normal, 1 - One's complement
- */
-struct isp_parallel_cfg {
- unsigned int data_lane_shift:2;
- unsigned int clk_pol:1;
- unsigned int hs_pol:1;
- unsigned int vs_pol:1;
- unsigned int fld_pol:1;
- unsigned int data_pol:1;
-};
-
-enum {
- ISP_CCP2_PHY_DATA_CLOCK = 0,
- ISP_CCP2_PHY_DATA_STROBE = 1,
-};
-
-enum {
- ISP_CCP2_MODE_MIPI = 0,
- ISP_CCP2_MODE_CCP2 = 1,
-};
-
-/**
- * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
- * @pos: position of the lane
- * @pol: polarity of the lane
- */
-struct isp_csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISP_CSIPHY1_NUM_DATA_LANES 1
-#define ISP_CSIPHY2_NUM_DATA_LANES 2
-
-/**
- * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
- * @data: Configuration of one or two data lanes
- * @clk: Clock lane configuration
- */
-struct isp_csiphy_lanes_cfg {
- struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
- struct isp_csiphy_lane clk;
-};
-
-/**
- * struct isp_ccp2_cfg - CCP2 interface configuration
- * @strobe_clk_pol: Strobe/clock polarity
- * 0 - Non Inverted, 1 - Inverted
- * @crc: Enable the cyclic redundancy check
- * @ccp2_mode: Enable CCP2 compatibility mode
- * ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
- * ISP_CCP2_MODE_CCP2 - CCP2 mode
- * @phy_layer: Physical layer selection
- * ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
- * ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
- * @vpclk_div: Video port output clock control
- */
-struct isp_ccp2_cfg {
- unsigned int strobe_clk_pol:1;
- unsigned int crc:1;
- unsigned int ccp2_mode:1;
- unsigned int phy_layer:1;
- unsigned int vpclk_div:2;
- struct isp_csiphy_lanes_cfg lanecfg;
-};
-
-/**
- * struct isp_csi2_cfg - CSI2 interface configuration
- * @crc: Enable the cyclic redundancy check
- */
-struct isp_csi2_cfg {
- unsigned crc:1;
- struct isp_csiphy_lanes_cfg lanecfg;
-};
-
-struct isp_bus_cfg {
- enum isp_interface_type interface;
- union {
- struct isp_parallel_cfg parallel;
- struct isp_ccp2_cfg ccp2;
- struct isp_csi2_cfg csi2;
- } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct isp_platform_subdev {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
- struct isp_bus_cfg *bus;
-};
-
-struct isp_platform_data {
- struct isp_platform_subdev *subdevs;
- void (*set_constraints)(struct isp_device *isp, bool enable);
-};
-
-#endif /* __MEDIA_OMAP3ISP_H__ */
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 644bdc61c387..ec921f6538c7 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -69,7 +69,7 @@ enum rc_filter_type {
* @rc_map: current scan/key table
* @lock: used to ensure we've filled in all protocol details before
* anyone can call show_protocols or store_protocols
- * @devno: unique remote control device number
+ * @minor: unique minor remote control device number
* @raw: additional data for raw pulse/space devices
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
@@ -110,7 +110,7 @@ enum rc_filter_type {
* @s_tx_mask: set transmitter mask (for devices with multiple tx outputs)
* @s_tx_carrier: set transmit carrier frequency
* @s_tx_duty_cycle: set transmit duty cycle (0% - 100%)
- * @s_rx_carrier: inform driver about carrier it is expected to handle
+ * @s_rx_carrier_range: inform driver about carrier it is expected to handle
* @tx_ir: transmit IR
* @s_idle: enable/disable hardware idle mode, upon which,
* device doesn't interrupt host until it sees IR pulses
@@ -129,7 +129,7 @@ struct rc_dev {
const char *map_name;
struct rc_map rc_map;
struct mutex lock;
- unsigned long devno;
+ unsigned int minor;
struct ir_raw_event_ctrl *raw;
struct input_dev *input_dev;
enum rc_driver_type driver_type;
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 27763d5bd261..7c4bbc4dfab4 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -14,30 +14,28 @@
enum rc_type {
RC_TYPE_UNKNOWN = 0, /* Protocol not known */
RC_TYPE_OTHER = 1, /* Protocol known but proprietary */
- RC_TYPE_LIRC = 2, /* Pass raw IR to lirc userspace */
- RC_TYPE_RC5 = 3, /* Philips RC5 protocol */
- RC_TYPE_RC5X = 4, /* Philips RC5x protocol */
- RC_TYPE_RC5_SZ = 5, /* StreamZap variant of RC5 */
- RC_TYPE_JVC = 6, /* JVC protocol */
- RC_TYPE_SONY12 = 7, /* Sony 12 bit protocol */
- RC_TYPE_SONY15 = 8, /* Sony 15 bit protocol */
- RC_TYPE_SONY20 = 9, /* Sony 20 bit protocol */
- RC_TYPE_NEC = 10, /* NEC protocol */
- RC_TYPE_SANYO = 11, /* Sanyo protocol */
- RC_TYPE_MCE_KBD = 12, /* RC6-ish MCE keyboard/mouse */
- RC_TYPE_RC6_0 = 13, /* Philips RC6-0-16 protocol */
- RC_TYPE_RC6_6A_20 = 14, /* Philips RC6-6A-20 protocol */
- RC_TYPE_RC6_6A_24 = 15, /* Philips RC6-6A-24 protocol */
- RC_TYPE_RC6_6A_32 = 16, /* Philips RC6-6A-32 protocol */
- RC_TYPE_RC6_MCE = 17, /* MCE (Philips RC6-6A-32 subtype) protocol */
- RC_TYPE_SHARP = 18, /* Sharp protocol */
- RC_TYPE_XMP = 19, /* XMP protocol */
+ RC_TYPE_RC5 = 2, /* Philips RC5 protocol */
+ RC_TYPE_RC5X = 3, /* Philips RC5x protocol */
+ RC_TYPE_RC5_SZ = 4, /* StreamZap variant of RC5 */
+ RC_TYPE_JVC = 5, /* JVC protocol */
+ RC_TYPE_SONY12 = 6, /* Sony 12 bit protocol */
+ RC_TYPE_SONY15 = 7, /* Sony 15 bit protocol */
+ RC_TYPE_SONY20 = 8, /* Sony 20 bit protocol */
+ RC_TYPE_NEC = 9, /* NEC protocol */
+ RC_TYPE_SANYO = 10, /* Sanyo protocol */
+ RC_TYPE_MCE_KBD = 11, /* RC6-ish MCE keyboard/mouse */
+ RC_TYPE_RC6_0 = 12, /* Philips RC6-0-16 protocol */
+ RC_TYPE_RC6_6A_20 = 13, /* Philips RC6-6A-20 protocol */
+ RC_TYPE_RC6_6A_24 = 14, /* Philips RC6-6A-24 protocol */
+ RC_TYPE_RC6_6A_32 = 15, /* Philips RC6-6A-32 protocol */
+ RC_TYPE_RC6_MCE = 16, /* MCE (Philips RC6-6A-32 subtype) protocol */
+ RC_TYPE_SHARP = 17, /* Sharp protocol */
+ RC_TYPE_XMP = 18, /* XMP protocol */
};
#define RC_BIT_NONE 0
#define RC_BIT_UNKNOWN (1 << RC_TYPE_UNKNOWN)
#define RC_BIT_OTHER (1 << RC_TYPE_OTHER)
-#define RC_BIT_LIRC (1 << RC_TYPE_LIRC)
#define RC_BIT_RC5 (1 << RC_TYPE_RC5)
#define RC_BIT_RC5X (1 << RC_TYPE_RC5X)
#define RC_BIT_RC5_SZ (1 << RC_TYPE_RC5_SZ)
@@ -56,7 +54,7 @@ enum rc_type {
#define RC_BIT_SHARP (1 << RC_TYPE_SHARP)
#define RC_BIT_XMP (1 << RC_TYPE_XMP)
-#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | RC_BIT_LIRC | \
+#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | \
RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
diff --git a/include/media/tc358743.h b/include/media/tc358743.h
new file mode 100644
index 000000000000..4513f2f9cfbc
--- /dev/null
+++ b/include/media/tc358743.h
@@ -0,0 +1,131 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls
+ */
+
+#ifndef _TC358743_
+#define _TC358743_
+
+enum tc358743_ddc5v_delays {
+ DDC5V_DELAY_0_MS,
+ DDC5V_DELAY_50_MS,
+ DDC5V_DELAY_100_MS,
+ DDC5V_DELAY_200_MS,
+};
+
+enum tc358743_hdmi_detection_delay {
+ HDMI_MODE_DELAY_0_MS,
+ HDMI_MODE_DELAY_25_MS,
+ HDMI_MODE_DELAY_50_MS,
+ HDMI_MODE_DELAY_100_MS,
+};
+
+struct tc358743_platform_data {
+ /* System clock connected to REFCLK (pin H5) */
+ u32 refclk_hz; /* 26 MHz, 27 MHz or 42 MHz */
+
+ /* DDC +5V debounce delay to avoid spurious interrupts when the cable
+ * is connected.
+ * Sets DDC5V_MODE in register DDC_CTL.
+ * Default: DDC5V_DELAY_0_MS
+ */
+ enum tc358743_ddc5v_delays ddc5v_delay;
+
+ bool enable_hdcp;
+
+ /*
+ * The FIFO size is 512x32, so Toshiba recommends setting the default FIFO
+ * level to somewhere in the middle (e.g. 300), so it can cover speed
+ * mismatches in input and output ports.
+ */
+ u16 fifo_level;
+
+ /* Bps per lane is (refclk_hz / pll_prd) * pll_fbd */
+ u16 pll_prd;
+ u16 pll_fbd;
+
+ /* CSI
+ * Calculate CSI parameters with REF_02 for the highest resolution your
+ * CSI interface can handle. The driver will adjust the number of CSI
+ * lanes in use according to the pixel clock.
+ *
+ * The values in brackets are calculated with REF_02 when the number of
+ * bps per lane is 823.5 MHz, and can serve as a starting point.
+ */
+ u32 lineinitcnt; /* (0x00001770) */
+ u32 lptxtimecnt; /* (0x00000005) */
+ u32 tclk_headercnt; /* (0x00001d04) */
+ u32 tclk_trailcnt; /* (0x00000000) */
+ u32 ths_headercnt; /* (0x00000505) */
+ u32 twakeup; /* (0x00004650) */
+ u32 tclk_postcnt; /* (0x00000000) */
+ u32 ths_trailcnt; /* (0x00000004) */
+ u32 hstxvregcnt; /* (0x00000005) */
+
+ /* DVI->HDMI detection delay to avoid unnecessary switching between DVI
+ * and HDMI mode.
+ * Sets HDMI_DET_V in register HDMI_DET.
+ * Default: HDMI_MODE_DELAY_0_MS
+ */
+ enum tc358743_hdmi_detection_delay hdmi_detection_delay;
+
+ /* Reset PHY automatically when TMDS clock goes from DC to AC.
+ * Sets PHY_AUTO_RST2 in register PHY_CTL2.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_tmds_detected;
+
+ /* Reset PHY automatically when TMDS clock passes 21 MHz.
+ * Sets PHY_AUTO_RST3 in register PHY_CTL2.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_tmds_in_range;
+
+ /* Reset PHY automatically when TMDS clock is detected.
+ * Sets PHY_AUTO_RST4 in register PHY_CTL2.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_tmds_valid;
+
+ /* Reset HDMI PHY automatically when hsync period is out of range.
+ * Sets H_PI_RST in register HV_RST.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_hsync_out_of_range;
+
+ /* Reset HDMI PHY automatically when vsync period is out of range.
+ * Sets V_PI_RST in register HV_RST.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_vsync_out_of_range;
+};
+
+/* custom controls */
+/* Audio sample rate in Hz */
+#define TC358743_CID_AUDIO_SAMPLING_RATE (V4L2_CID_USER_TC358743_BASE + 0)
+/* Audio present status */
+#define TC358743_CID_AUDIO_PRESENT (V4L2_CID_USER_TC358743_BASE + 1)
+
+#endif
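A hedged board-file sketch populating the platform data above with the bracketed starting-point CSI values from the comments; the reference clock, PLL dividers and FIFO level are illustrative board choices, not recommendations from this header.

static struct tc358743_platform_data foo_tc358743_pdata = {
	.refclk_hz		= 27000000,
	.ddc5v_delay		= DDC5V_DELAY_100_MS,
	.enable_hdcp		= false,
	.fifo_level		= 300,
	.pll_prd		= 4,
	.pll_fbd		= 122,
	.lineinitcnt		= 0x00001770,
	.lptxtimecnt		= 0x00000005,
	.tclk_headercnt		= 0x00001d04,
	.tclk_trailcnt		= 0x00000000,
	.ths_headercnt		= 0x00000505,
	.twakeup		= 0x00004650,
	.tclk_postcnt		= 0x00000000,
	.ths_trailcnt		= 0x00000004,
	.hstxvregcnt		= 0x00000005,
	.hdmi_detection_delay	= HDMI_MODE_DELAY_50_MS,
};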
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 768356917bea..1d6d7da4c45d 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -32,7 +32,8 @@ enum v4l2_async_match_type {
/**
* struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- * @bus_type: subdevice bus type to select the appropriate matching method
+ *
+ * @match_type: type of match that will be used
* @match: union of per-bus type matching data sets
* @list: used to link struct v4l2_async_subdev objects, waiting to be
* probed, to a notifier->waiting list
@@ -62,8 +63,9 @@ struct v4l2_async_subdev {
};
/**
- * v4l2_async_notifier - v4l2_device notifier data
- * @num_subdevs:number of subdevices
+ * struct v4l2_async_notifier - v4l2_device notifier data
+ *
+ * @num_subdevs: number of subdevices
* @subdevs: array of pointers to subdevice descriptors
* @v4l2_dev: pointer to struct v4l2_device
* @waiting: list of struct v4l2_async_subdev, waiting for their drivers
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 911f3e542834..da6fe9802fee 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -36,7 +36,8 @@ struct v4l2_subscribed_event;
struct v4l2_fh;
struct poll_table_struct;
-/** union v4l2_ctrl_ptr - A pointer to a control value.
+/**
+ * union v4l2_ctrl_ptr - A pointer to a control value.
* @p_s32: Pointer to a 32-bit signed value.
* @p_s64: Pointer to a 64-bit signed value.
* @p_u8: Pointer to a 8-bit unsigned value.
@@ -55,30 +56,34 @@ union v4l2_ctrl_ptr {
void *p;
};
-/** struct v4l2_ctrl_ops - The control operations that the driver has to provide.
- * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
- * for volatile (and usually read-only) controls such as a control
- * that returns the current signal strength which changes
- * continuously.
- * If not set, then the currently cached value will be returned.
- * @try_ctrl: Test whether the control's value is valid. Only relevant when
- * the usual min/max/step checks are not sufficient.
- * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The
- * ctrl->handler->lock is held when these ops are called, so no
- * one else can access controls owned by that handler.
- */
+/**
+ * struct v4l2_ctrl_ops - The control operations that the driver has to provide.
+ * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
+ * for volatile (and usually read-only) controls such as a control
+ * that returns the current signal strength which changes
+ * continuously.
+ * If not set, then the currently cached value will be returned.
+ * @try_ctrl: Test whether the control's value is valid. Only relevant when
+ * the usual min/max/step checks are not sufficient.
+ * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The
+ * ctrl->handler->lock is held when these ops are called, so no
+ * one else can access controls owned by that handler.
+ */
struct v4l2_ctrl_ops {
int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl);
int (*try_ctrl)(struct v4l2_ctrl *ctrl);
int (*s_ctrl)(struct v4l2_ctrl *ctrl);
};
-/** struct v4l2_ctrl_type_ops - The control type operations that the driver has to provide.
- * @equal: return true if both values are equal.
- * @init: initialize the value.
- * @log: log the value.
- * @validate: validate the value. Return 0 on success and a negative value otherwise.
- */
+/**
+ * struct v4l2_ctrl_type_ops - The control type operations that the driver
+ * has to provide.
+ *
+ * @equal: return true if both values are equal.
+ * @init: initialize the value.
+ * @log: log the value.
+ * @validate: validate the value. Return 0 on success and a negative value otherwise.
+ */
struct v4l2_ctrl_type_ops {
bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr1,
@@ -92,74 +97,80 @@ struct v4l2_ctrl_type_ops {
typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
-/** struct v4l2_ctrl - The control structure.
- * @node: The list node.
- * @ev_subs: The list of control event subscriptions.
- * @handler: The handler that owns the control.
- * @cluster: Point to start of cluster array.
- * @ncontrols: Number of controls in cluster array.
- * @done: Internal flag: set for each processed control.
- * @is_new: Set when the user specified a new value for this control. It
- * is also set when called from v4l2_ctrl_handler_setup. Drivers
- * should never set this flag.
- * @has_changed: Set when the current value differs from the new value. Drivers
- * should never use this flag.
- * @is_private: If set, then this control is private to its handler and it
- * will not be added to any other handlers. Drivers can set
- * this flag.
- * @is_auto: If set, then this control selects whether the other cluster
- * members are in 'automatic' mode or 'manual' mode. This is
- * used for autogain/gain type clusters. Drivers should never
- * set this flag directly.
- * @is_int: If set, then this control has a simple integer value (i.e. it
- * uses ctrl->val).
- * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
- * @is_ptr: If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
- * and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
- * v4l2_ext_control uses field p to point to the data.
- * @is_array: If set, then this control contains an N-dimensional array.
- * @has_volatiles: If set, then one or more members of the cluster are volatile.
- * Drivers should never touch this flag.
- * @call_notify: If set, then call the handler's notify function whenever the
- * control's value changes.
- * @manual_mode_value: If the is_auto flag is set, then this is the value
- * of the auto control that determines if that control is in
- * manual mode. So if the value of the auto control equals this
- * value, then the whole cluster is in manual mode. Drivers should
- * never set this flag directly.
- * @ops: The control ops.
- * @type_ops: The control type ops.
- * @id: The control ID.
- * @name: The control name.
- * @type: The control type.
- * @minimum: The control's minimum value.
- * @maximum: The control's maximum value.
- * @default_value: The control's default value.
- * @step: The control's step value for non-menu controls.
- * @elems: The number of elements in the N-dimensional array.
- * @elem_size: The size in bytes of the control.
- * @dims: The size of each dimension.
- * @nr_of_dims:The number of dimensions in @dims.
- * @menu_skip_mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 32 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a u64 or a bit array.
- * @qmenu: A const char * array for all menu items. Array entries that are
- * empty strings ("") correspond to non-existing menu items (this
- * is in addition to the menu_skip_mask above). The last entry
- * must be NULL.
- * @flags: The control's flags.
- * @cur: The control's current value.
- * @val: The control's new s32 value.
- * @val64: The control's new s64 value.
- * @priv: The control's private pointer. For use by the driver. It is
- * untouched by the control framework. Note that this pointer is
- * not freed when the control is deleted. Should this be needed
- * then a new internal bitfield can be added to tell the framework
- * to free this pointer.
- */
+/**
+ * struct v4l2_ctrl - The control structure.
+ * @node: The list node.
+ * @ev_subs: The list of control event subscriptions.
+ * @handler: The handler that owns the control.
+ * @cluster: Point to start of cluster array.
+ * @ncontrols: Number of controls in cluster array.
+ * @done: Internal flag: set for each processed control.
+ * @is_new: Set when the user specified a new value for this control. It
+ * is also set when called from v4l2_ctrl_handler_setup. Drivers
+ * should never set this flag.
+ * @has_changed: Set when the current value differs from the new value. Drivers
+ * should never use this flag.
+ * @is_private: If set, then this control is private to its handler and it
+ * will not be added to any other handlers. Drivers can set
+ * this flag.
+ * @is_auto: If set, then this control selects whether the other cluster
+ * members are in 'automatic' mode or 'manual' mode. This is
+ * used for autogain/gain type clusters. Drivers should never
+ * set this flag directly.
+ * @is_int: If set, then this control has a simple integer value (i.e. it
+ * uses ctrl->val).
+ * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
+ * @is_ptr: If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
+ * and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
+ * v4l2_ext_control uses field p to point to the data.
+ * @is_array: If set, then this control contains an N-dimensional array.
+ * @has_volatiles: If set, then one or more members of the cluster are volatile.
+ * Drivers should never touch this flag.
+ * @call_notify: If set, then call the handler's notify function whenever the
+ * control's value changes.
+ * @manual_mode_value: If the is_auto flag is set, then this is the value
+ * of the auto control that determines if that control is in
+ * manual mode. So if the value of the auto control equals this
+ * value, then the whole cluster is in manual mode. Drivers should
+ * never set this flag directly.
+ * @ops: The control ops.
+ * @type_ops: The control type ops.
+ * @id: The control ID.
+ * @name: The control name.
+ * @type: The control type.
+ * @minimum: The control's minimum value.
+ * @maximum: The control's maximum value.
+ * @default_value: The control's default value.
+ * @step: The control's step value for non-menu controls.
+ * @elems: The number of elements in the N-dimensional array.
+ * @elem_size: The size in bytes of the control.
+ * @dims: The size of each dimension.
+ * @nr_of_dims: The number of dimensions in @dims.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 32 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a u64 or a bit array.
+ * @qmenu: A const char * array for all menu items. Array entries that are
+ * empty strings ("") correspond to non-existing menu items (this
+ * is in addition to the menu_skip_mask above). The last entry
+ * must be NULL.
+ * @flags: The control's flags.
+ * @cur: The control's current value.
+ * @val: The control's new s32 value.
+ * @priv: The control's private pointer. For use by the driver. It is
+ * untouched by the control framework. Note that this pointer is
+ * not freed when the control is deleted. Should this be needed
+ * then a new internal bitfield can be added to tell the framework
+ * to free this pointer.
+ * @p_cur:	The control's current value represented via a union that
+ *		provides a standard way of accessing control types
+ *		through a pointer.
+ * @p_new:	The control's new value represented via a union that
+ *		provides a standard way of accessing control types
+ *		through a pointer.
+ */
struct v4l2_ctrl {
/* Administrative fields */
struct list_head node;
@@ -210,16 +221,17 @@ struct v4l2_ctrl {
union v4l2_ctrl_ptr p_cur;
};
-/** struct v4l2_ctrl_ref - The control reference.
- * @node: List node for the sorted list.
- * @next: Single-link list node for the hash.
- * @ctrl: The actual control information.
- * @helper: Pointer to helper struct. Used internally in prepare_ext_ctrls().
- *
- * Each control handler has a list of these refs. The list_head is used to
- * keep a sorted-by-control-ID list of all controls, while the next pointer
- * is used to link the control in the hash's bucket.
- */
+/**
+ * struct v4l2_ctrl_ref - The control reference.
+ * @node: List node for the sorted list.
+ * @next: Single-link list node for the hash.
+ * @ctrl: The actual control information.
+ * @helper: Pointer to helper struct. Used internally in prepare_ext_ctrls().
+ *
+ * Each control handler has a list of these refs. The list_head is used to
+ * keep a sorted-by-control-ID list of all controls, while the next pointer
+ * is used to link the control in the hash's bucket.
+ */
struct v4l2_ctrl_ref {
struct list_head node;
struct v4l2_ctrl_ref *next;
@@ -227,25 +239,26 @@ struct v4l2_ctrl_ref {
struct v4l2_ctrl_helper *helper;
};
-/** struct v4l2_ctrl_handler - The control handler keeps track of all the
- * controls: both the controls owned by the handler and those inherited
- * from other handlers.
- * @_lock: Default for "lock".
- * @lock: Lock to control access to this handler and its controls.
- * May be replaced by the user right after init.
- * @ctrls: The list of controls owned by this handler.
- * @ctrl_refs: The list of control references.
- * @cached: The last found control reference. It is common that the same
- * control is needed multiple times, so this is a simple
- * optimization.
- * @buckets: Buckets for the hashing. Allows for quick control lookup.
- * @notify: A notify callback that is called whenever the control changes value.
- * Note that the handler's lock is held when the notify function
- * is called!
- * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
- * @nr_of_buckets: Total number of buckets in the array.
- * @error: The error code of the first failed control addition.
- */
+/**
+ * struct v4l2_ctrl_handler - The control handler keeps track of all the
+ * controls: both the controls owned by the handler and those inherited
+ * from other handlers.
+ * @_lock: Default for "lock".
+ * @lock: Lock to control access to this handler and its controls.
+ * May be replaced by the user right after init.
+ * @ctrls: The list of controls owned by this handler.
+ * @ctrl_refs: The list of control references.
+ * @cached: The last found control reference. It is common that the same
+ * control is needed multiple times, so this is a simple
+ * optimization.
+ * @buckets: Buckets for the hashing. Allows for quick control lookup.
+ * @notify: A notify callback that is called whenever the control changes value.
+ * Note that the handler's lock is held when the notify function
+ * is called!
+ * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
+ * @nr_of_buckets: Total number of buckets in the array.
+ * @error: The error code of the first failed control addition.
+ */
struct v4l2_ctrl_handler {
struct mutex _lock;
struct mutex *lock;
@@ -259,32 +272,35 @@ struct v4l2_ctrl_handler {
int error;
};
-/** struct v4l2_ctrl_config - Control configuration structure.
- * @ops: The control ops.
- * @type_ops: The control type ops. Only needed for compound controls.
- * @id: The control ID.
- * @name: The control name.
- * @type: The control type.
- * @min: The control's minimum value.
- * @max: The control's maximum value.
- * @step: The control's step value for non-menu controls.
- * @def: The control's default value.
- * @dims: The size of each dimension.
- * @elem_size: The size in bytes of the control.
- * @flags: The control's flags.
- * @menu_skip_mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 64 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a bit array.
- * @qmenu: A const char * array for all menu items. Array entries that are
- * empty strings ("") correspond to non-existing menu items (this
- * is in addition to the menu_skip_mask above). The last entry
- * must be NULL.
- * @is_private: If set, then this control is private to its handler and it
- * will not be added to any other handlers.
- */
+/**
+ * struct v4l2_ctrl_config - Control configuration structure.
+ * @ops: The control ops.
+ * @type_ops: The control type ops. Only needed for compound controls.
+ * @id: The control ID.
+ * @name: The control name.
+ * @type: The control type.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value for non-menu controls.
+ * @def: The control's default value.
+ * @dims: The size of each dimension.
+ * @elem_size: The size in bytes of the control.
+ * @flags: The control's flags.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @qmenu: A const char * array for all menu items. Array entries that are
+ * empty strings ("") correspond to non-existing menu items (this
+ * is in addition to the menu_skip_mask above). The last entry
+ * must be NULL.
+ * @qmenu_int: A const s64 integer array for all menu items of the type
+ * V4L2_CTRL_TYPE_INTEGER_MENU.
+ * @is_private: If set, then this control is private to its handler and it
+ * will not be added to any other handlers.
+ */
struct v4l2_ctrl_config {
const struct v4l2_ctrl_ops *ops;
const struct v4l2_ctrl_type_ops *type_ops;
@@ -304,42 +320,44 @@ struct v4l2_ctrl_config {
unsigned int is_private:1;
};
-/** v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
- *
- * This works for all standard V4L2 controls.
- * For non-standard controls it will only fill in the given arguments
- * and @name will be NULL.
- *
- * This function will overwrite the contents of @name, @type and @flags.
- * The contents of @min, @max, @step and @def may be modified depending on
- * the type.
- *
- * Do not use in drivers! It is used internally for backwards compatibility
- * control handling only. Once all drivers are converted to use the new
- * control framework this function will no longer be exported.
- */
+/*
+ * v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
+ *
+ * This works for all standard V4L2 controls.
+ * For non-standard controls it will only fill in the given arguments
+ * and @name will be NULL.
+ *
+ * This function will overwrite the contents of @name, @type and @flags.
+ * The contents of @min, @max, @step and @def may be modified depending on
+ * the type.
+ *
+ * Do not use in drivers! It is used internally for backwards compatibility
+ * control handling only. Once all drivers are converted to use the new
+ * control framework this function will no longer be exported.
+ */
void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags);
-/** v4l2_ctrl_handler_init_class() - Initialize the control handler.
- * @hdl: The control handler.
- * @nr_of_controls_hint: A hint of how many controls this handler is
- * expected to refer to. This is the total number, so including
- * any inherited controls. It doesn't have to be precise, but if
- * it is way off, then you either waste memory (too many buckets
- * are allocated) or the control lookup becomes slower (not enough
- * buckets are allocated, so there are more slow list lookups).
- * It will always work, though.
- * @key: Used by the lock validator if CONFIG_LOCKDEP is set.
- * @name: Used by the lock validator if CONFIG_LOCKDEP is set.
- *
- * Returns an error if the buckets could not be allocated. This error will
- * also be stored in @hdl->error.
- *
- * Never use this call directly, always use the v4l2_ctrl_handler_init
- * macro that hides the @key and @name arguments.
- */
+/**
+ * v4l2_ctrl_handler_init_class() - Initialize the control handler.
+ * @hdl: The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ * expected to refer to. This is the total number, so including
+ * any inherited controls. It doesn't have to be precise, but if
+ * it is way off, then you either waste memory (too many buckets
+ * are allocated) or the control lookup becomes slower (not enough
+ * buckets are allocated, so there are more slow list lookups).
+ * It will always work, though.
+ * @key: Used by the lock validator if CONFIG_LOCKDEP is set.
+ * @name: Used by the lock validator if CONFIG_LOCKDEP is set.
+ *
+ * Returns an error if the buckets could not be allocated. This error will
+ * also be stored in @hdl->error.
+ *
+ * Never use this call directly, always use the v4l2_ctrl_handler_init
+ * macro that hides the @key and @name arguments.
+ */
int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
unsigned nr_of_controls_hint,
struct lock_class_key *key, const char *name);
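/*
 * Illustrative sketch, not part of this patch: the usual initialization
 * pattern built on the v4l2_ctrl_handler_init() macro.  "foo_init_controls"
 * and the hint of 4 controls are assumptions; any failed control addition is
 * latched into hdl->error, so a single check at the end is enough.
 */
#include <media/v4l2-ctrls.h>

static int foo_init_controls(struct v4l2_ctrl_handler *hdl)
{
	/* Hint of 4 controls: only sizes the lookup buckets, need not be exact. */
	v4l2_ctrl_handler_init(hdl, 4);

	/* ... v4l2_ctrl_new_std()/v4l2_ctrl_new_custom() calls would go here ... */

	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	return 0;
}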
@@ -361,289 +379,326 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL)
#endif
-/** v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
- * the control list.
- * @hdl: The control handler.
- *
- * Does nothing if @hdl == NULL.
- */
+/**
+ * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
+ * the control list.
+ * @hdl: The control handler.
+ *
+ * Does nothing if @hdl == NULL.
+ */
void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
-/** v4l2_ctrl_lock() - Helper function to lock the handler
- * associated with the control.
- * @ctrl: The control to lock.
- */
+/**
+ * v4l2_ctrl_lock() - Helper function to lock the handler
+ * associated with the control.
+ * @ctrl: The control to lock.
+ */
static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
{
mutex_lock(ctrl->handler->lock);
}
-/** v4l2_ctrl_unlock() - Helper function to unlock the handler
- * associated with the control.
- * @ctrl: The control to unlock.
- */
+/**
+ * v4l2_ctrl_unlock() - Helper function to unlock the handler
+ * associated with the control.
+ * @ctrl: The control to unlock.
+ */
static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
{
mutex_unlock(ctrl->handler->lock);
}
-/** v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
- * to the handler to initialize the hardware to the current control values.
- * @hdl: The control handler.
- *
- * Button controls will be skipped, as are read-only controls.
- *
- * If @hdl == NULL, then this just returns 0.
- */
+/**
+ * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
+ * to the handler to initialize the hardware to the current control values.
+ * @hdl: The control handler.
+ *
+ * Button controls will be skipped, as are read-only controls.
+ *
+ * If @hdl == NULL, then this just returns 0.
+ */
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl);
-/** v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
- * @hdl: The control handler.
- * @prefix: The prefix to use when logging the control values. If the
- * prefix does not end with a space, then ": " will be added
- * after the prefix. If @prefix == NULL, then no prefix will be
- * used.
- *
- * For use with VIDIOC_LOG_STATUS.
- *
- * Does nothing if @hdl == NULL.
- */
+/**
+ * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
+ * @hdl: The control handler.
+ * @prefix: The prefix to use when logging the control values. If the
+ * prefix does not end with a space, then ": " will be added
+ * after the prefix. If @prefix == NULL, then no prefix will be
+ * used.
+ *
+ * For use with VIDIOC_LOG_STATUS.
+ *
+ * Does nothing if @hdl == NULL.
+ */
void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
const char *prefix);
-/** v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
- * control.
- * @hdl: The control handler.
- * @cfg: The control's configuration data.
- * @priv: The control's driver-specific private data.
- *
- * If the &v4l2_ctrl struct could not be allocated then NULL is returned
- * and @hdl->error is set to the error code (if it wasn't set already).
- */
+/**
+ * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
+ * control.
+ * @hdl: The control handler.
+ * @cfg: The control's configuration data.
+ * @priv: The control's driver-specific private data.
+ *
+ * If the &v4l2_ctrl struct could not be allocated then NULL is returned
+ * and @hdl->error is set to the error code (if it wasn't set already).
+ */
struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_config *cfg, void *priv);
-/** v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @min: The control's minimum value.
- * @max: The control's maximum value.
- * @step: The control's step value
- * @def: The control's default value.
- *
- * If the &v4l2_ctrl struct could not be allocated, or the control
- * ID is not known, then NULL is returned and @hdl->error is set to the
- * appropriate error code (if it wasn't set already).
- *
- * If @id refers to a menu control, then this function will return NULL.
- *
- * Use v4l2_ctrl_new_std_menu() when adding menu controls.
- */
+/**
+ * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * If the &v4l2_ctrl struct could not be allocated, or the control
+ * ID is not known, then NULL is returned and @hdl->error is set to the
+ * appropriate error code (if it wasn't set already).
+ *
+ * If @id refers to a menu control, then this function will return NULL.
+ *
+ * Use v4l2_ctrl_new_std_menu() when adding menu controls.
+ */
struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s64 min, s64 max, u64 step, s64 def);
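/*
 * Illustrative sketch, not part of this patch: creating a standard non-menu
 * control and then programming the defaults into the hardware.  The range
 * 0..255, step 1 and default 128 are assumptions for the example.
 */
static int foo_add_brightness(struct v4l2_ctrl_handler *hdl,
			      const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_ctrl *ctrl =
		v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);

	if (!ctrl)
		return hdl->error;	/* allocation failure or unknown/menu ID */

	/* Call s_ctrl for all controls to write the default values. */
	return v4l2_ctrl_handler_setup(hdl);
}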
-/** v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @max: The control's maximum value.
- * @mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 64 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a bit array.
- * @def: The control's default value.
- *
- * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
- * determines which menu items are to be skipped.
- *
- * If @id refers to a non-menu control, then this function will return NULL.
- */
+/**
+ * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @def: The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
+ * determines which menu items are to be skipped.
+ *
+ * If @id refers to a non-menu control, then this function will return NULL.
+ */
struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, u8 max, u64 mask, u8 def);
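/*
 * Illustrative sketch, not part of this patch: a standard menu control with a
 * skip mask.  Setting bit V4L2_CID_POWER_LINE_FREQUENCY_50HZ hides that menu
 * item while keeping the remaining entries; the chosen control and skipped
 * item are assumptions for the example.
 */
static void foo_add_power_line_ctrl(struct v4l2_ctrl_handler *hdl,
				    const struct v4l2_ctrl_ops *ops)
{
	u64 skip_50hz = 1ULL << V4L2_CID_POWER_LINE_FREQUENCY_50HZ;

	v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
			       V4L2_CID_POWER_LINE_FREQUENCY_60HZ, skip_50hz,
			       V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
}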
-/** v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
- * with driver specific menu.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @max: The control's maximum value.
- * @mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 64 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a bit array.
- * @def: The control's default value.
- * @qmenu: The new menu.
- *
- * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
- * menu of this control.
- *
- */
+/**
+ * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
+ * with driver specific menu.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @def: The control's default value.
+ * @qmenu: The new menu.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
+ * menu of this control.
+ *
+ */
struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops, u32 id, u8 max,
u64 mask, u8 def, const char * const *qmenu);
-/** v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @max: The control's maximum value.
- * @def: The control's default value.
- * @qmenu_int: The control's menu entries.
- *
- * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionaly
- * takes as an argument an array of integers determining the menu items.
- *
- * If @id refers to a non-integer-menu control, then this function will return NULL.
- */
+/**
+ * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @def: The control's default value.
+ * @qmenu_int: The control's menu entries.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally
+ * takes as an argument an array of integers determining the menu items.
+ *
+ * If @id refers to a non-integer-menu control, then this function will return NULL.
+ */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, u8 max, u8 def, const s64 *qmenu_int);
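/*
 * Illustrative sketch, not part of this patch: an integer menu control built
 * from a driver-specific s64 array.  The link frequencies and the read-only
 * flag are assumptions for the example.
 */
static const s64 foo_link_freqs[] = { 148500000, 297000000 };

static void foo_add_link_freq_ctrl(struct v4l2_ctrl_handler *hdl,
				   const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_ctrl *ctrl;

	/* @max is the last valid index into the menu array. */
	ctrl = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
				      ARRAY_SIZE(foo_link_freqs) - 1, 0,
				      foo_link_freqs);
	if (ctrl)
		ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
}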
-/** v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
- * @hdl: The control handler.
- * @ctrl: The control to add.
- *
- * It will return NULL if it was unable to add the control reference.
- * If the control already belonged to the handler, then it will do
- * nothing and just return @ctrl.
- */
+/**
+ * v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
+ * @hdl: The control handler.
+ * @ctrl: The control to add.
+ *
+ * It will return NULL if it was unable to add the control reference.
+ * If the control already belonged to the handler, then it will do
+ * nothing and just return @ctrl.
+ */
struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl);
-/** v4l2_ctrl_add_handler() - Add all controls from handler @add to
- * handler @hdl.
- * @hdl: The control handler.
- * @add: The control handler whose controls you want to add to
- * the @hdl control handler.
- * @filter: This function will filter which controls should be added.
- *
- * Does nothing if either of the two handlers is a NULL pointer.
- * If @filter is NULL, then all controls are added. Otherwise only those
- * controls for which @filter returns true will be added.
- * In case of an error @hdl->error will be set to the error code (if it
- * wasn't set already).
- */
+/**
+ * v4l2_ctrl_add_handler() - Add all controls from handler @add to
+ * handler @hdl.
+ * @hdl: The control handler.
+ * @add: The control handler whose controls you want to add to
+ * the @hdl control handler.
+ * @filter: This function will filter which controls should be added.
+ *
+ * Does nothing if either of the two handlers is a NULL pointer.
+ * If @filter is NULL, then all controls are added. Otherwise only those
+ * controls for which @filter returns true will be added.
+ * In case of an error @hdl->error will be set to the error code (if it
+ * wasn't set already).
+ */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
bool (*filter)(const struct v4l2_ctrl *ctrl));
-/** v4l2_ctrl_radio_filter() - Standard filter for radio controls.
- * @ctrl: The control that is filtered.
- *
- * This will return true for any controls that are valid for radio device
- * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
- * transmitter class controls.
- *
- * This function is to be used with v4l2_ctrl_add_handler().
- */
+/**
+ * v4l2_ctrl_radio_filter() - Standard filter for radio controls.
+ * @ctrl: The control that is filtered.
+ *
+ * This will return true for any controls that are valid for radio device
+ * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
+ * transmitter class controls.
+ *
+ * This function is to be used with v4l2_ctrl_add_handler().
+ */
bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl);
-/** v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
- * @ncontrols: The number of controls in this cluster.
- * @controls: The cluster control array of size @ncontrols.
- */
+/**
+ * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
+ * @ncontrols: The number of controls in this cluster.
+ * @controls: The cluster control array of size @ncontrols.
+ */
void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls);
-/** v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
- * that cluster and set it up for autofoo/foo-type handling.
- * @ncontrols: The number of controls in this cluster.
- * @controls: The cluster control array of size @ncontrols. The first control
- * must be the 'auto' control (e.g. autogain, autoexposure, etc.)
- * @manual_val: The value for the first control in the cluster that equals the
- * manual setting.
- * @set_volatile: If true, then all controls except the first auto control will
- * be volatile.
- *
- * Use for control groups where one control selects some automatic feature and
- * the other controls are only active whenever the automatic feature is turned
- * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
- * red and blue balance, etc.
- *
- * The behavior of such controls is as follows:
- *
- * When the autofoo control is set to automatic, then any manual controls
- * are set to inactive and any reads will call g_volatile_ctrl (if the control
- * was marked volatile).
- *
- * When the autofoo control is set to manual, then any manual controls will
- * be marked active, and any reads will just return the current value without
- * going through g_volatile_ctrl.
- *
- * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
- * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
- * if autofoo is in auto mode.
- */
+/**
+ * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
+ * that cluster and set it up for autofoo/foo-type handling.
+ * @ncontrols: The number of controls in this cluster.
+ * @controls: The cluster control array of size @ncontrols. The first control
+ * must be the 'auto' control (e.g. autogain, autoexposure, etc.)
+ * @manual_val: The value for the first control in the cluster that equals the
+ * manual setting.
+ * @set_volatile: If true, then all controls except the first auto control will
+ * be volatile.
+ *
+ * Use for control groups where one control selects some automatic feature and
+ * the other controls are only active whenever the automatic feature is turned
+ * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
+ * red and blue balance, etc.
+ *
+ * The behavior of such controls is as follows:
+ *
+ * When the autofoo control is set to automatic, then any manual controls
+ * are set to inactive and any reads will call g_volatile_ctrl (if the control
+ * was marked volatile).
+ *
+ * When the autofoo control is set to manual, then any manual controls will
+ * be marked active, and any reads will just return the current value without
+ * going through g_volatile_ctrl.
+ *
+ * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
+ * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
+ * if autofoo is in auto mode.
+ */
void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
u8 manual_val, bool set_volatile);
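/*
 * Illustrative sketch, not part of this patch: the classic autogain/gain
 * cluster described above.  @manual_val is 0 because AUTOGAIN == 0 means
 * manual mode, and @set_volatile makes GAIN readable while autogain runs.
 * The ranges and defaults are assumptions.
 */
static void foo_setup_gain_cluster(struct v4l2_ctrl_handler *hdl,
				   const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_ctrl *cluster[2];

	/* The 'auto' control must be the first entry in the cluster array. */
	cluster[0] = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	cluster[1] = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 255, 1, 32);

	v4l2_ctrl_auto_cluster(2, cluster, 0, true);
}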
-/** v4l2_ctrl_find() - Find a control with the given ID.
- * @hdl: The control handler.
- * @id: The control ID to find.
- *
- * If @hdl == NULL this will return NULL as well. Will lock the handler so
- * do not use from inside &v4l2_ctrl_ops.
- */
+/**
+ * v4l2_ctrl_find() - Find a control with the given ID.
+ * @hdl: The control handler.
+ * @id: The control ID to find.
+ *
+ * If @hdl == NULL this will return NULL as well. Will lock the handler so
+ * do not use from inside &v4l2_ctrl_ops.
+ */
struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
-/** v4l2_ctrl_activate() - Make the control active or inactive.
- * @ctrl: The control to (de)activate.
- * @active: True if the control should become active.
- *
- * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
- * Does nothing if @ctrl == NULL.
- * This will usually be called from within the s_ctrl op.
- * The V4L2_EVENT_CTRL event will be generated afterwards.
- *
- * This function assumes that the control handler is locked.
- */
+/**
+ * v4l2_ctrl_activate() - Make the control active or inactive.
+ * @ctrl: The control to (de)activate.
+ * @active: True if the control should become active.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * This will usually be called from within the s_ctrl op.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ *
+ * This function assumes that the control handler is locked.
+ */
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
-/** v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
- * @ctrl: The control to (de)activate.
- * @grabbed: True if the control should become grabbed.
- *
- * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
- * Does nothing if @ctrl == NULL.
- * The V4L2_EVENT_CTRL event will be generated afterwards.
- * This will usually be called when starting or stopping streaming in the
- * driver.
- *
- * This function assumes that the control handler is not locked and will
- * take the lock itself.
- */
+/**
+ * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
+ * @ctrl: The control to (de)activate.
+ * @grabbed: True if the control should become grabbed.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ * This will usually be called when starting or stopping streaming in the
+ * driver.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
-/** __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() */
+/**
+ * __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
+ *
+ * @ctrl: The control to update.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * As the unlocked variant, this function assumes that the caller already
+ * holds the control handler lock (e.g. when called from &v4l2_ctrl_ops).
+ */
int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def);
-/** v4l2_ctrl_modify_range() - Update the range of a control.
- * @ctrl: The control to update.
- * @min: The control's minimum value.
- * @max: The control's maximum value.
- * @step: The control's step value
- * @def: The control's default value.
- *
- * Update the range of a control on the fly. This works for control types
- * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
- * @step value is interpreted as a menu_skip_mask.
- *
- * An error is returned if one of the range arguments is invalid for this
- * control type.
- *
- * This function assumes that the control handler is not locked and will
- * take the lock itself.
- */
+/**
+ * v4l2_ctrl_modify_range() - Update the range of a control.
+ * @ctrl: The control to update.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def)
{
@@ -656,21 +711,23 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
return rval;
}
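/*
 * Illustrative sketch, not part of this patch: propagating a new maximum to an
 * existing control after e.g. the frame timing changed.  The "exposure"
 * control and the clamping of the default value are assumptions.
 */
static int foo_update_exposure_range(struct v4l2_ctrl *exposure, s64 new_max)
{
	/* Keep the default inside the new range. */
	s64 def = min(exposure->default_value, new_max);

	return v4l2_ctrl_modify_range(exposure, 0, new_max, 1, def);
}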
-/** v4l2_ctrl_notify() - Function to set a notify callback for a control.
- * @ctrl: The control.
- * @notify: The callback function.
- * @priv: The callback private handle, passed as argument to the callback.
- *
- * This function sets a callback function for the control. If @ctrl is NULL,
- * then it will do nothing. If @notify is NULL, then the notify callback will
- * be removed.
- *
- * There can be only one notify. If another already exists, then a WARN_ON
- * will be issued and the function will do nothing.
- */
+/**
+ * v4l2_ctrl_notify() - Function to set a notify callback for a control.
+ * @ctrl: The control.
+ * @notify: The callback function.
+ * @priv: The callback private handle, passed as argument to the callback.
+ *
+ * This function sets a callback function for the control. If @ctrl is NULL,
+ * then it will do nothing. If @notify is NULL, then the notify callback will
+ * be removed.
+ *
+ * There can be only one notify. If another already exists, then a WARN_ON
+ * will be issued and the function will do nothing.
+ */
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
-/** v4l2_ctrl_get_name() - Get the name of the control
+/**
+ * v4l2_ctrl_get_name() - Get the name of the control
* @id: The control ID.
*
* This function returns the name of the given control ID or NULL if it isn't
@@ -678,7 +735,8 @@ void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void
*/
const char *v4l2_ctrl_get_name(u32 id);
-/** v4l2_ctrl_get_menu() - Get the menu string array of the control
+/**
+ * v4l2_ctrl_get_menu() - Get the menu string array of the control
* @id: The control ID.
*
* This function returns the NULL-terminated menu string array name of the
@@ -686,7 +744,8 @@ const char *v4l2_ctrl_get_name(u32 id);
*/
const char * const *v4l2_ctrl_get_menu(u32 id);
-/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
+/**
+ * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
* @id: The control ID.
* @len: The size of the integer array.
*
@@ -695,29 +754,41 @@ const char * const *v4l2_ctrl_get_menu(u32 id);
*/
const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
-/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
- * @ctrl: The control.
- *
- * This returns the control's value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for integer type controls only.
- */
+/**
+ * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
+ * @ctrl: The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
-/** __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). */
+/**
+ * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl().
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. As the unlocked variant, it assumes the control's handler is
+ * already locked, so it can be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val);
+
/** v4l2_ctrl_s_ctrl() - Helper function to set the control's value from within a driver.
- * @ctrl: The control.
- * @val: The new value.
- *
- * This set the control's new value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for integer type controls only.
- */
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
int rval;
@@ -729,30 +800,45 @@ static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
return rval;
}
-/** v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value from within a driver.
- * @ctrl: The control.
- *
- * This returns the control's value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for 64-bit integer type controls only.
- */
+/**
+ * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value
+ * from within a driver.
+ * @ctrl: The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl);
-/** __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). */
+/**
+ * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64().
+ *
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. As the unlocked variant, it assumes the control's handler is
+ * already locked, so it can be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val);
-/** v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value from within a driver.
- * @ctrl: The control.
- * @val: The new value.
- *
- * This set the control's new value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for 64-bit integer type controls only.
- */
+/** v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
{
int rval;
@@ -764,19 +850,31 @@ static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
return rval;
}
-/** __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). */
+/** __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string().
+ *
+ * @ctrl: The control.
+ * @s: The new string.
+ *
+ * This sets the control's new string safely by going through the control
+ * framework. As the unlocked variant, it assumes the control's handler is
+ * already locked, so it can be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for string type controls only.
+ */
int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s);
-/** v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value from within a driver.
- * @ctrl: The control.
- * @s: The new string.
- *
- * This set the control's new string safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for string type controls only.
- */
+/** v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @s: The new string.
+ *
+ * This sets the control's new string safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for string type controls only.
+ */
static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
{
int rval;
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index eecd3102a618..b6130b50a0f1 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -23,11 +23,14 @@
#include <linux/videodev2.h>
-/** v4l2_dv_timings_presets: list of all dv_timings presets.
+/**
+ * v4l2_dv_timings_presets: list of all dv_timings presets.
*/
extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
-/** v4l2_check_dv_timings_fnc - timings check callback
+/**
+ * v4l2_check_dv_timings_fnc - timings check callback
+ *
* @t: the v4l2_dv_timings struct.
* @handle: a handle from the driver.
*
@@ -35,86 +38,101 @@ extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
*/
typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
-/** v4l2_valid_dv_timings() - are these timings valid?
- * @t: the v4l2_dv_timings struct.
- * @cap: the v4l2_dv_timings_cap capabilities.
- * @fnc: callback to check if this timing is OK. May be NULL.
- * @fnc_handle: a handle that is passed on to @fnc.
- *
- * Returns true if the given dv_timings struct is supported by the
- * hardware capabilities and the callback function (if non-NULL), returns
- * false otherwise.
- */
+/**
+ * v4l2_valid_dv_timings() - are these timings valid?
+ *
+ * @t: the v4l2_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * Returns true if the given dv_timings struct is supported by the
+ * hardware capabilities and the callback function (if non-NULL), returns
+ * false otherwise.
+ */
bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle);
-/** v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV timings based on capabilities
- * @t: the v4l2_enum_dv_timings struct.
- * @cap: the v4l2_dv_timings_cap capabilities.
- * @fnc: callback to check if this timing is OK. May be NULL.
- * @fnc_handle: a handle that is passed on to @fnc.
- *
- * This enumerates dv_timings using the full list of possible CEA-861 and DMT
- * timings, filtering out any timings that are not supported based on the
- * hardware capabilities and the callback function (if non-NULL).
- *
- * If a valid timing for the given index is found, it will fill in @t and
- * return 0, otherwise it returns -EINVAL.
- */
+/**
+ * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV
+ * timings based on capabilities
+ *
+ * @t: the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This enumerates dv_timings using the full list of possible CEA-861 and DMT
+ * timings, filtering out any timings that are not supported based on the
+ * hardware capabilities and the callback function (if non-NULL).
+ *
+ * If a valid timing for the given index is found, it will fill in @t and
+ * return 0, otherwise it returns -EINVAL.
+ */
int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle);
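/*
 * Illustrative sketch, not part of this patch: wiring the helper into a
 * subdevice enum_dv_timings op.  "foo_timings_cap" is an assumed
 * driver-provided v4l2_dv_timings_cap table; no per-timing callback is
 * needed here, so @fnc and @fnc_handle are NULL.
 */
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-subdev.h>

static const struct v4l2_dv_timings_cap foo_timings_cap;	/* filled in elsewhere */

static int foo_enum_dv_timings(struct v4l2_subdev *sd,
			       struct v4l2_enum_dv_timings *timings)
{
	return v4l2_enum_dv_timings_cap(timings, &foo_timings_cap, NULL, NULL);
}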
-/** v4l2_find_dv_timings_cap() - Find the closest timings struct
- * @t: the v4l2_enum_dv_timings struct.
- * @cap: the v4l2_dv_timings_cap capabilities.
- * @pclock_delta: maximum delta between t->pixelclock and the timing struct
- * under consideration.
- * @fnc: callback to check if a given timings struct is OK. May be NULL.
- * @fnc_handle: a handle that is passed on to @fnc.
- *
- * This function tries to map the given timings to an entry in the
- * full list of possible CEA-861 and DMT timings, filtering out any timings
- * that are not supported based on the hardware capabilities and the callback
- * function (if non-NULL).
- *
- * On success it will fill in @t with the found timings and it returns true.
- * On failure it will return false.
- */
+/**
+ * v4l2_find_dv_timings_cap() - Find the closest timings struct
+ *
+ * @t: the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @pclock_delta: maximum delta between t->pixelclock and the timing struct
+ * under consideration.
+ * @fnc: callback to check if a given timings struct is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This function tries to map the given timings to an entry in the
+ * full list of possible CEA-861 and DMT timings, filtering out any timings
+ * that are not supported based on the hardware capabilities and the callback
+ * function (if non-NULL).
+ *
+ * On success it will fill in @t with the found timings and it returns true.
+ * On failure it will return false.
+ */
bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
unsigned pclock_delta,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle);
-/** v4l2_match_dv_timings() - do two timings match?
- * @measured: the measured timings data.
- * @standard: the timings according to the standard.
- * @pclock_delta: maximum delta in Hz between standard->pixelclock and
- * the measured timings.
- *
- * Returns true if the two timings match, returns false otherwise.
- */
+/**
+ * v4l2_match_dv_timings() - do two timings match?
+ *
+ * @measured: the measured timings data.
+ * @standard: the timings according to the standard.
+ * @pclock_delta: maximum delta in Hz between standard->pixelclock and
+ * the measured timings.
+ *
+ * Returns true if the two timings match, returns false otherwise.
+ */
bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured,
const struct v4l2_dv_timings *standard,
unsigned pclock_delta);
-/** v4l2_print_dv_timings() - log the contents of a dv_timings struct
- * @dev_prefix:device prefix for each log line.
- * @prefix: additional prefix for each log line, may be NULL.
- * @t: the timings data.
- * @detailed: if true, give a detailed log.
- */
+/**
+ * v4l2_print_dv_timings() - log the contents of a dv_timings struct
+ * @dev_prefix: device prefix for each log line.
+ * @prefix: additional prefix for each log line, may be NULL.
+ * @t: the timings data.
+ * @detailed: if true, give a detailed log.
+ */
void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
const struct v4l2_dv_timings *t, bool detailed);
-/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+/**
+ * v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ *
* @frame_height - the total height of the frame (including blanking) in lines.
* @hfreq - the horizontal frequency in Hz.
* @vsync - the height of the vertical sync in lines.
+ * @active_width - active width of the image (does not include blanking). This
+ * information is needed only for version 2 of CVT reduced blanking; in all
+ * other cases this parameter has no effect on the timings.
* @polarities - the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced - if this flag is true, it indicates interlaced format
@@ -125,9 +143,12 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
* in with the found CVT timings.
*/
bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt);
+ unsigned active_width, u32 polarities, bool interlaced,
+ struct v4l2_dv_timings *fmt);
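
A hedged sketch of how the extended prototype might be called once a decoder has measured the incoming signal; the measured values (1125 total lines, 67.5 kHz, 5-line vsync, 1920 active pixels) are placeholders, not real hardware data.

	static void mydrv_check_cvt(struct v4l2_subdev *sd)
	{
		struct v4l2_dv_timings timings;

		/* illustrative measurements only */
		if (v4l2_detect_cvt(1125, 67500, 5, 1920,
				    V4L2_DV_HSYNC_POS_POL, false, &timings))
			v4l2_print_dv_timings(sd->name, "cvt: ", &timings, true);
	}
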
-/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+/**
+ * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ *
* @frame_height - the total height of the frame (including blanking) in lines.
* @hfreq - the horizontal frequency in Hz.
* @vsync - the height of the vertical sync in lines.
@@ -149,8 +170,10 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
u32 polarities, bool interlaced, struct v4l2_fract aspect,
struct v4l2_dv_timings *fmt);
-/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+/**
+ * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
* 0x15 and 0x16 from the EDID.
+ *
* @hor_landscape - byte 0x15 from the EDID.
* @vert_portrait - byte 0x16 from the EDID.
*
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 1ab9045e52e3..9792f906423b 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -68,10 +68,11 @@ struct v4l2_subdev;
struct v4l2_subscribed_event;
struct video_device;
-/** struct v4l2_kevent - Internal kernel event struct.
- * @list: List node for the v4l2_fh->available list.
- * @sev: Pointer to parent v4l2_subscribed_event.
- * @event: The event itself.
+/**
+ * struct v4l2_kevent - Internal kernel event struct.
+ * @list: List node for the v4l2_fh->available list.
+ * @sev: Pointer to parent v4l2_subscribed_event.
+ * @event: The event itself.
*/
struct v4l2_kevent {
struct list_head list;
@@ -80,11 +81,12 @@ struct v4l2_kevent {
};
/** struct v4l2_subscribed_event_ops - Subscribed event operations.
- * @add: Optional callback, called when a new listener is added
- * @del: Optional callback, called when a listener stops listening
- * @replace: Optional callback that can replace event 'old' with event 'new'.
- * @merge: Optional callback that can merge event 'old' into event 'new'.
- */
+ *
+ * @add: Optional callback, called when a new listener is added
+ * @del: Optional callback, called when a listener stops listening
+ * @replace: Optional callback that can replace event 'old' with event 'new'.
+ * @merge: Optional callback that can merge event 'old' into event 'new'.
+ */
struct v4l2_subscribed_event_ops {
int (*add)(struct v4l2_subscribed_event *sev, unsigned elems);
void (*del)(struct v4l2_subscribed_event *sev);
@@ -92,19 +94,20 @@ struct v4l2_subscribed_event_ops {
void (*merge)(const struct v4l2_event *old, struct v4l2_event *new);
};
-/** struct v4l2_subscribed_event - Internal struct representing a subscribed event.
- * @list: List node for the v4l2_fh->subscribed list.
- * @type: Event type.
- * @id: Associated object ID (e.g. control ID). 0 if there isn't any.
- * @flags: Copy of v4l2_event_subscription->flags.
- * @fh: Filehandle that subscribed to this event.
- * @node: List node that hooks into the object's event list (if there is one).
- * @ops: v4l2_subscribed_event_ops
- * @elems: The number of elements in the events array.
- * @first: The index of the events containing the oldest available event.
- * @in_use: The number of queued events.
- * @events: An array of @elems events.
- */
+/**
+ * struct v4l2_subscribed_event - Internal struct representing a subscribed event.
+ * @list: List node for the v4l2_fh->subscribed list.
+ * @type: Event type.
+ * @id: Associated object ID (e.g. control ID). 0 if there isn't any.
+ * @flags: Copy of v4l2_event_subscription->flags.
+ * @fh: Filehandle that subscribed to this event.
+ * @node: List node that hooks into the object's event list (if there is one).
+ * @ops: v4l2_subscribed_event_ops
+ * @elems: The number of elements in the events array.
+ * @first: The index of the events containing the oldest available event.
+ * @in_use: The number of queued events.
+ * @events: An array of @elems events.
+ */
struct v4l2_subscribed_event {
struct list_head list;
u32 type;
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 098236c083b8..3d184ab52274 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -48,13 +48,13 @@ struct v4l2_flash_ops {
/**
* struct v4l2_flash_config - V4L2 Flash sub-device initialization data
* @dev_name: the name of the media entity,
- unique in the system
+ * unique in the system
* @torch_intensity: constraints for the LED in torch mode
* @indicator_intensity: constraints for the indicator LED
* @flash_faults: bitmask of flash faults that the LED flash class
- device can report; corresponding LED_FAULT* bit
- definitions are available in the header file
- <linux/led-class-flash.h>
+ * device can report; corresponding LED_FAULT* bit
+ * definitions are available in the header file
+ * <linux/led-class-flash.h>
* @has_external_strobe: external strobe capability
*/
struct v4l2_flash_config {
@@ -105,7 +105,7 @@ static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
* @fled_cdev: LED flash class device to wrap
* @iled_cdev: LED flash class device representing indicator LED associated
* with fled_cdev, may be NULL
- * @flash_ops: V4L2 Flash device ops
+ * @ops: V4L2 Flash device ops
* @config: initialization data for V4L2 Flash sub-device
*
* Create V4L2 Flash sub-device wrapping given LED subsystem device.
@@ -123,7 +123,7 @@ struct v4l2_flash *v4l2_flash_init(
/**
* v4l2_flash_release - release V4L2 Flash sub-device
- * @flash: the V4L2 Flash sub-device to release
+ * @v4l2_flash: the V4L2 Flash sub-device to release
*
* Release V4L2 Flash sub-device.
*/
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 73069e4c2796..34cc99e093ef 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -65,7 +65,7 @@
V4L2_MBUS_CSI2_CHANNEL_2 | V4L2_MBUS_CSI2_CHANNEL_3)
/**
- * v4l2_mbus_type - media bus type
+ * enum v4l2_mbus_type - media bus type
* @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync
* @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can
* also be used for BT.1120
@@ -78,7 +78,7 @@ enum v4l2_mbus_type {
};
/**
- * v4l2_mbus_config - media bus configuration
+ * struct v4l2_mbus_config - media bus configuration
* @type: in: interface type
* @flags: in / out: configuration flags, depending on @type
*/
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3bbd96da25c9..8849aaba6aa5 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -40,6 +40,10 @@
* v4l2_m2m_job_finish() (as if the transaction ended normally).
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
+ * @lock: optional. Define a driver's own lock callback, instead of using
+ * m2m_ctx->q_lock.
+ * @unlock: optional. Define a driver's own unlock callback, instead of
+ * using m2m_ctx->q_lock.
*/
struct v4l2_m2m_ops {
void (*device_run)(void *priv);
@@ -161,6 +165,8 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
/**
* v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
* use
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -171,6 +177,8 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
* ready for use
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -183,6 +191,8 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
* buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -192,6 +202,8 @@ static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
* ready buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -200,6 +212,8 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -209,6 +223,8 @@ struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -221,6 +237,8 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
* buffers and return it
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -230,6 +248,8 @@ static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
* ready buffers and return it
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 4e18318eb425..b273cf9ac047 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -44,6 +44,7 @@
struct v4l2_device;
struct v4l2_ctrl_handler;
+struct v4l2_event;
struct v4l2_event_subscription;
struct v4l2_fh;
struct v4l2_subdev;
@@ -117,34 +118,67 @@ struct v4l2_subdev_io_pin_config {
u8 strength; /* Pin drive strength */
};
-/*
- s_io_pin_config: configure one or more chip I/O pins for chips that
- multiplex different internal signal pads out to IO pins. This function
- takes a pointer to an array of 'n' pin configuration entries, one for
- each pin being configured. This function could be called at times
- other than just subdevice initialization.
-
- init: initialize the sensor registers to some sort of reasonable default
- values. Do not use for new drivers and should be removed in existing
- drivers.
-
- load_fw: load firmware.
-
- reset: generic reset command. The argument selects which subsystems to
- reset. Passing 0 will always reset the whole chip. Do not use for new
- drivers without discussing this first on the linux-media mailinglist.
- There should be no reason normally to reset a device.
-
- s_gpio: set GPIO pins. Very simple right now, might need to be extended with
- a direction argument if needed.
-
- s_power: puts subdevice in power saving mode (on == 0) or normal operation
- mode (on == 1).
-
- interrupt_service_routine: Called by the bridge chip's interrupt service
- handler, when an interrupt status has be raised due to this subdev,
- so that this subdev can handle the details. It may schedule work to be
- performed later. It must not sleep. *Called from an IRQ context*.
+/**
+ * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs
+ *
+ * @log_status: callback for VIDIOC_LOG_STATUS ioctl handler code.
+ *
+ * @s_io_pin_config: configure one or more chip I/O pins for chips that
+ * multiplex different internal signal pads out to IO pins. This function
+ * takes a pointer to an array of 'n' pin configuration entries, one for
+ * each pin being configured. This function could be called at times
+ * other than just subdevice initialization.
+ *
+ * @init: initialize the sensor registers to some sort of reasonable default
+ * values. Do not use for new drivers and should be removed in existing
+ * drivers.
+ *
+ * @load_fw: load firmware.
+ *
+ * @reset: generic reset command. The argument selects which subsystems to
+ * reset. Passing 0 will always reset the whole chip. Do not use for new
+ * drivers without discussing this first on the linux-media mailinglist.
+ * There should be no reason normally to reset a device.
+ *
+ * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
+ * a direction argument if needed.
+ *
+ * @queryctrl: callback for VIDIOC_QUERYCTRL ioctl handler code.
+ *
+ * @g_ctrl: callback for VIDIOC_G_CTRL ioctl handler code.
+ *
+ * @s_ctrl: callback for VIDIOC_S_CTRL ioctl handler code.
+ *
+ * @g_ext_ctrls: callback for VIDIOC_G_EXT_CTRLS ioctl handler code.
+ *
+ * @s_ext_ctrls: callback for VIDIOC_S_EXT_CTRLS ioctl handler code.
+ *
+ * @try_ext_ctrls: callback for VIDIOC_TRY_EXT_CTRLS ioctl handler code.
+ *
+ * @querymenu: callback for VIDIOC_QUERYMENU ioctl handler code.
+ *
+ * @ioctl: called at the end of the ioctl() syscall handler at the V4L2 core.
+ *	Used to provide support for private ioctls used by the driver.
+ *
+ * @compat_ioctl32: called when a 32-bit application uses a 64-bit kernel,
+ *	in order to fix up data passed from/to userspace.
+ *
+ * @g_register: callback for VIDIOC_DBG_G_REGISTER ioctl handler code.
+ *
+ * @s_register: callback for VIDIOC_DBG_S_REGISTER ioctl handler code.
+ *
+ * @s_power: puts subdevice in power saving mode (on == 0) or normal operation
+ * mode (on == 1).
+ *
+ * @interrupt_service_routine: Called by the bridge chip's interrupt service
+ *	handler, when an interrupt status has been raised due to this subdev,
+ * so that this subdev can handle the details. It may schedule work to be
+ * performed later. It must not sleep. *Called from an IRQ context*.
+ *
+ * @subscribe_event: used by drivers to request that the control framework
+ *	notify them when the value of a control changes.
+ *
+ * @unsubscribe_event: remove event subscription from the control framework.
*/
struct v4l2_subdev_core_ops {
int (*log_status)(struct v4l2_subdev *sd);
@@ -179,18 +213,32 @@ struct v4l2_subdev_core_ops {
struct v4l2_event_subscription *sub);
};
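
Illustrative only: a subdev driver normally fills in just the core ops it implements and leaves the rest NULL; mydrv_log_status and mydrv_s_power are hypothetical driver functions, while the two event helpers are the existing V4L2 control/event framework routines.

	static const struct v4l2_subdev_core_ops mydrv_core_ops = {
		.log_status		= mydrv_log_status,
		.s_power		= mydrv_s_power,
		.subscribe_event	= v4l2_ctrl_subdev_subscribe_event,
		.unsubscribe_event	= v4l2_event_subdev_unsubscribe,
	};
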
-/* s_radio: v4l device was opened in radio mode.
-
- g_frequency: freq->type must be filled in. Normally done by video_ioctl2
- or the bridge driver.
-
- g_tuner:
- s_tuner: vt->type must be filled in. Normally done by video_ioctl2 or the
- bridge driver.
-
- s_type_addr: sets tuner type and its I2C addr.
-
- s_config: sets tda9887 specific stuff, like port1, port2 and qss
+/**
+ * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened in radio mode.
+ *
+ * @s_radio: callback that switches the tuner to radio mode. Drivers should
+ *	call it before using other tuner ops in radio mode.
+ *
+ * @s_frequency: callback for VIDIOC_S_FREQUENCY ioctl handler code.
+ *
+ * @g_frequency: callback for VIDIOC_G_FREQUENCY ioctl handler code.
+ * freq->type must be filled in. Normally done by video_ioctl2
+ * or the bridge driver.
+ *
+ * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS ioctl handler code.
+ *
+ * @g_tuner: callback for VIDIOC_G_TUNER ioctl handler code.
+ *
+ * @s_tuner: callback for VIDIOC_S_TUNER ioctl handler code. vt->type must be
+ * filled in. Normally done by video_ioctl2 or the
+ * bridge driver.
+ *
+ * @g_modulator: callback for VIDIOC_G_MODULATOR ioctl handler code.
+ *
+ * @s_modulator: callback for VIDIOC_S_MODULATOR ioctl handler code.
+ *
+ * @s_type_addr: sets tuner type and its I2C addr.
+ *
+ * @s_config: sets tda9887 specific stuff, like port1, port2 and qss
*/
struct v4l2_subdev_tuner_ops {
int (*s_radio)(struct v4l2_subdev *sd);
@@ -205,25 +253,31 @@ struct v4l2_subdev_tuner_ops {
int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config);
};
-/* s_clock_freq: set the frequency (in Hz) of the audio clock output.
- Used to slave an audio processor to the video decoder, ensuring that
- audio and video remain synchronized. Usual values for the frequency
- are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
- -EINVAL is returned.
-
- s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
- way to select I2S clock used by driving digital audio streams at some
- board designs. Usual values for the frequency are 1024000 and 2048000.
- If the frequency is not supported, then -EINVAL is returned.
-
- s_routing: used to define the input and/or output pins of an audio chip,
- and any additional configuration data.
- Never attempt to use user-level input IDs (e.g. Composite, S-Video,
- Tuner) at this level. An i2c device shouldn't know about whether an
- input pin is connected to a Composite connector, become on another
- board or platform it might be connected to something else entirely.
- The calling driver is responsible for mapping a user-level input to
- the right pins on the i2c device.
+/**
+ * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings
+ *
+ * @s_clock_freq: set the frequency (in Hz) of the audio clock output.
+ * Used to slave an audio processor to the video decoder, ensuring that
+ * audio and video remain synchronized. Usual values for the frequency
+ * are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
+ * -EINVAL is returned.
+ *
+ * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
+ * way to select I2S clock used by driving digital audio streams at some
+ * board designs. Usual values for the frequency are 1024000 and 2048000.
+ * If the frequency is not supported, then -EINVAL is returned.
+ *
+ * @s_routing: used to define the input and/or output pins of an audio chip,
+ * and any additional configuration data.
+ * Never attempt to use user-level input IDs (e.g. Composite, S-Video,
+ * Tuner) at this level. An i2c device shouldn't know about whether an
+ *	input pin is connected to a Composite connector, because on another
+ * board or platform it might be connected to something else entirely.
+ * The calling driver is responsible for mapping a user-level input to
+ * the right pins on the i2c device.
+ *
+ * @s_stream: used to notify the audio code that the stream will start or has
+ *	stopped.
*/
struct v4l2_subdev_audio_ops {
int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
@@ -242,6 +296,7 @@ struct v4l2_subdev_audio_ops {
/**
* struct v4l2_mbus_frame_desc_entry - media bus frame description structure
+ *
* @flags: V4L2_MBUS_FRAME_DESC_FL_* flags
* @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set
* @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB
@@ -265,45 +320,73 @@ struct v4l2_mbus_frame_desc {
unsigned short num_entries;
};
-/*
- s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
- video input devices.
-
- g_std_output: get current standard for video OUTPUT devices. This is ignored
- by video input devices.
-
- g_tvnorms: get v4l2_std_id with all standards supported by the video
- CAPTURE device. This is ignored by video output devices.
-
- g_tvnorms_output: get v4l2_std_id with all standards supported by the video
- OUTPUT device. This is ignored by video capture devices.
-
- s_crystal_freq: sets the frequency of the crystal used to generate the
- clocks in Hz. An extra flags field allows device specific configuration
- regarding clock frequency dividers, etc. If not used, then set flags
- to 0. If the frequency is not supported, then -EINVAL is returned.
-
- g_input_status: get input status. Same as the status field in the v4l2_input
- struct.
-
- s_routing: see s_routing in audio_ops, except this version is for video
- devices.
-
- s_dv_timings(): Set custom dv timings in the sub device. This is used
- when sub device is capable of setting detailed timing information
- in the hardware to generate/detect the video signal.
-
- g_dv_timings(): Get custom dv timings in the sub device.
-
- g_mbus_config: get supported mediabus configurations
-
- s_mbus_config: set a certain mediabus configuration. This operation is added
- for compatibility with soc-camera drivers and should not be used by new
- software.
-
- s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
- can adjust @size to a lower value and must not write more data to the
- buffer starting at @data than the original value of @size.
+/**
+ * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened
+ * in video mode.
+ *
+ * @s_routing: see s_routing in audio_ops, except this version is for video
+ * devices.
+ *
+ * @s_crystal_freq: sets the frequency of the crystal used to generate the
+ * clocks in Hz. An extra flags field allows device specific configuration
+ * regarding clock frequency dividers, etc. If not used, then set flags
+ * to 0. If the frequency is not supported, then -EINVAL is returned.
+ *
+ * @g_std: callback for VIDIOC_G_STD ioctl handler code.
+ *
+ * @s_std: callback for VIDIOC_S_STD ioctl handler code.
+ *
+ * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
+ * video input devices.
+ *
+ * @g_std_output: get current standard for video OUTPUT devices. This is ignored
+ * by video input devices.
+ *
+ * @querystd: callback for VIDIOC_QUERYSTD ioctl handler code.
+ *
+ * @g_tvnorms: get v4l2_std_id with all standards supported by the video
+ * CAPTURE device. This is ignored by video output devices.
+ *
+ * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video
+ * OUTPUT device. This is ignored by video capture devices.
+ *
+ * @g_input_status: get input status. Same as the status field in the v4l2_input
+ * struct.
+ *
+ * @s_stream: used to notify the driver that a video stream will start or has
+ * stopped.
+ *
+ * @cropcap: callback for VIDIOC_CROPCAP ioctl handler code.
+ *
+ * @g_crop: callback for VIDIOC_G_CROP ioctl handler code.
+ *
+ * @s_crop: callback for VIDIOC_S_CROP ioctl handler code.
+ *
+ * @g_parm: callback for VIDIOC_G_PARM ioctl handler code.
+ *
+ * @s_parm: callback for VIDIOC_S_PARM ioctl handler code.
+ *
+ * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL ioctl
+ *	handler code.
+ *
+ * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL ioctl
+ *	handler code.
+ *
+ * @s_dv_timings: Set custom dv timings in the sub device. This is used
+ * when sub device is capable of setting detailed timing information
+ * in the hardware to generate/detect the video signal.
+ *
+ * @g_dv_timings: Get custom dv timings in the sub device.
+ *
+ * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS ioctl handler code.
+ *
+ * @g_mbus_config: get supported mediabus configurations
+ *
+ * @s_mbus_config: set a certain mediabus configuration. This operation is added
+ * for compatibility with soc-camera drivers and should not be used by new
+ * software.
+ *
+ * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
+ * can adjust @size to a lower value and must not write more data to the
+ * buffer starting at @data than the original value of @size.
*/
struct v4l2_subdev_video_ops {
int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -340,34 +423,39 @@ struct v4l2_subdev_video_ops {
unsigned int *size);
};
-/*
- decode_vbi_line: video decoders that support sliced VBI need to implement
- this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
- start of the VBI data that was generated by the decoder. The driver
- then parses the sliced VBI data and sets the other fields in the
- struct accordingly. The pointer p is updated to point to the start of
- the payload which can be copied verbatim into the data field of the
- v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
- type field is set to 0 on return.
-
- s_vbi_data: used to generate VBI signals on a video signal.
- v4l2_sliced_vbi_data is filled with the data packets that should be
- output. Note that if you set the line field to 0, then that VBI signal
- is disabled. If no valid VBI data was found, then the type field is
- set to 0 on return.
-
- g_vbi_data: used to obtain the sliced VBI packet from a readback register.
- Not all video decoders support this. If no data is available because
- the readback register contains invalid or erroneous data -EIO is
- returned. Note that you must fill in the 'id' member and the 'field'
- member (to determine whether CC data from the first or second field
- should be obtained).
-
- s_raw_fmt: setup the video encoder/decoder for raw VBI.
-
- g_sliced_fmt: retrieve the current sliced VBI settings.
-
- s_sliced_fmt: setup the sliced VBI settings.
+/**
+ * struct v4l2_subdev_vbi_ops - Callbacks used when v4l device was opened
+ * in video mode via the vbi device node.
+ *
+ * @decode_vbi_line: video decoders that support sliced VBI need to implement
+ * this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
+ * start of the VBI data that was generated by the decoder. The driver
+ * then parses the sliced VBI data and sets the other fields in the
+ * struct accordingly. The pointer p is updated to point to the start of
+ * the payload which can be copied verbatim into the data field of the
+ * v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
+ * type field is set to 0 on return.
+ *
+ * @s_vbi_data: used to generate VBI signals on a video signal.
+ * v4l2_sliced_vbi_data is filled with the data packets that should be
+ * output. Note that if you set the line field to 0, then that VBI signal
+ * is disabled. If no valid VBI data was found, then the type field is
+ * set to 0 on return.
+ *
+ * @g_vbi_data: used to obtain the sliced VBI packet from a readback register.
+ * Not all video decoders support this. If no data is available because
+ * the readback register contains invalid or erroneous data -EIO is
+ * returned. Note that you must fill in the 'id' member and the 'field'
+ * member (to determine whether CC data from the first or second field
+ * should be obtained).
+ *
+ * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP ioctl handler code.
+ *
+ * @s_raw_fmt: setup the video encoder/decoder for raw VBI.
+ *
+ * @g_sliced_fmt: retrieve the current sliced VBI settings.
+ *
+ * @s_sliced_fmt: setup the sliced VBI settings.
*/
struct v4l2_subdev_vbi_ops {
int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
@@ -480,8 +568,39 @@ struct v4l2_subdev_pad_config {
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
+ *
+ * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
+ * code.
+ * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
+ * code.
+ *
+ * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl
+ * handler code.
+ *
+ * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT ioctl handler code.
+ *
+ * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT ioctl handler code.
+ *
+ * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION ioctl handler code.
+ *
+ * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION ioctl handler code.
+ *
+ * @get_edid: callback for VIDIOC_SUBDEV_G_EDID ioctl handler code.
+ *
+ * @set_edid: callback for VIDIOC_SUBDEV_S_EDID ioctl handler code.
+ *
+ * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler
+ * code.
+ *
+ * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler
+ * code.
+ *
+ * @link_validate: used by the media controller code to check if the links
+ *	that belong to a pipeline can be used for streaming.
+ *
* @get_frame_desc: get the current low level media bus frame parameters.
- * @get_frame_desc: set the low level media bus frame parameters, @fd array
+ *
+ * @set_frame_desc: set the low level media bus frame parameters, @fd array
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
@@ -695,4 +814,7 @@ void v4l2_subdev_init(struct v4l2_subdev *sd,
#define v4l2_subdev_has_op(sd, o, f) \
((sd)->ops->o && (sd)->ops->o->f)
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+ const struct v4l2_event *ev);
+
#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index c192e1b46cdc..589b56c68400 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -364,7 +364,9 @@ struct v4l2_fh;
* start_streaming() can be called. Used when a DMA engine
* cannot be started unless at least this number of buffers
* have been queued into the driver.
- *
+ */
+/*
+ * Private elements (won't appear in the DocBook):
* @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @bufs: videobuf buffer structures
@@ -407,7 +409,7 @@ struct vb2_queue {
gfp_t gfp_flags;
u32 min_buffers_needed;
-/* private: internal use only */
+ /* private: internal use only */
struct mutex mmap_lock;
enum v4l2_memory memory;
struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
@@ -484,7 +486,8 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblock);
-/**
+
+/*
* vb2_thread_fnc - callback function for use with vb2_thread
*
* This is called whenever a buffer is dequeued in the thread.
@@ -577,7 +580,6 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
* vb2_get_plane_payload() - get bytesused for the plane plane_no
* @vb: buffer for which plane payload should be set
* @plane_no: plane number for which payload should be set
- * @size: payload in bytes
*/
static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no)
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index f05444ca8c0c..6513c7ec3116 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -15,9 +15,11 @@
#define _MEDIA_VIDEOBUF2_MEMOPS_H
#include <media/videobuf2-core.h>
+#include <linux/mm.h>
/**
- * vb2_vmarea_handler - common vma refcount tracking handler
+ * struct vb2_vmarea_handler - common vma refcount tracking handler
+ *
* @refcount: pointer to refcount entry in the buffer
* @put: callback to function that decreases buffer refcount
* @arg: argument for @put callback
@@ -30,11 +32,9 @@ struct vb2_vmarea_handler {
extern const struct vm_operations_struct vb2_common_vm_ops;
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
- struct vm_area_struct **res_vma, dma_addr_t *res_pa);
-
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
-void vb2_put_vma(struct vm_area_struct *vma);
-
+struct frame_vector *vb2_create_framevec(unsigned long start,
+ unsigned long length,
+ bool write);
+void vb2_destroy_framevec(struct frame_vector *vec);
#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7a6c1d6cc173..f2ffe5bd720d 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -200,4 +200,14 @@ unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off);
+/*
+ * For EEH, a driver may want to assert that a PERST will reload the same image
+ * from flash into the FPGA.
+ *
+ * This is a property of the entire adapter, not a single AFU, so drivers
+ * should set this property with care!
+ */
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+ bool perst_reloads_same_image);
+
#endif /* _MISC_CXL_H */
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index dc03d77ad23b..a2f59ec98d24 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -197,6 +197,27 @@
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
+#define LOWPAN_PRIV_SIZE(llpriv_size) \
+ (sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+ LOWPAN_LLTYPE_BTLE,
+ LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+ enum lowpan_lltypes lltype;
+
+ /* must be last */
+ u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
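A sketch, under the assumption of a hypothetical link-layer private struct, of how LOWPAN_PRIV_SIZE() and lowpan_priv() are meant to cooperate: the link layer reserves its own data behind the generic area and reaches it through the flexible priv member.

	struct my_ll_priv {			/* hypothetical link-layer data */
		u16 short_addr;
	};

	static struct net_device *my_lowpan_alloc(void)
	{
		/* my_lowpan_setup is assumed to call lowpan_netdev_setup() */
		return alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct my_ll_priv)),
				    "lowpan%d", NET_NAME_ENUM, my_lowpan_setup);
	}

	static struct my_ll_priv *my_ll_data(const struct net_device *dev)
	{
		return (struct my_ll_priv *)lowpan_priv(dev)->priv;
	}
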
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
return skb->len + uncomp_header - ret;
}
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
int
lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
const u8 *saddr, const u8 saddr_type,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 931738bc5bba..9d446f136607 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -21,6 +21,8 @@ struct tcf_common {
struct gnet_stats_rate_est64 tcfc_rate_est;
spinlock_t tcfc_lock;
struct rcu_head tcfc_rcu;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
};
#define tcf_head common.tcfc_head
#define tcf_index common.tcfc_index
@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
kfree(hf->htab);
}
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+ unsigned long now = jiffies;
+
+ if (tm->lastuse != now)
+ tm->lastuse = now;
+}
+
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
@@ -98,11 +111,10 @@ struct tc_action_ops {
};
int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tc_action *a);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
int tcf_hash_check(u32 index, struct tc_action *a, int bind);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
- int size, int bind);
+ int size, int bind, bool cpustats);
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
void tcf_hash_insert(struct tc_action *a);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index def59d3a34d5..b5474b1fcd83 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -91,6 +91,37 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
+static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
+{
+ if (dev->addr_len != ETH_ALEN)
+ return -1;
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+
+ /*
+ * The zSeries OSA network cards can be shared among various
+ * OS instances, but the OSA cards have only one MAC address.
+ * This leads to duplicate address conflicts in conjunction
+ * with IPv6 if more than one instance uses the same card.
+ *
+ * The driver for these cards can deliver a unique 16-bit
+ * identifier for each instance sharing the same card. It is
+ * placed instead of 0xFFFE in the interface identifier. The
+ * "u" bit of the interface identifier is not inverted in this
+ * case. Hence the resulting interface identifier has local
+ * scope according to RFC2373.
+ */
+ if (dev->dev_id) {
+ eui[3] = (dev->dev_id >> 8) & 0xFF;
+ eui[4] = dev->dev_id & 0xFF;
+ } else {
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
+ }
+ return 0;
+}
+
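Worked example (illustrative, not from this patch): for a MAC address of 00:11:22:33:44:55 with dev->dev_id == 0, the helper yields the modified EUI-64 interface identifier 02:11:22:ff:fe:33:44:55 - the bytes ff:fe are inserted in the middle and the universal/local bit of the first octet is flipped. With a non-zero dev_id (the shared zSeries OSA case described above), the two dev_id bytes take the place of ff:fe and the u/l bit is left untouched.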
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
@@ -158,8 +189,8 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
- int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
- struct flowi6 *fl6);
+ int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+ struct dst_entry **dst, struct flowi6 *fl6);
void (*udpv6_encap_enable)(void);
void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *daddr,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3bd618d3e55d..9e1a59e01fa2 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -512,9 +512,11 @@ struct hci_conn_params {
HCI_AUTO_CONN_DIRECT,
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
+ HCI_AUTO_CONN_EXPLICIT,
} auto_connect;
struct hci_conn *conn;
+ bool explicit_connect;
};
extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@ enum {
HCI_CONN_DROP,
HCI_CONN_PARAM_REMOVAL_PEND,
HCI_CONN_NEW_LINK_KEY,
+ HCI_CONN_SCANNING,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@ void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout, u8 role);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role);
@@ -988,6 +1014,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr,
+ u8 addr_type);
void hci_uuids_clear(struct hci_dev *hdev);
@@ -1297,7 +1326,7 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
if (max >= to_multiplier * 8)
return -EINVAL;
- max_latency = (to_multiplier * 8 / max) - 1;
+ max_latency = (to_multiplier * 4 / max) - 1;
if (latency > 499 || latency > max_latency)
return -EINVAL;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 2239a3753092..c98afc08cc26 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -55,6 +55,8 @@
#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
#define L2CAP_A2MP_DEFAULT_MTU 670
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca25320e..1797235cd590 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@ enum {
BOND_OPT_AD_ACTOR_SYS_PRIO,
BOND_OPT_AD_ACTOR_SYSTEM,
BOND_OPT_AD_USER_PORT_KEY,
+ BOND_OPT_NUM_PEER_NOTIF_ALIAS,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 20defc0353d1..c1740a2794a3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -310,6 +310,13 @@ static inline bool bond_uses_primary(struct bonding *bond)
return bond_mode_uses_primary(BOND_MODE(bond));
}
+static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+ struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+ return bond_uses_primary(bond) && slave ? slave->dev : NULL;
+}
+
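A hedged usage sketch: the helper dereferences an RCU-protected pointer, so callers are expected to hold the RCU read lock around it; report_active_slave is an illustrative name, not part of this header.

	static int report_active_slave(struct bonding *bond, struct sk_buff *skb)
	{
		struct net_device *slave_dev;
		int ret = 0;

		rcu_read_lock();
		slave_dev = bond_option_active_slave_get_rcu(bond);
		if (slave_dev)
			ret = nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE,
					  slave_dev->ifindex);
		rcu_read_unlock();

		return ret;
	}
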
static inline bool bond_slave_is_up(struct slave *slave)
{
return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 883fe1e7c5a1..f0889a247643 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2369,8 +2369,7 @@ struct cfg80211_qos_map {
* method returns 0.)
*
* @mgmt_frame_register: Notify driver that a management frame type was
- * registered. Note that this callback may not sleep, and cannot run
- * concurrently with itself.
+ * registered. The callback is allowed to sleep.
*
* @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
* Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 290a9a69af07..76b1ffaea863 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -34,6 +34,8 @@ struct cfg802154_ops {
int type);
void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
struct net_device *dev);
+ int (*suspend)(struct wpan_phy *wpan_phy);
+ int (*resume)(struct wpan_phy *wpan_phy);
int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
const char *name,
unsigned char name_assign_type,
@@ -61,6 +63,8 @@ struct cfg802154_ops {
s8 max_frame_retries);
int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool mode);
+ int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool ackreq);
};
static inline bool
@@ -171,6 +175,9 @@ struct wpan_dev {
struct list_head list;
struct net_device *netdev;
+ /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
+ struct net_device *lowpan_dev;
+
u32 identifier;
/* MAC PIB */
@@ -191,6 +198,9 @@ struct wpan_dev {
bool lbt;
bool promiscuous_mode;
+
+ /* fallback for acknowledgment bit setting */
+ bool ackreq;
};
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 2d1d73cb773e..9fcaedf994ee 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -140,14 +140,16 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr);
+ __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
- int pseudohdr);
+ bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+ __wsum diff, bool pseudohdr);
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
- int pseudohdr)
+ bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d39456e14..ccd6d8bffa4d 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
if (classid != sk->sk_classid)
sk->sk_classid = classid;
}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ u32 classid = task_cls_state(current)->classid;
+
+ /* Due to the nature of the classifier it is required to ignore all
+ * packets originating from softirq context as accessing `current'
+ * would lead to false results.
+ *
+ * This test assumes that all callers of dev_queue_xmit() explicitly
+ * disable bh. Knowing this, it is possible to detect softirq based
+ * calls by looking at the number of nested bh disable calls because
+ * softirqs always disables bh.
+	 * softirqs always disable bh.
+ if (in_serving_softirq()) {
+ /* If there is an sk_classid we'll use that. */
+ if (!skb->sk)
+ return 0;
+
+ classid = skb->sk->sk_classid;
+ }
+
+ return classid;
+}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock *sk)
{
}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ return 0;
+}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fbca63ba8f73..b34d812bc5d0 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -171,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+ return !!((ds->dsa_port_mask) & (1 << p));
+}
+
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
return ds->phys_port_mask & (1 << p) && ds->ports[p];
@@ -296,12 +301,28 @@ struct dsa_switch_driver {
u32 br_port_mask);
int (*port_stp_update)(struct dsa_switch *ds, int port,
u8 state);
- int (*fdb_add)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
- int (*fdb_del)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
- int (*fdb_getnext)(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static);
+
+ /*
+ * VLAN support
+ */
+ int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
+ int (*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
+ int (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+ bool untagged);
+ int (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+ int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
+ unsigned long *ports, unsigned long *untagged);
+
+ /*
+ * Forwarding database
+ */
+ int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*port_fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*port_fdb_getnext)(struct dsa_switch *ds, int port,
+ unsigned char *addr, u16 *vid,
+ bool *is_static);
};
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index 2bc73f8a00a9..9261d928303d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -57,6 +57,7 @@ struct dst_entry {
#define DST_FAKE_RTABLE 0x0040
#define DST_XFRM_TUNNEL 0x0080
#define DST_XFRM_QUEUE 0x0100
+#define DST_METADATA 0x0200
unsigned short pending_confirm;
@@ -83,12 +84,13 @@ struct dst_entry {
__u32 __pad2;
#endif
+#ifdef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
*/
-#ifdef CONFIG_64BIT
- long __pad_to_align_refcnt[2];
+ long __pad_to_align_refcnt[1];
#endif
/*
* __refcnt wants to be on a different cache line from
@@ -97,6 +99,9 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
+#ifndef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
+#endif
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
@@ -202,6 +207,12 @@ static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
p[metric-1] = val;
}
+/* Kernel-internal feature bits that are unallocated in user space. */
+#define DST_FEATURE_ECN_CA (1 << 31)
+
+#define DST_FEATURE_MASK (DST_FEATURE_ECN_CA)
+#define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
+
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
@@ -284,13 +295,18 @@ static inline void skb_dst_drop(struct sk_buff *skb)
}
}
-static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
- nskb->_skb_refdst = oskb->_skb_refdst;
+ nskb->_skb_refdst = refdst;
if (!(nskb->_skb_refdst & SKB_DST_NOREF))
dst_clone(skb_dst(nskb));
}
+static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+{
+ __skb_dst_copy(nskb, oskb->_skb_refdst);
+}
+
/**
* skb_dst_force - makes sure skb dst is refcounted
* @skb: buffer
@@ -356,6 +372,9 @@ static inline int dst_discard(struct sk_buff *skb)
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+ struct net_device *dev, int initial_ref, int initial_obsolete,
+ unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);
@@ -457,7 +476,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
-void dst_init(void);
+void dst_subsys_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 000000000000..af9d5382f6cb
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,108 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+ struct dst_entry dst;
+ union {
+ struct ip_tunnel_info tun_info;
+ } u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+ if (md_dst && md_dst->dst.flags & DST_METADATA)
+ return md_dst;
+
+ return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst)
+ return &md_dst->u.tun_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate)
+ return lwt_tun_info(dst->lwtstate);
+
+ return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+static inline struct metadata_dst *tun_rx_dst(int md_size)
+{
+ struct metadata_dst *tun_dst;
+
+ tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
+ if (!tun_dst)
+ return NULL;
+
+ tun_dst->u.tun_info.options_len = 0;
+ tun_dst->u.tun_info.mode = 0;
+ return tun_dst;
+}
+
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct metadata_dst *tun_dst;
+
+ tun_dst = tun_rx_dst(md_size);
+ if (!tun_dst)
+ return NULL;
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ 0, 0, tunnel_id, flags);
+ return tun_dst;
+}
+
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *info;
+
+ tun_dst = tun_rx_dst(md_size);
+ if (!tun_dst)
+ return NULL;
+
+ info = &tun_dst->u.tun_info;
+ info->mode = IP_TUNNEL_INFO_IPV6;
+ info->key.tun_flags = flags;
+ info->key.tun_id = tunnel_id;
+ info->key.tp_src = 0;
+ info->key.tp_dst = 0;
+
+ info->key.u.ipv6.src = ip6h->saddr;
+ info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.tos = ipv6_get_dsfield(ip6h);
+ info->key.ttl = ip6h->hop_limit;
+ return tun_dst;
+}
+
+#endif /* __NET_DST_METADATA_H */
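
A sketch of the intended receive-path usage, assuming a collect-metadata style tunnel driver; my_tunnel_rx and the zero options length are illustrative, while TUNNEL_KEY is the existing tunnel flag bit.

	static int my_tunnel_rx(struct sk_buff *skb, __be64 tun_id)
	{
		struct metadata_dst *tun_dst;

		/* record the outer IPv4 header in a metadata dst */
		tun_dst = ip_tun_rx_dst(skb, TUNNEL_KEY, tun_id, 0);
		if (!tun_dst)
			return -ENOMEM;

		skb_dst_set(skb, &tun_dst->dst);
		return netif_rx(skb);
	}
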
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 903a55efbffe..59160de702b6 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -19,6 +19,7 @@ struct fib_rule {
u8 action;
/* 3 bytes hole, try to use */
u32 target;
+ __be64 tun_id;
struct fib_rule __rcu *ctarget;
struct net *fr_net;
@@ -65,7 +66,6 @@ struct fib_rules_ops {
struct nlattr **);
int (*fill)(struct fib_rule *, struct sk_buff *,
struct fib_rule_hdr *);
- u32 (*default_pref)(struct fib_rules_ops *ops);
size_t (*nlmsg_payload)(struct fib_rule *);
/* Called after modifications to the rules set, must flush
@@ -117,5 +117,4 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 8109a159d1b3..acd6a096250e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -10,6 +10,7 @@
#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
+#include <net/flow_dissector.h>
/*
* ifindex generation is per-net namespace, and loopback is
@@ -19,6 +20,10 @@
#define LOOPBACK_IFINDEX 1
+struct flowi_tunnel {
+ __be64 tun_id;
+};
+
struct flowi_common {
int flowic_oif;
int flowic_iif;
@@ -29,7 +34,9 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
+#define FLOWI_FLAG_VRFSRC 0x04
__u32 flowic_secid;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
@@ -66,6 +73,7 @@ struct flowi4 {
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
#define flowi4_secid __fl_common.flowic_secid
+#define flowi4_tun_key __fl_common.flowic_tun_key
/* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
@@ -95,6 +103,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->flowi4_proto = proto;
fl4->flowi4_flags = flags;
fl4->flowi4_secid = 0;
+ fl4->flowi4_tun_key.tun_id = 0;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
@@ -122,6 +131,7 @@ struct flowi6 {
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
#define flowi6_secid __fl_common.flowic_secid
+#define flowi6_tun_key __fl_common.flowic_tun_key
struct in6_addr daddr;
struct in6_addr saddr;
__be32 flowlabel;
@@ -165,6 +175,7 @@ struct flowi {
#define flowi_proto u.__fl_common.flowic_proto
#define flowi_flags u.__fl_common.flowic_flags
#define flowi_secid u.__fl_common.flowic_secid
+#define flowi_tun_key u.__fl_common.flowic_tun_key
} __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
@@ -233,4 +244,22 @@ void flow_cache_flush(struct net *net);
void flow_cache_flush_deferred(struct net *net);
extern atomic_t flow_cache_genid;
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
+{
+ struct flow_keys keys;
+
+ return __get_hash_from_flowi6(fl6, &keys);
+}
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi4(const struct flowi4 *fl4)
+{
+ struct flow_keys keys;
+
+ return __get_hash_from_flowi4(fl4, &keys);
+}
+
#endif
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 1a8c22419936..8c8548cf5888 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -2,7 +2,6 @@
#define _NET_FLOW_DISSECTOR_H
#include <linux/types.h>
-#include <linux/skbuff.h>
#include <linux/in6.h>
#include <uapi/linux/if_ether.h>
@@ -13,8 +12,13 @@
struct flow_dissector_key_control {
u16 thoff;
u16 addr_type;
+ u32 flags;
};
+#define FLOW_DIS_IS_FRAGMENT BIT(0)
+#define FLOW_DIS_FIRST_FRAG BIT(1)
+#define FLOW_DIS_ENCAPSULATION BIT(2)
+
/**
* struct flow_dissector_key_basic:
* @thoff: Transport header offset
@@ -123,6 +127,11 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MAX,
};
+#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
+#define FLOW_DISSECTOR_F_STOP_AT_L3 BIT(1)
+#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(2)
+#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(3)
+
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
size_t offset; /* offset of struct flow_dissector_key_*
@@ -134,23 +143,6 @@ struct flow_dissector {
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};
-void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
- const struct flow_dissector_key *key,
- unsigned int key_count);
-
-bool __skb_flow_dissect(const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- void *target_container,
- void *data, __be16 proto, int nhoff, int hlen);
-
-static inline bool skb_flow_dissect(const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- void *target_container)
-{
- return __skb_flow_dissect(skb, flow_dissector, target_container,
- NULL, 0, 0, 0);
-}
-
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
@@ -170,38 +162,6 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow);
extern struct flow_dissector flow_keys_dissector;
extern struct flow_dissector flow_keys_buf_dissector;
-static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
- struct flow_keys *flow)
-{
- memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
- NULL, 0, 0, 0);
-}
-
-static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
- void *data, __be16 proto,
- int nhoff, int hlen)
-{
- memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
- data, proto, nhoff, hlen);
-}
-
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
-
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
- int thoff, u8 ip_proto)
-{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-
-u32 flow_hash_from_keys(struct flow_keys *keys);
-void __skb_get_hash(struct sk_buff *skb);
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
-
/* struct flow_keys_digest:
*
* This structure is used to hold a digest of the full flow keys. This is a
@@ -217,4 +177,11 @@ struct flow_keys_digest {
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
+static inline bool flow_keys_have_l4(struct flow_keys *keys)
+{
+ return (keys->ports.ports || keys->tags.flow_label);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+
#endif
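
The new control flags and flow_keys_have_l4() make it possible to tell whether dissected keys actually carry transport-layer entropy. A minimal sketch, assuming @keys was already populated by the flow dissector (whose skb helpers this patch moves to linux/skbuff.h); the function name is illustrative:

#include <linux/bitops.h>
#include <net/flow_dissector.h>

/* Illustrative only: decide whether dissected keys are usable for an
 * L4 hash. Fragments may lack a transport header, so check the new
 * control flags before trusting ports.
 */
static inline bool example_keys_usable_for_l4_hash(struct flow_keys *keys)
{
	if (keys->control.flags & FLOW_DIS_IS_FRAGMENT)
		return false;

	return flow_keys_have_l4(keys);
}
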
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 2a0543a1899d..3106ed6eae0d 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,40 +62,9 @@ struct genevehdr {
struct geneve_opt options[];
};
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
- return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
#ifdef CONFIG_INET
-struct geneve_sock;
-
-typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
-
-struct geneve_sock {
- struct list_head list;
- geneve_rcv_t *rcv;
- void *rcv_data;
- struct socket *sock;
- struct rcu_head rcu;
- int refcnt;
- struct udp_offload udp_offloads;
-};
-
-#define GENEVE_VER 0
-#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
- geneve_rcv_t *rcv, void *data,
- bool no_share, bool ipv6);
-
-void geneve_sock_release(struct geneve_sock *vs);
-
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
- __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool csum, bool xnet);
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+ u8 name_assign_type, u16 dst_port);
#endif /* ifdef CONFIG_INET */
#endif /* ifdef __NET_GENEVE_H */
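
With the old geneve socket API gone, a flow-based geneve device is created through the single helper declared above. A hedged sketch, assuming the caller holds the RTNL lock as is usual for link creation; the device name and port value are illustrative, and error handling is elided:

#include <linux/netdevice.h>
#include <net/geneve.h>

/* Illustrative only: create a flow-based (collect-metadata) geneve
 * device. The name and destination port are example values.
 */
static struct net_device *example_geneve_create(struct net *net)
{
	return geneve_dev_create_fb(net, "geneve0", NET_NAME_USER, 6081);
}
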
diff --git a/include/net/gre.h b/include/net/gre.h
index b53182018743..97eafdc47eea 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -4,6 +4,12 @@
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
#define GREPROTO_CISCO 0
#define GREPROTO_PPTP 1
#define GREPROTO_MAX 2
@@ -14,91 +20,9 @@ struct gre_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
};
-struct gre_base_hdr {
- __be16 flags;
- __be16 protocol;
-};
-#define GRE_HEADER_SECTION 4
-
int gre_add_protocol(const struct gre_protocol *proto, u8 version);
int gre_del_protocol(const struct gre_protocol *proto, u8 version);
-struct gre_cisco_protocol {
- int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
- int (*err_handler)(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi);
- u8 priority;
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *proto);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
- int hdr_len);
-
-static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
- bool csum)
-{
- return iptunnel_handle_offloads(skb, csum,
- csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-}
-
-
-static inline int ip_gre_calc_hlen(__be16 o_flags)
-{
- int addend = 4;
-
- if (o_flags&TUNNEL_CSUM)
- addend += 4;
- if (o_flags&TUNNEL_KEY)
- addend += 4;
- if (o_flags&TUNNEL_SEQ)
- addend += 4;
- return addend;
-}
-
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
- __be16 tflags = 0;
-
- if (flags & GRE_CSUM)
- tflags |= TUNNEL_CSUM;
- if (flags & GRE_ROUTING)
- tflags |= TUNNEL_ROUTING;
- if (flags & GRE_KEY)
- tflags |= TUNNEL_KEY;
- if (flags & GRE_SEQ)
- tflags |= TUNNEL_SEQ;
- if (flags & GRE_STRICT)
- tflags |= TUNNEL_STRICT;
- if (flags & GRE_REC)
- tflags |= TUNNEL_REC;
- if (flags & GRE_VERSION)
- tflags |= TUNNEL_VERSION;
-
- return tflags;
-}
-
-static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
- __be16 flags = 0;
-
- if (tflags & TUNNEL_CSUM)
- flags |= GRE_CSUM;
- if (tflags & TUNNEL_ROUTING)
- flags |= GRE_ROUTING;
- if (tflags & TUNNEL_KEY)
- flags |= GRE_KEY;
- if (tflags & TUNNEL_SEQ)
- flags |= GRE_SEQ;
- if (tflags & TUNNEL_STRICT)
- flags |= GRE_STRICT;
- if (tflags & TUNNEL_REC)
- flags |= GRE_REC;
- if (tflags & TUNNEL_VERSION)
- flags |= GRE_VERSION;
-
- return flags;
-}
-
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ u8 name_assign_type);
#endif
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 0f712c0bc0bf..cf6c74550baa 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -32,37 +32,28 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
return;
}
- /* We run in BH context */
- spin_lock(&cell->napi_skbs.lock);
-
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
-
- spin_unlock(&cell->napi_skbs.lock);
}
-/* called unser BH context */
+/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
struct sk_buff *skb;
int work_done = 0;
- spin_lock(&cell->napi_skbs.lock);
while (work_done < budget) {
skb = __skb_dequeue(&cell->napi_skbs);
if (!skb)
break;
- spin_unlock(&cell->napi_skbs.lock);
napi_gro_receive(napi, skb);
work_done++;
- spin_lock(&cell->napi_skbs.lock);
}
if (work_done < budget)
- napi_complete(napi);
- spin_unlock(&cell->napi_skbs.lock);
+ napi_complete_done(napi, work_done);
return work_done;
}
@@ -77,7 +68,7 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
- skb_queue_head_init(&cell->napi_skbs);
+ __skb_queue_head_init(&cell->napi_skbs);
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
napi_enable(&cell->napi);
}
@@ -92,8 +83,9 @@ static inline void gro_cells_destroy(struct gro_cells *gcells)
return;
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
netif_napi_del(&cell->napi);
- skb_queue_purge(&cell->napi_skbs);
+ __skb_queue_purge(&cell->napi_skbs);
}
free_percpu(gcells->cells);
gcells->cells = NULL;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b73c88a19dd4..b07d126694a7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 360c4802288d..879d6e5a973b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -100,10 +100,8 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
void inet_twsk_free(struct inet_timewait_sock *tw);
void inet_twsk_put(struct inet_timewait_sock *tw);
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);
void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332ddcea3f..4a6009d4486b 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,16 +15,20 @@
#include <net/ipv6.h>
#include <linux/atomic.h>
-struct inetpeer_addr_base {
- union {
- __be32 a4;
- __be32 a6[4];
- struct in6_addr in6;
- };
+/* IPv4 address key for cache lookups */
+struct ipv4_addr_key {
+ __be32 addr;
+ int vif;
};
+#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))
+
struct inetpeer_addr {
- struct inetpeer_addr_base addr;
+ union {
+ struct ipv4_addr_key a4;
+ struct in6_addr a6;
+ u32 key[INETPEER_MAXKEYSZ];
+ };
__u16 family;
};
@@ -65,69 +69,33 @@ struct inet_peer_base {
int total;
};
-#define INETPEER_BASE_BIT 0x1UL
-
-static inline struct inet_peer *inetpeer_ptr(unsigned long val)
-{
- BUG_ON(val & INETPEER_BASE_BIT);
- return (struct inet_peer *) val;
-}
+void inet_peer_base_init(struct inet_peer_base *);
-static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
-{
- if (!(val & INETPEER_BASE_BIT))
- return NULL;
- val &= ~INETPEER_BASE_BIT;
- return (struct inet_peer_base *) val;
-}
+void inet_initpeers(void) __init;
-static inline bool inetpeer_ptr_is_peer(unsigned long val)
-{
- return !(val & INETPEER_BASE_BIT);
-}
+#define INETPEER_METRICS_NEW (~(u32) 0)
-static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
+static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
- /* This implicitly clears INETPEER_BASE_BIT */
- *val = (unsigned long) peer;
+ iaddr->a4.addr = ip;
+ iaddr->family = AF_INET;
}
-static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
+static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
- unsigned long val = (unsigned long) peer;
- unsigned long orig = *ptr;
-
- if (!(orig & INETPEER_BASE_BIT) ||
- cmpxchg(ptr, orig, val) != orig)
- return false;
- return true;
+ return iaddr->a4.addr;
}
-static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
+static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
+ struct in6_addr *in6)
{
- *ptr = (unsigned long) base | INETPEER_BASE_BIT;
+ iaddr->a6 = *in6;
+ iaddr->family = AF_INET6;
}
-static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
+static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
- unsigned long val = *from;
-
- *to = val;
- if (inetpeer_ptr_is_peer(val)) {
- struct inet_peer *peer = inetpeer_ptr(val);
- atomic_inc(&peer->refcnt);
- }
-}
-
-void inet_peer_base_init(struct inet_peer_base *);
-
-void inet_initpeers(void) __init;
-
-#define INETPEER_METRICS_NEW (~(u32) 0)
-
-static inline bool inet_metrics_new(const struct inet_peer *p)
-{
- return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+ return &iaddr->a6;
}
/* can be called with or without local BH being disabled */
@@ -137,11 +105,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
__be32 v4daddr,
- int create)
+ int vif, int create)
{
struct inetpeer_addr daddr;
- daddr.addr.a4 = v4daddr;
+ daddr.a4.addr = v4daddr;
+ daddr.a4.vif = vif;
daddr.family = AF_INET;
return inet_getpeer(base, &daddr, create);
}
@@ -152,23 +121,36 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
{
struct inetpeer_addr daddr;
- daddr.addr.in6 = *v6daddr;
+ daddr.a6 = *v6daddr;
daddr.family = AF_INET6;
return inet_getpeer(base, &daddr, create);
}
+static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+{
+ int i, n;
+
+ if (a->family == AF_INET)
+ n = sizeof(a->a4) / sizeof(u32);
+ else
+ n = sizeof(a->a6) / sizeof(u32);
+
+ for (i = 0; i < n; i++) {
+ if (a->key[i] == b->key[i])
+ continue;
+ if (a->key[i] < b->key[i])
+ return -1;
+ return 1;
+ }
+
+ return 0;
+}
+
/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
void inetpeer_invalidate_tree(struct inet_peer_base *);
-/*
- * temporary check to make sure we dont access rid, tcp_ts,
- * tcp_ts_stamp if no refcount is taken on inet_peer
- */
-static inline void inet_peer_refcheck(const struct inet_peer *p)
-{
- WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
-}
#endif /* _NET_INETPEER_H */
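
The keyed inetpeer_addr layout above is compared word-by-word over key[]. A minimal sketch of building two IPv4 keys and ordering them; addresses and vif values are illustrative, and the zero-initialization matters because inetpeer_set_addr_v4() does not touch the vif member:

#include <net/inetpeer.h>

/* Illustrative only: build two IPv4 peer keys and order them with
 * inetpeer_addr_cmp(), which compares the key[] words (-1, 0 or 1).
 */
static int example_cmp_peers(__be32 ip_a, __be32 ip_b, int vif)
{
	struct inetpeer_addr a = {}, b = {};

	inetpeer_set_addr_v4(&a, ip_a);
	a.a4.vif = vif;		/* the IPv4 key also carries the vif */

	inetpeer_set_addr_v4(&b, ip_b);
	b.a4.vif = vif;

	return inetpeer_addr_cmp(&a, &b);
}
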
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2ab699..9b9ca2839399 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -202,10 +202,20 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset)
+{
+ return snmp_get_cpu_field(mib, cpu, offct);
+}
+
static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
@@ -370,22 +380,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
-static inline void inet_set_txhash(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct flow_keys keys;
-
- memset(&keys, 0, sizeof(keys));
-
- keys.addrs.v4addrs.src = inet->inet_saddr;
- keys.addrs.v4addrs.dst = inet->inet_daddr;
- keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- keys.ports.src = inet->inet_sport;
- keys.ports.dst = inet->inet_dport;
-
- sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
const struct iphdr *iph = skb_gro_network_header(skb);
@@ -474,6 +468,11 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
+static inline unsigned int ipv4_addr_hash(__be32 ip)
+{
+ return (__force unsigned int) ip;
+}
+
bool ip_call_ra_chain(struct sk_buff *skb);
/*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3b76849c190f..063d30474cf6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -51,6 +51,8 @@ struct fib6_config {
struct nlattr *fc_mp;
struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
};
struct fib6_node {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5fa643b4e891..a37d0432bebd 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -44,7 +44,9 @@ struct fib_config {
u32 fc_flow;
u32 fc_nlflags;
struct nl_info fc_nlinfo;
- };
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+};
struct fib_info;
struct rtable;
@@ -89,6 +91,7 @@ struct fib_nh {
struct rtable __rcu * __percpu *nh_pcpu_rth_output;
struct rtable __rcu *nh_rth_input;
struct fnhe_hash_bucket __rcu *nh_exceptions;
+ struct lwtunnel_state *nh_lwtstate;
};
/*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb88bbc..9a6a3ba888e8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -4,14 +4,15 @@
#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
-#include <net/ip.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -22,6 +23,44 @@
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
+ (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
+ FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+
+struct ip_tunnel_key {
+ __be64 tun_id;
+ union {
+ struct {
+ __be32 src;
+ __be32 dst;
+ } ipv4;
+ struct {
+ struct in6_addr src;
+ struct in6_addr dst;
+ } ipv6;
+ } u;
+ __be16 tun_flags;
+ u8 tos; /* TOS for IPv4, TC for IPv6 */
+ u8 ttl; /* TTL for IPv4, HL for IPv6 */
+ __be16 tp_src;
+ __be16 tp_dst;
+};
+
+/* Flags for ip_tunnel_info mode. */
+#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
+#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
+
+struct ip_tunnel_info {
+ struct ip_tunnel_key key;
+ u8 options_len;
+ u8 mode;
+};
+
/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
@@ -33,8 +72,8 @@ struct ip_tunnel_6rd_parm {
#endif
struct ip_tunnel_encap {
- __u16 type;
- __u16 flags;
+ u16 type;
+ u16 flags;
__be16 sport;
__be16 dport;
};
@@ -51,6 +90,8 @@ struct ip_tunnel_dst {
__be32 saddr;
};
+struct metadata_dst;
+
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
@@ -62,8 +103,8 @@ struct ip_tunnel {
* arrived */
/* These four fields used only by GRE */
- __u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ u32 i_seqno; /* The last seen seqno */
+ u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
int mlink;
@@ -84,6 +125,7 @@ struct ip_tunnel {
unsigned int prl_count; /* # of entries in PRL */
int ip_tnl_net_id;
struct gro_cells gro_cells;
+ bool collect_md;
};
#define TUNNEL_CSUM __cpu_to_be16(0x01)
@@ -118,6 +160,7 @@ struct tnl_ptk_info {
struct ip_tunnel_net {
struct net_device *fb_tunnel_dev;
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+ struct ip_tunnel __rcu *collect_md_tun;
};
struct ip_tunnel_encap_ops {
@@ -136,6 +179,40 @@ int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);
+static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
+ __be32 saddr, __be32 daddr,
+ u8 tos, u8 ttl,
+ __be16 tp_src, __be16 tp_dst,
+ __be64 tun_id, __be16 tun_flags)
+{
+ key->tun_id = tun_id;
+ key->u.ipv4.src = saddr;
+ key->u.ipv4.dst = daddr;
+ memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
+ 0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+ key->tos = tos;
+ key->ttl = ttl;
+ key->tun_flags = tun_flags;
+
+ /* For the tunnel types on top of IPsec, the tp_src and tp_dst of
+ * the upper tunnel are used.
+ * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
+ */
+ key->tp_src = tp_src;
+ key->tp_dst = tp_dst;
+
+ /* Clear struct padding. */
+ if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
+ memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
+ 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
+}
+
+static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
+ *tun_info)
+{
+ return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
+}
+
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -163,7 +240,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
__be32 key);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
- const struct tnl_ptk_info *tpi, bool log_ecn_error);
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -196,8 +274,8 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df, bool xnet);
+ __be32 src, __be32 dst, u8 proto,
+ u8 tos, u8 ttl, __be16 df, bool xnet);
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
int gso_type_mask);
@@ -221,6 +299,57 @@ static inline void iptunnel_xmit_stats(int err,
}
}
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
+{
+ return info + 1;
+}
+
+static inline void ip_tunnel_info_opts_get(void *to,
+ const struct ip_tunnel_info *info)
+{
+ memcpy(to, info + 1, info->options_len);
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ const void *from, int len)
+{
+ memcpy(ip_tunnel_info_opts(info), from, len);
+ info->options_len = len;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+ return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
#endif /* CONFIG_INET */
#endif /* __NET_IP_TUNNELS_H */
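
ip_tunnel_key_init() above clears both the IPv4 address padding and any trailing struct padding, so a key filled this way is safe to compare or hash byte-wise. A hedged sketch of preparing a TX ip_tunnel_info; the TOS/TTL values and the flag choice are illustrative, not defaults:

#include <net/ip_tunnels.h>

/* Illustrative only: fill a transmit-side tunnel info for an IPv4
 * tunnel with a keyed header and no options.
 */
static void example_fill_tun_info(struct ip_tunnel_info *info,
				  __be32 saddr, __be32 daddr,
				  __be64 tun_id, __be16 dst_port)
{
	ip_tunnel_key_init(&info->key, saddr, daddr,
			   0, 64,		/* tos, ttl: example values */
			   0, dst_port,		/* tp_src unused here */
			   tun_id, TUNNEL_KEY);
	info->mode = IP_TUNNEL_INFO_TX;
	info->options_len = 0;
}
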
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4e3731ee4eac..9b9ca87a4210 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -846,6 +846,17 @@ struct ipvs_master_sync_state {
/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
+struct ipvs_sync_daemon_cfg {
+ union nf_inet_addr mcast_group;
+ int syncid;
+ u16 sync_maxlen;
+ u16 mcast_port;
+ u8 mcast_af;
+ u8 mcast_ttl;
+ /* multicast interface name */
+ char mcast_ifn[IP_VS_IFNAME_MAXLEN];
+};
+
/* IPVS in network namespace */
struct netns_ipvs {
int gen; /* Generation */
@@ -961,15 +972,10 @@ struct netns_ipvs {
spinlock_t sync_buff_lock;
struct task_struct **backup_threads;
int threads_mask;
- int send_mesg_maxlen;
- int recv_mesg_maxlen;
volatile int sync_state;
- volatile int master_syncid;
- volatile int backup_syncid;
struct mutex sync_mutex;
- /* multicast interface name */
- char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
- char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ struct ipvs_sync_daemon_cfg mcfg; /* Master Configuration */
+ struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */
/* net name space ptr */
struct net *net; /* Needed by timer routines */
/* Number of heterogeneous destinations, needed because heterogeneous
@@ -1408,7 +1414,8 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
/* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
+ int state);
int stop_sync_thread(struct net *net, int state);
void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
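
start_sync_thread() now takes the whole per-daemon configuration in one structure instead of an interface name plus syncid. A minimal sketch of starting a master sync daemon; the field values are examples only, and fields left zero here are assumed to be given defaults by the IPVS core:

#include <net/ip_vs.h>

/* Illustrative only: start an IPVS master sync daemon with the new
 * ipvs_sync_daemon_cfg structure.
 */
static int example_start_master_sync(struct net *net)
{
	struct ipvs_sync_daemon_cfg cfg = {
		.syncid    = 1,
		.mcast_af  = AF_INET,
		.mcast_ttl = 1,
	};

	strlcpy(cfg.mcast_ifn, "eth0", sizeof(cfg.mcast_ifn));

	return start_sync_thread(net, &cfg, IP_VS_STATE_MASTER);
}
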
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb092a5d..711cca428cc8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,54 +707,69 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct flow_keys keys;
- memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF 0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
+#define IP6_AUTO_FLOW_LABEL_OPTIN 2
+#define IP6_AUTO_FLOW_LABEL_FORCED 3
- memcpy(&keys.addrs.v6addrs.src, &np->saddr,
- sizeof(keys.addrs.v6addrs.src));
- memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
- sizeof(keys.addrs.v6addrs.dst));
- keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- keys.ports.src = inet->inet_sport;
- keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED
- sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
- __be32 flowlabel, bool autolabel)
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
{
- if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
- u32 hash;
+ u32 hash;
+
+ if (flowlabel ||
+ net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+ (!autolabel &&
+ net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+ return flowlabel;
- hash = skb_get_hash(skb);
+ hash = skb_get_hash_flowi6(skb, fl6);
- /* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
- * attacker is leaked. Only lower 20 bits are relevant.
- */
- hash ^= hash >> 12;
+ /* Since this is being sent on the wire, obfuscate the hash a bit
+ * to minimize the possibility that any useful information is
+ * leaked to an attacker. Only the lower 20 bits are relevant.
+ */
+ hash = rol32(hash, 16);
- flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
- if (net->ipv6.sysctl.flowlabel_state_ranges)
- flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
- }
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
return flowlabel;
}
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ switch (net->ipv6.sysctl.auto_flowlabels) {
+ case IP6_AUTO_FLOW_LABEL_OFF:
+ case IP6_AUTO_FLOW_LABEL_OPTIN:
+ default:
+ return 0;
+ case IP6_AUTO_FLOW_LABEL_OPTOUT:
+ case IP6_AUTO_FLOW_LABEL_FORCED:
+ return 1;
+ }
+}
#else
static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
- __be32 flowlabel, bool autolabel)
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
{
return flowlabel;
}
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ return 0;
+}
#endif
@@ -832,7 +847,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
&inet6_sk(sk)->cork);
}
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 000000000000..fce0e35e74d0
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,175 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS 7
+#define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
+
+struct lwtunnel_state {
+ __u16 type;
+ __u16 flags;
+ atomic_t refcnt;
+ int (*orig_output)(struct sock *sk, struct sk_buff *skb);
+ int (*orig_input)(struct sk_buff *);
+ int len;
+ __u8 data[0];
+};
+
+struct lwtunnel_encap_ops {
+ int (*build_state)(struct net_device *dev, struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts);
+ int (*output)(struct sock *sk, struct sk_buff *skb);
+ int (*input)(struct sk_buff *skb);
+ int (*fill_encap)(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+ int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+ int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+ kfree(lws);
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ if (lws)
+ atomic_inc(&lws->refcnt);
+
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+ if (!lws)
+ return;
+
+ if (atomic_dec_and_test(&lws->refcnt))
+ lwtstate_free(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
+
+#else
+
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+ return -EOPNOTSUPP;
+
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **lws)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+ return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+ struct lwtunnel_state *b)
+{
+ return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_input(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
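
lwtunnel_state objects are reference counted through the helpers above, and the redirect flags decide whether dst output/input should be diverted. A minimal sketch, assuming CONFIG_LWTUNNEL is enabled; the function name is illustrative:

#include <net/lwtunnel.h>

/* Illustrative only: hold a reference on a cached lwtunnel state while
 * testing whether output should be redirected, then drop it again.
 */
static bool example_lwt_check_output(struct lwtunnel_state *lws)
{
	bool redirect;

	lws = lwtstate_get(lws);		/* NULL-safe reference */
	redirect = lwtunnel_output_redirect(lws);
	lwtstate_put(lws);			/* frees on the last put */

	return redirect;
}
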
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 6b1077c2a63f..bfc569498bfa 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -477,7 +477,9 @@ struct ieee80211_event {
* @chandef: Channel definition for this BSS -- the hardware might be
* configured a higher bandwidth than this BSS uses, for example.
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
- * This field is only valid when the channel type is one of the HT types.
+ * This field is only valid when the channel is a wide HT/VHT channel.
+ * Note that with TDLS this can be the case (channel is HT, protection must
+ * be used from this field) even when the BSS association isn't using HT.
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
@@ -973,6 +975,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
* If this flag is set, the stack cannot do any replay detection
* hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ * flag indicates that the PN was verified for replay protection.
+ * Note that this flag is also currently only supported when a frame
+ * is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set).
* @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1003,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
* number (@ampdu_reference) must be populated and be a distinct number for
* each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- * monitoring purposes only
* @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
* subframes of a single A-MPDU
* @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1042,8 @@ enum mac80211_rx_flags {
RX_FLAG_NO_SIGNAL_VAL = BIT(12),
RX_FLAG_HT_GF = BIT(13),
RX_FLAG_AMPDU_DETAILS = BIT(14),
- RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15),
- RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16),
+ RX_FLAG_PN_VALIDATED = BIT(15),
+ /* bit 16 free */
RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
RX_FLAG_AMPDU_IS_LAST = BIT(18),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
@@ -1491,8 +1494,10 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
*/
struct ieee80211_key_conf {
+ void *drv_priv;
atomic64_t tx_pn;
u32 cipher;
u8 icv_len;
@@ -1675,7 +1680,6 @@ struct ieee80211_sta_rates {
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
@@ -1693,7 +1697,6 @@ struct ieee80211_sta {
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
- bool mfp;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
@@ -1888,6 +1891,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
*
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ * than the BSS bandwidth for a TDLS link on the base channel.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -1920,6 +1926,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_CHANCTX_STA_CSA,
IEEE80211_HW_SUPPORTS_CLONED_SKBS,
IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+ IEEE80211_HW_TDLS_WIDER_BW,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3703,28 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
void ieee80211_restart_hw(struct ieee80211_hw *hw);
/**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- * driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, the driver should place the IEEE 802.11 header of
+ * the frame on the linear part of the @skb to avoid extra memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
*
- * See also netif_napi_add().
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
*/
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct napi_struct *napi);
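
A hedged sketch of how a driver might feed frames to ieee80211_rx_napi() from its NAPI poll, per the calling rules in the kernel-doc above; the driver structure, its rx_queue and the poll routine are hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

/* Illustrative only: hypothetical driver state holding the hw pointer,
 * the NAPI context and a queue of received frames.
 */
struct example_drv {
	struct ieee80211_hw *hw;
	struct napi_struct napi;
	struct sk_buff_head rx_queue;
};

static int example_drv_poll(struct napi_struct *napi, int budget)
{
	struct example_drv *drv = container_of(napi, struct example_drv, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = skb_dequeue(&drv->rx_queue))) {
		/* NAPI runs with BHs disabled, as required above */
		ieee80211_rx_napi(drv->hw, skb, napi);
		done++;
	}

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}
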
/**
* ieee80211_rx - receive frame
@@ -3731,7 +3746,10 @@ void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
*/
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ ieee80211_rx_napi(hw, skb, NULL);
+}
/**
* ieee80211_rx_irqsafe - receive frame
@@ -4315,19 +4333,6 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k);
/**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
- u8 *k1, u8 *k2);
-
-/**
* ieee80211_get_key_tx_seq - get key TX sequence counter
*
* @keyconf: the parameter passed with the set key
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index f534a46911dc..b7f99615224b 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -321,23 +321,6 @@ int ieee802154_register_hw(struct ieee802154_hw *hw);
void ieee802154_unregister_hw(struct ieee802154_hw *hw);
/**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
-/**
* ieee802154_rx_irqsafe - receive frame
*
* Like ieee802154_rx() but can be called in IRQ context
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 000000000000..4757997f76ed
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+ u32 label[MAX_NEW_LABELS];
+ u32 labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+ return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b3a7751251b4..aba5695fadb0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -182,7 +182,8 @@ int ndisc_rcv(struct sk_buff *skb);
void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr);
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ struct sk_buff *oskb);
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bd33e66f49aa..8b683841e574 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -125,6 +125,7 @@ struct neigh_statistics {
unsigned long forced_gc_runs; /* number of forced GC runs */
unsigned long unres_discards; /* number of unresolved drops */
+ unsigned long table_fulls; /* times even gc couldn't help */
};
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e951453e0a23..2dcea635ecce 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -118,6 +118,9 @@ struct net {
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+ struct list_head nfnl_acct_list;
+#endif
#endif
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index bab824bde92c..d4c6b5f30acd 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -59,7 +59,7 @@ static inline unsigned int
br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- return NF_DROP;
+ return NF_ACCEPT;
}
#endif
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644
index 000000000000..42008f10dfc4
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644
index 000000000000..ed6bd66fa5a0
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 37cd3911d5c5..e8ad46834df8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -250,8 +250,12 @@ void nf_ct_untracked_status_or(unsigned long bits);
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp);
@@ -291,7 +295,10 @@ extern unsigned int nf_conntrack_max;
extern unsigned int nf_conntrack_hash_rnd;
void init_nf_conntrack_hash_rnd(void);
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags);
+void nf_ct_tmpl_free(struct nf_conn *tmpl);
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index f2f0fa3bb150..c03f9c42b3cd 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -52,7 +52,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
int __nf_conntrack_confirm(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 3f3aecbc8632..dce56f09ac9a 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -4,7 +4,9 @@
#ifndef _NF_CONNTRACK_EXPECT_H
#define _NF_CONNTRACK_EXPECT_H
+
#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
extern unsigned int nf_ct_expect_hsize;
extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@ int nf_conntrack_expect_init(void);
void nf_conntrack_expect_fini(void);
struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index dec6336bf850..7e2b1d025f50 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -54,7 +54,11 @@ int nf_connlabels_replace(struct nf_conn *ct,
#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_conntrack_labels_init(void);
void nf_conntrack_labels_fini(void);
+int nf_connlabels_get(struct net *net, unsigned int n_bits);
+void nf_connlabels_put(struct net *net);
#else
static inline int nf_conntrack_labels_init(void) { return 0; }
static inline void nf_conntrack_labels_fini(void) {}
+static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline void nf_connlabels_put(struct net *net) {}
#endif
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 034efe8d45a5..4e32512cef32 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -1,25 +1,89 @@
#ifndef _NF_CONNTRACK_ZONES_H
#define _NF_CONNTRACK_ZONES_H
-#define NF_CT_DEFAULT_ZONE 0
+#include <linux/netfilter/nf_conntrack_zones_common.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_extend.h>
-struct nf_conntrack_zone {
- u16 id;
-};
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
+{
+ const struct nf_conntrack_zone *nf_ct_zone = NULL;
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
+#endif
+ return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+ zone->id = id;
+ zone->flags = flags;
+ zone->dir = dir;
+
+ return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+ struct nf_conntrack_zone *tmp)
+{
+ const struct nf_conntrack_zone *zone;
+
+ if (!tmpl)
+ return &nf_ct_zone_dflt;
+
+ zone = nf_ct_zone(tmpl);
+ if (zone->flags & NF_CT_FLAG_MARK)
+ zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+ return zone;
+}
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+ const struct nf_conntrack_zone *info)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
struct nf_conntrack_zone *nf_ct_zone;
- nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
- if (nf_ct_zone)
- return nf_ct_zone->id;
+
+ nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+ if (!nf_ct_zone)
+ return -ENOMEM;
+
+ nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+ info->flags);
#endif
- return NF_CT_DEFAULT_ZONE;
+ return 0;
}
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return nf_ct_zone_matches_dir(zone, dir) ?
+ zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b,
+ enum ip_conntrack_dir dir)
+{
+ return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+ nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b)
+{
+ return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
#endif /* _NF_CONNTRACK_ZONES_H */
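
The zone is now a structure carrying an id, a direction mask and flags rather than a bare u16. A minimal sketch of building a directional zone on the stack and matching a conntrack entry against it; the id value is illustrative and NF_CT_ZONE_DIR_ORIG is assumed to come from the nf_conntrack_zones_common.h header introduced by this series:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>

/* Illustrative only: does @ct belong to zone @id in the original
 * direction? Uses the new init/equal helpers above.
 */
static bool example_ct_in_orig_zone(const struct nf_conn *ct, u16 id)
{
	struct nf_conntrack_zone zone;

	nf_ct_zone_init(&zone, id, NF_CT_ZONE_DIR_ORIG, 0);

	return nf_ct_zone_equal(ct, &zone, IP_CT_DIR_ORIGINAL);
}
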
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2a246680a6c3..aa8bee72c9d3 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
{
- return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+ return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
unsigned int nft_parse_register(const struct nlattr *attr);
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644
index 000000000000..6b84cf6491a2
--- /dev/null
+++ b/include/net/netfilter/nft_dup.h
@@ -0,0 +1,9 @@
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 8d93544a2d2b..c0368db6df54 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -31,6 +31,7 @@ struct netns_sysctl_ipv6 {
int auto_flowlabels;
int icmpv6_time;
int anycast_src_echo_reply;
+ int ip_nonlocal_bind;
int fwmark_reflect;
int idgen_retries;
int idgen_delay;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 532e4ba64f49..38aa4983e2a9 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -14,5 +14,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
+ struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 01fc8c531115..d0d0f1e53bb9 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -79,6 +79,7 @@ struct nci_ops {
int (*close)(struct nci_dev *ndev);
int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
int (*setup)(struct nci_dev *ndev);
+ int (*post_setup)(struct nci_dev *ndev);
int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
int (*discover_se)(struct nci_dev *ndev);
@@ -277,6 +278,8 @@ int nci_request(struct nci_dev *ndev,
unsigned long opt),
unsigned long opt, __u32 timeout);
int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+int nci_core_reset(struct nci_dev *ndev);
+int nci_core_init(struct nci_dev *ndev);
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f9e58ae45f9c..30afc9a6718c 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -203,6 +203,7 @@ struct nfc_dev {
int n_vendor_cmds;
struct nfc_ops *ops;
+ struct genl_info *cur_cmd_info;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -318,4 +319,44 @@ static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
return 0;
}
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+ enum nfc_attrs attr,
+ u32 oui, u32 subcmd,
+ int approxlen);
+int nfc_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @dev: nfc device
+ * @oui: vendor oui
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call nfc_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
+ u32 oui, u32 subcmd, int approxlen)
+{
+ return __nfc_alloc_vendor_cmd_reply_skb(dev,
+ NFC_ATTR_VENDOR_DATA,
+ oui,
+ subcmd, approxlen);
+}
+
#endif /* __NET_NFC_H */
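
Following the kernel-doc above, a vendor command's doit() allocates the reply, appends its payload and hands the skb back with nfc_vendor_cmd_reply(). A hedged sketch; the handler signature is assumed from struct nfc_vendor_cmd, and the OUI, sub-command and payload bytes are hypothetical:

#include <linux/skbuff.h>
#include <net/nfc/nfc.h>

/* Illustrative only: a vendor command handler replying with a few
 * bytes. Data appended to the skb ends up inside %NFC_ATTR_VENDOR_DATA.
 */
static int example_vendor_doit(struct nfc_dev *dev, void *data, size_t data_len)
{
	u8 payload[4] = { 0xde, 0xad, 0xbe, 0xef };
	struct sk_buff *skb;

	skb = nfc_vendor_cmd_alloc_reply_skb(dev, 0x123456, 0x01,
					     sizeof(payload));
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, sizeof(payload)), payload, sizeof(payload));

	return nfc_vendor_cmd_reply(skb);
}
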
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b0ab530d28cd..cf2713d8b975 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -52,6 +52,8 @@ enum nl802154_commands {
NL802154_CMD_SET_LBT_MODE,
+ NL802154_CMD_SET_ACKREQ_DEFAULT,
+
/* add new commands above here */
/* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@ enum nl802154_attrs {
NL802154_ATTR_SUPPORTED_COMMANDS,
+ NL802154_ATTR_ACKREQ_DEFAULT,
+
/* add attributes here, update the policy in nl802154.c */
__NL802154_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2342bf12cb78..401038d2f9b8 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -110,10 +110,8 @@ static inline void qdisc_run(struct Qdisc *q)
__qdisc_run(q);
}
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res);
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res);
+ struct tcf_result *res, bool compat_mode);
static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
diff --git a/include/net/route.h b/include/net/route.h
index fe22d03afb6a..cc61cb95f059 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -188,8 +188,12 @@ void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
void ip_rt_send_redirect(struct sk_buff *skb);
unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr);
+unsigned int inet_addr_type_dev_table(struct net *net,
+ const struct net_device *dev,
+ __be32 addr);
void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
@@ -250,6 +254,9 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
if (inet_sk(sk)->transparent)
flow_flags |= FLOWI_FLAG_ANYSRC;
+ if (netif_index_is_vrf(sock_net(sk), oif))
+ flow_flags |= FLOWI_FLAG_VRFSRC;
+
flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
protocol, flow_flags, dst, src, dport, sport);
}
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 343d922d15c2..18fdb98185ab 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -141,6 +141,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f87908..444faa89a55f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -340,6 +340,7 @@ extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
+extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
struct Qdisc_class_common {
@@ -513,17 +514,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
- const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+ const struct sk_buff *skb)
{
- struct gnet_stats_basic_cpu *bstats =
- this_cpu_ptr(sch->cpu_bstats);
-
u64_stats_update_begin(&bstats->syncp);
bstats_update(&bstats->bstats, skb);
u64_stats_update_end(&bstats->syncp);
}
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
@@ -547,16 +551,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
sch->qstats.drops += count;
}
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
- sch->qstats.drops++;
+ qstats->drops++;
}
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
- struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+ qstats->overlimits++;
+}
- qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
diff --git a/include/net/sock.h b/include/net/sock.h
index f21f0708ec59..7aa78440559a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -429,7 +429,9 @@ struct sock {
void *sk_security;
#endif
__u32 sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
u32 sk_classid;
+#endif
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk);
@@ -1040,42 +1042,9 @@ struct proto {
#endif
};
-/*
- * Bits in struct cg_proto.flags
- */
-enum cg_proto_flags {
- /* Currently active and new sockets should be assigned to cgroups */
- MEMCG_SOCK_ACTIVE,
- /* It was ever activated; we must disarm static keys on destruction */
- MEMCG_SOCK_ACTIVATED,
-};
-
-struct cg_proto {
- struct page_counter memory_allocated; /* Current allocated memory. */
- struct percpu_counter sockets_allocated; /* Current number of sockets. */
- int memory_pressure;
- long sysctl_mem[3];
- unsigned long flags;
- /*
- * memcg field is used to find which memcg we belong directly
- * Each memcg struct can hold more than one cg_proto, so container_of
- * won't really cut.
- *
- * The elegant solution would be having an inverse function to
- * proto_cgroup in struct proto, but that means polluting the structure
- * for everybody, instead of just for memcg users.
- */
- struct mem_cgroup *memcg;
-};
-
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
-static inline bool memcg_proto_active(struct cg_proto *cg_proto)
-{
- return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
-}
-
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
@@ -1685,6 +1654,20 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
+static inline void sk_set_txhash(struct sock *sk)
+{
+ sk->sk_txhash = prandom_u32();
+
+ if (unlikely(!sk->sk_txhash))
+ sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+ if (sk->sk_txhash)
+ sk_set_txhash(sk);
+}
+
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
@@ -1709,6 +1692,8 @@ static inline void dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+ sk_rethink_txhash(sk);
+
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d5671f118bfc..319baab3b48e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -72,6 +72,7 @@ struct switchdev_obj {
struct switchdev_obj_fdb { /* PORT_FDB */
const unsigned char *addr;
u16 vid;
+ u16 ndm_state;
} fdb;
} u;
};
@@ -157,6 +158,9 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining);
#else
@@ -271,6 +275,12 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
return -EOPNOTSUPP;
}
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining)
+{
+}
+
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index a152e9858b2c..958d69cfb19c 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -15,7 +15,7 @@
struct tcf_bpf {
struct tcf_common common;
- struct bpf_prog *filter;
+ struct bpf_prog __rcu *filter;
union {
u32 bpf_fd;
u16 bpf_num_ops;
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9fc9b578908a..592a6bc02b0b 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -6,9 +6,10 @@
struct tcf_gact {
struct tcf_common common;
#ifdef CONFIG_GACT_PROB
- u16 tcfg_ptype;
- u16 tcfg_pval;
- int tcfg_paction;
+ u16 tcfg_ptype;
+ u16 tcfg_pval;
+ int tcfg_paction;
+ atomic_t packets;
#endif
};
#define to_gact(a) \
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 4dd77a1c106b..dae96bae1c19 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,7 +8,7 @@ struct tcf_mirred {
int tcfm_eaction;
int tcfm_ifindex;
int tcfm_ok_push;
- struct net_device *tcfm_dev;
+ struct net_device __rcu *tcfm_dev;
struct list_head tcfm_list;
};
#define to_mirred(a) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 950cfecaad3c..0cab28cd43a9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -281,6 +281,8 @@ extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
+extern int sysctl_tcp_pacing_ss_ratio;
+extern int sysctl_tcp_pacing_ca_ratio;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -886,7 +888,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(const char *name);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
@@ -989,6 +991,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
#define TCP_INFINITE_SSTHRESH 0x7fffffff
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1072,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
return tp->snd_cwnd < 2 * tp->max_packets_out;
return tp->is_cwnd_limited;
@@ -1160,6 +1167,19 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
}
u32 tcp_default_init_rwnd(u32 mss);
+void tcp_cwnd_restart(struct sock *sk, s32 delta);
+
+static inline void tcp_slow_start_after_idle_check(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 delta;
+
+ if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+ return;
+ delta = tcp_time_stamp - tp->lsndtime;
+ if (delta > inet_csk(sk)->icsk_rto)
+ tcp_cwnd_restart(sk, delta);
+}
/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 68f0ecad6c6e..1a47946f95ba 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -33,9 +33,6 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
static inline void twsk_destructor(struct sock *sk)
{
- BUG_ON(sk == NULL);
- BUG_ON(sk->sk_prot == NULL);
- BUG_ON(sk->sk_prot->twsk_prot == NULL);
if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
sk->sk_prot->twsk_prot->twsk_destructor(sk);
}
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index c491c1221606..cb2f89f20f5c 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -31,7 +31,8 @@ struct udp_port_cfg {
__be16 peer_udp_port;
unsigned int use_udp_checksums:1,
use_udp6_tx_checksums:1,
- use_udp6_rx_checksums:1;
+ use_udp6_rx_checksums:1,
+ ipv6_v6only:1;
};
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
@@ -93,6 +94,10 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
void udp_tunnel_sock_release(struct socket *sock);
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
+ __be16 flags, __be64 tunnel_id,
+ int md_size);
+
static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
bool udp_csum)
{
diff --git a/include/net/vrf.h b/include/net/vrf.h
new file mode 100644
index 000000000000..593e6094ddd4
--- /dev/null
+++ b/include/net/vrf.h
@@ -0,0 +1,178 @@
+/*
+ * include/net/vrf.h - adds vrf dev structure definitions
+ * Copyright (c) 2015 Cumulus Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_NET_VRF_H
+#define __LINUX_NET_VRF_H
+
+struct net_vrf_dev {
+ struct rcu_head rcu;
+ int ifindex; /* ifindex of master dev */
+ u32 tb_id; /* table id for VRF */
+};
+
+struct slave {
+ struct list_head list;
+ struct net_device *dev;
+};
+
+struct slave_queue {
+ struct list_head all_slaves;
+};
+
+struct net_vrf {
+ struct slave_queue queue;
+ struct rtable *rth;
+ u32 tb_id;
+};
+
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+/* called with rcu_read_lock() */
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+ struct net_vrf_dev *vrf_ptr;
+ int ifindex = 0;
+
+ if (!dev)
+ return 0;
+
+ if (netif_is_vrf(dev)) {
+ ifindex = dev->ifindex;
+ } else {
+ vrf_ptr = rcu_dereference(dev->vrf_ptr);
+ if (vrf_ptr)
+ ifindex = vrf_ptr->ifindex;
+ }
+
+ return ifindex;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+ int ifindex;
+
+ rcu_read_lock();
+ ifindex = vrf_master_ifindex_rcu(dev);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
+/* called with rcu_read_lock */
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+ u32 tb_id = 0;
+
+ if (dev) {
+ struct net_vrf_dev *vrf_ptr;
+
+ vrf_ptr = rcu_dereference(dev->vrf_ptr);
+ if (vrf_ptr)
+ tb_id = vrf_ptr->tb_id;
+ }
+ return tb_id;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+ u32 tb_id;
+
+ rcu_read_lock();
+ tb_id = vrf_dev_table_rcu(dev);
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+ u32 tb_id = 0;
+
+ if (!ifindex)
+ return 0;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ tb_id = vrf_dev_table_rcu(dev);
+
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
+/* called with rtnl */
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+ u32 tb_id = 0;
+
+ if (dev) {
+ struct net_vrf_dev *vrf_ptr;
+
+ vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+ if (vrf_ptr)
+ tb_id = vrf_ptr->tb_id;
+ }
+ return tb_id;
+}
+
+/* caller has already checked netif_is_vrf(dev) */
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+ struct rtable *rth = ERR_PTR(-ENETUNREACH);
+ struct net_vrf *vrf = netdev_priv(dev);
+
+ if (vrf) {
+ rth = vrf->rth;
+ atomic_inc(&rth->dst.__refcnt);
+ }
+ return rth;
+}
+
+#else
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+ return ERR_PTR(-ENETUNREACH);
+}
+#endif
+
+#endif /* __LINUX_NET_VRF_H */
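All of the vrf.h helpers above degrade gracefully: they return 0 (or ERR_PTR(-ENETUNREACH) for vrf_dev_get_rth()) when VRF support is compiled out or the device has no VRF association, so callers can use them unconditionally. A small hedged sketch of how a caller might derive the FIB table for an output device follows; example_pick_fib_table() and the RT_TABLE_MAIN fallback are illustrative, not taken from this change.

/* Illustrative only: pick the FIB table from a device's VRF binding,
 * falling back to the main table when the device is not enslaved (or
 * CONFIG_NET_VRF is off), in which case vrf_dev_table() returns 0. */
#include <net/vrf.h>
#include <linux/rtnetlink.h>	/* RT_TABLE_MAIN */

static u32 example_pick_fib_table(const struct net_device *dev)
{
	u32 tb_id = vrf_dev_table(dev);

	return tb_id ? tb_id : RT_TABLE_MAIN;
}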
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0082b5d33d7d..480a319b4c92 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,6 +7,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
+#include <net/dst_metadata.h>
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@ struct vxlanhdr {
#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS 8
+#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
+
struct vxlan_metadata {
- __be32 vni;
u32 gbp;
};
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
- struct vxlan_metadata *md);
-
/* per UDP socket information */
struct vxlan_sock {
struct hlist_node hlist;
- vxlan_rcv_t *rcv;
- void *data;
struct work_struct del_work;
struct socket *sock;
struct rcu_head rcu;
@@ -117,6 +116,58 @@ struct vxlan_sock {
u32 flags;
};
+union vxlan_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ u32 remote_vni;
+ u32 remote_ifindex;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+struct vxlan_config {
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ u32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ __u16 port_min;
+ __u16 port_max;
+ __u8 tos;
+ __u8 ttl;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+ struct hlist_node hlist; /* vni hash table */
+ struct list_head next; /* vxlan's per namespace list */
+ struct vxlan_sock *vn_sock; /* listening socket */
+ struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
+ struct vxlan_rdst default_dst; /* default destination */
+ u32 flags; /* VXLAN_F_* in vxlan.h */
+
+ struct timer_list age_timer;
+ spinlock_t hash_lock;
+ unsigned int addrcnt;
+ struct gro_cells gro_cells;
+
+ struct vxlan_config cfg;
+
+ struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
#define VXLAN_F_LEARN 0x01
#define VXLAN_F_PROXY 0x02
#define VXLAN_F_RSC 0x04
@@ -130,6 +181,7 @@ struct vxlan_sock {
#define VXLAN_F_REMCSUM_RX 0x400
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
+#define VXLAN_F_COLLECT_METADATA 0x2000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -137,18 +189,16 @@ struct vxlan_sock {
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
- VXLAN_F_REMCSUM_NOPARTIAL)
-
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags);
+ VXLAN_F_REMCSUM_NOPARTIAL | \
+ VXLAN_F_COLLECT_METADATA)
-void vxlan_sock_release(struct vxlan_sock *vs);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type, struct vxlan_config *conf);
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
- bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+ return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features)
@@ -191,4 +241,10 @@ static inline void vxlan_get_rx_port(struct net_device *netdev)
{
}
#endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+ return vs->sock->sk->sk_family;
+}
+
#endif
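The old vxlan_sock_add()/vxlan_xmit_skb() entry points are replaced by vxlan_dev_create(), which builds a complete VXLAN net_device from a struct vxlan_config. A rough sketch of a caller creating a metadata-collecting device is shown below; the device name, the use of NET_NAME_USER and the lack of error handling are illustrative assumptions.

/* Sketch: create a VXLAN device in per-packet metadata mode, roughly
 * what a flow-based tunnel user would do after this API change. */
#include <net/vxlan.h>

static struct net_device *example_create_vxlan(struct net *net)
{
	struct vxlan_config conf = {
		.dst_port = htons(4789),		/* IANA VXLAN port */
		.flags	  = VXLAN_F_COLLECT_METADATA,	/* per-packet metadata */
	};

	return vxlan_dev_create(net, "vxlan0", NET_NAME_USER, &conf);
}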
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f0ee97eec24d..312e3fee9ccf 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -285,10 +285,13 @@ struct xfrm_policy_afinfo {
unsigned short family;
struct dst_ops *dst_ops;
void (*garbage_collect)(struct net *net);
- struct dst_entry *(*dst_lookup)(struct net *net, int tos,
+ struct dst_entry *(*dst_lookup)(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr);
- int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+ int (*get_saddr)(struct net *net, int oif,
+ xfrm_address_t *saddr,
+ xfrm_address_t *daddr);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 39ed2d2fbd51..92a7d85917b4 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,14 +105,16 @@ enum ib_cm_data_size {
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
IB_CM_SIDR_REP_INFO_LENGTH = 72,
- /* compare done u32 at a time */
- IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
};
struct ib_cm_id;
struct ib_cm_req_event_param {
struct ib_cm_id *listen_id;
+
+ /* P_Key that was used by the GMP's BTH header */
+ u16 bth_pkey;
+
u8 port;
struct ib_sa_path_rec *primary_path;
@@ -223,6 +225,9 @@ struct ib_cm_apr_event_param {
struct ib_cm_sidr_req_event_param {
struct ib_cm_id *listen_id;
+ __be64 service_id;
+ /* P_Key that was used by the GMP's BTH header */
+ u16 bth_pkey;
u8 port;
u16 pkey;
};
@@ -337,11 +342,6 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
#define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL)
#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
-struct ib_cm_compare_data {
- u32 data[IB_CM_COMPARE_SIZE];
- u32 mask[IB_CM_COMPARE_SIZE];
-};
-
/**
* ib_cm_listen - Initiates listening on the specified service ID for
* connection and service ID resolution requests.
@@ -354,12 +354,13 @@ struct ib_cm_compare_data {
* range of service IDs. If set to 0, the service ID is matched
* exactly. This parameter is ignored if %service_id is set to
* IB_CM_ASSIGN_SERVICE_ID.
- * @compare_data: This parameter is optional. It specifies data that must
- * appear in the private data of a connection request for the specified
- * listen request.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
- struct ib_cm_compare_data *compare_data);
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
+ __be64 service_mask);
+
+struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ __be64 service_id);
struct ib_cm_req_param {
struct ib_sa_path_rec *primary_path;
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index c8422d5a5a91..188df91d5851 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -127,6 +127,23 @@
#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
#define IB_DEFAULT_PKEY_FULL 0xFFFF
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL 0x80
+#define IB_NOTICE_TYPE_URGENT 0x81
+#define IB_NOTICE_TYPE_SECURITY 0x82
+#define IB_NOTICE_TYPE_SM 0x83
+#define IB_NOTICE_TYPE_INFO 0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+
enum {
IB_MGMT_MAD_HDR = 24,
IB_MGMT_MAD_DATA = 232,
@@ -240,6 +257,70 @@ struct ib_class_port_info {
__be32 trap_qkey;
};
+struct ib_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 issuer_lid;
+ __be16 toggle_count;
+
+ union {
+ struct {
+ u8 details[54];
+ } raw_data;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __packed ntc_129_131;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* LID where change occurred */
+ u8 reserved2;
+ u8 local_changes; /* low bit - local changes */
+ __be32 new_cap_mask; /* new capability mask */
+ u8 reserved3;
+ u8 change_flags; /* low 3 bits only */
+ } __packed ntc_144;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* lid where sys guid changed */
+ __be16 reserved2;
+ __be64 new_sys_guid;
+ } __packed ntc_145;
+
+ struct {
+ __be16 reserved;
+ __be16 lid;
+ __be16 dr_slid;
+ u8 method;
+ u8 reserved2;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 reserved3;
+ u8 dr_trunc_hop;
+ u8 dr_rtn_path[30];
+ } __packed ntc_256;
+
+ struct {
+ __be16 reserved;
+ __be16 lid1;
+ __be16 lid2;
+ __be32 key;
+ __be32 sl_qp1; /* SL: high 4 bits */
+ __be32 qp2; /* high 8 bits reserved */
+ union ib_gid gid1;
+ union ib_gid gid2;
+ } __packed ntc_257_258;
+
+ } details;
+};
+
/**
* ib_mad_send_buf - MAD data buffer and work request for sends.
* @next: A pointer used to chain together MADs for posting.
@@ -388,7 +469,6 @@ enum {
struct ib_mad_agent {
struct ib_device *device;
struct ib_qp *qp;
- struct ib_mr *mr;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
ib_mad_snoop_handler snoop_handler;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b1f7592e02e4..709a5331e6b9 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -76,6 +76,8 @@ enum {
IB_OPCODE_UC = 0x20,
IB_OPCODE_RD = 0x40,
IB_OPCODE_UD = 0x60,
+ /* per IBTA 3.1 Table 38, A10.3.2 */
+ IB_OPCODE_CNP = 0x80,
/* operations -- just used to define real constants */
IB_OPCODE_SEND_FIRST = 0x00,
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index 98b9086d769a..b439e988408e 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -119,10 +119,57 @@ struct ib_port_info {
u8 link_roundtrip_latency[3];
};
+struct ib_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be64 sys_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3];
+} __packed;
+
+struct ib_vl_weight_elem {
+ u8 vl; /* IB: VL is low 4 bits, upper 4 bits reserved */
+ /* OPA: VL is low 5 bits, upper 3 bits reserved */
+ u8 weight;
+};
+
static inline u8
ib_get_smp_direction(struct ib_smp *smp)
{
return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
}
+/*
+ * SM Trap/Notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
+
+/*
+ * Other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+/*
+ * M_Key violation flags in dr_trunc_hop (trap 256).
+ */
+#define IB_NOTICE_TRAP_DR_NOTICE 0x80
+#define IB_NOTICE_TRAP_DR_TRUNC 0x40
+
+
#endif /* IB_SMI_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b0f898e3b2e7..7845fae6f2df 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -48,6 +48,7 @@
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
+#include <linux/socket.h>
#include <uapi/linux/if_ether.h>
#include <linux/atomic.h>
@@ -64,6 +65,12 @@ union ib_gid {
} global;
};
+extern union ib_gid zgid;
+
+struct ib_gid_attr {
+ struct net_device *ndev;
+};
+
enum rdma_node_type {
/* IB values map to NodeInfo:NodeType. */
RDMA_NODE_IB_CA = 1,
@@ -284,7 +291,7 @@ enum ib_port_cap_flags {
IB_PORT_BOOT_MGMT_SUP = 1 << 23,
IB_PORT_LINK_LATENCY_SUP = 1 << 24,
IB_PORT_CLIENT_REG_SUP = 1 << 25,
- IB_PORT_IP_BASED_GIDS = 1 << 26
+ IB_PORT_IP_BASED_GIDS = 1 << 26,
};
enum ib_port_width {
@@ -556,20 +563,18 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
*/
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
-enum ib_mr_create_flags {
- IB_MR_SIGNATURE_EN = 1,
-};
/**
- * ib_mr_init_attr - Memory region init attributes passed to routine
- * ib_create_mr.
- * @max_reg_descriptors: max number of registration descriptors that
- * may be used with registration work requests.
- * @flags: MR creation flags bit mask.
+ * enum ib_mr_type - memory region type
+ * @IB_MR_TYPE_MEM_REG: memory region that is used for
+ * normal registration
+ * @IB_MR_TYPE_SIGNATURE: memory region that is used for
+ * signature operations (data-integrity
+ * capable regions)
*/
-struct ib_mr_init_attr {
- int max_reg_descriptors;
- u32 flags;
+enum ib_mr_type {
+ IB_MR_TYPE_MEM_REG,
+ IB_MR_TYPE_SIGNATURE,
};
/**
@@ -1252,9 +1257,11 @@ struct ib_udata {
};
struct ib_pd {
+ u32 local_dma_lkey;
struct ib_device *device;
struct ib_uobject *uobject;
atomic_t usecnt; /* count all resources */
+ struct ib_mr *local_mr;
};
struct ib_xrcd {
@@ -1488,7 +1495,7 @@ struct ib_cache {
rwlock_t lock;
struct ib_event_handler event_handler;
struct ib_pkey_cache **pkey_cache;
- struct ib_gid_cache **gid_cache;
+ struct ib_gid_table **gid_cache;
u8 *lmc_cache;
};
@@ -1550,6 +1557,8 @@ struct ib_device {
spinlock_t client_data_lock;
struct list_head core_list;
+ /* Access to the client_data_list is protected by the client_data_lock
+ * spinlock and the lists_rwsem read-write semaphore */
struct list_head client_data_list;
struct ib_cache cache;
@@ -1572,9 +1581,47 @@ struct ib_device {
struct ib_port_attr *port_attr);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
u8 port_num);
+ /* When calling get_netdev, the HW vendor's driver should return the
+ * net device of device @device at port @port_num or NULL if such
+ * a net device doesn't exist. The vendor driver should call dev_hold
+ * on this net device. The HW vendor's device driver must guarantee
+ * that this function returns NULL before the net device reaches
+ * NETDEV_UNREGISTER_FINAL state.
+ */
+ struct net_device *(*get_netdev)(struct ib_device *device,
+ u8 port_num);
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
+ /* When calling add_gid, the HW vendor's driver should
+ * add the gid of device @device at gid index @index of
+ * port @port_num to be @gid. Meta-info of that gid (for example,
+ * the network device related to this gid is available
+ * at @attr. @context allows the HW vendor driver to store extra
+ * information together with a GID entry. The HW vendor may allocate
+ * memory to contain this information and store it in @context when a
+ * new GID entry is written to. Params are consistent until the next
+ * call of add_gid or delete_gid. The function should return 0 on
+ * success or error otherwise. The function could be called
+ * concurrently for different ports. This function is only called
+ * when roce_gid_table is used.
+ */
+ int (*add_gid)(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context);
+ /* When calling del_gid, the HW vendor's driver should delete the
+ * gid of device @device at gid index @index of port @port_num.
+ * Upon the deletion of a GID entry, the HW vendor must free any
+ * allocated memory. The caller will clear @context afterwards.
+ * This function is only called when roce_gid_table is used.
+ */
+ int (*del_gid)(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ void **context);
int (*query_pkey)(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
int (*modify_device)(struct ib_device *device,
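The add_gid/del_gid comments in the hunk above define a clear ownership contract: the driver may allocate per-entry state and publish it through *@context, and it must free that state when the entry is deleted or overwritten (the core clears *@context afterwards). A bare-bones sketch of driver callbacks honouring that contract; struct my_gid_entry and the hardware-programming steps are placeholders.

/* Hypothetical RoCE driver GID-table callbacks, matching the contract
 * described in the comments above. */
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_gid_entry {
	union ib_gid gid;
	struct net_device *ndev;	/* taken from attr->ndev */
};

static int my_add_gid(struct ib_device *device, u8 port_num,
		      unsigned int index, const union ib_gid *gid,
		      const struct ib_gid_attr *attr, void **context)
{
	struct my_gid_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;

	entry->gid = *gid;
	entry->ndev = attr->ndev;
	/* ... program the GID into the HW table at (port_num, index) ... */

	*context = entry;	/* valid until del_gid or the next add_gid */
	return 0;
}

static int my_del_gid(struct ib_device *device, u8 port_num,
		      unsigned int index, void **context)
{
	/* ... clear the HW table entry at (port_num, index) ... */
	kfree(*context);	/* the core clears *context afterwards */
	return 0;
}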
@@ -1668,11 +1715,9 @@ struct ib_device {
int (*query_mr)(struct ib_mr *mr,
struct ib_mr_attr *mr_attr);
int (*dereg_mr)(struct ib_mr *mr);
- int (*destroy_mr)(struct ib_mr *mr);
- struct ib_mr * (*create_mr)(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr);
- struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
- int max_page_list_len);
+ struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
int page_list_len);
void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
@@ -1724,6 +1769,7 @@ struct ib_device {
int (*destroy_flow)(struct ib_flow *flow_id);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
+ void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
struct ib_dma_mapping_ops *dma_ops;
@@ -1761,8 +1807,30 @@ struct ib_device {
struct ib_client {
char *name;
void (*add) (struct ib_device *);
- void (*remove)(struct ib_device *);
-
+ void (*remove)(struct ib_device *, void *client_data);
+
+ /* Returns the net_dev belonging to this ib_client and matching the
+ * given parameters.
+ * @dev: An RDMA device that the net_dev use for communication.
+ * @port: A physical port number on the RDMA device.
+ * @pkey: P_Key that the net_dev uses if applicable.
+ * @gid: A GID that the net_dev uses to communicate.
+ * @addr: An IP address the net_dev is configured with.
+ * @client_data: The device's client data set by ib_set_client_data().
+ *
+ * An ib_client that implements a net_dev on top of RDMA devices
+ * (such as IP over IB) should implement this callback, allowing the
+ * rdma_cm module to find the right net_dev for a given request.
+ *
+ * The caller is responsible for calling dev_put on the returned
+ * netdev. */
+ struct net_device *(*get_net_dev_by_params)(
+ struct ib_device *dev,
+ u8 port,
+ u16 pkey,
+ const union ib_gid *gid,
+ const struct sockaddr *addr,
+ void *client_data);
struct list_head list;
};
@@ -2071,34 +2139,6 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
}
/**
- * rdma_cap_read_multi_sge - Check if the port of device has the capability
- * RDMA Read Multiple Scatter-Gather Entries.
- * @device: Device to check
- * @port_num: Port number to check
- *
- * iWARP has a restriction that RDMA READ requests may only have a single
- * Scatter/Gather Entry (SGE) in the work request.
- *
- * NOTE: although the linux kernel currently assumes all devices are either
- * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
- * WRITEs, according to Tom Talpey, this is not accurate. There are some
- * devices out there that support more than a single SGE on RDMA READ
- * requests, but do not support the same number of SGEs as they do on
- * RDMA WRITE requests. The linux kernel would need rearchitecting to
- * support these imbalanced READ/WRITE SGEs allowed devices. So, for now,
- * suffice with either the device supports the same READ/WRITE SGEs, or
- * it only gets one READ sge.
- *
- * Return: true for any device that allows more than one SGE in RDMA READ
- * requests.
- */
-static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
- u8 port_num)
-{
- return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
-}
-
-/**
* rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
*
* @device: Device
@@ -2115,6 +2155,26 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n
return device->port_immutable[port_num].max_mad_size;
}
+/**
+ * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE GID table mechanism manages the various GIDs for a device.
+ *
+ * NOTE: if allocating the port's GID table has failed, this call will still
+ * return true, but any RoCE GID table API will fail.
+ *
+ * Return: true if the port uses RoCE GID table mechanism in order to manage
+ * its GIDs.
+ */
+static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
+ u8 port_num)
+{
+ return rdma_protocol_roce(device, port_num) &&
+ device->add_gid && device->del_gid;
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
@@ -2135,20 +2195,9 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
-/**
- * ib_alloc_pd - Allocates an unused protection domain.
- * @device: The device on which to allocate the protection domain.
- *
- * A protection domain object provides an association between QPs, shared
- * receive queues, address handles, memory regions, and memory windows.
- */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
-/**
- * ib_dealloc_pd - Deallocates a protection domain.
- * @pd: The protection domain to deallocate.
- */
-int ib_dealloc_pd(struct ib_pd *pd);
+void ib_dealloc_pd(struct ib_pd *pd);
/**
* ib_create_ah - Creates an address handle for the given address vector.
@@ -2760,52 +2809,6 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
}
/**
- * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
- * by an HCA.
- * @pd: The protection domain associated assigned to the registered region.
- * @phys_buf_array: Specifies a list of physical buffers to use in the
- * memory region.
- * @num_phys_buf: Specifies the size of the phys_buf_array.
- * @mr_access_flags: Specifies the memory access rights.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start);
-
-/**
- * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
- * Conceptually, this call performs the functions deregister memory region
- * followed by register physical memory region. Where possible,
- * resources are reused instead of deallocated and reallocated.
- * @mr: The memory region to modify.
- * @mr_rereg_mask: A bit-mask used to indicate which of the following
- * properties of the memory region are being modified.
- * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
- * the new protection domain to associated with the memory region,
- * otherwise, this parameter is ignored.
- * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- * field specifies a list of physical buffers to use in the new
- * translation, otherwise, this parameter is ignored.
- * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- * field specifies the size of the phys_buf_array, otherwise, this
- * parameter is ignored.
- * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
- * field specifies the new memory access rights, otherwise, this
- * parameter is ignored.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-int ib_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start);
-
-/**
* ib_query_mr - Retrieves information about a specific memory region.
* @mr: The memory region to retrieve information about.
* @mr_attr: The attributes of the specified memory region.
@@ -2821,33 +2824,9 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
*/
int ib_dereg_mr(struct ib_mr *mr);
-
-/**
- * ib_create_mr - Allocates a memory region that may be used for
- * signature handover operations.
- * @pd: The protection domain associated with the region.
- * @mr_init_attr: memory region init attributes.
- */
-struct ib_mr *ib_create_mr(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr);
-
-/**
- * ib_destroy_mr - Destroys a memory region that was created using
- * ib_create_mr and removes it from HW translation tables.
- * @mr: The memory region to destroy.
- *
- * This function can fail, if the memory region has memory windows bound to it.
- */
-int ib_destroy_mr(struct ib_mr *mr);
-
-/**
- * ib_alloc_fast_reg_mr - Allocates memory region usable with the
- * IB_WR_FAST_REG_MR send work request.
- * @pd: The protection domain associated with the region.
- * @max_page_list_len: requested max physical buffer list length to be
- * used with fast register work requests for this MR.
- */
-struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
/**
* ib_alloc_fast_reg_page_list - Allocates a page list array
@@ -3040,4 +3019,8 @@ static inline int ib_check_mr_access(int flags)
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
+struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
+ u16 pkey, const union ib_gid *gid,
+ const struct sockaddr *addr);
+
#endif /* IB_VERBS_H */
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
new file mode 100644
index 000000000000..391dae1931c0
--- /dev/null
+++ b/include/rdma/opa_port_info.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(OPA_PORT_INFO_H)
+#define OPA_PORT_INFO_H
+
+/* Temporary until HFI driver is updated */
+#ifndef USE_PI_LED_ENABLE
+#define USE_PI_LED_ENABLE 0
+#endif
+
+#define OPA_PORT_LINK_MODE_NOP 0 /* No change */
+#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */
+
+#define OPA_PORT_PACKET_FORMAT_NOP 0 /* No change */
+#define OPA_PORT_PACKET_FORMAT_8B 1 /* Format 8B */
+#define OPA_PORT_PACKET_FORMAT_9B 2 /* Format 9B */
+#define OPA_PORT_PACKET_FORMAT_10B 4 /* Format 10B */
+#define OPA_PORT_PACKET_FORMAT_16B 8 /* Format 16B */
+
+#define OPA_PORT_LTP_CRC_MODE_NONE 0 /* No change */
+#define OPA_PORT_LTP_CRC_MODE_14 1 /* 14-bit LTP CRC mode (optional) */
+#define OPA_PORT_LTP_CRC_MODE_16 2 /* 16-bit LTP CRC mode */
+#define OPA_PORT_LTP_CRC_MODE_48 4 /* 48-bit LTP CRC mode (optional) */
+#define OPA_PORT_LTP_CRC_MODE_PER_LANE 8 /* 12/16-bit per lane LTP CRC mode */
+
+/* Link Down / Neighbor Link Down Reason; indicated as follows: */
+#define OPA_LINKDOWN_REASON_NONE 0 /* No specified reason */
+#define OPA_LINKDOWN_REASON_RCV_ERROR_0 1
+#define OPA_LINKDOWN_REASON_BAD_PKT_LEN 2
+#define OPA_LINKDOWN_REASON_PKT_TOO_LONG 3
+#define OPA_LINKDOWN_REASON_PKT_TOO_SHORT 4
+#define OPA_LINKDOWN_REASON_BAD_SLID 5
+#define OPA_LINKDOWN_REASON_BAD_DLID 6
+#define OPA_LINKDOWN_REASON_BAD_L2 7
+#define OPA_LINKDOWN_REASON_BAD_SC 8
+#define OPA_LINKDOWN_REASON_RCV_ERROR_8 9
+#define OPA_LINKDOWN_REASON_BAD_MID_TAIL 10
+#define OPA_LINKDOWN_REASON_RCV_ERROR_10 11
+#define OPA_LINKDOWN_REASON_PREEMPT_ERROR 12
+#define OPA_LINKDOWN_REASON_PREEMPT_VL15 13
+#define OPA_LINKDOWN_REASON_BAD_VL_MARKER 14
+#define OPA_LINKDOWN_REASON_RCV_ERROR_14 15
+#define OPA_LINKDOWN_REASON_RCV_ERROR_15 16
+#define OPA_LINKDOWN_REASON_BAD_HEAD_DIST 17
+#define OPA_LINKDOWN_REASON_BAD_TAIL_DIST 18
+#define OPA_LINKDOWN_REASON_BAD_CTRL_DIST 19
+#define OPA_LINKDOWN_REASON_BAD_CREDIT_ACK 20
+#define OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER 21
+#define OPA_LINKDOWN_REASON_BAD_PREEMPT 22
+#define OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT 23
+#define OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT 24
+#define OPA_LINKDOWN_REASON_RCV_ERROR_24 25
+#define OPA_LINKDOWN_REASON_RCV_ERROR_25 26
+#define OPA_LINKDOWN_REASON_RCV_ERROR_26 27
+#define OPA_LINKDOWN_REASON_RCV_ERROR_27 28
+#define OPA_LINKDOWN_REASON_RCV_ERROR_28 29
+#define OPA_LINKDOWN_REASON_RCV_ERROR_29 30
+#define OPA_LINKDOWN_REASON_RCV_ERROR_30 31
+#define OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN 32
+#define OPA_LINKDOWN_REASON_UNKNOWN 33
+/* 34 -reserved */
+#define OPA_LINKDOWN_REASON_REBOOT 35
+#define OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN 36
+/* 37-38 reserved */
+#define OPA_LINKDOWN_REASON_FM_BOUNCE 39
+#define OPA_LINKDOWN_REASON_SPEED_POLICY 40
+#define OPA_LINKDOWN_REASON_WIDTH_POLICY 41
+/* 42-48 reserved */
+#define OPA_LINKDOWN_REASON_DISCONNECTED 49
+#define OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED 50
+#define OPA_LINKDOWN_REASON_NOT_INSTALLED 51
+#define OPA_LINKDOWN_REASON_CHASSIS_CONFIG 52
+/* 53 reserved */
+#define OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED 54
+/* 55 reserved */
+#define OPA_LINKDOWN_REASON_POWER_POLICY 56
+#define OPA_LINKDOWN_REASON_LINKSPEED_POLICY 57
+#define OPA_LINKDOWN_REASON_LINKWIDTH_POLICY 58
+/* 59 reserved */
+#define OPA_LINKDOWN_REASON_SWITCH_MGMT 60
+#define OPA_LINKDOWN_REASON_SMA_DISABLED 61
+/* 62 reserved */
+#define OPA_LINKDOWN_REASON_TRANSIENT 63
+/* 64-255 reserved */
+
+/* OPA Link Init reason; indicated as follows: */
+/* 3-7; 11-15 reserved; 8-15 cleared on Polling->LinkUp */
+#define OPA_LINKINIT_REASON_NOP 0
+#define OPA_LINKINIT_REASON_LINKUP (1 << 4)
+#define OPA_LINKINIT_REASON_FLAPPING (2 << 4)
+#define OPA_LINKINIT_REASON_CLEAR (8 << 4)
+#define OPA_LINKINIT_OUTSIDE_POLICY (8 << 4)
+#define OPA_LINKINIT_QUARANTINED (9 << 4)
+#define OPA_LINKINIT_INSUFIC_CAPABILITY (10 << 4)
+
+#define OPA_LINK_SPEED_NOP 0x0000 /* Reserved (1-5 Gbps) */
+#define OPA_LINK_SPEED_12_5G 0x0001 /* 12.5 Gbps */
+#define OPA_LINK_SPEED_25G 0x0002 /* 25.78125? Gbps (EDR) */
+
+#define OPA_LINK_WIDTH_1X 0x0001
+#define OPA_LINK_WIDTH_2X 0x0002
+#define OPA_LINK_WIDTH_3X 0x0004
+#define OPA_LINK_WIDTH_4X 0x0008
+
+#define OPA_CAP_MASK3_IsSnoopSupported (1 << 7)
+#define OPA_CAP_MASK3_IsAsyncSC2VLSupported (1 << 6)
+#define OPA_CAP_MASK3_IsAddrRangeConfigSupported (1 << 5)
+#define OPA_CAP_MASK3_IsPassThroughSupported (1 << 4)
+#define OPA_CAP_MASK3_IsSharedSpaceSupported (1 << 3)
+/* reserved (1 << 2) */
+#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1)
+#define OPA_CAP_MASK3_IsVLrSupported (1 << 0)
+
+/*
+ * new MTU values
+ */
+enum {
+ OPA_MTU_8192 = 6,
+ OPA_MTU_10240 = 7,
+};
+
+enum {
+ OPA_PORT_PHYS_CONF_DISCONNECTED = 0,
+ OPA_PORT_PHYS_CONF_STANDARD = 1,
+ OPA_PORT_PHYS_CONF_FIXED = 2,
+ OPA_PORT_PHYS_CONF_VARIABLE = 3,
+ OPA_PORT_PHYS_CONF_SI_PHOTO = 4
+};
+
+enum port_info_field_masks {
+ /* vl.cap */
+ OPA_PI_MASK_VL_CAP = 0x1F,
+ /* port_states.ledenable_offlinereason */
+ OPA_PI_MASK_OFFLINE_REASON = 0x0F,
+ OPA_PI_MASK_LED_ENABLE = 0x40,
+ /* port_states.unsleepstate_downdefstate */
+ OPA_PI_MASK_UNSLEEP_STATE = 0xF0,
+ OPA_PI_MASK_DOWNDEF_STATE = 0x0F,
+ /* port_states.portphysstate_portstate */
+ OPA_PI_MASK_PORT_PHYSICAL_STATE = 0xF0,
+ OPA_PI_MASK_PORT_STATE = 0x0F,
+ /* port_phys_conf */
+ OPA_PI_MASK_PORT_PHYSICAL_CONF = 0x0F,
+ /* collectivemask_multicastmask */
+ OPA_PI_MASK_COLLECT_MASK = 0x38,
+ OPA_PI_MASK_MULTICAST_MASK = 0x07,
+ /* mkeyprotect_lmc */
+ OPA_PI_MASK_MKEY_PROT_BIT = 0xC0,
+ OPA_PI_MASK_LMC = 0x0F,
+ /* smsl */
+ OPA_PI_MASK_SMSL = 0x1F,
+ /* partenforce_filterraw */
+ /* Filter Raw In/Out bits 1 and 2 were removed */
+ OPA_PI_MASK_LINKINIT_REASON = 0xF0,
+ OPA_PI_MASK_PARTITION_ENFORCE_IN = 0x08,
+ OPA_PI_MASK_PARTITION_ENFORCE_OUT = 0x04,
+ /* operational_vls */
+ OPA_PI_MASK_OPERATIONAL_VL = 0x1F,
+ /* sa_qp */
+ OPA_PI_MASK_SA_QP = 0x00FFFFFF,
+ /* sm_trap_qp */
+ OPA_PI_MASK_SM_TRAP_QP = 0x00FFFFFF,
+ /* localphy_overrun_errors */
+ OPA_PI_MASK_LOCAL_PHY_ERRORS = 0xF0,
+ OPA_PI_MASK_OVERRUN_ERRORS = 0x0F,
+ /* clientrereg_subnettimeout */
+ OPA_PI_MASK_CLIENT_REREGISTER = 0x80,
+ OPA_PI_MASK_SUBNET_TIMEOUT = 0x1F,
+ /* port_link_mode */
+ OPA_PI_MASK_PORT_LINK_SUPPORTED = (0x001F << 10),
+ OPA_PI_MASK_PORT_LINK_ENABLED = (0x001F << 5),
+ OPA_PI_MASK_PORT_LINK_ACTIVE = (0x001F << 0),
+ /* port_link_crc_mode */
+ OPA_PI_MASK_PORT_LINK_CRC_SUPPORTED = 0x0F00,
+ OPA_PI_MASK_PORT_LINK_CRC_ENABLED = 0x00F0,
+ OPA_PI_MASK_PORT_LINK_CRC_ACTIVE = 0x000F,
+ /* port_mode */
+ OPA_PI_MASK_PORT_MODE_SECURITY_CHECK = 0x0001,
+ OPA_PI_MASK_PORT_MODE_16B_TRAP_QUERY = 0x0002,
+ OPA_PI_MASK_PORT_MODE_PKEY_CONVERT = 0x0004,
+ OPA_PI_MASK_PORT_MODE_SC2SC_MAPPING = 0x0008,
+ OPA_PI_MASK_PORT_MODE_VL_MARKER = 0x0010,
+ OPA_PI_MASK_PORT_PASS_THROUGH = 0x0020,
+ OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE = 0x0040,
+ /* flit_control.interleave */
+ OPA_PI_MASK_INTERLEAVE_DIST_SUP = (0x0003 << 12),
+ OPA_PI_MASK_INTERLEAVE_DIST_ENABLE = (0x0003 << 10),
+ OPA_PI_MASK_INTERLEAVE_MAX_NEST_TX = (0x001F << 5),
+ OPA_PI_MASK_INTERLEAVE_MAX_NEST_RX = (0x001F << 0),
+
+ /* port_error_action */
+ OPA_PI_MASK_EX_BUFFER_OVERRUN = 0x80000000,
+ /* 7 bits reserved */
+ OPA_PI_MASK_FM_CFG_ERR_EXCEED_MULTICAST_LIMIT = 0x00800000,
+ OPA_PI_MASK_FM_CFG_BAD_CONTROL_FLIT = 0x00400000,
+ OPA_PI_MASK_FM_CFG_BAD_PREEMPT = 0x00200000,
+ OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER = 0x00100000,
+ OPA_PI_MASK_FM_CFG_BAD_CRDT_ACK = 0x00080000,
+ OPA_PI_MASK_FM_CFG_BAD_CTRL_DIST = 0x00040000,
+ OPA_PI_MASK_FM_CFG_BAD_TAIL_DIST = 0x00020000,
+ OPA_PI_MASK_FM_CFG_BAD_HEAD_DIST = 0x00010000,
+ /* 2 bits reserved */
+ OPA_PI_MASK_PORT_RCV_BAD_VL_MARKER = 0x00002000,
+ OPA_PI_MASK_PORT_RCV_PREEMPT_VL15 = 0x00001000,
+ OPA_PI_MASK_PORT_RCV_PREEMPT_ERROR = 0x00000800,
+ /* 1 bit reserved */
+ OPA_PI_MASK_PORT_RCV_BAD_MidTail = 0x00000200,
+ /* 1 bit reserved */
+ OPA_PI_MASK_PORT_RCV_BAD_SC = 0x00000080,
+ OPA_PI_MASK_PORT_RCV_BAD_L2 = 0x00000040,
+ OPA_PI_MASK_PORT_RCV_BAD_DLID = 0x00000020,
+ OPA_PI_MASK_PORT_RCV_BAD_SLID = 0x00000010,
+ OPA_PI_MASK_PORT_RCV_PKTLEN_TOOSHORT = 0x00000008,
+ OPA_PI_MASK_PORT_RCV_PKTLEN_TOOLONG = 0x00000004,
+ OPA_PI_MASK_PORT_RCV_BAD_PKTLEN = 0x00000002,
+ OPA_PI_MASK_PORT_RCV_BAD_LT = 0x00000001,
+
+ /* pass_through.res_drctl */
+ OPA_PI_MASK_PASS_THROUGH_DR_CONTROL = 0x01,
+
+ /* buffer_units */
+ OPA_PI_MASK_BUF_UNIT_VL15_INIT = (0x00000FFF << 11),
+ OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE = (0x0000001F << 6),
+ OPA_PI_MASK_BUF_UNIT_CREDIT_ACK = (0x00000003 << 3),
+ OPA_PI_MASK_BUF_UNIT_BUF_ALLOC = (0x00000003 << 0),
+
+ /* neigh_mtu.pvlx_to_mtu */
+ OPA_PI_MASK_NEIGH_MTU_PVL0 = 0xF0,
+ OPA_PI_MASK_NEIGH_MTU_PVL1 = 0x0F,
+
+ /* neigh_mtu.vlstall_hoq_life */
+ OPA_PI_MASK_VL_STALL = (0x03 << 5),
+ OPA_PI_MASK_HOQ_LIFE = (0x1F << 0),
+
+ /* port_neigh_mode */
+ OPA_PI_MASK_NEIGH_MGMT_ALLOWED = (0x01 << 3),
+ OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS = (0x01 << 2),
+ OPA_PI_MASK_NEIGH_NODE_TYPE = (0x03 << 0),
+
+ /* resptime_value */
+ OPA_PI_MASK_RESPONSE_TIME_VALUE = 0x1F,
+
+ /* mtucap */
+ OPA_PI_MASK_MTU_CAP = 0x0F,
+};
+
+#if USE_PI_LED_ENABLE
+struct opa_port_states {
+ u8 reserved;
+ u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */
+ u8 reserved2;
+ u8 portphysstate_portstate; /* 4 bits, 4 bits */
+};
+#define PI_LED_ENABLE_SUP 1
+#else
+struct opa_port_states {
+ u8 reserved;
+ u8 offline_reason; /* 2 res, 6 bits */
+ u8 reserved2;
+ u8 portphysstate_portstate; /* 4 bits, 4 bits */
+};
+#define PI_LED_ENABLE_SUP 0
+#endif
+
+struct opa_port_state_info {
+ struct opa_port_states port_states;
+ u16 link_width_downgrade_tx_active;
+ u16 link_width_downgrade_rx_active;
+};
+
+struct opa_port_info {
+ __be32 lid;
+ __be32 flow_control_mask;
+
+ struct {
+ u8 res; /* was inittype */
+ u8 cap; /* 3 res, 5 bits */
+ __be16 high_limit;
+ __be16 preempt_limit;
+ u8 arb_high_cap;
+ u8 arb_low_cap;
+ } vl;
+
+ struct opa_port_states port_states;
+ u8 port_phys_conf; /* 4 res, 4 bits */
+ u8 collectivemask_multicastmask; /* 2 res, 3, 3 */
+ u8 mkeyprotect_lmc; /* 2 bits, 2 res, 4 bits */
+ u8 smsl; /* 3 res, 5 bits */
+
+ u8 partenforce_filterraw; /* bit fields */
+ u8 operational_vls; /* 3 res, 5 bits */
+ __be16 pkey_8b;
+ __be16 pkey_10b;
+ __be16 mkey_violations;
+
+ __be16 pkey_violations;
+ __be16 qkey_violations;
+ __be32 sm_trap_qp; /* 8 bits, 24 bits */
+
+ __be32 sa_qp; /* 8 bits, 24 bits */
+ u8 neigh_port_num;
+ u8 link_down_reason;
+ u8 neigh_link_down_reason;
+ u8 clientrereg_subnettimeout; /* 1 bit, 2 bits, 5 */
+
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ __be16 active;
+ } link_speed;
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ __be16 active;
+ } link_width;
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ __be16 tx_active;
+ __be16 rx_active;
+ } link_width_downgrade;
+ __be16 port_link_mode; /* 1 res, 5 bits, 5 bits, 5 bits */
+ __be16 port_ltp_crc_mode; /* 4 res, 4 bits, 4 bits, 4 bits */
+
+ __be16 port_mode; /* 9 res, bit fields */
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ } port_packet_format;
+ struct {
+ __be16 interleave; /* 2 res, 2,2,5,5 */
+ struct {
+ __be16 min_initial;
+ __be16 min_tail;
+ u8 large_pkt_limit;
+ u8 small_pkt_limit;
+ u8 max_small_pkt_limit;
+ u8 preemption_limit;
+ } preemption;
+ } flit_control;
+
+ __be32 reserved4;
+ __be32 port_error_action; /* bit field */
+
+ struct {
+ u8 egress_port;
+ u8 res_drctl; /* 7 res, 1 */
+ } pass_through;
+ __be16 mkey_lease_period;
+ __be32 buffer_units; /* 9 res, 12, 5, 3, 3 */
+
+ __be32 reserved5;
+ __be32 sm_lid;
+
+ __be64 mkey;
+
+ __be64 subnet_prefix;
+
+ struct {
+ u8 pvlx_to_mtu[OPA_MAX_VLS/2]; /* 4 bits, 4 bits */
+ } neigh_mtu;
+
+ struct {
+ u8 vlstall_hoqlife; /* 3 bits, 5 bits */
+ } xmit_q[OPA_MAX_VLS];
+
+ struct {
+ u8 addr[16];
+ } ipaddr_ipv6;
+
+ struct {
+ u8 addr[4];
+ } ipaddr_ipv4;
+
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+
+ __be64 neigh_node_guid;
+
+ __be32 ib_cap_mask;
+ __be16 reserved9; /* was ib_cap_mask2 */
+ __be16 opa_cap_mask;
+
+ __be32 reserved10; /* was link_roundtrip_latency */
+ __be16 overall_buffer_space;
+ __be16 reserved11; /* was max_credit_hint */
+
+ __be16 diag_code;
+ struct {
+ u8 buffer;
+ u8 wire;
+ } replay_depth;
+ u8 port_neigh_mode;
+ u8 mtucap; /* 4 res, 4 bits */
+
+ u8 resptimevalue; /* 3 res, 5 bits */
+ u8 local_port_num;
+ u8 reserved12;
+ u8 reserved13; /* was guid_cap */
+} __attribute__ ((packed));
+
+#endif /* OPA_PORT_INFO_H */
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index 29063e84c253..4a529ef47995 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -40,6 +40,10 @@
#define OPA_SMP_DR_DATA_SIZE 1872
#define OPA_SMP_MAX_PATH_HOPS 64
+#define OPA_MAX_VLS 32
+#define OPA_MAX_SLS 32
+#define OPA_MAX_SCS 32
+
#define OPA_SMI_CLASS_VERSION 0x80
#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
@@ -73,6 +77,49 @@ struct opa_smp {
} __packed;
+/* Subnet management attributes */
+/* ... */
+#define OPA_ATTRIB_ID_NODE_DESCRIPTION cpu_to_be16(0x0010)
+#define OPA_ATTRIB_ID_NODE_INFO cpu_to_be16(0x0011)
+#define OPA_ATTRIB_ID_PORT_INFO cpu_to_be16(0x0015)
+#define OPA_ATTRIB_ID_PARTITION_TABLE cpu_to_be16(0x0016)
+#define OPA_ATTRIB_ID_SL_TO_SC_MAP cpu_to_be16(0x0017)
+#define OPA_ATTRIB_ID_VL_ARBITRATION cpu_to_be16(0x0018)
+#define OPA_ATTRIB_ID_SM_INFO cpu_to_be16(0x0020)
+#define OPA_ATTRIB_ID_CABLE_INFO cpu_to_be16(0x0032)
+#define OPA_ATTRIB_ID_AGGREGATE cpu_to_be16(0x0080)
+#define OPA_ATTRIB_ID_SC_TO_SL_MAP cpu_to_be16(0x0082)
+#define OPA_ATTRIB_ID_SC_TO_VLR_MAP cpu_to_be16(0x0083)
+#define OPA_ATTRIB_ID_SC_TO_VLT_MAP cpu_to_be16(0x0084)
+#define OPA_ATTRIB_ID_SC_TO_VLNT_MAP cpu_to_be16(0x0085)
+/* ... */
+#define OPA_ATTRIB_ID_PORT_STATE_INFO cpu_to_be16(0x0087)
+/* ... */
+#define OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE cpu_to_be16(0x008A)
+/* ... */
+
+struct opa_node_description {
+ u8 data[64];
+} __attribute__ ((packed));
+
+struct opa_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be32 reserved;
+ __be64 system_image_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3]; /* network byte order */
+} __attribute__ ((packed));
+
+#define OPA_PARTITION_TABLE_BLK_SIZE 32
+
static inline u8
opa_get_smp_direction(struct opa_smp *smp)
{
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 0790882e0c9b..585266144329 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -77,4 +77,11 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
unsigned int group, gfp_t flags);
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative for no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
#endif /* _RDMA_NETLINK_H */
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 676b03b78e57..11571b2a831e 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -61,4 +61,9 @@ static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
struct scsi_sense_hdr *sshdr);
+extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
+int scsi_set_sense_information(u8 *buf, int buf_len, u64 info);
+extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+ int desc_type);
+
#endif /* _SCSI_COMMON_H_ */
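scsi_build_sense_buffer() and scsi_sense_desc_find() move into scsi_common.h so that code outside the error-handling core can use them. As a reminder of the calling convention (the first argument selects descriptor vs. fixed format), a short hedged example follows; the wrapper function is illustrative, while ILLEGAL_REQUEST and the 0x24/0x00 additional sense codes are standard SCSI values for INVALID FIELD IN CDB.

/* Illustrative: fill a fixed-format sense buffer with
 * ILLEGAL REQUEST / INVALID FIELD IN CDB. */
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>	/* ILLEGAL_REQUEST */

static void example_invalid_field_sense(u8 *sense_buf)
{
	scsi_build_sense_buffer(0 /* fixed format */, sense_buf,
				ILLEGAL_REQUEST, 0x24, 0x00);
}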
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 50c2a363bc8f..fe89d7cd67b9 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -196,34 +196,13 @@ struct scsi_device {
struct execute_work ew; /* used to get process context on put */
struct work_struct requeue_work;
- struct scsi_dh_data *scsi_dh_data;
+ struct scsi_device_handler *handler;
+ void *handler_data;
+
enum scsi_device_state sdev_state;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
-typedef void (*activate_complete)(void *, int);
-struct scsi_device_handler {
- /* Used by the infrastructure */
- struct list_head list; /* list of scsi_device_handlers */
-
- /* Filled by the hardware handler */
- struct module *module;
- const char *name;
- int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
- struct scsi_dh_data *(*attach)(struct scsi_device *);
- void (*detach)(struct scsi_device *);
- int (*activate)(struct scsi_device *, activate_complete, void *);
- int (*prep_fn)(struct scsi_device *, struct request *);
- int (*set_params)(struct scsi_device *, const char *);
- bool (*match)(struct scsi_device *);
-};
-
-struct scsi_dh_data {
- struct scsi_device_handler *scsi_dh;
- struct scsi_device *sdev;
- struct kref kref;
-};
-
#define to_scsi_device(d) \
container_of(d, struct scsi_device, sdev_gendev)
#define class_to_sdev(d) \
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 620c723ee8ed..85d731746834 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -55,11 +55,26 @@ enum {
SCSI_DH_NOSYS,
SCSI_DH_DRIVER_MAX,
};
-#if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE)
+
+typedef void (*activate_complete)(void *, int);
+struct scsi_device_handler {
+ /* Used by the infrastructure */
+ struct list_head list; /* list of scsi_device_handlers */
+
+ /* Filled by the hardware handler */
+ struct module *module;
+ const char *name;
+ int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+ int (*attach)(struct scsi_device *);
+ void (*detach)(struct scsi_device *);
+ int (*activate)(struct scsi_device *, activate_complete, void *);
+ int (*prep_fn)(struct scsi_device *, struct request *);
+ int (*set_params)(struct scsi_device *, const char *);
+};
+
+#ifdef CONFIG_SCSI_DH
extern int scsi_dh_activate(struct request_queue *, activate_complete, void *);
-extern int scsi_dh_handler_exist(const char *);
extern int scsi_dh_attach(struct request_queue *, const char *);
-extern void scsi_dh_detach(struct request_queue *);
extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t);
extern int scsi_dh_set_params(struct request_queue *, const char *);
#else
@@ -69,18 +84,10 @@ static inline int scsi_dh_activate(struct request_queue *req,
fn(data, 0);
return 0;
}
-static inline int scsi_dh_handler_exist(const char *name)
-{
- return 0;
-}
static inline int scsi_dh_attach(struct request_queue *req, const char *name)
{
return SCSI_DH_NOSYS;
}
-static inline void scsi_dh_detach(struct request_queue *q)
-{
- return;
-}
static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
gfp_t gfp)
{
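
With struct scsi_device_handler moved into this public header, attach() returning an int and per-device state hanging off sdev->handler_data, a skeletal handler looks roughly like the sketch below. Registration is assumed to go through scsi_register_device_handler()/scsi_unregister_device_handler() as the in-tree handlers do; the name and the callback bodies are placeholders:

#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>

static int demo_dh_attach(struct scsi_device *sdev)
{
	sdev->handler_data = NULL;	/* allocate per-device state here */
	return SCSI_DH_OK;
}

static void demo_dh_detach(struct scsi_device *sdev)
{
	kfree(sdev->handler_data);	/* if attach allocated anything */
	sdev->handler_data = NULL;
}

static struct scsi_device_handler demo_dh = {
	.name	= "demo",
	.module	= THIS_MODULE,
	.attach	= demo_dh_attach,
	.detach	= demo_dh_detach,
};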
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 8d1d7fa67ec4..dbb8c640e26f 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -4,6 +4,7 @@
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_common.h>
struct scsi_device;
struct Scsi_Host;
@@ -21,14 +22,9 @@ static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
-extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
- int desc_type);
-
extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
-extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 370f2909ec19..44202ff897fd 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
unsigned int reg;
};
-struct tegra_smmu_ops {
- void (*flush_dcache)(struct page *page, unsigned long offset,
- size_t size);
-};
-
struct tegra_smmu_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -66,9 +61,8 @@ struct tegra_smmu_soc {
bool supports_round_robin_arbitration;
bool supports_request_limit;
+ unsigned int num_tlb_lines;
unsigned int num_asids;
-
- const struct tegra_smmu_ops *ops;
};
struct tegra_mc;
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b49bed..74bc85473b58 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@ static inline int snd_ac97_update_power(struct snd_ac97 *ac97, int reg,
void snd_ac97_suspend(struct snd_ac97 *ac97);
void snd_ac97_resume(struct snd_ac97 *ac97);
#endif
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+ unsigned int id_mask);
/* quirk types */
enum {
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index adb5ba5cbd9d..930b41e5acf4 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -4,14 +4,17 @@
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
+#include <drm/i915_component.h>
+
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
int snd_hdac_get_display_clk(struct hdac_bus *bus);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
#else
-static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
{
return 0;
}
@@ -31,6 +34,10 @@ static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
{
return 0;
}
+static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
+{
+ return -ENODEV;
+}
#endif
#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ae995e523ff8..2ae8812d7b1a 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -160,6 +160,10 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_SPB_BASE 0x08
/* Interval used to calculate the iterating register offset */
#define AZX_SPB_INTERVAL 0x08
+/* SPIB base */
+#define AZX_SPB_SPIB 0x00
+/* SPIB MAXFIFO base */
+#define AZX_SPB_MAXFIFO 0x04
/* registers of Global Time Synchronization Capability Structure */
#define AZX_GTS_CAP_ID 0x1
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 4caf1fde8a4f..49bc836fcd84 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -119,6 +119,7 @@ int snd_hdac_device_register(struct hdac_device *codec);
void snd_hdac_device_unregister(struct hdac_device *codec);
int snd_hdac_refresh_widgets(struct hdac_device *codec);
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
unsigned int verb, unsigned int parm);
@@ -164,15 +165,15 @@ static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
}
#ifdef CONFIG_PM
-void snd_hdac_power_up(struct hdac_device *codec);
-void snd_hdac_power_down(struct hdac_device *codec);
-void snd_hdac_power_up_pm(struct hdac_device *codec);
-void snd_hdac_power_down_pm(struct hdac_device *codec);
+int snd_hdac_power_up(struct hdac_device *codec);
+int snd_hdac_power_down(struct hdac_device *codec);
+int snd_hdac_power_up_pm(struct hdac_device *codec);
+int snd_hdac_power_down_pm(struct hdac_device *codec);
#else
-static inline void snd_hdac_power_up(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down(struct hdac_device *codec) {}
-static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
#endif
/*
@@ -437,6 +438,8 @@ void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
void snd_hdac_stream_release(struct hdac_stream *azx_dev);
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+ int dir, int stream_tag);
int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 0f89df1511dc..94210dcdb6ea 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -34,6 +34,7 @@ int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define ebus_to_hbus(ebus) (&(ebus)->bus)
#define hbus_to_ebus(_bus) \
@@ -62,6 +63,8 @@ enum hdac_ext_stream_type {
* @hstream: hdac_stream
* @pphc_addr: processing pipe host stream pointer
* @pplc_addr: processing pipe link stream pointer
+ * @spib_addr: software position in buffers stream pointer
+ * @fifo_addr: software position max FIFO stream pointer
* @decoupled: stream host and link is decoupled
* @link_locked: link is locked
* @link_prepared: link is prepared
@@ -73,6 +76,9 @@ struct hdac_ext_stream {
void __iomem *pphc_addr;
void __iomem *pplc_addr;
+ void __iomem *spib_addr;
+ void __iomem *fifo_addr;
+
bool decoupled:1;
bool link_locked:1;
bool link_prepared;
@@ -99,6 +105,11 @@ void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+ struct hdac_ext_stream *stream, u32 value);
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+ struct hdac_ext_stream *stream);
+
void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
@@ -115,6 +126,7 @@ struct hdac_ext_link {
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
int stream);
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
@@ -129,4 +141,63 @@ void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
writew(((readw(addr + reg) & ~(mask)) | (val)), \
addr + reg)
+
+struct hdac_ext_device;
+
+/* ops common to all codec drivers */
+struct hdac_ext_codec_ops {
+ int (*build_controls)(struct hdac_ext_device *dev);
+ int (*init)(struct hdac_ext_device *dev);
+ void (*free)(struct hdac_ext_device *dev);
+};
+
+struct hda_dai_map {
+ char *dai_name;
+ hda_nid_t nid;
+ u32 maxbps;
+};
+
+#define HDA_MAX_NIDS 16
+
+/**
+ * struct hdac_ext_device - HDAC Ext device
+ *
+ * @hdac: hdac core device
+ * @ebus: hdac extended bus this device belongs to
+ * @nid_list: the dai map which matches the dai-name with the nid
+ * @map_cur_idx: the idx in use in dai_map
+ * @ops: the hda codec ops common to all codec drivers
+ * @private_data: private data, for asoc contains asoc codec object
+ */
+struct hdac_ext_device {
+ struct hdac_device hdac;
+ struct hdac_ext_bus *ebus;
+
+ /* soc-dai to nid map */
+ struct hda_dai_map nid_list[HDA_MAX_NIDS];
+ unsigned int map_cur_idx;
+
+ /* codec ops */
+ struct hdac_ext_codec_ops ops;
+
+ void *private_data;
+};
+
+#define to_ehdac_device(dev) (container_of((dev), \
+ struct hdac_ext_device, hdac))
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_ext_driver {
+ struct hdac_driver hdac;
+
+ int (*probe)(struct hdac_ext_device *dev);
+ int (*remove)(struct hdac_ext_device *dev);
+ void (*shutdown)(struct hdac_ext_device *dev);
+};
+
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
+
+#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+
#endif /* __SOUND_HDAUDIO_EXT_H */
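
The new hdac_ext_device/hdac_ext_driver pairing gives codec drivers a common shape. A minimal skeleton built only from the declarations above; the embedded hdac_driver fields (name, id table) and the probe/remove bodies are left out as placeholders:

#include <sound/hdaudio_ext.h>

static int demo_ext_probe(struct hdac_ext_device *edev)
{
	/* edev->hdac is the core device, edev->private_data is ours */
	return 0;
}

static int demo_ext_remove(struct hdac_ext_device *edev)
{
	return 0;
}

static struct hdac_ext_driver demo_ext_driver = {
	.probe	= demo_ext_probe,
	.remove	= demo_ext_remove,
};

/* registered from module init with snd_hda_ext_driver_register(&demo_ext_driver)
 * and torn down with snd_hda_ext_driver_unregister() */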
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c175f6..bb7b2ebfee7b 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@ struct rsnd_src_platform_info {
/*
* flags
*/
+struct rsnd_ctu_platform_info {
+ u32 flags;
+};
+
+struct rsnd_mix_platform_info {
+ u32 flags;
+};
+
struct rsnd_dvc_platform_info {
u32 flags;
};
@@ -68,6 +76,8 @@ struct rsnd_dvc_platform_info {
struct rsnd_dai_path_info {
struct rsnd_ssi_platform_info *ssi;
struct rsnd_src_platform_info *src;
+ struct rsnd_ctu_platform_info *ctu;
+ struct rsnd_mix_platform_info *mix;
struct rsnd_dvc_platform_info *dvc;
};
@@ -93,6 +103,10 @@ struct rcar_snd_info {
int ssi_info_nr;
struct rsnd_src_platform_info *src_info;
int src_info_nr;
+ struct rsnd_ctu_platform_info *ctu_info;
+ int ctu_info_nr;
+ struct rsnd_mix_platform_info *mix_info;
+ int mix_info_nr;
struct rsnd_dvc_platform_info *dvc_info;
int dvc_info_nr;
struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/rt298.h b/include/sound/rt298.h
new file mode 100644
index 000000000000..7fffeaa84f64
--- /dev/null
+++ b/include/sound/rt298.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt298.h -- Platform data for RT298
+ *
+ * Copyright 2013 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT298_H
+#define __LINUX_SND_RT298_H
+
+struct rt298_platform_data {
+ bool cbj_en; /* combo jack enable */
+ bool gpio2_en; /* GPIO2 enable */
+ bool suspend_power_off; /* power is off during suspend */
+};
+
+#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a898275..5abba037d245 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
/* dapm events */
void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@ struct snd_soc_dapm_route {
struct snd_soc_dapm_path {
const char *name;
- /* source (input) and sink (output) widgets */
- struct snd_soc_dapm_widget *source;
- struct snd_soc_dapm_widget *sink;
+ /*
+ * source (input) and sink (output) widgets
+ * The union is for convenience, since it is a lot nicer to type
+ * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
+ */
+ union {
+ struct {
+ struct snd_soc_dapm_widget *source;
+ struct snd_soc_dapm_widget *sink;
+ };
+ struct snd_soc_dapm_widget *node[2];
+ };
/* status */
u32 connect:1; /* source and sink widgets are connected */
@@ -524,8 +534,7 @@ struct snd_soc_dapm_path {
int (*connected)(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
- struct list_head list_source;
- struct list_head list_sink;
+ struct list_head list_node[2];
struct list_head list_kcontrol;
struct list_head list;
};
@@ -559,8 +568,7 @@ struct snd_soc_dapm_widget {
unsigned char new_power:1; /* power from this run */
unsigned char power_checked:1; /* power checked this run */
unsigned char is_supply:1; /* Widget is a supply type widget */
- unsigned char is_sink:1; /* Widget is a sink type widget */
- unsigned char is_source:1; /* Widget is a source type widget */
+ unsigned char is_ep:2; /* Widget is a endpoint type widget */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@ struct snd_soc_dapm_widget {
struct snd_kcontrol **kcontrols;
struct snd_soc_dobj dobj;
- /* widget input and outputs */
- struct list_head sources;
- struct list_head sinks;
+ /* widget input and output edges */
+ struct list_head edges[2];
/* used during DAPM updates */
struct list_head work_list;
struct list_head power_list;
struct list_head dirty;
- int inputs;
- int outputs;
+ int endpoints[2];
struct clk *clk;
};
@@ -672,4 +678,58 @@ static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
return dapm->bias_level;
}
+enum snd_soc_dapm_direction {
+ SND_SOC_DAPM_DIR_IN,
+ SND_SOC_DAPM_DIR_OUT
+};
+
+#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
+
+#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
+#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+
+/**
+ * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
+ * specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ * incoming or outgoing widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
+ list_for_each_entry(p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
+ * specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ * incoming or outgoing widget
+ * @p: The path iterator variable
+ * @next_p: Temporary storage for the next path
+ *
+ * This function works like snd_soc_dapm_widget_for_each_path, except that
+ * it is safe to remove the current path from the list while iterating
+ */
+#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
+ list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
+ * widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
+ snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
+
+/**
+ * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
+ * a widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_source_path(w, p) \
+ snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
+
#endif
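
The union keeps the old p->source/p->sink spellings working while the new node[]/edges[] arrays let direction-agnostic code index by enum snd_soc_dapm_direction. A minimal sketch of a consumer using the wrapper macro added above (illustrative only, not a copy of the soc-dapm.c walkers):

#include <sound/soc-dapm.h>

static int count_connected_sinks(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	int n = 0;

	/* all paths leaving w; p->sink is the widget on the far end */
	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect)
			n++;
	}

	return n;
}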
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 427bc41df3ae..086cd7ff6ddc 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -89,6 +89,13 @@ struct snd_soc_tplg_kcontrol_ops {
struct snd_ctl_elem_info *uinfo);
};
+/* Bytes ext operations, for TLV byte controls */
+struct snd_soc_tplg_bytes_ext_ops {
+ u32 id;
+ int (*get)(unsigned int __user *bytes, unsigned int size);
+ int (*put)(const unsigned int __user *bytes, unsigned int size);
+};
+
/*
* DAPM widget event handlers - used to map handlers onto widgets.
*/
@@ -136,9 +143,13 @@ struct snd_soc_tplg_ops {
int (*manifest)(struct snd_soc_component *,
struct snd_soc_tplg_manifest *);
- /* bespoke kcontrol handlers available for binding */
+ /* vendor specific kcontrol handlers available for binding */
const struct snd_soc_tplg_kcontrol_ops *io_ops;
int io_ops_count;
+
+ /* vendor specific bytes ext handlers available for binding */
+ const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+ int bytes_ext_ops_count;
};
#ifdef CONFIG_SND_SOC_TOPOLOGY
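
A vendor driver binds its TLV bytes handlers by numeric id; topology firmware that references the same id is routed to these callbacks. A minimal sketch with placeholder id and bodies:

#include <sound/soc-topology.h>

static int demo_bytes_get(unsigned int __user *bytes, unsigned int size)
{
	return 0;	/* copy control data to user space here */
}

static int demo_bytes_put(const unsigned int __user *bytes, unsigned int size)
{
	return 0;	/* parse and apply control data here */
}

static const struct snd_soc_tplg_bytes_ext_ops demo_bytes_ops[] = {
	{ .id = 1, .get = demo_bytes_get, .put = demo_bytes_put },
};

/* wired up via snd_soc_tplg_ops:
 *	.bytes_ext_ops		= demo_bytes_ops,
 *	.bytes_ext_ops_count	= ARRAY_SIZE(demo_bytes_ops), */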
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf9d54a..884e728b09d9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -526,7 +526,8 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
#ifdef CONFIG_SND_SOC_AC97_BUS
struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+ unsigned int id, unsigned int id_mask);
void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
@@ -619,6 +620,7 @@ int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
* @pin: name of the pin to update
* @mask: bits to check for in reported jack status
* @invert: if non-zero then pin is enabled when status is not reported
+ * @list: internal list entry
*/
struct snd_soc_jack_pin {
struct list_head list;
@@ -635,7 +637,7 @@ struct snd_soc_jack_pin {
* @jack_type: type of jack that is expected for this voltage
* @debounce_time: debounce_time for jack, codec driver should wait for this
* duration before reading the adc for voltages
- * @:list: list container
+ * @list: internal list entry
*/
struct snd_soc_jack_zone {
unsigned int min_mv;
@@ -651,12 +653,12 @@ struct snd_soc_jack_zone {
* @gpio: legacy gpio number
* @idx: gpio descriptor index within the function of the GPIO
* consumer device
- * @gpiod_dev GPIO consumer device
+ * @gpiod_dev: GPIO consumer device
* @name: gpio name. Also as connection ID for the GPIO consumer
* device function name lookup
* @report: value to report when jack detected
* @invert: report presence in low state
- * @debouce_time: debouce time in ms
+ * @debounce_time: debounce time in ms
* @wake: enable as wake source
* @jack_status_check: callback function which overrides the detection
* to provide more complex checks (eg, reading an
@@ -672,11 +674,13 @@ struct snd_soc_jack_gpio {
int debounce_time;
bool wake;
+ /* private: */
struct snd_soc_jack *jack;
struct delayed_work work;
struct gpio_desc *desc;
void *data;
+ /* public: */
int (*jack_status_check)(void *data);
};
@@ -758,7 +762,6 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
- unsigned int probed:1;
struct list_head list;
@@ -792,7 +795,6 @@ struct snd_soc_component {
/* Don't use these, use snd_soc_component_get_dapm() */
struct snd_soc_dapm_context dapm;
- struct snd_soc_dapm_context *dapm_ptr;
const struct snd_kcontrol_new *controls;
unsigned int num_controls;
@@ -832,9 +834,6 @@ struct snd_soc_codec {
/* component */
struct snd_soc_component component;
- /* Don't access this directly, use snd_soc_codec_get_dapm() */
- struct snd_soc_dapm_context dapm;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_reg;
#endif
@@ -1277,7 +1276,7 @@ static inline struct snd_soc_component *snd_soc_dapm_to_component(
static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
struct snd_soc_dapm_context *dapm)
{
- return container_of(dapm, struct snd_soc_codec, dapm);
+ return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
}
/**
@@ -1302,7 +1301,7 @@ static inline struct snd_soc_platform *snd_soc_dapm_to_platform(
static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
struct snd_soc_component *component)
{
- return component->dapm_ptr;
+ return &component->dapm;
}
/**
@@ -1314,12 +1313,12 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
struct snd_soc_codec *codec)
{
- return &codec->dapm;
+ return snd_soc_component_get_dapm(&codec->component);
}
/**
* snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
- * @dapm: The CODEC for which to initialize the DAPM bias level
+ * @codec: The CODEC for which to initialize the DAPM bias level
* @level: The DAPM level to initialize to
*
* Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1603,10 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *slots,
unsigned int *slot_width);
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+ struct snd_soc_codec_conf *codec_conf,
+ struct device_node *of_node,
+ const char *propname);
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0aedbb2c10e0..373d3342002b 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -62,6 +62,8 @@
/* T10 protection information disabled by default */
#define TA_DEFAULT_T10_PI 0
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
+/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
+#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -517,7 +519,6 @@ struct iscsi_conn {
u16 cid;
/* Remote TCP Port */
u16 login_port;
- u16 local_port;
int net_size;
int login_family;
u32 auth_id;
@@ -527,9 +528,8 @@ struct iscsi_conn {
u32 exp_statsn;
/* Per connection status sequence number */
u32 stat_sn;
-#define IPV6_ADDRESS_SPACE 48
- unsigned char login_ip[IPV6_ADDRESS_SPACE];
- unsigned char local_ip[IPV6_ADDRESS_SPACE];
+ struct sockaddr_storage login_sockaddr;
+ struct sockaddr_storage local_sockaddr;
int conn_usage_count;
int conn_waiting_on_uc;
atomic_t check_immediate_queue;
@@ -636,7 +636,7 @@ struct iscsi_session {
/* session wide counter: expected command sequence number */
u32 exp_cmd_sn;
/* session wide counter: maximum allowed command sequence number */
- u32 max_cmd_sn;
+ atomic_t max_cmd_sn;
struct list_head sess_ooo_cmdsn_list;
/* LIO specific session ID */
@@ -764,6 +764,7 @@ struct iscsi_tpg_attrib {
u32 default_erl;
u8 t10_pi;
u32 fabric_prot_type;
+ u32 tpg_enabled_sendtargets;
struct iscsi_portal_group *tpg;
};
@@ -776,12 +777,10 @@ struct iscsi_np {
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
- unsigned char np_ip[IPV6_ADDRESS_SPACE];
- u16 np_port;
spinlock_t np_thread_lock;
struct completion np_restart_comp;
struct socket *np_socket;
- struct __kernel_sockaddr_storage np_sockaddr;
+ struct sockaddr_storage np_sockaddr;
struct task_struct *np_thread;
struct timer_list np_login_timer;
void *np_context;
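
Replacing the fixed 48-byte IP strings with struct sockaddr_storage means one code path covers IPv4 and IPv6, and the kernel's %pIS formats can print either family directly. A hypothetical helper, not taken from the target code:

#include <linux/printk.h>
#include <linux/socket.h>

static void log_login_addr(const struct sockaddr_storage *addr)
{
	/* %pISpc prints v4 or v6 addresses with the port appended */
	pr_info("iSCSI login from %pISpc\n", addr);
}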
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 3ff76b4faad3..e615bb485d0b 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -50,7 +50,7 @@ struct iscsi_login_stats {
u64 last_fail_time; /* time stamp (jiffies) */
u32 last_fail_type;
int last_intr_fail_ip_family;
- unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+ struct sockaddr_storage last_intr_fail_sockaddr;
char last_intr_fail_name[224];
} ____cacheline_aligned;
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index e6bb166f12c2..90e37faa2ede 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -9,7 +9,7 @@ struct iscsit_transport {
int priv_size;
struct module *owner;
struct list_head t_node;
- int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
+ int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
void (*iscsit_wait_conn)(struct iscsi_conn *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 1e5c8f949bae..56cf8e485ef2 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -93,4 +93,6 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
+bool target_sense_desc_format(struct se_device *dev);
+
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 17ae2d6a4891..ac9bf1c0e42d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,6 +6,7 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/percpu_ida.h>
+#include <linux/t10-pi.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -426,12 +427,6 @@ enum target_core_dif_check {
TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
};
-struct se_dif_v1_tuple {
- __be16 guard_tag;
- __be16 app_tag;
- __be32 ref_tag;
-};
-
/* for sam_task_attr */
#define TCM_SIMPLE_TAG 0x20
#define TCM_HEAD_TAG 0x21
@@ -444,6 +439,9 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
+ unsigned cmd_wait_set:1;
+ unsigned unknown_data_length:1;
+ bool state_active:1;
u64 tag; /* SAM command identifier aka task tag */
/* Delay for ALUA Active/NonOptimized state access in milliseconds */
int alua_nonop_delay;
@@ -455,11 +453,8 @@ struct se_cmd {
unsigned int map_tag;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- unsigned cmd_wait_set:1;
- unsigned unknown_data_length:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
- u32 se_ordered_id;
/* Total size in bytes associated with command */
u32 data_length;
u32 residual_count;
@@ -477,7 +472,6 @@ struct se_cmd {
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
- struct kref cmd_kref;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
@@ -497,6 +491,7 @@ struct se_cmd {
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
spinlock_t t_state_lock;
+ struct kref cmd_kref;
struct completion t_transport_stop_comp;
struct work_struct work;
@@ -509,8 +504,10 @@ struct se_cmd {
struct scatterlist *t_bidi_data_sg;
unsigned int t_bidi_data_nents;
+ /* Used for lun->lun_ref counting */
+ int lun_ref_active;
+
struct list_head state_list;
- bool state_active;
/* old task stop completion, consider merging with some of the above */
struct completion task_stop_comp;
@@ -518,20 +515,17 @@ struct se_cmd {
/* backend private data */
void *priv;
- /* Used for lun->lun_ref counting */
- int lun_ref_active;
-
/* DIF related members */
enum target_prot_op prot_op;
enum target_prot_type prot_type;
u8 prot_checks;
+ bool prot_pto;
u32 prot_length;
u32 reftag_seed;
struct scatterlist *t_prot_sg;
unsigned int t_prot_nents;
sense_reason_t pi_err;
sector_t bad_sector;
- bool prot_pto;
};
struct se_ua {
@@ -598,7 +592,6 @@ struct se_ml_stat_grps {
};
struct se_lun_acl {
- char initiatorname[TRANSPORT_IQN_LEN];
u64 mapped_lun;
struct se_node_acl *se_lun_nacl;
struct se_lun *se_lun;
@@ -685,7 +678,6 @@ struct se_lun {
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
u32 lun_access;
- u32 lun_flags;
u32 lun_index;
/* RELATIVE TARGET PORT IDENTIFER */
@@ -751,7 +743,6 @@ struct se_device {
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
- atomic_t dev_ordered_id;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
u32 export_count;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 18afef91b447..7fb2557a760e 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -5,6 +5,19 @@ struct target_core_fabric_ops {
struct module *module;
const char *name;
size_t node_acl_size;
+ /*
+ * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
+ * Setting this value tells target-core to enforce this limit, and
+ * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
+ *
+ * target-core will currently reset se_cmd->data_length to this
+ * maximum size, and set UNDERFLOW residual count if length exceeds
+ * this limit.
+ *
+ * XXX: Not all initiator hosts honor this block-limit EVPD
+ * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
+ */
+ u32 max_data_sg_nents;
char *(*get_fabric_name)(void);
char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *);
@@ -152,6 +165,7 @@ int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
+void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
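
The clamp described in the max_data_sg_nents comment is simple arithmetic under the stated one-PAGE_SIZE-per-entry assumption: with max_data_sg_nents = 1024 and 4 KiB pages the cap is 4 MiB, and a larger transfer is truncated with the difference reported as UNDERFLOW residual. A back-of-the-envelope sketch (the 1024 figure is only an example):

#include <linux/mm.h>
#include <linux/types.h>

static u32 clamp_data_length(u32 data_length, u32 max_data_sg_nents)
{
	u32 cap = max_data_sg_nents * PAGE_SIZE;	/* e.g. 1024 * 4 KiB = 4 MiB */

	/* residual (UNDERFLOW) would be data_length - cap when clamped */
	return data_length > cap ? cap : data_length;
}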
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d96d0f..317a1ed2f4ac 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
#include <linux/tracepoint.h>
#define DAPM_DIRECT "(direct)"
+#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
struct snd_soc_jack;
struct snd_soc_codec;
@@ -152,62 +153,38 @@ TRACE_EVENT(snd_soc_dapm_walk_done,
(int)__entry->path_checks, (int)__entry->neighbour_checks)
);
-TRACE_EVENT(snd_soc_dapm_output_path,
+TRACE_EVENT(snd_soc_dapm_path,
TP_PROTO(struct snd_soc_dapm_widget *widget,
+ enum snd_soc_dapm_direction dir,
struct snd_soc_dapm_path *path),
- TP_ARGS(widget, path),
+ TP_ARGS(widget, dir, path),
TP_STRUCT__entry(
__string( wname, widget->name )
__string( pname, path->name ? path->name : DAPM_DIRECT)
- __string( psname, path->sink->name )
- __field( int, path_sink )
+ __string( pnname, path->node[dir]->name )
+ __field( int, path_node )
__field( int, path_connect )
+ __field( int, path_dir )
),
TP_fast_assign(
__assign_str(wname, widget->name);
__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
- __assign_str(psname, path->sink->name);
+ __assign_str(pnname, path->node[dir]->name);
__entry->path_connect = path->connect;
- __entry->path_sink = (long)path->sink;
+ __entry->path_node = (long)path->node[dir];
+ __entry->path_dir = dir;
),
- TP_printk("%c%s -> %s -> %s",
- (int) __entry->path_sink &&
+ TP_printk("%c%s %s %s %s %s",
+ (int) __entry->path_node &&
(int) __entry->path_connect ? '*' : ' ',
- __get_str(wname), __get_str(pname), __get_str(psname))
-);
-
-TRACE_EVENT(snd_soc_dapm_input_path,
-
- TP_PROTO(struct snd_soc_dapm_widget *widget,
- struct snd_soc_dapm_path *path),
-
- TP_ARGS(widget, path),
-
- TP_STRUCT__entry(
- __string( wname, widget->name )
- __string( pname, path->name ? path->name : DAPM_DIRECT)
- __string( psname, path->source->name )
- __field( int, path_source )
- __field( int, path_connect )
- ),
-
- TP_fast_assign(
- __assign_str(wname, widget->name);
- __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
- __assign_str(psname, path->source->name);
- __entry->path_connect = path->connect;
- __entry->path_source = (long)path->source;
- ),
-
- TP_printk("%c%s <- %s <- %s",
- (int) __entry->path_source &&
- (int) __entry->path_connect ? '*' : ' ',
- __get_str(wname), __get_str(pname), __get_str(psname))
+ __get_str(wname), DAPM_ARROW(__entry->path_dir),
+ __get_str(pname), DAPM_ARROW(__entry->path_dir),
+ __get_str(pnname))
);
TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
deleted file mode 100644
index fc733d28117a..000000000000
--- a/include/trace/events/ext3.h
+++ /dev/null
@@ -1,866 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ext3
-
-#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EXT3_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(ext3_free_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( umode_t, mode )
- __field( uid_t, uid )
- __field( gid_t, gid )
- __field( blkcnt_t, blocks )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->mode = inode->i_mode;
- __entry->uid = i_uid_read(inode);
- __entry->gid = i_gid_read(inode);
- __entry->blocks = inode->i_blocks;
- ),
-
- TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->uid, __entry->gid,
- (unsigned long) __entry->blocks)
-);
-
-TRACE_EVENT(ext3_request_inode,
- TP_PROTO(struct inode *dir, int mode),
-
- TP_ARGS(dir, mode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, dir )
- __field( umode_t, mode )
- ),
-
- TP_fast_assign(
- __entry->dev = dir->i_sb->s_dev;
- __entry->dir = dir->i_ino;
- __entry->mode = mode;
- ),
-
- TP_printk("dev %d,%d dir %lu mode 0%o",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_allocate_inode,
- TP_PROTO(struct inode *inode, struct inode *dir, int mode),
-
- TP_ARGS(inode, dir, mode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, dir )
- __field( umode_t, mode )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->dir = dir->i_ino;
- __entry->mode = mode;
- ),
-
- TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_evict_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( int, nlink )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->nlink = inode->i_nlink;
- ),
-
- TP_printk("dev %d,%d ino %lu nlink %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->nlink)
-);
-
-TRACE_EVENT(ext3_drop_inode,
- TP_PROTO(struct inode *inode, int drop),
-
- TP_ARGS(inode, drop),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( int, drop )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->drop = drop;
- ),
-
- TP_printk("dev %d,%d ino %lu drop %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->drop)
-);
-
-TRACE_EVENT(ext3_mark_inode_dirty,
- TP_PROTO(struct inode *inode, unsigned long IP),
-
- TP_ARGS(inode, IP),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field(unsigned long, ip )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->ip = IP;
- ),
-
- TP_printk("dev %d,%d ino %lu caller %pS",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, (void *)__entry->ip)
-);
-
-TRACE_EVENT(ext3_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
-
- TP_ARGS(inode, pos, len, flags),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned int, len )
- __field( unsigned int, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = pos;
- __entry->len = len;
- __entry->flags = flags;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->flags)
-);
-
-DECLARE_EVENT_CLASS(ext3__write_end,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned int, len )
- __field( unsigned int, copied )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = pos;
- __entry->len = len;
- __entry->copied = copied;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied)
-);
-
-DECLARE_EVENT_CLASS(ext3__page_op,
- TP_PROTO(struct page *page),
-
- TP_ARGS(page),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( pgoff_t, index )
-
- ),
-
- TP_fast_assign(
- __entry->index = page->index;
- __entry->ino = page->mapping->host->i_ino;
- __entry->dev = page->mapping->host->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu page_index %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->index)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_readpage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_releasepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-TRACE_EVENT(ext3_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
-
- TP_ARGS(page, offset, length),
-
- TP_STRUCT__entry(
- __field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
- __field( ino_t, ino )
- __field( dev_t, dev )
-
- ),
-
- TP_fast_assign(
- __entry->index = page->index;
- __entry->offset = offset;
- __entry->length = length;
- __entry->ino = page->mapping->host->i_ino;
- __entry->dev = page->mapping->host->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->index, __entry->offset, __entry->length)
-);
-
-TRACE_EVENT(ext3_discard_blocks,
- TP_PROTO(struct super_block *sb, unsigned long blk,
- unsigned long count),
-
- TP_ARGS(sb, blk, count),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( unsigned long, blk )
- __field( unsigned long, count )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->blk = blk;
- __entry->count = count;
- ),
-
- TP_printk("dev %d,%d blk %lu count %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blk, __entry->count)
-);
-
-TRACE_EVENT(ext3_request_blocks,
- TP_PROTO(struct inode *inode, unsigned long goal,
- unsigned long count),
-
- TP_ARGS(inode, goal, count),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( unsigned long, count )
- __field( unsigned long, goal )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->count = count;
- __entry->goal = goal;
- ),
-
- TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->count, __entry->goal)
-);
-
-TRACE_EVENT(ext3_allocate_blocks,
- TP_PROTO(struct inode *inode, unsigned long goal,
- unsigned long count, unsigned long block),
-
- TP_ARGS(inode, goal, count, block),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( unsigned long, block )
- __field( unsigned long, count )
- __field( unsigned long, goal )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->block = block;
- __entry->count = count;
- __entry->goal = goal;
- ),
-
- TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->count, __entry->block,
- __entry->goal)
-);
-
-TRACE_EVENT(ext3_free_blocks,
- TP_PROTO(struct inode *inode, unsigned long block,
- unsigned long count),
-
- TP_ARGS(inode, block, count),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( umode_t, mode )
- __field( unsigned long, block )
- __field( unsigned long, count )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->mode = inode->i_mode;
- __entry->block = block;
- __entry->count = count;
- ),
-
- TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->block, __entry->count)
-);
-
-TRACE_EVENT(ext3_sync_file_enter,
- TP_PROTO(struct file *file, int datasync),
-
- TP_ARGS(file, datasync),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, parent )
- __field( int, datasync )
- ),
-
- TP_fast_assign(
- struct dentry *dentry = file->f_path.dentry;
-
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->datasync = datasync;
- __entry->parent = d_inode(dentry->d_parent)->i_ino;
- ),
-
- TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->parent, __entry->datasync)
-);
-
-TRACE_EVENT(ext3_sync_file_exit,
- TP_PROTO(struct inode *inode, int ret),
-
- TP_ARGS(inode, ret),
-
- TP_STRUCT__entry(
- __field( int, ret )
- __field( ino_t, ino )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->ret = ret;
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->ret)
-);
-
-TRACE_EVENT(ext3_sync_fs,
- TP_PROTO(struct super_block *sb, int wait),
-
- TP_ARGS(sb, wait),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, wait )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->wait = wait;
- ),
-
- TP_printk("dev %d,%d wait %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->wait)
-);
-
-TRACE_EVENT(ext3_rsv_window_add,
- TP_PROTO(struct super_block *sb,
- struct ext3_reserve_window_node *rsv_node),
-
- TP_ARGS(sb, rsv_node),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->start = rsv_node->rsv_window._rsv_start;
- __entry->end = rsv_node->rsv_window._rsv_end;
- ),
-
- TP_printk("dev %d,%d start %lu end %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_discard_reservation,
- TP_PROTO(struct inode *inode,
- struct ext3_reserve_window_node *rsv_node),
-
- TP_ARGS(inode, rsv_node),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- __field( ino_t, ino )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->start = rsv_node->rsv_window._rsv_start;
- __entry->end = rsv_node->rsv_window._rsv_end;
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu start %lu end %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long)__entry->ino, __entry->start,
- __entry->end)
-);
-
-TRACE_EVENT(ext3_alloc_new_reservation,
- TP_PROTO(struct super_block *sb, unsigned long goal),
-
- TP_ARGS(sb, goal),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( unsigned long, goal )
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->goal = goal;
- ),
-
- TP_printk("dev %d,%d goal %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->goal)
-);
-
-TRACE_EVENT(ext3_reserved,
- TP_PROTO(struct super_block *sb, unsigned long block,
- struct ext3_reserve_window_node *rsv_node),
-
- TP_ARGS(sb, block, rsv_node),
-
- TP_STRUCT__entry(
- __field( unsigned long, block )
- __field( unsigned long, start )
- __field( unsigned long, end )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->block = block;
- __entry->start = rsv_node->rsv_window._rsv_start;
- __entry->end = rsv_node->rsv_window._rsv_end;
- __entry->dev = sb->s_dev;
- ),
-
- TP_printk("dev %d,%d block %lu, start %lu end %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->block, __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_forget,
- TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
-
- TP_ARGS(inode, is_metadata, block),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( umode_t, mode )
- __field( int, is_metadata )
- __field( unsigned long, block )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->mode = inode->i_mode;
- __entry->is_metadata = is_metadata;
- __entry->block = block;
- ),
-
- TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->is_metadata, __entry->block)
-);
-
-TRACE_EVENT(ext3_read_block_bitmap,
- TP_PROTO(struct super_block *sb, unsigned int group),
-
- TP_ARGS(sb, group),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( __u32, group )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->group = group;
- ),
-
- TP_printk("dev %d,%d group %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->group)
-);
-
-TRACE_EVENT(ext3_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
-
- TP_ARGS(inode, offset, len, rw),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->rw)
-);
-
-TRACE_EVENT(ext3_direct_IO_exit,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
- int rw, int ret),
-
- TP_ARGS(inode, offset, len, rw, ret),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->rw, __entry->ret)
-);
-
-TRACE_EVENT(ext3_unlink_enter,
- TP_PROTO(struct inode *parent, struct dentry *dentry),
-
- TP_ARGS(parent, dentry),
-
- TP_STRUCT__entry(
- __field( ino_t, parent )
- __field( ino_t, ino )
- __field( loff_t, size )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->parent = parent->i_ino;
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->size = d_inode(dentry)->i_size;
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu size %lld parent %ld",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long)__entry->size,
- (unsigned long) __entry->parent)
-);
-
-TRACE_EVENT(ext3_unlink_exit,
- TP_PROTO(struct dentry *dentry, int ret),
-
- TP_ARGS(dentry, ret),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->ret)
-);
-
-DECLARE_EVENT_CLASS(ext3__truncate,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( blkcnt_t, blocks )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->blocks = inode->i_blocks;
- ),
-
- TP_printk("dev %d,%d ino %lu blocks %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
-
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
-
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode)
-);
-
-TRACE_EVENT(ext3_get_blocks_enter,
- TP_PROTO(struct inode *inode, unsigned long lblk,
- unsigned long len, int create),
-
- TP_ARGS(inode, lblk, len, create),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( unsigned long, lblk )
- __field( unsigned long, len )
- __field( int, create )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->lblk = lblk;
- __entry->len = len;
- __entry->create = create;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->lblk, __entry->len, __entry->create)
-);
-
-TRACE_EVENT(ext3_get_blocks_exit,
- TP_PROTO(struct inode *inode, unsigned long lblk,
- unsigned long pblk, unsigned long len, int ret),
-
- TP_ARGS(inode, lblk, pblk, len, ret),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( unsigned long, lblk )
- __field( unsigned long, pblk )
- __field( unsigned long, len )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->lblk = lblk;
- __entry->pblk = pblk;
- __entry->len = len;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->lblk, __entry->pblk,
- __entry->len, __entry->ret)
-);
-
-TRACE_EVENT(ext3_load_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
-);
-
-#endif /* _TRACE_EXT3_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2d8c82..a01946514b5a 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@ TRACE_EVENT(f2fs_lookup_extent_tree_start,
TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
TP_PROTO(struct inode *inode, unsigned int pgofs,
- struct extent_node *en),
+ struct extent_info *ei),
- TP_ARGS(inode, pgofs, en),
+ TP_ARGS(inode, pgofs, ei),
- TP_CONDITION(en),
+ TP_CONDITION(ei),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1118,9 +1118,9 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
- __entry->fofs = en->ei.fofs;
- __entry->blk = en->ei.blk;
- __entry->len = en->ei.len;
+ __entry->fofs = ei->fofs;
+ __entry->blk = ei->blk;
+ __entry->len = ei->len;
),
TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
new file mode 100644
index 000000000000..833cfcb6750d
--- /dev/null
+++ b/include/trace/events/fib.h
@@ -0,0 +1,113 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fib
+
+#if !defined(_TRACE_FIB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FIB_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/ip_fib.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fib_table_lookup,
+
+ TP_PROTO(u32 tb_id, const struct flowi4 *flp),
+
+ TP_ARGS(tb_id, flp),
+
+ TP_STRUCT__entry(
+ __field( u32, tb_id )
+ __field( int, oif )
+ __field( int, iif )
+ __field( __u8, tos )
+ __field( __u8, scope )
+ __field( __u8, flags )
+ __array( __u8, src, 4 )
+ __array( __u8, dst, 4 )
+ ),
+
+ TP_fast_assign(
+ __be32 *p32;
+
+ __entry->tb_id = tb_id;
+ __entry->oif = flp->flowi4_oif;
+ __entry->iif = flp->flowi4_iif;
+ __entry->tos = flp->flowi4_tos;
+ __entry->scope = flp->flowi4_scope;
+ __entry->flags = flp->flowi4_flags;
+
+ p32 = (__be32 *) __entry->src;
+ *p32 = flp->saddr;
+
+ p32 = (__be32 *) __entry->dst;
+ *p32 = flp->daddr;
+ ),
+
+ TP_printk("table %u oif %d iif %d src %pI4 dst %pI4 tos %d scope %d flags %x",
+ __entry->tb_id, __entry->oif, __entry->iif,
+ __entry->src, __entry->dst, __entry->tos, __entry->scope,
+ __entry->flags)
+);
+
+TRACE_EVENT(fib_table_lookup_nh,
+
+ TP_PROTO(const struct fib_nh *nh),
+
+ TP_ARGS(nh),
+
+ TP_STRUCT__entry(
+ __string( name, nh->nh_dev->name)
+ __field( int, oif )
+ __array( __u8, src, 4 )
+ ),
+
+ TP_fast_assign(
+ __be32 *p32 = (__be32 *) __entry->src;
+
+ __assign_str(name, nh->nh_dev ? nh->nh_dev->name : "not set");
+ __entry->oif = nh->nh_oif;
+ *p32 = nh->nh_saddr;
+ ),
+
+ TP_printk("nexthop dev %s oif %d src %pI4",
+ __get_str(name), __entry->oif, __entry->src)
+);
+
+TRACE_EVENT(fib_validate_source,
+
+ TP_PROTO(const struct net_device *dev, const struct flowi4 *flp),
+
+ TP_ARGS(dev, flp),
+
+ TP_STRUCT__entry(
+ __string( name, dev->name )
+ __field( int, oif )
+ __field( int, iif )
+ __field( __u8, tos )
+ __array( __u8, src, 4 )
+ __array( __u8, dst, 4 )
+ ),
+
+ TP_fast_assign(
+ __be32 *p32;
+
+ __assign_str(name, dev ? dev->name : "not set");
+ __entry->oif = flp->flowi4_oif;
+ __entry->iif = flp->flowi4_iif;
+ __entry->tos = flp->flowi4_tos;
+
+ p32 = (__be32 *) __entry->src;
+ *p32 = flp->saddr;
+
+ p32 = (__be32 *) __entry->dst;
+ *p32 = flp->daddr;
+ ),
+
+ TP_printk("dev %s oif %d iif %d tos %d src %pI4 dst %pI4",
+ __get_str(name), __entry->oif, __entry->iif, __entry->tos,
+ __entry->src, __entry->dst)
+);
+#endif /* _TRACE_FIB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
deleted file mode 100644
index da6f2591c25e..000000000000
--- a/include/trace/events/jbd.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM jbd
-
-#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_JBD_H
-
-#include <linux/jbd.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(jbd_checkpoint,
-
- TP_PROTO(journal_t *journal, int result),
-
- TP_ARGS(journal, result),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, result )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->result = result;
- ),
-
- TP_printk("dev %d,%d result %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->result)
-);
-
-DECLARE_EVENT_CLASS(jbd_commit,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- ),
-
- TP_printk("dev %d,%d transaction %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_start_commit,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_locking,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_logging,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-TRACE_EVENT(jbd_drop_transaction,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- ),
-
- TP_printk("dev %d,%d transaction %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction)
-);
-
-TRACE_EVENT(jbd_end_commit,
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- __field( int, head )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- __entry->head = journal->j_tail_sequence;
- ),
-
- TP_printk("dev %d,%d transaction %d head %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->head)
-);
-
-TRACE_EVENT(jbd_do_submit_data,
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- ),
-
- TP_printk("dev %d,%d transaction %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction)
-);
-
-TRACE_EVENT(jbd_cleanup_journal_tail,
-
- TP_PROTO(journal_t *journal, tid_t first_tid,
- unsigned long block_nr, unsigned long freed),
-
- TP_ARGS(journal, first_tid, block_nr, freed),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( tid_t, tail_sequence )
- __field( tid_t, first_tid )
- __field(unsigned long, block_nr )
- __field(unsigned long, freed )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->tail_sequence = journal->j_tail_sequence;
- __entry->first_tid = first_tid;
- __entry->block_nr = block_nr;
- __entry->freed = freed;
- ),
-
- TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->tail_sequence, __entry->first_tid,
- __entry->block_nr, __entry->freed)
-);
-
-TRACE_EVENT(journal_write_superblock,
- TP_PROTO(journal_t *journal, int write_op),
-
- TP_ARGS(journal, write_op),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, write_op )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->write_op = write_op;
- ),
-
- TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->write_op)
-);
-
-#endif /* _TRACE_JBD_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index a44062da684b..d6f83222a6a1 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -358,6 +358,36 @@ TRACE_EVENT(
#endif
+TRACE_EVENT(kvm_halt_poll_ns,
+ TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+ TP_ARGS(grow, vcpu_id, new, old),
+
+ TP_STRUCT__entry(
+ __field(bool, grow)
+ __field(unsigned int, vcpu_id)
+ __field(int, new)
+ __field(int, old)
+ ),
+
+ TP_fast_assign(
+ __entry->grow = grow;
+ __entry->vcpu_id = vcpu_id;
+ __entry->new = new;
+ __entry->old = old;
+ ),
+
+ TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+ __entry->vcpu_id,
+ __entry->new,
+ __entry->grow ? "grow" : "shrink",
+ __entry->old)
+);
+
+#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
+ trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
+#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
+ trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index fd1a02cb3c82..003dca933803 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
- __field(struct svc_rqst *, rqst)
+ __field_struct(struct sockaddr_storage, ss)
+ __field(int, pid)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__entry->xprt = xprt;
- __entry->rqst = rqst;
+ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ __entry->pid = rqst? rqst->rq_task->pid : 0;
+ __entry->flags = xprt ? xprt->xpt_flags : 0;
),
TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->xprt->xpt_remote,
- __entry->rqst ? __entry->rqst->rq_task->pid : 0,
- show_svc_xprt_flags(__entry->xprt->xpt_flags))
+ (struct sockaddr *)&__entry->ss,
+ __entry->pid, show_svc_xprt_flags(__entry->flags))
);
TRACE_EVENT(svc_xprt_dequeue,
@@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
__field(int, len)
+ __field_struct(struct sockaddr_storage, ss)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__entry->xprt = xprt;
+ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
__entry->len = len;
+ __entry->flags = xprt ? xprt->xpt_flags : 0;
),
TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
- show_svc_xprt_flags(__entry->xprt->xpt_flags))
+ (struct sockaddr *)&__entry->ss,
+ __entry->len, show_svc_xprt_flags(__entry->flags))
);
#endif /* _TRACE_SUNRPC_H */
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index dee3bb1d5a6b..2cca6cd342d8 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -46,7 +46,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- memcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
index 12e1321c4e0c..5afae8fe3795 100644
--- a/include/trace/events/thermal_power_allocator.h
+++ b/include/trace/events/thermal_power_allocator.h
@@ -11,7 +11,7 @@ TRACE_EVENT(thermal_power_allocator,
u32 total_req_power, u32 *granted_power,
u32 total_granted_power, size_t num_actors,
u32 power_range, u32 max_allocatable_power,
- unsigned long current_temp, s32 delta_temp),
+ int current_temp, s32 delta_temp),
TP_ARGS(tz, req_power, total_req_power, granted_power,
total_granted_power, num_actors, power_range,
max_allocatable_power, current_temp, delta_temp),
@@ -24,7 +24,7 @@ TRACE_EVENT(thermal_power_allocator,
__field(size_t, num_actors )
__field(u32, power_range )
__field(u32, max_allocatable_power )
- __field(unsigned long, current_temp )
+ __field(int, current_temp )
__field(s32, delta_temp )
),
TP_fast_assign(
@@ -42,7 +42,7 @@ TRACE_EVENT(thermal_power_allocator,
__entry->delta_temp = delta_temp;
),
- TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d",
+ TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
__entry->tz_id,
__print_array(__get_dynamic_array(req_power),
__entry->num_actors, 4),
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 4250f364a6ca..bc8815f45f3b 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -11,7 +11,8 @@
EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \
EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \
EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \
- EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" )
+ EM( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" ) \
+ EMe( TLB_REMOTE_SEND_IPI, "remote ipi send" )
/*
* First define the enums in TLB_FLUSH_REASON to be exported to userspace
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 89d0497c058a..dbf017bfddd9 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -93,90 +93,183 @@ SHOW_FIELD
{ V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \
{ V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" })
-#define V4L2_TRACE_EVENT(event_name) \
- TRACE_EVENT(event_name, \
- TP_PROTO(int minor, struct v4l2_buffer *buf), \
- \
- TP_ARGS(minor, buf), \
- \
- TP_STRUCT__entry( \
- __field(int, minor) \
- __field(u32, index) \
- __field(u32, type) \
- __field(u32, bytesused) \
- __field(u32, flags) \
- __field(u32, field) \
- __field(s64, timestamp) \
- __field(u32, timecode_type) \
- __field(u32, timecode_flags) \
- __field(u8, timecode_frames) \
- __field(u8, timecode_seconds) \
- __field(u8, timecode_minutes) \
- __field(u8, timecode_hours) \
- __field(u8, timecode_userbits0) \
- __field(u8, timecode_userbits1) \
- __field(u8, timecode_userbits2) \
- __field(u8, timecode_userbits3) \
- __field(u32, sequence) \
- ), \
- \
- TP_fast_assign( \
- __entry->minor = minor; \
- __entry->index = buf->index; \
- __entry->type = buf->type; \
- __entry->bytesused = buf->bytesused; \
- __entry->flags = buf->flags; \
- __entry->field = buf->field; \
- __entry->timestamp = \
- timeval_to_ns(&buf->timestamp); \
- __entry->timecode_type = buf->timecode.type; \
- __entry->timecode_flags = buf->timecode.flags; \
- __entry->timecode_frames = \
- buf->timecode.frames; \
- __entry->timecode_seconds = \
- buf->timecode.seconds; \
- __entry->timecode_minutes = \
- buf->timecode.minutes; \
- __entry->timecode_hours = buf->timecode.hours; \
- __entry->timecode_userbits0 = \
- buf->timecode.userbits[0]; \
- __entry->timecode_userbits1 = \
- buf->timecode.userbits[1]; \
- __entry->timecode_userbits2 = \
- buf->timecode.userbits[2]; \
- __entry->timecode_userbits3 = \
- buf->timecode.userbits[3]; \
- __entry->sequence = buf->sequence; \
- ), \
- \
- TP_printk("minor = %d, index = %u, type = %s, " \
- "bytesused = %u, flags = %s, " \
- "field = %s, timestamp = %llu, timecode = { " \
- "type = %s, flags = %s, frames = %u, " \
- "seconds = %u, minutes = %u, hours = %u, " \
- "userbits = { %u %u %u %u } }, " \
- "sequence = %u", __entry->minor, \
- __entry->index, show_type(__entry->type), \
- __entry->bytesused, \
- show_flags(__entry->flags), \
- show_field(__entry->field), \
- __entry->timestamp, \
- show_timecode_type(__entry->timecode_type), \
- show_timecode_flags(__entry->timecode_flags), \
- __entry->timecode_frames, \
- __entry->timecode_seconds, \
- __entry->timecode_minutes, \
- __entry->timecode_hours, \
- __entry->timecode_userbits0, \
- __entry->timecode_userbits1, \
- __entry->timecode_userbits2, \
- __entry->timecode_userbits3, \
- __entry->sequence \
- ) \
+DECLARE_EVENT_CLASS(v4l2_event_class,
+ TP_PROTO(int minor, struct v4l2_buffer *buf),
+
+ TP_ARGS(minor, buf),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u32, bytesused)
+ __field(u32, flags)
+ __field(u32, field)
+ __field(s64, timestamp)
+ __field(u32, timecode_type)
+ __field(u32, timecode_flags)
+ __field(u8, timecode_frames)
+ __field(u8, timecode_seconds)
+ __field(u8, timecode_minutes)
+ __field(u8, timecode_hours)
+ __field(u8, timecode_userbits0)
+ __field(u8, timecode_userbits1)
+ __field(u8, timecode_userbits2)
+ __field(u8, timecode_userbits3)
+ __field(u32, sequence)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = minor;
+ __entry->index = buf->index;
+ __entry->type = buf->type;
+ __entry->bytesused = buf->bytesused;
+ __entry->flags = buf->flags;
+ __entry->field = buf->field;
+ __entry->timestamp = timeval_to_ns(&buf->timestamp);
+ __entry->timecode_type = buf->timecode.type;
+ __entry->timecode_flags = buf->timecode.flags;
+ __entry->timecode_frames = buf->timecode.frames;
+ __entry->timecode_seconds = buf->timecode.seconds;
+ __entry->timecode_minutes = buf->timecode.minutes;
+ __entry->timecode_hours = buf->timecode.hours;
+ __entry->timecode_userbits0 = buf->timecode.userbits[0];
+ __entry->timecode_userbits1 = buf->timecode.userbits[1];
+ __entry->timecode_userbits2 = buf->timecode.userbits[2];
+ __entry->timecode_userbits3 = buf->timecode.userbits[3];
+ __entry->sequence = buf->sequence;
+ ),
+
+ TP_printk("minor = %d, index = %u, type = %s, bytesused = %u, "
+ "flags = %s, field = %s, timestamp = %llu, "
+ "timecode = { type = %s, flags = %s, frames = %u, "
+ "seconds = %u, minutes = %u, hours = %u, "
+ "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
+ __entry->index, show_type(__entry->type),
+ __entry->bytesused,
+ show_flags(__entry->flags),
+ show_field(__entry->field),
+ __entry->timestamp,
+ show_timecode_type(__entry->timecode_type),
+ show_timecode_flags(__entry->timecode_flags),
+ __entry->timecode_frames,
+ __entry->timecode_seconds,
+ __entry->timecode_minutes,
+ __entry->timecode_hours,
+ __entry->timecode_userbits0,
+ __entry->timecode_userbits1,
+ __entry->timecode_userbits2,
+ __entry->timecode_userbits3,
+ __entry->sequence
+ )
+)
+
+DEFINE_EVENT(v4l2_event_class, v4l2_dqbuf,
+ TP_PROTO(int minor, struct v4l2_buffer *buf),
+ TP_ARGS(minor, buf)
+);
+
+DEFINE_EVENT(v4l2_event_class, v4l2_qbuf,
+ TP_PROTO(int minor, struct v4l2_buffer *buf),
+ TP_ARGS(minor, buf)
+);
+
+DECLARE_EVENT_CLASS(vb2_event_class,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(u32, queued_count)
+ __field(int, owned_by_drv_count)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u32, bytesused)
+ __field(u32, flags)
+ __field(u32, field)
+ __field(s64, timestamp)
+ __field(u32, timecode_type)
+ __field(u32, timecode_flags)
+ __field(u8, timecode_frames)
+ __field(u8, timecode_seconds)
+ __field(u8, timecode_minutes)
+ __field(u8, timecode_hours)
+ __field(u8, timecode_userbits0)
+ __field(u8, timecode_userbits1)
+ __field(u8, timecode_userbits2)
+ __field(u8, timecode_userbits3)
+ __field(u32, sequence)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = q->owner ? q->owner->vdev->minor : -1;
+ __entry->queued_count = q->queued_count;
+ __entry->owned_by_drv_count =
+ atomic_read(&q->owned_by_drv_count);
+ __entry->index = vb->v4l2_buf.index;
+ __entry->type = vb->v4l2_buf.type;
+ __entry->bytesused = vb->v4l2_planes[0].bytesused;
+ __entry->flags = vb->v4l2_buf.flags;
+ __entry->field = vb->v4l2_buf.field;
+ __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
+ __entry->timecode_type = vb->v4l2_buf.timecode.type;
+ __entry->timecode_flags = vb->v4l2_buf.timecode.flags;
+ __entry->timecode_frames = vb->v4l2_buf.timecode.frames;
+ __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
+ __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
+ __entry->timecode_hours = vb->v4l2_buf.timecode.hours;
+ __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
+ __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
+ __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
+ __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
+ __entry->sequence = vb->v4l2_buf.sequence;
+ ),
+
+ TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "
+ "type = %s, bytesused = %u, flags = %s, field = %s, "
+ "timestamp = %llu, timecode = { type = %s, flags = %s, "
+ "frames = %u, seconds = %u, minutes = %u, hours = %u, "
+ "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
+ __entry->queued_count,
+ __entry->owned_by_drv_count,
+ __entry->index, show_type(__entry->type),
+ __entry->bytesused,
+ show_flags(__entry->flags),
+ show_field(__entry->field),
+ __entry->timestamp,
+ show_timecode_type(__entry->timecode_type),
+ show_timecode_flags(__entry->timecode_flags),
+ __entry->timecode_frames,
+ __entry->timecode_seconds,
+ __entry->timecode_minutes,
+ __entry->timecode_hours,
+ __entry->timecode_userbits0,
+ __entry->timecode_userbits1,
+ __entry->timecode_userbits2,
+ __entry->timecode_userbits3,
+ __entry->sequence
)
+)
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
-V4L2_TRACE_EVENT(v4l2_dqbuf);
-V4L2_TRACE_EVENT(v4l2_qbuf);
+DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index a7aa607a4c55..fff846b512e6 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -131,6 +131,66 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
TP_ARGS(inode, flags)
);
+#ifdef CREATE_TRACE_POINTS
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+{
+ return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
+}
+
+static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
+{
+ struct cgroup *cgrp = wb->memcg_css->cgroup;
+ char *path;
+
+ path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
+ WARN_ON_ONCE(path != buf);
+}
+
+static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+{
+ if (wbc->wb)
+ return __trace_wb_cgroup_size(wbc->wb);
+ else
+ return 2;
+}
+
+static inline void __trace_wbc_assign_cgroup(char *buf,
+ struct writeback_control *wbc)
+{
+ if (wbc->wb)
+ __trace_wb_assign_cgroup(buf, wbc->wb);
+ else
+ strcpy(buf, "/");
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+{
+ return 2;
+}
+
+static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
+{
+ strcpy(buf, "/");
+}
+
+static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+{
+ return 2;
+}
+
+static inline void __trace_wbc_assign_cgroup(char *buf,
+ struct writeback_control *wbc)
+{
+ strcpy(buf, "/");
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+#endif /* CREATE_TRACE_POINTS */
+
DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
@@ -141,6 +201,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
__array(char, name, 32)
__field(unsigned long, ino)
__field(int, sync_mode)
+ __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
),
TP_fast_assign(
@@ -148,12 +209,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d",
+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
__entry->name,
__entry->ino,
- __entry->sync_mode
+ __entry->sync_mode,
+ __get_str(cgroup)
)
);
@@ -172,8 +235,8 @@ DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
);
DECLARE_EVENT_CLASS(writeback_work_class,
- TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
- TP_ARGS(bdi, work),
+ TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
+ TP_ARGS(wb, work),
TP_STRUCT__entry(
__array(char, name, 32)
__field(long, nr_pages)
@@ -183,10 +246,11 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -194,9 +258,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic = work->range_cyclic;
__entry->for_background = work->for_background;
__entry->reason = work->reason;
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -204,13 +269,14 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
- __print_symbolic(__entry->reason, WB_WORK_REASON)
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+ __get_str(cgroup)
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
- TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
- TP_ARGS(bdi, work))
+ TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
+ TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
@@ -230,26 +296,42 @@ TRACE_EVENT(writeback_pages_written,
);
DECLARE_EVENT_CLASS(writeback_class,
- TP_PROTO(struct backing_dev_info *bdi),
- TP_ARGS(bdi),
+ TP_PROTO(struct bdi_writeback *wb),
+ TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
- TP_printk("bdi %s",
- __entry->name
+ TP_printk("bdi %s: cgroup=%s",
+ __entry->name,
+ __get_str(cgroup)
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
- TP_PROTO(struct backing_dev_info *bdi), \
- TP_ARGS(bdi))
+ TP_PROTO(struct bdi_writeback *wb), \
+ TP_ARGS(wb))
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
+
+TRACE_EVENT(writeback_bdi_register,
+ TP_PROTO(struct backing_dev_info *bdi),
+ TP_ARGS(bdi),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ ),
+ TP_printk("bdi %s",
+ __entry->name
+ )
+);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -265,6 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
+ __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
),
TP_fast_assign(
@@ -278,11 +361,12 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx",
+ "start=0x%lx end=0x%lx cgroup=%s",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -292,7 +376,9 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
- __entry->range_end)
+ __entry->range_end,
+ __get_str(cgroup)
+ )
)
#define DEFINE_WBC_EVENT(name) \
@@ -312,6 +398,7 @@ TRACE_EVENT(writeback_queue_io,
__field(long, age)
__field(int, moved)
__field(int, reason)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
@@ -321,13 +408,15 @@ TRACE_EVENT(writeback_queue_io,
(jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
- __print_symbolic(__entry->reason, WB_WORK_REASON)
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+ __get_str(cgroup)
)
);
@@ -381,11 +470,11 @@ TRACE_EVENT(global_dirty_state,
TRACE_EVENT(bdi_dirty_ratelimit,
- TP_PROTO(struct backing_dev_info *bdi,
+ TP_PROTO(struct bdi_writeback *wb,
unsigned long dirty_rate,
unsigned long task_ratelimit),
- TP_ARGS(bdi, dirty_rate, task_ratelimit),
+ TP_ARGS(wb, dirty_rate, task_ratelimit),
TP_STRUCT__entry(
__array(char, bdi, 32)
@@ -395,36 +484,39 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
- __entry->write_bw = KBps(bdi->wb.write_bandwidth);
- __entry->avg_write_bw = KBps(bdi->wb.avg_write_bandwidth);
+ strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ __entry->write_bw = KBps(wb->write_bandwidth);
+ __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
- __entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit);
+ __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
- KBps(bdi->wb.balanced_dirty_ratelimit);
+ KBps(wb->balanced_dirty_ratelimit);
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu",
+ "balanced_dirty_ratelimit=%lu cgroup=%s",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
__entry->dirty_rate, /* bdi dirty rate */
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
- __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
+ __get_str(cgroup)
)
);
TRACE_EVENT(balance_dirty_pages,
- TP_PROTO(struct backing_dev_info *bdi,
+ TP_PROTO(struct bdi_writeback *wb,
unsigned long thresh,
unsigned long bg_thresh,
unsigned long dirty,
@@ -437,7 +529,7 @@ TRACE_EVENT(balance_dirty_pages,
long pause,
unsigned long start_time),
- TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+ TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
dirty_ratelimit, task_ratelimit,
dirtied, period, pause, start_time),
@@ -456,11 +548,12 @@ TRACE_EVENT(balance_dirty_pages,
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+ strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -478,6 +571,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->period = period * 1000 / HZ;
__entry->pause = pause * 1000 / HZ;
__entry->paused = (jiffies - start_time) * 1000 / HZ;
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
@@ -486,7 +580,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -500,7 +594,8 @@ TRACE_EVENT(balance_dirty_pages,
__entry->paused, /* ms */
__entry->pause, /* ms */
__entry->period, /* ms */
- __entry->think /* ms */
+ __entry->think, /* ms */
+ __get_str(cgroup)
)
);
@@ -514,6 +609,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__field(unsigned long, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
+ __dynamic_array(char, cgroup,
+ __trace_wb_cgroup_size(inode_to_wb(inode)))
),
TP_fast_assign(
@@ -522,14 +619,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
+ __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
- (jiffies - __entry->dirtied_when) / HZ
+ (jiffies - __entry->dirtied_when) / HZ,
+ __get_str(cgroup)
)
);
@@ -585,6 +684,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
+ __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
),
TP_fast_assign(
@@ -596,10 +696,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
__entry->wrote = nr_to_write - wbc->nr_to_write;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu",
+ "index=%lu to_write=%ld wrote=%lu cgroup=%s",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
@@ -607,7 +708,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
(jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
- __entry->wrote
+ __entry->wrote,
+ __get_str(cgroup)
)
);
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index e016bd9b1a04..8da542a2874d 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,9 +709,11 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
__SYSCALL(__NR_bpf, sys_bpf)
#define __NR_execveat 281
__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_membarrier 282
+__SYSCALL(__NR_membarrier, sys_membarrier)
#undef __NR_syscalls
-#define __NR_syscalls 282
+#define __NR_syscalls 283
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2f295cde657e..8c5e8b91a3cb 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -34,6 +34,13 @@
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+/* 8 bpp Red */
+#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index db809b722985..fd5aa47bd689 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_GPU_RESET 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
typedef struct drm_i915_getparam {
- int param;
+ __s32 param;
+ /*
+ * WARNING: Using pointers instead of fixed-size u64 means we need to write
+ * compat32 code. Don't repeat this mistake.
+ */
int __user *value;
} drm_i915_getparam_t;
@@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+/** Tell the kernel that the batchbuffer is processed by
+ * the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@ struct drm_i915_gem_context_param {
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
__u64 value;
};
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedbe38e..05b204954d16 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
/*************************************************************************/
/**
@@ -88,6 +89,8 @@
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+#define DRM_VMW_PARAM_SCREEN_TARGET 11
+#define DRM_VMW_PARAM_DX 12
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
struct drm_vmw_execbuf_arg {
uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
uint64_t fence_rep;
uint32_t version;
uint32_t flags;
+ uint32_t context_handle;
+ uint32_t pad64;
};
/**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
enum drm_vmw_shader_type {
drm_vmw_shader_type_vs = 0,
drm_vmw_shader_type_ps,
- drm_vmw_shader_type_gs
};
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
* if none.
* @base_size Size of the base mip level for all faces.
+ * @array_size Must be zero for non-DX hardware, and if non-zero
+ * svga3d_flags must have proper bind flags setup.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
uint32_t multisample_count;
uint32_t autogen_filter;
uint32_t buffer_handle;
- uint32_t pad64;
+ uint32_t array_size;
struct drm_vmw_size base_size;
};
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
uint32_t pad64;
};
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+ drm_vmw_context_legacy,
+ drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+ enum drm_vmw_extended_context req;
+ struct drm_vmw_context_arg rep;
+};
#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1ff9942718fe..f7b2db44eb4b 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -243,6 +243,7 @@ header-y += limits.h
header-y += llc.h
header-y += loop.h
header-y += lp.h
+header-y += lwtunnel.h
header-y += magic.h
header-y += major.h
header-y += map_to_7segment.h
@@ -251,6 +252,7 @@ header-y += mdio.h
header-y += media.h
header-y += media-bus-format.h
header-y += mei.h
+header-y += membarrier.h
header-y += memfd.h
header-y += mempolicy.h
header-y += meye.h
@@ -455,3 +457,4 @@ header-y += xfrm.h
header-y += xilinx-v4l2-controls.h
header-y += zorro.h
header-y += zorro_ids.h
+header-y += userfaultfd.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d3475e1f15ec..843540c398eb 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -266,6 +266,7 @@
#define AUDIT_OBJ_UID 109
#define AUDIT_OBJ_GID 110
#define AUDIT_FIELD_COMPARE 111
+#define AUDIT_EXE 112
#define AUDIT_ARG0 200
#define AUDIT_ARG1 (AUDIT_ARG0+1)
@@ -324,8 +325,10 @@ enum {
#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
+#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH 0x00000004
#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
- AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME)
+ AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
+ AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH)
/* deprecated: AUDIT_VERSION_* */
#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
@@ -382,6 +385,9 @@ enum {
#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_SPARC (EM_SPARC)
#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_TILEGX (EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_PERM_EXEC 1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 29ef6f99e43d..92a48e2d5461 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -114,6 +114,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
BPF_MAP_TYPE_PROG_ARRAY,
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};
enum bpf_prog_type {
@@ -249,6 +250,28 @@ enum bpf_func_id {
* Return: 0 on success
*/
BPF_FUNC_get_current_comm,
+
+ /**
+ * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+ * @skb: pointer to skb
+ * Return: classid if != 0
+ */
+ BPF_FUNC_get_cgroup_classid,
+ BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+ BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */
+
+ /**
+ * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+ * retrieve or populate tunnel metadata
+ * @skb: pointer to skb
+ * @key: pointer to 'struct bpf_tunnel_key'
+ * @size: size of 'struct bpf_tunnel_key'
+ * @flags: room for future extensions
+ * Return: 0 on success
+ */
+ BPF_FUNC_skb_get_tunnel_key,
+ BPF_FUNC_skb_set_tunnel_key,
+ BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
__BPF_FUNC_MAX_ID,
};
@@ -269,6 +292,12 @@ struct __sk_buff {
__u32 ifindex;
__u32 tc_index;
__u32 cb[5];
+ __u32 hash;
+};
+
+struct bpf_tunnel_key {
+ __u32 tunnel_id;
+ __u32 remote_ipv4;
};
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index 3060783c4191..df56c8ff0769 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -26,7 +26,7 @@
/* Version of the device interface */
#define DLM_DEVICE_VERSION_MAJOR 6
#define DLM_DEVICE_VERSION_MINOR 0
-#define DLM_DEVICE_VERSION_PATCH 1
+#define DLM_DEVICE_VERSION_PATCH 2
/* struct passed to the lock write */
struct dlm_lock_params {
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 061aca3a962d..d34611e35a30 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 32
+#define DM_VERSION_MINOR 33
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2015-6-26)"
+#define DM_VERSION_EXTRA "-ioctl (2015-8-18)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b08829667ed7..b56dfcfe922a 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -38,6 +38,9 @@
#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
#define EM_AARCH64 183 /* ARM 64 bit */
+#define EM_TILEPRO 188 /* Tilera TILEPro */
+#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
+#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd67aec187d9..cd1629170103 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
* respectively. For example, if the device supports HWTSTAMP_TX_ON,
* then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If the SIOCSHWSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is supported by HWTSTAMP_FILTER_V1_EVENT, then the
+ * driver should only report HWTSTAMP_FILTER_V1_EVENT in this op.
*/
struct ethtool_ts_info {
__u32 cmd;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b82d7e30974..96161b8202b5 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -43,7 +43,7 @@ enum {
FRA_UNUSED5,
FRA_FWMARK, /* mark */
FRA_FLOW, /* flow/class id */
- FRA_UNUSED6,
+ FRA_TUN_ID,
FRA_SUPPRESS_IFGROUP,
FRA_SUPPRESS_PREFIXLEN,
FRA_TABLE, /* Extended table id */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index eaaea6208b42..3635b7797508 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -182,6 +182,7 @@ struct br_mdb_entry {
#define MDB_TEMPORARY 0
#define MDB_PERMANENT 1
__u8 state;
+ __u16 vid;
struct {
union {
__be32 ip4;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index aa63ed023c2b..ea9221b0331a 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -42,6 +42,7 @@
#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
+#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_X25 0x0805 /* CCITT X.25 */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2c7e8e3d3981..3a5f263cfc2f 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -148,6 +148,7 @@ enum {
IFLA_PHYS_SWITCH_ID,
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
+ IFLA_PROTO_DOWN,
__IFLA_MAX
};
@@ -229,6 +230,8 @@ enum {
IFLA_BR_AGEING_TIME,
IFLA_BR_STP_STATE,
IFLA_BR_PRIORITY,
+ IFLA_BR_VLAN_FILTERING,
+ IFLA_BR_VLAN_PROTOCOL,
__IFLA_BR_MAX,
};
@@ -339,6 +342,15 @@ enum macvlan_macaddr_mode {
#define MACVLAN_FLAG_NOPROMISC 1
+/* VRF section */
+enum {
+ IFLA_VRF_UNSPEC,
+ IFLA_VRF_TABLE,
+ __IFLA_VRF_MAX
+};
+
+#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+
/* IPVLAN section */
enum {
IFLA_IPVLAN_UNSPEC,
@@ -381,6 +393,7 @@ enum {
IFLA_VXLAN_REMCSUM_RX,
IFLA_VXLAN_GBP,
IFLA_VXLAN_REMCSUM_NOPARTIAL,
+ IFLA_VXLAN_COLLECT_METADATA,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -397,6 +410,8 @@ enum {
IFLA_GENEVE_REMOTE,
IFLA_GENEVE_TTL,
IFLA_GENEVE_TOS,
+ IFLA_GENEVE_PORT, /* destination port */
+ IFLA_GENEVE_COLLECT_METADATA,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -431,6 +446,7 @@ enum {
IFLA_BOND_AD_ACTOR_SYS_PRIO,
IFLA_BOND_AD_USER_PORT_KEY,
IFLA_BOND_AD_ACTOR_SYSTEM,
+ IFLA_BOND_TLB_DYNAMIC_LB,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index d3d715f8c88f..9e7edfd8141e 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -55,6 +55,7 @@ struct sockaddr_ll {
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
#define PACKET_ROLLOVER_STATS 21
+#define PACKET_FANOUT_DATA 22
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -62,6 +63,8 @@ struct sockaddr_ll {
#define PACKET_FANOUT_ROLLOVER 3
#define PACKET_FANOUT_RND 4
#define PACKET_FANOUT_QM 5
+#define PACKET_FANOUT_CBPF 6
+#define PACKET_FANOUT_EBPF 7
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index bd3cc11a431f..af4de90ba27d 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -112,6 +112,7 @@ enum {
IFLA_GRE_ENCAP_FLAGS,
IFLA_GRE_ENCAP_SPORT,
IFLA_GRE_ENCAP_DPORT,
+ IFLA_GRE_COLLECT_METADATA,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644
index 000000000000..7ed9e670814e
--- /dev/null
+++ b/include/uapi/linux/ila.h
@@ -0,0 +1,15 @@
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+ ILA_ATTR_UNSPEC,
+ ILA_ATTR_LOCATOR, /* u64 */
+
+ __ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX (__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 3199243f2028..391395c06c7e 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -406,6 +406,11 @@ enum {
IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
+ IPVS_DAEMON_ATTR_SYNC_MAXLEN, /* UDP Payload Size */
+ IPVS_DAEMON_ATTR_MCAST_GROUP, /* IPv4 Multicast Address */
+ IPVS_DAEMON_ATTR_MCAST_GROUP6, /* IPv6 Multicast Address */
+ IPVS_DAEMON_ATTR_MCAST_PORT, /* Multicast Port (base) */
+ IPVS_DAEMON_ATTR_MCAST_TTL, /* Multicast TTL */
__IPVS_DAEMON_ATTR_MAX,
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 5efa54ae567c..38b4fef20219 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -171,6 +171,9 @@ enum {
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
DEVCONF_STABLE_SECRET,
+ DEVCONF_USE_OIF_ADDRS_ONLY,
+ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index a6c4962e5d46..5da5f8751ce7 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -33,6 +33,7 @@
#define KPF_THP 22
#define KPF_BALLOON 23
#define KPF_ZERO_PAGE 24
+#define KPF_IDLE 25
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 0d831f94f8a8..a9256f0331ae 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -237,6 +237,7 @@ struct kvm_run {
__u32 count;
__u64 data_offset; /* relative to kvm_run start */
} io;
+ /* KVM_EXIT_DEBUG */
struct {
struct kvm_debug_exit_arch arch;
} debug;
@@ -285,6 +286,7 @@ struct kvm_run {
__u32 data;
__u8 is_write;
} dcr;
+ /* KVM_EXIT_INTERNAL_ERROR */
struct {
__u32 suberror;
/* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
@@ -295,6 +297,7 @@ struct kvm_run {
struct {
__u64 gprs[32];
} osi;
+ /* KVM_EXIT_PAPR_HCALL */
struct {
__u64 nr;
__u64 ret;
@@ -819,6 +822,8 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_DISABLE_QUIRKS 116
#define KVM_CAP_X86_SMM 117
#define KVM_CAP_MULTI_ADDRESS_SPACE 118
+#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
+#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644
index 000000000000..34141a5dfe74
--- /dev/null
+++ b/include/uapi/linux/lwtunnel.h
@@ -0,0 +1,47 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+ LWTUNNEL_ENCAP_NONE,
+ LWTUNNEL_ENCAP_MPLS,
+ LWTUNNEL_ENCAP_IP,
+ LWTUNNEL_ENCAP_ILA,
+ LWTUNNEL_ENCAP_IP6,
+ __LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+enum lwtunnel_ip_t {
+ LWTUNNEL_IP_UNSPEC,
+ LWTUNNEL_IP_ID,
+ LWTUNNEL_IP_DST,
+ LWTUNNEL_IP_SRC,
+ LWTUNNEL_IP_TTL,
+ LWTUNNEL_IP_TOS,
+ LWTUNNEL_IP_SPORT,
+ LWTUNNEL_IP_DPORT,
+ LWTUNNEL_IP_FLAGS,
+ __LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+ LWTUNNEL_IP6_UNSPEC,
+ LWTUNNEL_IP6_ID,
+ LWTUNNEL_IP6_DST,
+ LWTUNNEL_IP6_SRC,
+ LWTUNNEL_IP6_HOPLIMIT,
+ LWTUNNEL_IP6_TC,
+ LWTUNNEL_IP6_SPORT,
+ LWTUNNEL_IP6_DPORT,
+ LWTUNNEL_IP6_FLAGS,
+ __LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
+
+#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
new file mode 100644
index 000000000000..e0b108bd2624
--- /dev/null
+++ b/include/uapi/linux/membarrier.h
@@ -0,0 +1,53 @@
+#ifndef _UAPI_LINUX_MEMBARRIER_H
+#define _UAPI_LINUX_MEMBARRIER_H
+
+/*
+ * linux/membarrier.h
+ *
+ * membarrier system call API
+ *
+ * Copyright (c) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * enum membarrier_cmd - membarrier system call command
+ * @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns
+ * a bitmask of valid commands.
+ * @MEMBARRIER_CMD_SHARED: Execute a memory barrier on all running threads.
+ * Upon return from system call, the caller thread
+ * is ensured that all running threads have passed
+ * through a state where all memory accesses to
+ * user-space addresses match program order between
+ * entry to and return from the system call
+ * (non-running threads are de facto in such a
+ * state). This covers threads from all processes
+ * running on the system. This command returns 0.
+ *
+ * Command to be passed to the membarrier system call. The commands need to
+ * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
+ * the value 0.
+ */
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+};
+
+#endif /* _UAPI_LINUX_MEMBARRIER_H */
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 139d4dd1cab8..24a6cb1aec86 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -41,4 +41,6 @@ struct mpls_label {
#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
+#define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */
+
#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..d80a0498f77e
--- /dev/null
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -0,0 +1,28 @@
+/*
+ * mpls tunnel api
+ *
+ * Authors:
+ * Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ * [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+ MPLS_IPTUNNEL_UNSPEC,
+ MPLS_IPTUNNEL_DST,
+ __MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 2b94ea2287bb..5b4a4be06e2b 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -87,7 +87,7 @@ struct nd_cmd_ars_status {
__u32 handle;
__u32 flags;
__u64 err_address;
- __u64 mask;
+ __u64 length;
} __packed records[0];
} __packed;
@@ -111,6 +111,11 @@ enum {
ND_CMD_VENDOR = 9,
};
+enum {
+ ND_ARS_VOLATILE = 1,
+ ND_ARS_PERSISTENT = 2,
+};
+
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
{
static const char * const names[] = {
@@ -194,4 +199,9 @@ enum nd_driver_flags {
enum {
ND_MIN_NAMESPACE_SIZE = 0x00400000,
};
+
+enum ars_masks {
+ ARS_STATUS_MASK = 0x0000FFFF,
+ ARS_EXT_STATUS_SHIFT = 16,
+};
#endif /* __NDCTL_H__ */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 2e35c61bbdd1..788655bfa0f3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -106,6 +106,7 @@ struct ndt_stats {
__u64 ndts_rcv_probes_ucast;
__u64 ndts_periodic_gc_runs;
__u64 ndts_forced_gc_runs;
+ __u64 ndts_table_fulls;
};
enum {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index ceeefe6681b5..ed4e776e1242 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -13,6 +13,8 @@ enum sctp_conntrack {
SCTP_CONNTRACK_SHUTDOWN_SENT,
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED,
SCTP_CONNTRACK_MAX
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a99e6a997140..d8c8a7c9d88a 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -756,16 +756,25 @@ enum nft_ct_attributes {
};
#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
+enum nft_limit_type {
+ NFT_LIMIT_PKTS,
+ NFT_LIMIT_PKT_BYTES
+};
+
/**
* enum nft_limit_attributes - nf_tables limit expression netlink attributes
*
* @NFTA_LIMIT_RATE: refill rate (NLA_U64)
* @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
*/
enum nft_limit_attributes {
NFTA_LIMIT_UNSPEC,
NFTA_LIMIT_RATE,
NFTA_LIMIT_UNIT,
+ NFTA_LIMIT_BURST,
+ NFTA_LIMIT_TYPE,
__NFTA_LIMIT_MAX
};
#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
@@ -936,6 +945,20 @@ enum nft_redir_attributes {
#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1)
/**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_register)
+ */
+enum nft_dup_attributes {
+ NFTA_DUP_UNSPEC,
+ NFTA_DUP_SREG_ADDR,
+ NFTA_DUP_SREG_DEV,
+ __NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX (__NFTA_DUP_MAX - 1)
+
+/**
* enum nft_gen_attributes - nf_tables ruleset generation attributes
*
* @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index acad6c52a652..c1a4e1441a25 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -61,6 +61,7 @@ enum ctattr_tuple {
CTA_TUPLE_UNSPEC,
CTA_TUPLE_IP,
CTA_TUPLE_PROTO,
+ CTA_TUPLE_ZONE,
__CTA_TUPLE_MAX
};
#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 1ab0b97b3a1e..f2c10dc140d6 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -92,6 +92,8 @@ enum ctattr_timeout_sctp {
CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_CT.h b/include/uapi/linux/netfilter/xt_CT.h
index 5a688c1ca4d7..9e520418b858 100644
--- a/include/uapi/linux/netfilter/xt_CT.h
+++ b/include/uapi/linux/netfilter/xt_CT.h
@@ -6,7 +6,13 @@
enum {
XT_CT_NOTRACK = 1 << 0,
XT_CT_NOTRACK_ALIAS = 1 << 1,
- XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+ XT_CT_ZONE_DIR_ORIG = 1 << 2,
+ XT_CT_ZONE_DIR_REPL = 1 << 3,
+ XT_CT_ZONE_MARK = 1 << 4,
+
+ XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+ XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+ XT_CT_ZONE_MARK,
};
struct xt_ct_target_info {
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
index 205ed62e4605..cd2e940c8bf5 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -10,7 +10,9 @@ enum ip6t_reject_with {
IP6T_ICMP6_ADDR_UNREACH,
IP6T_ICMP6_PORT_UNREACH,
IP6T_ICMP6_ECHOREPLY,
- IP6T_TCP_RESET
+ IP6T_TCP_RESET,
+ IP6T_ICMP6_POLICY_FAIL,
+ IP6T_ICMP6_REJECT_ROUTE
};
struct ip6t_reject_info {
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index cf6a65cccbdf..6f3fe16cd22a 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -110,6 +110,7 @@ struct nlmsgerr {
#define NETLINK_TX_RING 7
#define NETLINK_LISTEN_ALL_NSID 8
#define NETLINK_LIST_MEMBERSHIPS 9
+#define NETLINK_CAP_ACK 10
struct nl_pktinfo {
__u32 group;
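A small usage sketch (not part of the patch) for the new NETLINK_CAP_ACK socket option; nlsk is assumed to be an already-open netlink socket, and the SOL_NETLINK fallback is only needed where the libc headers predate it:

#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* Ask the kernel to send only the netlink header in error ACKs instead
 * of echoing the full original request. */
static int enable_cap_ack(int nlsk)
{
	int one = 1;

	return setsockopt(nlsk, SOL_NETLINK, NETLINK_CAP_ACK,
			  &one, sizeof(one));
}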
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2119c7c274d7..2b871e0858d9 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -15,7 +15,7 @@
#include <linux/types.h>
-#define NFS4_BITMAP_SIZE 2
+#define NFS4_BITMAP_SIZE 3
#define NFS4_VERIFIER_SIZE 8
#define NFS4_STATEID_SEQID_SIZE 4
#define NFS4_STATEID_OTHER_SIZE 12
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index 9bb9771a107f..552726631162 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -22,6 +22,7 @@
#define NFS_ACLCNT 0x0002
#define NFS_DFACL 0x0004
#define NFS_DFACLCNT 0x0008
+#define NFS_ACL_MASK 0x000f
/* Flag for Default ACL entries */
#define NFS_ACL_DEFAULT 0x1000
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1dab77601c21..32e07d8cbaf4 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -164,6 +164,9 @@ enum ovs_packet_cmd {
* %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
* output port is actually a tunnel port. Contains the output tunnel key
* extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and an
+ * %OVS_PACKET_ATTR_USERSPACE action; specifies the maximum received
+ * fragment size.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -180,6 +183,7 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_UNUSED2,
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
+ OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
__OVS_PACKET_ATTR_MAX
};
@@ -319,9 +323,13 @@ enum ovs_key_attr {
OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
* The implementation may restrict
* the accepted length of the array. */
+ OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */
+ OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */
+ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */
+ OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */
#ifdef __KERNEL__
- OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
#endif
__OVS_KEY_ATTR_MAX
};
@@ -431,6 +439,20 @@ struct ovs_key_nd {
__u8 nd_tll[ETH_ALEN];
};
+#define OVS_CT_LABEL_LEN 16
+struct ovs_key_ct_label {
+ __u8 ct_label[OVS_CT_LABEL_LEN];
+};
+
+/* OVS_KEY_ATTR_CT_STATE flags */
+#define OVS_CS_F_NEW 0x01 /* Beginning of a new connection. */
+#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */
+#define OVS_CS_F_RELATED 0x04 /* Related to an established
+ * connection. */
+#define OVS_CS_F_INVALID 0x20 /* Could not track connection. */
+#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */
+#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */
+
/**
* enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
* @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
@@ -595,6 +617,39 @@ struct ovs_action_hash {
};
/**
+ * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
+ * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+ * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
+ * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
+ * mask, the corresponding bit in the value is copied to the connection
+ * tracking mark field in the connection.
+ * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+ * mask. For each bit set in the mask, the corresponding bit in the value is
+ * copied to the connection tracking label field in the connection.
+ * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
+ */
+enum ovs_ct_attr {
+ OVS_CT_ATTR_UNSPEC,
+ OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */
+ OVS_CT_ATTR_ZONE, /* u16 zone id. */
+ OVS_CT_ATTR_MARK, /* mark to associate with this connection. */
+ OVS_CT_ATTR_LABEL, /* label to associate with this connection. */
+ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of
+ related connections. */
+ __OVS_CT_ATTR_MAX
+};
+
+#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
+
+/*
+ * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
+ * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
+ * future packets for the same connection to be identified as 'established'
+ * or 'related'.
+ */
+#define OVS_CT_F_COMMIT 0x01
+
+/**
* enum ovs_action_attr - Action types.
*
* @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
@@ -623,6 +678,8 @@ struct ovs_action_hash {
* indicate the new packet contents. This could potentially still be
* %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there
* is no MPLS label stack, as determined by ethertype, no action is taken.
+ * @OVS_ACTION_ATTR_CT: Track the connection. Populate the conntrack-related
+ * entries in the flow key.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -648,6 +705,7 @@ enum ovs_action_attr {
* data immediately followed by a mask.
* The data must be zero for the unmasked
* bits. */
+ OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
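A sketch (not part of the patch) showing how a user-space flow handler might interpret the new OVS_KEY_ATTR_CT_STATE bitmask; ct_state is assumed to have been read out of a netlink flow-key attribute:

#include <linux/openvswitch.h>

/* True for packets that conntrack has seen before and classified as the
 * reply direction of an established connection. */
static int ovs_ct_established_reply(unsigned char ct_state)
{
	return (ct_state & OVS_CS_F_TRACKED) &&
	       (ct_state & OVS_CS_F_ESTABLISHED) &&
	       (ct_state & OVS_CS_F_REPLY_DIR);
}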
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 31891d9535e2..a8d0759a9e40 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -190,4 +190,11 @@ struct prctl_mm_map {
# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT 47
+# define PR_CAP_AMBIENT_IS_SET 1
+# define PR_CAP_AMBIENT_RAISE 2
+# define PR_CAP_AMBIENT_LOWER 3
+# define PR_CAP_AMBIENT_CLEAR_ALL 4
+
#endif /* _LINUX_PRCTL_H */
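A minimal user-space sketch (not part of the patch) of the new PR_CAP_AMBIENT operations; CAP_NET_BIND_SERVICE is just an example, the constants come from the header above (pulled in via <sys/prctl.h> on current glibc), and raising a capability only succeeds if it is already in the permitted and inheritable sets:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>

int main(void)
{
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
		  CAP_NET_BIND_SERVICE, 0, 0) < 0)
		perror("PR_CAP_AMBIENT_RAISE");

	/* Returns 1 if the capability is now in the ambient set. */
	printf("ambient: %ld\n",
	       (long)prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET,
			   CAP_NET_BIND_SERVICE, 0, 0));
	return 0;
}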
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index cf1019e15f5b..a7a697986614 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -89,9 +89,11 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP)
/* eventless options */
-#define PTRACE_O_EXITKILL (1 << 20)
+#define PTRACE_O_EXITKILL (1 << 20)
+#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
-#define PTRACE_O_MASK (0x000000ff | PTRACE_O_EXITKILL)
+#define PTRACE_O_MASK (\
+ 0x000000ff | PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP)
#include <asm/ptrace.h>
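An illustrative tracer-side sketch (not part of the patch) of the new option, as a checkpoint/restore style tool might use it; pid is assumed to be an already-attached, stopped tracee, and the caller needs CAP_SYS_ADMIN and must not itself be seccomp-confined:

#include <sys/types.h>
#include <sys/ptrace.h>

#ifndef PTRACE_O_SUSPEND_SECCOMP
#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
#endif

/* Temporarily disable the tracee's seccomp filters so its memory and
 * registers can be manipulated without tripping them. */
static long suspend_tracee_seccomp(pid_t pid)
{
	return ptrace(PTRACE_SETOPTIONS, pid, 0,
		      PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP);
}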
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd8f07f1d34..702024769c74 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -308,6 +308,8 @@ enum rtattr_type_t {
RTA_VIA,
RTA_NEWDST,
RTA_PREF,
+ RTA_ENCAP_TYPE,
+ RTA_ENCAP,
__RTA_MAX
};
@@ -416,10 +418,13 @@ enum {
#define RTAX_MAX (__RTAX_MAX - 1)
-#define RTAX_FEATURE_ECN 0x00000001
-#define RTAX_FEATURE_SACK 0x00000002
-#define RTAX_FEATURE_TIMESTAMP 0x00000004
-#define RTAX_FEATURE_ALLFRAG 0x00000008
+#define RTAX_FEATURE_ECN (1 << 0)
+#define RTAX_FEATURE_SACK (1 << 1)
+#define RTAX_FEATURE_TIMESTAMP (1 << 2)
+#define RTAX_FEATURE_ALLFRAG (1 << 3)
+
+#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
+ RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
struct rta_session {
__u8 proto;
diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h
index 985aac9e6bf8..35ac35cef217 100644
--- a/include/uapi/linux/securebits.h
+++ b/include/uapi/linux/securebits.h
@@ -43,9 +43,18 @@
#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
+/* When set, a process cannot add new capabilities to its ambient set. */
+#define SECURE_NO_CAP_AMBIENT_RAISE 6
+#define SECURE_NO_CAP_AMBIENT_RAISE_LOCKED 7 /* make bit-6 immutable */
+
+#define SECBIT_NO_CAP_AMBIENT_RAISE (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
+#define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \
+ (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED))
+
#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
issecure_mask(SECURE_NO_SETUID_FIXUP) | \
- issecure_mask(SECURE_KEEP_CAPS))
+ issecure_mask(SECURE_KEEP_CAPS) | \
+ issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
#endif /* _UAPI_LINUX_SECUREBITS_H */
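A sketch (not part of the patch) of turning the new bits on from user space; PR_SET_SECUREBITS requires CAP_SETPCAP, and locking the bit makes the restriction irreversible for this process tree:

#include <sys/prctl.h>
#include <linux/securebits.h>

/* Forbid this process and its descendants from ever raising ambient
 * capabilities, and lock that decision in place. */
static int forbid_ambient_raise(void)
{
	unsigned long bits = prctl(PR_GET_SECUREBITS, 0, 0, 0, 0);

	return prctl(PR_SET_SECUREBITS,
		     bits | SECBIT_NO_CAP_AMBIENT_RAISE |
			    SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED,
		     0, 0, 0);
}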
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index eee8968407f0..25a9ad8bcef1 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,8 @@ enum
LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
+ LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
+ LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b67f99d3c520..95c6521d8a95 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -42,10 +42,6 @@
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
-/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
-#define xstr(s) str(s)
-#define str(s) #s
-
struct tcmu_mailbox {
__u16 version;
__u16 flags;
diff --git a/include/uapi/linux/toshiba.h b/include/uapi/linux/toshiba.h
index e9bef5b2f91e..c58bf4b5bb26 100644
--- a/include/uapi/linux/toshiba.h
+++ b/include/uapi/linux/toshiba.h
@@ -1,6 +1,7 @@
/* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops
*
* Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ * Copyright (c) 2015 Azael Avalos <coproscefalo@gmail.com>
*
* Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers
* on making sure the structure is aligned and packed.
@@ -20,9 +21,18 @@
#ifndef _UAPI_LINUX_TOSHIBA_H
#define _UAPI_LINUX_TOSHIBA_H
-#define TOSH_PROC "/proc/toshiba"
-#define TOSH_DEVICE "/dev/toshiba"
-#define TOSH_SMM _IOWR('t', 0x90, int) /* broken: meant 24 bytes */
+/*
+ * Toshiba modules paths
+ */
+
+#define TOSH_PROC "/proc/toshiba"
+#define TOSH_DEVICE "/dev/toshiba"
+#define TOSHIBA_ACPI_PROC "/proc/acpi/toshiba"
+#define TOSHIBA_ACPI_DEVICE "/dev/toshiba_acpi"
+
+/*
+ * Toshiba SMM structure
+ */
typedef struct {
unsigned int eax;
@@ -33,5 +43,21 @@ typedef struct {
unsigned int edi __attribute__ ((packed));
} SMMRegisters;
+/*
+ * IOCTLs (0x90 - 0x91)
+ */
+
+#define TOSH_SMM _IOWR('t', 0x90, SMMRegisters)
+/*
+ * Convenience toshiba_acpi command.
+ *
+ * The System Configuration Interface (SCI) is opened/closed internally,
+ * shielding userspace from buggy BIOSes.
+ *
+ * The toshiba_acpi module checks whether the eax register is set with
+ * SCI_GET (0xf300) or SCI_SET (0xf400), returning -EINVAL if not.
+ */
+#define TOSHIBA_ACPI_SCI _IOWR('t', 0x91, SMMRegisters)
+
#endif /* _UAPI_LINUX_TOSHIBA_H */
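An illustrative user-space sketch (not part of the patch) of the new TOSHIBA_ACPI_SCI ioctl; per the comment above, eax must be SCI_GET (0xf300) or SCI_SET (0xf400), and which SCI register is addressed would go in the remaining SMMRegisters fields (omitted here):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/toshiba.h>

int main(void)
{
	SMMRegisters regs = { .eax = 0xf300 /* SCI_GET */ };
	int fd = open(TOSHIBA_ACPI_DEVICE, O_RDWR);

	if (fd < 0 || ioctl(fd, TOSHIBA_ACPI_SCI, &regs) < 0) {
		perror("TOSHIBA_ACPI_SCI");
		return 1;
	}
	printf("eax=%#x ebx=%#x ecx=%#x\n", regs.eax, regs.ebx, regs.ecx);
	close(fd);
	return 0;
}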
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
new file mode 100644
index 000000000000..df0e09bb7dd5
--- /dev/null
+++ b/include/uapi/linux/userfaultfd.h
@@ -0,0 +1,169 @@
+/*
+ * include/uapi/linux/userfaultfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_H
+#define _LINUX_USERFAULTFD_H
+
+#include <linux/types.h>
+
+#include <linux/compiler.h>
+
+#define UFFD_API ((__u64)0xAA)
+/*
+ * After implementing the respective features it will become:
+ * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
+ * UFFD_FEATURE_EVENT_FORK)
+ */
+#define UFFD_API_FEATURES (0)
+#define UFFD_API_IOCTLS \
+ ((__u64)1 << _UFFDIO_REGISTER | \
+ (__u64)1 << _UFFDIO_UNREGISTER | \
+ (__u64)1 << _UFFDIO_API)
+#define UFFD_API_RANGE_IOCTLS \
+ ((__u64)1 << _UFFDIO_WAKE | \
+ (__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_ZEROPAGE)
+
+/*
+ * The valid ioctl command number range with this API is from 0x00 to
+ * 0x3F. UFFDIO_API is the fixed number; everything else can be
+ * changed by implementing a different UFFD_API. If sticking to the
+ * same UFFD_API, more ioctls can be added, and userland learns which
+ * ioctls the running kernel implements through the ioctl command
+ * bitmask written by UFFDIO_API.
+ */
+#define _UFFDIO_REGISTER (0x00)
+#define _UFFDIO_UNREGISTER (0x01)
+#define _UFFDIO_WAKE (0x02)
+#define _UFFDIO_COPY (0x03)
+#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_API (0x3F)
+
+/* userfaultfd ioctl ids */
+#define UFFDIO 0xAA
+#define UFFDIO_API _IOWR(UFFDIO, _UFFDIO_API, \
+ struct uffdio_api)
+#define UFFDIO_REGISTER _IOWR(UFFDIO, _UFFDIO_REGISTER, \
+ struct uffdio_register)
+#define UFFDIO_UNREGISTER _IOR(UFFDIO, _UFFDIO_UNREGISTER, \
+ struct uffdio_range)
+#define UFFDIO_WAKE _IOR(UFFDIO, _UFFDIO_WAKE, \
+ struct uffdio_range)
+#define UFFDIO_COPY _IOWR(UFFDIO, _UFFDIO_COPY, \
+ struct uffdio_copy)
+#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
+ struct uffdio_zeropage)
+
+/* read() structure */
+struct uffd_msg {
+ __u8 event;
+
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 reserved3;
+
+ union {
+ struct {
+ __u64 flags;
+ __u64 address;
+ } pagefault;
+
+ struct {
+ /* unused reserved fields */
+ __u64 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ } reserved;
+ } arg;
+} __packed;
+
+/*
+ * Start at 0x12 and not at 0 to be more strict against bugs.
+ */
+#define UFFD_EVENT_PAGEFAULT 0x12
+#if 0 /* not available yet */
+#define UFFD_EVENT_FORK 0x13
+#endif
+
+/* flags for UFFD_EVENT_PAGEFAULT */
+#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
+#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
+
+struct uffdio_api {
+ /* userland asks for an API number and the features to enable */
+ __u64 api;
+ /*
+ * The kernel answers below with all available features for
+ * the API; this notifies userland of which events and/or
+ * which flags for each event are enabled in the current
+ * kernel.
+ *
+ * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE
+ * are to be considered implicitly always enabled in all kernels as
+ * long as the uffdio_api.api requested matches UFFD_API.
+ */
+#if 0 /* not available yet */
+#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
+#define UFFD_FEATURE_EVENT_FORK (1<<1)
+#endif
+ __u64 features;
+
+ __u64 ioctls;
+};
+
+struct uffdio_range {
+ __u64 start;
+ __u64 len;
+};
+
+struct uffdio_register {
+ struct uffdio_range range;
+#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
+#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
+ __u64 mode;
+
+ /*
+ * kernel answers which ioctl commands are available for the
+ * range, keep at the end as the last 8 bytes aren't read.
+ */
+ __u64 ioctls;
+};
+
+struct uffdio_copy {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * A write-protection flag will be added later to allow mapping
+ * pages write-protected on the fly. Such a flag will only be
+ * available if the write-protection ioctls are implemented for the
+ * range, according to uffdio_register.ioctls.
+ */
+#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * "copy" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 copy;
+};
+
+struct uffdio_zeropage {
+ struct uffdio_range range;
+#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * "zeropage" is written by the ioctl and must be at the end:
+ * the copy_from_user will not read the last 8 bytes.
+ */
+ __s64 zeropage;
+};
+
+#endif /* _LINUX_USERFAULTFD_H */
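A minimal user-space sketch (not part of the patch) of the handshake this header defines: create the descriptor with the raw userfaultfd syscall (assuming __NR_userfaultfd is available in the installed headers), negotiate UFFD_API, then register a page-aligned, mmap'ed range for missing-page events. Error handling is collapsed for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

/* Returns the userfaultfd on success, -1 on failure. */
static int uffd_register_missing(void *area, unsigned long len)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0 ||
	    ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) {
		perror("userfaultfd");
		return -1;
	}
	/* reg.ioctls now advertises UFFDIO_COPY/UFFDIO_ZEROPAGE support
	 * for this range; faults are read() as struct uffd_msg. */
	return uffd;
}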
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 9f6e108ff4a0..d448c536b49d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -174,6 +174,10 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
+/* The base for the tc358743 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/vsp1.h b/include/uapi/linux/vsp1.h
index e18858f6e865..9a823696d816 100644
--- a/include/uapi/linux/vsp1.h
+++ b/include/uapi/linux/vsp1.h
@@ -28,7 +28,7 @@
_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct vsp1_lut_config)
struct vsp1_lut_config {
- u32 lut[256];
+ __u32 lut[256];
};
#endif /* __VSP1_USER_H__ */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 99a8ca15fe64..1e889aa8a36e 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -29,8 +29,10 @@ struct cxl_ioctl_start_work {
#define CXL_START_WORK_AMR 0x0000000000000001ULL
#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
+#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
- CXL_START_WORK_NUM_IRQS)
+ CXL_START_WORK_NUM_IRQS |\
+ CXL_START_WORK_ERR_FF)
/* Possible modes that an afu can be in */
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 687ae332200f..231901b08f6c 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -5,3 +5,4 @@ header-y += ib_user_sa.h
header-y += ib_user_verbs.h
header-y += rdma_netlink.h
header-y += rdma_user_cm.h
+header-y += hfi/
diff --git a/include/uapi/rdma/hfi/Kbuild b/include/uapi/rdma/hfi/Kbuild
new file mode 100644
index 000000000000..ef23c294fc71
--- /dev/null
+++ b/include/uapi/rdma/hfi/Kbuild
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += hfi1_user.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
new file mode 100644
index 000000000000..78c442fbf263
--- /dev/null
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -0,0 +1,427 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+#ifndef _LINUX__HFI1_USER_H
+#define _LINUX__HFI1_USER_H
+
+#include <linux/types.h>
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the userversion field of hfi1_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures change in an incompatible
+ * way. The driver must be the same for initialization to succeed.
+ */
+#define HFI1_USER_SWMAJOR 4
+
+/*
+ * Minor version differences are always compatible
+ * within a major version. However, if the user software is newer
+ * than the driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define HFI1_USER_SWMINOR 0
+
+/*
+ * Set of HW and driver capability/feature bits.
+ * These bit values are used to configure enabled/disabled HW and
+ * driver features. The same set of bits are communicated to user
+ * space.
+ */
+#define HFI1_CAP_DMA_RTAIL (1UL << 0) /* Use DMA'ed RTail value */
+#define HFI1_CAP_SDMA (1UL << 1) /* Enable SDMA support */
+#define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */
+#define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */
+#define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */
+/* 1UL << 5 reserved */
+#define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. use CSR */
+#define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/
+#define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */
+#define HFI1_CAP_NODROP_EGR_FULL (1UL << 9) /* Don't drop on EGR buffs full */
+#define HFI1_CAP_TID_UNMAP (1UL << 10) /* Enable Expected TID caching */
+#define HFI1_CAP_PRINT_UNIMPL (1UL << 11) /* Show for unimplemented feats */
+#define HFI1_CAP_ALLOW_PERM_JKEY (1UL << 12) /* Allow use of permissive JKEY */
+#define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */
+#define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */
+#define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */
+#define HFI1_CAP_QSFP_ENABLED (1UL << 16) /* Enable QSFP check during LNI */
+#define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */
+#define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */
+
+#define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0)
+#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
+#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
+
+/*
+ * If the unit is specified via open, HFI choice is fixed. If port is
+ * specified, it's also fixed. Otherwise we try to spread contexts
+ * across ports and HFIs, using different algorithms. WITHIN is
+ * the old default, prior to this mechanism.
+ */
+#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
+ * ports; this is the default */
+#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
+ * active ports within), then next HFI */
+#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
+
+
+/* User commands. */
+#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
+#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
+#define HFI1_CMD_USER_INFO 3 /* set up userspace */
+#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
+#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
+#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
+#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
+
+#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
+#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
+#define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */
+#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
+#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
+/* separate EPROM commands from normal PSM commands */
+#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
+#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
+#define HFI1_CMD_EP_ERASE_P0 66 /* erase EPROM partition 0 */
+#define HFI1_CMD_EP_ERASE_P1 67 /* erase EPROM partition 1 */
+#define HFI1_CMD_EP_READ_P0 68 /* read EPROM partition 0 */
+#define HFI1_CMD_EP_READ_P1 69 /* read EPROM partition 1 */
+#define HFI1_CMD_EP_WRITE_P0 70 /* write EPROM partition 0 */
+#define HFI1_CMD_EP_WRITE_P1 71 /* write EPROM partition 1 */
+
+#define _HFI1_EVENT_FROZEN_BIT 0
+#define _HFI1_EVENT_LINKDOWN_BIT 1
+#define _HFI1_EVENT_LID_CHANGE_BIT 2
+#define _HFI1_EVENT_LMC_CHANGE_BIT 3
+#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4
+#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT
+
+#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT)
+#define HFI1_EVENT_LINKDOWN_BIT (1UL << _HFI1_EVENT_LINKDOWN_BIT)
+#define HFI1_EVENT_LID_CHANGE_BIT (1UL << _HFI1_EVENT_LID_CHANGE_BIT)
+#define HFI1_EVENT_LMC_CHANGE_BIT (1UL << _HFI1_EVENT_LMC_CHANGE_BIT)
+#define HFI1_EVENT_SL2VL_CHANGE_BIT (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT)
+
+/*
+ * These are the status bits readable (in ASCII form, 64bit value)
+ * from the "status" sysfs file. For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define HFI1_STATUS_INITTED 0x1 /* basic initialization done */
+/* Chip has been found and initialized */
+#define HFI1_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define HFI1_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define HFI1_STATUS_IB_CONF 0x80
+/* A Fatal hardware error has occurred. */
+#define HFI1_STATUS_HWERROR 0x200
+
+/*
+ * Number of supported shared contexts.
+ * This is the maximum number of software contexts that can share
+ * a hardware send/receive context.
+ */
+#define HFI1_MAX_SHARED_CTXTS 8
+
+/*
+ * Poll types
+ */
+#define HFI1_POLL_TYPE_ANYRCV 0x0
+#define HFI1_POLL_TYPE_URGENT 0x1
+
+/*
+ * This structure is passed to the driver to tell it where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended, if userversion is changed so user code can tell, if needed
+ */
+struct hfi1_user_info {
+ /*
+ * version of user software, to detect compatibility issues.
+ * Should be set to HFI1_USER_SWVERSION.
+ */
+ __u32 userversion;
+ __u16 pad;
+ /* HFI selection algorithm, if unit has not selected */
+ __u16 hfi1_alg;
+ /*
+ * If two or more processes wish to share a context, each process
+ * must set the subcontext_cnt and subcontext_id to the same
+ * values. The only restriction on the subcontext_id is that
+ * it be unique for a given node.
+ */
+ __u16 subctxt_cnt;
+ __u16 subctxt_id;
+ /* 128bit UUID passed in by PSM. */
+ __u8 uuid[16];
+};
+
+struct hfi1_ctxt_info {
+ __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+ __u32 rcvegr_size; /* size of each eager buffer */
+ __u16 num_active; /* number of active units */
+ __u16 unit; /* unit (chip) assigned to caller */
+ __u16 ctxt; /* ctxt on unit assigned to caller */
+ __u16 subctxt; /* subctxt on unit assigned to caller */
+ __u16 rcvtids; /* number of Rcv TIDs for this context */
+ __u16 credits; /* number of PIO credits for this context */
+ __u16 numa_node; /* NUMA node of the assigned device */
+ __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */
+ __u16 send_ctxt; /* send context in use by this user context */
+ __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */
+ __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */
+ __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */
+ __u16 sdma_ring_size; /* number of entries in SDMA request ring */
+};
+
+struct hfi1_tid_info {
+ /* virtual address of first page in transfer */
+ __u64 vaddr;
+ /* pointer to tid array. this array is big enough */
+ __u64 tidlist;
+ /* number of tids programmed by this request */
+ __u32 tidcnt;
+ /* length of transfer buffer programmed by this request */
+ __u32 length;
+ /*
+ * pointer to bitmap of TIDs used for this call;
+ * checked for being large enough at open
+ */
+ __u64 tidmap;
+};
+
+struct hfi1_cmd {
+ __u32 type; /* command type */
+ __u32 len; /* length of struct pointed to by add */
+ __u64 addr; /* pointer to user structure */
+};
+
+enum hfi1_sdma_comp_state {
+ FREE = 0,
+ QUEUED,
+ COMPLETE,
+ ERROR
+};
+
+/*
+ * SDMA completion ring entry
+ */
+struct hfi1_sdma_comp_entry {
+ __u32 status;
+ __u32 errcode;
+};
+
+/*
+ * Device status and notifications from driver to user-space.
+ */
+struct hfi1_status {
+ __u64 dev; /* device/hw status bits */
+ __u64 port; /* port state and status bits */
+ char freezemsg[0];
+};
+
+/*
+ * This structure is returned by the driver immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets.
+ */
+struct hfi1_base_info {
+ /* version of hardware, for feature checking. */
+ __u32 hw_version;
+ /* version of software, for feature checking. */
+ __u32 sw_version;
+ /* Job key */
+ __u16 jkey;
+ __u16 padding1;
+ /*
+ * The special QP (queue pair) value that identifies PSM
+ * protocol packet from standard IB packets.
+ */
+ __u32 bthqp;
+ /* PIO credit return address, */
+ __u64 sc_credits_addr;
+ /*
+ * Base address of write-only pio buffers for this process.
+ * Each buffer has sendpio_credits*64 bytes.
+ */
+ __u64 pio_bufbase_sop;
+ /*
+ * Base address of write-only pio buffers for this process.
+ * Each buffer has sendpio_credits*64 bytes.
+ */
+ __u64 pio_bufbase;
+ /* address where receive buffer queue is mapped into */
+ __u64 rcvhdr_bufbase;
+ /* base address of Eager receive buffers. */
+ __u64 rcvegr_bufbase;
+ /* base address of SDMA completion ring */
+ __u64 sdma_comp_bufbase;
+ /*
+ * User register base for init code, not to be used directly by
+ * protocol or applications. Always maps real chip register space.
+ * the register addresses are:
+ * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
+ * ur_rcvtidflow
+ */
+ __u64 user_regbase;
+ /* notification events */
+ __u64 events_bufbase;
+ /* status page */
+ __u64 status_bufbase;
+ /* rcvhdrtail update */
+ __u64 rcvhdrtail_base;
+ /*
+ * shared memory pages for subctxts if ctxt is shared; these cover
+ * all the processes in the group sharing a single context.
+ * all have enough space for the num_subcontexts value on this job.
+ */
+ __u64 subctxt_uregbase;
+ __u64 subctxt_rcvegrbuf;
+ __u64 subctxt_rcvhdrbuf;
+};
+
+enum sdma_req_opcode {
+ EXPECTED = 0,
+ EAGER
+};
+
+#define HFI1_SDMA_REQ_VERSION_MASK 0xF
+#define HFI1_SDMA_REQ_VERSION_SHIFT 0x0
+#define HFI1_SDMA_REQ_OPCODE_MASK 0xF
+#define HFI1_SDMA_REQ_OPCODE_SHIFT 0x4
+#define HFI1_SDMA_REQ_IOVCNT_MASK 0xFF
+#define HFI1_SDMA_REQ_IOVCNT_SHIFT 0x8
+
+struct sdma_req_info {
+ /*
+ * bits 0-3 - version (currently unused)
+ * bits 4-7 - opcode (enum sdma_req_opcode)
+ * bits 8-15 - io vector count
+ */
+ __u16 ctrl;
+ /*
+ * Number of fragments contained in this request.
+ * User-space has already computed how many
+ * fragment-sized packet the user buffer will be
+ * split into.
+ */
+ __u16 npkts;
+ /*
+ * Size of each fragment the user buffer will be
+ * split into.
+ */
+ __u16 fragsize;
+ /*
+ * Index of the slot in the SDMA completion ring
+ * this request should be using. User-space is
+ * in charge of managing its own ring.
+ */
+ __u16 comp_idx;
+} __packed;
+
+/*
+ * SW KDETH header.
+ * swdata is SW defined portion.
+ */
+struct hfi1_kdeth_header {
+ __le32 ver_tid_offset;
+ __le16 jkey;
+ __le16 hcrc;
+ __le32 swdata[7];
+} __packed;
+
+/*
+ * Structure describing the headers that User space uses. The
+ * structure above is a subset of this one.
+ */
+struct hfi1_pkt_header {
+ __le16 pbc[4];
+ __be16 lrh[4];
+ __be32 bth[3];
+ struct hfi1_kdeth_header kdeth;
+} __packed;
+
+
+/*
+ * The list of usermode accessible registers.
+ */
+enum hfi1_ureg {
+ /* (RO) DMA RcvHdr to be used next. */
+ ur_rcvhdrtail = 0,
+ /* (RW) RcvHdr entry to be processed next by host. */
+ ur_rcvhdrhead = 1,
+ /* (RO) Index of next Eager index to use. */
+ ur_rcvegrindextail = 2,
+ /* (RW) Eager TID to be processed next */
+ ur_rcvegrindexhead = 3,
+ /* (RO) Receive Eager Offset Tail */
+ ur_rcvegroffsettail = 4,
+ /* For internal use only; max register number. */
+ ur_maxreg,
+ /* (RW) Receive TID flow table */
+ ur_rcvtidflowtable = 256
+};
+
+#endif /* _LINUX__HFI1_USER_H */
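For illustration (not part of the patch), this is how user space would pack the ctrl word of struct sdma_req_info from the bit layout documented above (bits 0-3 version, 4-7 opcode, 8-15 iovec count):

#include <rdma/hfi/hfi1_user.h>

static __u16 hfi1_sdma_ctrl(__u16 version, enum sdma_req_opcode op,
			    __u16 iovcnt)
{
	return ((version & HFI1_SDMA_REQ_VERSION_MASK)
			 << HFI1_SDMA_REQ_VERSION_SHIFT) |
	       (((__u16)op & HFI1_SDMA_REQ_OPCODE_MASK)
			 << HFI1_SDMA_REQ_OPCODE_SHIFT) |
	       ((iovcnt & HFI1_SDMA_REQ_IOVCNT_MASK)
			 << HFI1_SDMA_REQ_IOVCNT_SHIFT);
}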
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e4bb4270ca2..c19a5dc1531a 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -7,12 +7,14 @@ enum {
RDMA_NL_RDMA_CM = 1,
RDMA_NL_NES,
RDMA_NL_C4IW,
+ RDMA_NL_LS, /* RDMA Local Services */
RDMA_NL_NUM_CLIENTS
};
enum {
RDMA_NL_GROUP_CM = 1,
RDMA_NL_GROUP_IWPM,
+ RDMA_NL_GROUP_LS,
RDMA_NL_NUM_GROUPS
};
@@ -128,5 +130,85 @@ enum {
IWPM_NLA_ERR_MAX
};
+/*
+ * Local service operations:
+ * RESOLVE - The client requests the local service to resolve a path.
+ * SET_TIMEOUT - The local service requests the client to set the timeout.
+ */
+enum {
+ RDMA_NL_LS_OP_RESOLVE = 0,
+ RDMA_NL_LS_OP_SET_TIMEOUT,
+ RDMA_NL_LS_NUM_OPS
+};
+
+/* Local service netlink message flags */
+#define RDMA_NL_LS_F_ERR 0x0100 /* Failed response */
+
+/*
+ * Local service resolve operation family header.
+ * The layout for the resolve operation:
+ * nlmsg header
+ * family header
+ * attributes
+ */
+
+/*
+ * Local service path use:
+ * Specify how the path(s) will be used.
+ * ALL - For connected CM operation (6 pathrecords)
+ * UNIDIRECTIONAL - For unidirectional UD (1 pathrecord)
+ * GMP - For miscellaneous GMP like operation (at least 1 reversible
+ * pathrecord)
+ */
+enum {
+ LS_RESOLVE_PATH_USE_ALL = 0,
+ LS_RESOLVE_PATH_USE_UNIDIRECTIONAL,
+ LS_RESOLVE_PATH_USE_GMP,
+ LS_RESOLVE_PATH_USE_MAX
+};
+
+#define LS_DEVICE_NAME_MAX 64
+
+struct rdma_ls_resolve_header {
+ __u8 device_name[LS_DEVICE_NAME_MAX];
+ __u8 port_num;
+ __u8 path_use;
+};
+
+/* Local service attribute type */
+#define RDMA_NLA_F_MANDATORY (1 << 13)
+#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
+ RDMA_NLA_F_MANDATORY))
+
+/*
+ * Local service attributes:
+ * Attr Name Size Byte order
+ * -----------------------------------------------------
+ * PATH_RECORD struct ib_path_rec_data
+ * TIMEOUT u32 cpu
+ * SERVICE_ID u64 cpu
+ * DGID u8[16] BE
+ * SGID u8[16] BE
+ * TCLASS u8
+ * PKEY u16 cpu
+ * QOS_CLASS u16 cpu
+ */
+enum {
+ LS_NLA_TYPE_UNSPEC = 0,
+ LS_NLA_TYPE_PATH_RECORD,
+ LS_NLA_TYPE_TIMEOUT,
+ LS_NLA_TYPE_SERVICE_ID,
+ LS_NLA_TYPE_DGID,
+ LS_NLA_TYPE_SGID,
+ LS_NLA_TYPE_TCLASS,
+ LS_NLA_TYPE_PKEY,
+ LS_NLA_TYPE_QOS_CLASS,
+ LS_NLA_TYPE_MAX
+};
+
+/* Local service DGID/SGID attribute: big endian */
+struct rdma_nla_ls_gid {
+ __u8 gid[16];
+};
#endif /* _UAPI_RDMA_NETLINK_H */
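A small sketch (not part of the patch) of how a consumer of these local-service messages would separate the attribute type from the new mandatory flag, using the mask defined above:

#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

/* Strip the flag bits to recover the LS_NLA_TYPE_* value, and test the
 * mandatory flag separately. */
static int ls_nla_type(const struct nlattr *nla)
{
	return nla->nla_type & RDMA_NLA_TYPE_MASK;
}

static int ls_nla_is_mandatory(const struct nlattr *nla)
{
	return !!(nla->nla_type & RDMA_NLA_F_MANDATORY);
}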
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index a85316811d79..7ddeeda93809 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
struct privcmd_mmap_entry {
__u64 va;
+ /*
+ * This should be a GFN. It's not possible to change the name because
+ * it's exposed to user-space.
+ */
__u64 mfn;
__u64 npages;
};
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 0530e5a4c6b1..d8fc96ed11e9 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -296,6 +296,7 @@
/* Video buffer addresses */
#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8))
+#define VIDW_BUF_START_S(_buff) (0x40A0 + ((_buff) * 8))
#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8))
#define VIDW_BUF_END(_buff) (0xD0 + ((_buff) * 8))
#define VIDW_BUF_END1(_buff) (0xD4 + ((_buff) * 8))
diff --git a/include/video/vga.h b/include/video/vga.h
index cac567f22e62..d334e64c1c19 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -18,7 +18,7 @@
#define __linux_video_vga_h__
#include <linux/types.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/vga.h>
#include <asm/byteorder.h>
diff --git a/include/xen/events.h b/include/xen/events.h
index 7d95fdf9cf3e..88da2abaf535 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -92,7 +92,6 @@ void xen_hvm_callback_vector(void);
#ifdef CONFIG_TRACING
#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
#endif
-extern int xen_have_vector_callback;
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 70054cc0708d..252ffd4801ef 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -156,7 +156,9 @@ struct xen_netif_tx_request {
/* Types of xen_netif_extra_info descriptors. */
#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
-#define XEN_NETIF_EXTRA_TYPE_MAX (2)
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MAX (4)
/* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -201,6 +203,10 @@ struct xen_netif_extra_info {
uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
} gso;
+ struct {
+ uint8_t addr[6]; /* Address to add/remove. */
+ } mcast;
+
uint16_t pad[3];
} u;
};
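A frontend-side sketch (not part of the patch), in the style of a netfront driver, of filling one of the new multicast extra-info slots; extra is assumed to point at a free xen_netif_extra_info slot on the TX ring:

#include <linux/string.h>
#include <xen/interface/io/netif.h>

static void xennet_fill_mcast_add(struct xen_netif_extra_info *extra,
				  const unsigned char mac[6])
{
	memset(extra, 0, sizeof(*extra));
	extra->type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
	/* flags stays 0 unless another extra-info segment follows. */
	memcpy(extra->u.mcast.addr, mac, 6);
}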
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 5cc49ea8d840..8e035871360e 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -474,6 +474,23 @@ struct xenpf_core_parking {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
+#define XENPF_get_symbol 63
+struct xenpf_symdata {
+ /* IN/OUT variables */
+ uint32_t namelen; /* size of 'name' buffer */
+
+ /* IN/OUT variables */
+ uint32_t symnum; /* IN: Symbol to read */
+ /* OUT: Next available symbol. If same as IN */
+ /* then we reached the end */
+
+ /* OUT variables */
+ GUEST_HANDLE(char) name;
+ uint64_t address;
+ char type;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -495,6 +512,7 @@ struct xen_platform_op {
struct xenpf_cpu_hotadd cpu_add;
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
+ struct xenpf_symdata symdata;
uint8_t pad[128];
} u;
};
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index a48378958062..167071c290b3 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -80,6 +80,7 @@
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
+#define __HYPERVISOR_xenpmu_op 40
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@@ -112,6 +113,7 @@
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -585,26 +587,29 @@ struct shared_info {
};
/*
- * Start-of-day memory layout for the initial domain (DOM0):
+ * Start-of-day memory layout
+ *
* 1. The domain is started within contiguous virtual-memory region.
* 2. The contiguous region begins and ends on an aligned 4MB boundary.
- * 3. The region start corresponds to the load address of the OS image.
- * If the load address is not 4MB aligned then the address is rounded down.
- * 4. This the order of bootstrap elements in the initial virtual region:
+ * 3. This is the order of bootstrap elements in the initial virtual region:
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
+ * (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
- * e. bootstrap page tables [pt_base, CR3 (x86)]
- * f. bootstrap stack [register ESP (x86)]
- * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
- * 6. The initial ram disk may be omitted.
- * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base, CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
+ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
* layout for the domain. In particular, the bootstrap virtual-memory
* region is a 1:1 mapping to the first section of the pseudo-physical map.
- * 8. All bootstrap elements are mapped read-writable for the guest OS. The
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
* only exception is the bootstrap page table, which is mapped read-only.
- * 9. There is guaranteed to be at least 512kB padding after the final
+ * 7. There is guaranteed to be at least 512kB padding after the final
* bootstrap element. If necessary, the bootstrap virtual region is
* extended by an extra 4MB to ensure this.
*/
@@ -641,10 +646,12 @@ struct start_info {
};
/* These flags are passed in the 'flags' field of start_info_t. */
-#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
-#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
/*
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
new file mode 100644
index 000000000000..139efc91bceb
--- /dev/null
+++ b/include/xen/interface/xenpmu.h
@@ -0,0 +1,94 @@
+#ifndef __XEN_PUBLIC_XENPMU_H__
+#define __XEN_PUBLIC_XENPMU_H__
+
+#include "xen.h"
+
+#define XENPMU_VER_MAJ 0
+#define XENPMU_VER_MIN 1
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
+ *
+ * @cmd == XENPMU_* (PMU operation)
+ * @args == struct xenpmu_params
+ */
+/* ` enum xenpmu_op { */
+#define XENPMU_mode_get 0 /* Also used for getting PMU version */
+#define XENPMU_mode_set 1
+#define XENPMU_feature_get 2
+#define XENPMU_feature_set 3
+#define XENPMU_init 4
+#define XENPMU_finish 5
+#define XENPMU_lvtpc_set 6
+#define XENPMU_flush 7
+
+/* ` } */
+
+/* Parameters structure for HYPERVISOR_xenpmu_op call */
+struct xen_pmu_params {
+ /* IN/OUT parameters */
+ struct {
+ uint32_t maj;
+ uint32_t min;
+ } version;
+ uint64_t val;
+
+ /* IN parameters */
+ uint32_t vcpu;
+ uint32_t pad;
+};
+
+/* PMU modes:
+ * - XENPMU_MODE_OFF: No PMU virtualization
+ * - XENPMU_MODE_SELF: Guests can profile themselves
+ * - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles
+ * itself and Xen
+ * - XENPMU_MODE_ALL: Only dom0 has access to VPMU and it profiles
+ * everyone: itself, the hypervisor and the guests.
+ */
+#define XENPMU_MODE_OFF 0
+#define XENPMU_MODE_SELF (1<<0)
+#define XENPMU_MODE_HV (1<<1)
+#define XENPMU_MODE_ALL (1<<2)
+
+/*
+ * PMU features:
+ * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
+ */
+#define XENPMU_FEATURE_INTEL_BTS 1
+
+/*
+ * Shared PMU data between hypervisor and PV(H) domains.
+ *
+ * The hypervisor fills out this structure during PMU interrupt and sends an
+ * interrupt to appropriate VCPU.
+ * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
+ * and RO for the guest but some fields in xen_pmu_arch can be writable
+ * by both the hypervisor and the guest (see arch-$arch/pmu.h).
+ */
+struct xen_pmu_data {
+ /* Interrupted VCPU */
+ uint32_t vcpu_id;
+
+ /*
+ * Physical processor on which the interrupt occurred. On non-privileged
+ * guests set to vcpu_id;
+ */
+ uint32_t pcpu_id;
+
+ /*
+ * Domain that was interrupted. On non-privileged guests set to
+ * DOMID_SELF.
+ * On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
+ * XENPMU_MODE_ALL mode, domain ID of another domain.
+ */
+ domid_t domain_id;
+
+ uint8_t pad[6];
+
+ /* Architecture-specific information */
+ struct xen_pmu_arch pmu;
+};
+
+#endif /* __XEN_PUBLIC_XENPMU_H__ */
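A guest-kernel-side sketch (not part of the patch); the HYPERVISOR_xenpmu_op hypercall wrapper is assumed to exist as in the accompanying Xen PMU patches. It asks the hypervisor for self-profiling mode:

#include <asm/xen/hypercall.h>
#include <xen/interface/xenpmu.h>

static int xen_pmu_request_self_mode(void)
{
	struct xen_pmu_params xp = {
		.version.maj = XENPMU_VER_MAJ,
		.version.min = XENPMU_VER_MIN,
		.val = XENPMU_MODE_SELF,	/* mode is passed in .val */
	};

	return HYPERVISOR_xenpmu_op(XENPMU_mode_set, &xp);
}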
diff --git a/include/xen/page.h b/include/xen/page.h
index c5ed20bb3fe9..1daae485e336 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,14 +3,14 @@
#include <asm/xen/page.h>
-static inline unsigned long page_to_mfn(struct page *page)
+static inline unsigned long xen_page_to_gfn(struct page *page)
{
- return pfn_to_mfn(page_to_pfn(page));
+ return pfn_to_gfn(page_to_pfn(page));
}
struct xen_memory_region {
- phys_addr_t start;
- phys_addr_t size;
+ unsigned long start_pfn;
+ unsigned long n_pfns;
};
#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 0ce4f32017ea..e4e214a5abd5 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
/*
- * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages);
-/* xen_remap_domain_mfn_range() - map a range of foreign frames
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,