Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpi_bus.h | 41
-rw-r--r-- include/acpi/acpiosxf.h | 2
-rw-r--r-- include/acpi/acpixf.h | 2
-rw-r--r-- include/acpi/actbl.h | 17
-rw-r--r-- include/acpi/actbl1.h | 176
-rw-r--r-- include/acpi/actbl2.h | 146
-rw-r--r-- include/acpi/actbl3.h | 88
-rw-r--r-- include/acpi/actypes.h | 6
-rw-r--r-- include/acpi/acuuid.h | 89
-rw-r--r-- include/acpi/platform/acenv.h | 3
-rw-r--r-- include/acpi/platform/acenvex.h | 3
-rw-r--r-- include/acpi/video.h | 21
-rw-r--r-- include/asm-generic/barrier.h | 4
-rw-r--r-- include/asm-generic/cmpxchg.h | 3
-rw-r--r-- include/asm-generic/gpio.h | 5
-rw-r--r-- include/asm-generic/io.h | 17
-rw-r--r-- include/asm-generic/iomap.h | 4
-rw-r--r-- include/asm-generic/pci.h | 13
-rw-r--r-- include/asm-generic/pgtable.h | 4
-rw-r--r-- include/asm-generic/qspinlock.h | 139
-rw-r--r-- include/asm-generic/qspinlock_types.h | 79
-rw-r--r-- include/crypto/aead.h | 533
-rw-r--r-- include/crypto/akcipher.h | 340
-rw-r--r-- include/crypto/algapi.h | 35
-rw-r--r-- include/crypto/compress.h | 8
-rw-r--r-- include/crypto/cryptd.h | 1
-rw-r--r-- include/crypto/drbg.h | 59
-rw-r--r-- include/crypto/hash.h | 2
-rw-r--r-- include/crypto/internal/aead.h | 102
-rw-r--r-- include/crypto/internal/akcipher.h | 60
-rw-r--r-- include/crypto/internal/geniv.h | 24
-rw-r--r-- include/crypto/internal/rng.h | 21
-rw-r--r-- include/crypto/internal/rsa.h | 27
-rw-r--r-- include/crypto/md5.h | 5
-rw-r--r-- include/crypto/null.h | 3
-rw-r--r-- include/crypto/rng.h | 100
-rw-r--r-- include/crypto/scatterwalk.h | 4
-rw-r--r-- include/dt-bindings/mfd/arizona.h | 14
-rw-r--r-- include/dt-bindings/mfd/st-lpc.h | 15
-rw-r--r-- include/linux/acpi.h | 76
-rw-r--r-- include/linux/backing-dev.h | 1
-rw-r--r-- include/linux/basic_mmio_gpio.h | 1
-rw-r--r-- include/linux/clk.h | 27
-rw-r--r-- include/linux/clkdev.h | 6
-rw-r--r-- include/linux/compiler.h | 20
-rw-r--r-- include/linux/context_tracking.h | 10
-rw-r--r-- include/linux/context_tracking_state.h | 1
-rw-r--r-- include/linux/cpufreq.h | 5
-rw-r--r-- include/linux/cpuidle.h | 20
-rw-r--r-- include/linux/crc-itu-t.h | 2
-rw-r--r-- include/linux/crypto.h | 501
-rw-r--r-- include/linux/debugfs.h | 1
-rw-r--r-- include/linux/dmar.h | 85
-rw-r--r-- include/linux/efi.h | 12
-rw-r--r-- include/linux/fs.h | 16
-rw-r--r-- include/linux/gpio.h | 7
-rw-r--r-- include/linux/gpio/consumer.h | 43
-rw-r--r-- include/linux/gpio/driver.h | 13
-rw-r--r-- include/linux/hid.h | 2
-rw-r--r-- include/linux/hrtimer.h | 4
-rw-r--r-- include/linux/htirq.h | 22
-rw-r--r-- include/linux/i2c/twl.h | 1
-rw-r--r-- include/linux/intel-iommu.h | 13
-rw-r--r-- include/linux/io.h | 8
-rw-r--r-- include/linux/iommu.h | 44
-rw-r--r-- include/linux/irq.h | 88
-rw-r--r-- include/linux/irqdesc.h | 12
-rw-r--r-- include/linux/irqdomain.h | 8
-rw-r--r-- include/linux/kvm_host.h | 96
-rw-r--r-- include/linux/kvm_types.h | 1
-rw-r--r-- include/linux/lglock.h | 5
-rw-r--r-- include/linux/libata.h | 2
-rw-r--r-- include/linux/livepatch.h | 8
-rw-r--r-- include/linux/lockdep.h | 4
-rw-r--r-- include/linux/mbus.h | 5
-rw-r--r-- include/linux/mfd/arizona/core.h | 9
-rw-r--r-- include/linux/mfd/arizona/pdata.h | 5
-rw-r--r-- include/linux/mfd/arizona/registers.h | 27
-rw-r--r-- include/linux/mfd/axp20x.h | 93
-rw-r--r-- include/linux/mfd/cros_ec.h | 86
-rw-r--r-- include/linux/mfd/cros_ec_commands.h | 277
-rw-r--r-- include/linux/mfd/da9055/core.h | 2
-rw-r--r-- include/linux/mfd/max77686.h | 5
-rw-r--r-- include/linux/mlx4/device.h | 9
-rw-r--r-- include/linux/mlx5/driver.h | 2
-rw-r--r-- include/linux/mmc/card.h | 2
-rw-r--r-- include/linux/mmc/core.h | 1
-rw-r--r-- include/linux/mmc/dw_mmc.h | 6
-rw-r--r-- include/linux/mmc/host.h | 28
-rw-r--r-- include/linux/mmc/mmc.h | 4
-rw-r--r-- include/linux/mmc/sdhci-pci-data.h | 2
-rw-r--r-- include/linux/module.h | 12
-rw-r--r-- include/linux/mpi.h | 15
-rw-r--r-- include/linux/mtd/cfi.h | 188
-rw-r--r-- include/linux/mtd/nand.h | 6
-rw-r--r-- include/linux/namei.h | 41
-rw-r--r-- include/linux/netfilter_bridge/ebtables.h | 2
-rw-r--r-- include/linux/nx842.h | 11
-rw-r--r-- include/linux/of.h | 6
-rw-r--r-- include/linux/of_fdt.h | 2
-rw-r--r-- include/linux/osq_lock.h | 5
-rw-r--r-- include/linux/pci.h | 44
-rw-r--r-- include/linux/pci_ids.h | 1
-rw-r--r-- include/linux/perf_event.h | 46
-rw-r--r-- include/linux/platform_data/gpio-omap.h | 12
-rw-r--r-- include/linux/platform_data/irq-renesas-irqc.h | 27
-rw-r--r-- include/linux/platform_data/ntc_thermistor.h | 1
-rw-r--r-- include/linux/platform_data/video-msm_fb.h | 146
-rw-r--r-- include/linux/pm.h | 14
-rw-r--r-- include/linux/pm_clock.h | 10
-rw-r--r-- include/linux/pm_wakeirq.h | 51
-rw-r--r-- include/linux/pm_wakeup.h | 9
-rw-r--r-- include/linux/power/max17042_battery.h | 4
-rw-r--r-- include/linux/power_supply.h | 11
-rw-r--r-- include/linux/property.h | 2
-rw-r--r-- include/linux/pwm.h | 12
-rw-r--r-- include/linux/pxa2xx_ssp.h | 3
-rw-r--r-- include/linux/random.h | 9
-rw-r--r-- include/linux/rculist.h | 10
-rw-r--r-- include/linux/rcupdate.h | 70
-rw-r--r-- include/linux/rcutiny.h | 16
-rw-r--r-- include/linux/rcutree.h | 7
-rw-r--r-- include/linux/regmap.h | 14
-rw-r--r-- include/linux/regulator/driver.h | 11
-rw-r--r-- include/linux/regulator/machine.h | 9
-rw-r--r-- include/linux/regulator/max8973-regulator.h | 4
-rw-r--r-- include/linux/rio.h | 2
-rw-r--r-- include/linux/scatterlist.h | 1
-rw-r--r-- include/linux/sched.h | 27
-rw-r--r-- include/linux/sched/sysctl.h | 12
-rw-r--r-- include/linux/security.h | 13
-rw-r--r-- include/linux/spinlock.h | 2
-rw-r--r-- include/linux/sw842.h | 12
-rw-r--r-- include/linux/tick.h | 19
-rw-r--r-- include/linux/timer.h | 56
-rw-r--r-- include/linux/types.h | 12
-rw-r--r-- include/misc/cxl-base.h | 48
-rw-r--r-- include/misc/cxl.h | 207
-rw-r--r-- include/net/xfrm.h | 3
-rw-r--r-- include/rdma/ib_addr.h | 6
-rw-r--r-- include/rdma/ib_cache.h | 8
-rw-r--r-- include/rdma/ib_mad.h | 41
-rw-r--r-- include/rdma/ib_verbs.h | 394
-rw-r--r-- include/rdma/iw_cm.h | 1
-rw-r--r-- include/rdma/opa_smi.h | 106
-rw-r--r-- include/rdma/rdma_cm.h | 2
-rw-r--r-- include/scsi/scsi.h | 291
-rw-r--r-- include/scsi/scsi_common.h | 64
-rw-r--r-- include/scsi/scsi_device.h | 2
-rw-r--r-- include/scsi/scsi_eh.h | 31
-rw-r--r-- include/scsi/scsi_proto.h | 281
-rw-r--r-- include/scsi/srp.h | 7
-rw-r--r-- include/sound/hda_regmap.h | 2
-rw-r--r-- include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r-- include/target/target_core_base.h | 15
-rw-r--r-- include/trace/events/power.h | 25
-rw-r--r-- include/trace/events/target.h | 2
-rw-r--r-- include/trace/events/timer.h | 13
-rw-r--r-- include/trace/events/writeback.h | 1
-rw-r--r-- include/uapi/drm/radeon_drm.h | 1
-rw-r--r-- include/uapi/linux/cryptouser.h (renamed from include/linux/cryptouser.h) | 6
-rw-r--r-- include/uapi/linux/hsi/cs-protocol.h | 16
-rw-r--r-- include/uapi/linux/kvm.h | 9
-rw-r--r-- include/uapi/linux/netfilter_bridge/ebtables.h | 2
-rw-r--r-- include/uapi/linux/perf_event.h | 18
-rw-r--r-- include/uapi/linux/vfio.h | 102
-rw-r--r-- include/uapi/misc/cxl.h | 22
-rw-r--r-- include/uapi/rdma/ib_user_verbs.h | 19
-rw-r--r-- include/video/neomagic.h | 5
-rw-r--r-- include/video/tdfx.h | 2
170 files changed, 4780 insertions, 1903 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 8de4fa90e8c4..b43276f339ef 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -208,7 +208,10 @@ struct acpi_device_flags {
u32 visited:1;
u32 hotplug_notify:1;
u32 is_dock_station:1;
- u32 reserved:23;
+ u32 of_compatible_ok:1;
+ u32 coherent_dma:1;
+ u32 cca_seen:1;
+ u32 reserved:20;
};
/* File System */
@@ -271,7 +274,6 @@ struct acpi_device_power_flags {
struct acpi_device_power_state {
struct {
u8 valid:1;
- u8 os_accessible:1;
u8 explicit_set:1; /* _PSx present? */
u8 reserved:6;
} flags;
@@ -380,6 +382,39 @@ struct acpi_device {
void (*remove)(struct acpi_device *);
};
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ bool ret = false;
+
+ if (!adev)
+ return ret;
+
+ /**
+ * Currently, we only support _CCA=1 (i.e. coherent_dma=1)
+ * This should be equivalent to specifying dma-coherent for
+ * a device in OF.
+ *
+ * For the case when _CCA=0 (i.e. coherent_dma=0 && cca_seen=1),
+ * there are two cases:
+ * case 1. Do not support and disable DMA.
+ * case 2. Support but rely on arch-specific cache maintenance for
+ * non-coherent DMA operations.
+ * Currently, we implement case 1 above.
+ *
+ * For the case when _CCA is missing (i.e. cca_seen=0) and
+ * platform specifies ACPI_CCA_REQUIRED, we do not support DMA,
+ * and fall back to arch-specific default handling.
+ *
+ * See acpi_init_coherency() for more info.
+ */
+ if (adev->flags.coherent_dma) {
+ ret = true;
+ if (coherent)
+ *coherent = adev->flags.coherent_dma;
+ }
+ return ret;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_ACPI;
@@ -601,7 +636,7 @@ static inline bool acpi_device_can_wakeup(struct acpi_device *adev)
static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
- return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible;
+ return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
}
#else /* CONFIG_ACPI */
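
A minimal sketch (not part of this diff) of how a bus or driver probe might consult the new acpi_check_dma() helper; the probe function and its caller are hypothetical and error handling is trimmed:

#include <linux/acpi.h>
#include <linux/device.h>

/* Hypothetical probe: refuse to bind when ACPI does not describe the
 * device as DMA-coherent (the only case acpi_check_dma() accepts today). */
static int example_probe(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	bool coherent;

	if (!acpi_check_dma(adev, &coherent))
		return -ENODEV;

	dev_info(dev, "DMA is %scoherent\n", coherent ? "" : "non-");
	return 0;
}
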
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 0bc78df66d4b..d02df0a49d98 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -95,7 +95,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- acpi_string * new_val);
+ char **new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 08ef57bc8d63..d68f1cd39c49 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20150410
+#define ACPI_CA_VERSION 0x20150515
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d4081fef1095..cb8a6b97ceda 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -284,6 +284,7 @@ struct acpi_table_fadt {
struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */
struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */
+ u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */
};
/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -341,7 +342,7 @@ enum acpi_preferred_pm_profiles {
PM_TABLET = 8
};
-/* Values for sleep_status and sleep_control registers (V5 FADT) */
+/* Values for sleep_status and sleep_control registers (V5+ FADT) */
#define ACPI_X_WAKE_STATUS 0x80
#define ACPI_X_SLEEP_TYPE_MASK 0x1C
@@ -398,15 +399,17 @@ struct acpi_table_desc {
* FADT is the bottom line as to what the version really is.
*
* For reference, the values below are as follows:
- * FADT V1 size: 0x074
- * FADT V2 size: 0x084
- * FADT V3 size: 0x0F4
- * FADT V4 size: 0x0F4
- * FADT V5 size: 0x10C
+ * FADT V1 size: 0x074
+ * FADT V2 size: 0x084
+ * FADT V3 size: 0x0F4
+ * FADT V4 size: 0x0F4
+ * FADT V5 size: 0x10C
+ * FADT V6 size: 0x114
*/
#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
-#define ACPI_FADT_V5_SIZE (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index b80b0e6dabc5..06b61f01ea59 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -71,6 +71,7 @@
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
+#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -673,7 +674,8 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
- ACPI_MADT_TYPE_RESERVED = 15 /* 15 and greater are reserved */
+ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
+ ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
};
/*
@@ -794,7 +796,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic Interrupt (ACPI 5.0) */
+/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -811,6 +813,8 @@ struct acpi_madt_generic_interrupt {
u32 vgic_interrupt;
u64 gicr_base_address;
u64 arm_mpidr;
+ u8 efficiency_class;
+ u8 reserved2[3];
};
/* Masks for Flags field above */
@@ -819,7 +823,7 @@ struct acpi_madt_generic_interrupt {
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
-/* 12: Generic Distributor (ACPI 5.0) */
+/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
struct acpi_madt_generic_distributor {
struct acpi_subtable_header header;
@@ -827,7 +831,8 @@ struct acpi_madt_generic_distributor {
u32 gic_id;
u64 base_address;
u32 global_irq_base;
- u32 reserved2; /* reserved - must be zero */
+ u8 version;
+ u8 reserved2[3]; /* reserved - must be zero */
};
/* 13: Generic MSI Frame (ACPI 5.1) */
@@ -855,6 +860,16 @@ struct acpi_madt_generic_redistributor {
u32 length;
};
+/* 15: Generic Translator (ACPI 6.0) */
+
+struct acpi_madt_generic_translator {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 translation_id;
+ u64 base_address;
+ u32 reserved2;
+};
+
/*
* Common flags fields for MADT subtables
*/
@@ -908,6 +923,159 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
+ * NFIT - NVDIMM Interface Table (ACPI 6.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_nfit {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved; /* Reserved, must be zero */
+};
+
+/* Subtable header for NFIT */
+
+struct acpi_nfit_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for subtable type in struct acpi_nfit_header */
+
+enum acpi_nfit_type {
+ ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
+ ACPI_NFIT_TYPE_MEMORY_MAP = 1,
+ ACPI_NFIT_TYPE_INTERLEAVE = 2,
+ ACPI_NFIT_TYPE_SMBIOS = 3,
+ ACPI_NFIT_TYPE_CONTROL_REGION = 4,
+ ACPI_NFIT_TYPE_DATA_REGION = 5,
+ ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
+ ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
+};
+
+/*
+ * NFIT Subtables
+ */
+
+/* 0: System Physical Address Range Structure */
+
+struct acpi_nfit_system_address {
+ struct acpi_nfit_header header;
+ u16 range_index;
+ u16 flags;
+ u32 reserved; /* Reserved, must be zero */
+ u32 proximity_domain;
+ u8 range_guid[16];
+ u64 address;
+ u64 length;
+ u64 memory_mapping;
+};
+
+/* Flags */
+
+#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
+#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
+
+/* Range Type GUIDs appear in the include/acuuid.h file */
+
+/* 1: Memory Device to System Address Range Map Structure */
+
+struct acpi_nfit_memory_map {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 physical_id;
+ u16 region_id;
+ u16 range_index;
+ u16 region_index;
+ u64 region_size;
+ u64 region_offset;
+ u64 address;
+ u16 interleave_index;
+ u16 interleave_ways;
+ u16 flags;
+ u16 reserved; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */
+#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */
+#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */
+#define ACPI_NFIT_MEM_ARMED (1<<3) /* 03: Memory Device observed to be not armed */
+#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
+#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
+
+/* 2: Interleave Structure */
+
+struct acpi_nfit_interleave {
+ struct acpi_nfit_header header;
+ u16 interleave_index;
+ u16 reserved; /* Reserved, must be zero */
+ u32 line_count;
+ u32 line_size;
+ u32 line_offset[1]; /* Variable length */
+};
+
+/* 3: SMBIOS Management Information Structure */
+
+struct acpi_nfit_smbios {
+ struct acpi_nfit_header header;
+ u32 reserved; /* Reserved, must be zero */
+ u8 data[1]; /* Variable length */
+};
+
+/* 4: NVDIMM Control Region Structure */
+
+struct acpi_nfit_control_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_revision_id;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u32 serial_number;
+ u16 code;
+ u16 windows;
+ u64 window_size;
+ u64 command_offset;
+ u64 command_size;
+ u64 status_offset;
+ u64 status_size;
+ u16 flags;
+ u8 reserved1[6]; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+
+/* 5: NVDIMM Block Data Window Region Structure */
+
+struct acpi_nfit_data_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 windows;
+ u64 offset;
+ u64 size;
+ u64 capacity;
+ u64 start_address;
+};
+
+/* 6: Flush Hint Address Structure */
+
+struct acpi_nfit_flush_address {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 hint_count;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u64 hint_address[1]; /* Variable length */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index cafdeb50fbdf..370d69d871a0 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -69,6 +69,7 @@
#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
@@ -650,6 +651,131 @@ struct acpi_ibft_target {
/*******************************************************************************
*
+ * IORT - IO Remapping Table
+ *
+ * Conforms to "IO Remapping Table System Software on ARM Platforms",
+ * Document number: ARM DEN 0049A, 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_iort {
+ struct acpi_table_header header;
+ u32 node_count;
+ u32 node_offset;
+ u32 reserved;
+};
+
+/*
+ * IORT subtables
+ */
+struct acpi_iort_node {
+ u8 type;
+ u16 length;
+ u8 revision;
+ u32 reserved;
+ u32 mapping_count;
+ u32 mapping_offset;
+ char node_data[1];
+};
+
+/* Values for subtable Type above */
+
+enum acpi_iort_node_type {
+ ACPI_IORT_NODE_ITS_GROUP = 0x00,
+ ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
+ ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
+ ACPI_IORT_NODE_SMMU = 0x03
+};
+
+struct acpi_iort_id_mapping {
+ u32 input_base; /* Lowest value in input range */
+ u32 id_count; /* Number of IDs */
+ u32 output_base; /* Lowest value in output range */
+ u32 output_reference; /* A reference to the output node */
+ u32 flags;
+};
+
+/* Masks for Flags field above for IORT subtable */
+
+#define ACPI_IORT_ID_SINGLE_MAPPING (1)
+
+struct acpi_iort_memory_access {
+ u32 cache_coherency;
+ u8 hints;
+ u16 reserved;
+ u8 memory_flags;
+};
+
+/* Values for cache_coherency field above */
+
+#define ACPI_IORT_NODE_COHERENT 0x00000001 /* The device node is fully coherent */
+#define ACPI_IORT_NODE_NOT_COHERENT 0x00000000 /* The device node is not coherent */
+
+/* Masks for Hints field above */
+
+#define ACPI_IORT_HT_TRANSIENT (1)
+#define ACPI_IORT_HT_WRITE (1<<1)
+#define ACPI_IORT_HT_READ (1<<2)
+#define ACPI_IORT_HT_OVERRIDE (1<<3)
+
+/* Masks for memory_flags field above */
+
+#define ACPI_IORT_MF_COHERENCY (1)
+#define ACPI_IORT_MF_ATTRIBUTES (1<<1)
+
+/*
+ * IORT node specific subtables
+ */
+struct acpi_iort_its_group {
+ u32 its_count;
+ u32 identifiers[1]; /* GIC ITS identifier array */
+};
+
+struct acpi_iort_named_component {
+ u32 node_flags;
+ u64 memory_properties; /* Memory access properties */
+ u8 memory_address_limit; /* Memory address size limit */
+ char device_name[1]; /* Path of namespace object */
+};
+
+struct acpi_iort_root_complex {
+ u64 memory_properties; /* Memory access properties */
+ u32 ats_attribute;
+ u32 pci_segment_number;
+};
+
+/* Values for ats_attribute field above */
+
+#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */
+#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */
+
+struct acpi_iort_smmu {
+ u64 base_address; /* SMMU base address */
+ u64 span; /* Length of memory range */
+ u32 model;
+ u32 flags;
+ u32 global_interrupt_offset;
+ u32 context_interrupt_count;
+ u32 context_interrupt_offset;
+ u32 pmu_interrupt_count;
+ u32 pmu_interrupt_offset;
+ u64 interrupts[1]; /* Interrupt array */
+};
+
+/* Values for Model field above */
+
+#define ACPI_IORT_SMMU_V1 0x00000000 /* Generic SMMUv1 */
+#define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */
+#define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */
+#define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink MMU-500 */
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
+#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+
+/*******************************************************************************
+ *
* IVRS - I/O Virtualization Reporting Structure
* Version 1
*
@@ -824,7 +950,7 @@ struct acpi_ivrs_memory {
*
* LPIT - Low Power Idle Table
*
- * Conforms to "ACPI Low Power Idle Table (LPIT) and _LPD Proposal (DRAFT)"
+ * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014.
*
******************************************************************************/
@@ -846,8 +972,7 @@ struct acpi_lpit_header {
enum acpi_lpit_type {
ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00,
- ACPI_LPIT_TYPE_SIMPLE_IO = 0x01,
- ACPI_LPIT_TYPE_RESERVED = 0x02 /* 2 and above are reserved */
+ ACPI_LPIT_TYPE_RESERVED = 0x01 /* 1 and above are reserved */
};
/* Masks for Flags field above */
@@ -870,21 +995,6 @@ struct acpi_lpit_native {
u64 counter_frequency;
};
-/* 0x01: Simple I/O based LPI structure */
-
-struct acpi_lpit_io {
- struct acpi_lpit_header header;
- struct acpi_generic_address entry_trigger;
- u32 trigger_action;
- u64 trigger_value;
- u64 trigger_mask;
- struct acpi_generic_address minimum_idle_state;
- u32 residency;
- u32 latency;
- struct acpi_generic_address residency_counter;
- u64 counter_frequency;
-};
-
/*******************************************************************************
*
* MCFG - PCI Memory Mapped Configuration table and subtable
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 440ca8104b43..4018986d2a2e 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -68,7 +68,10 @@
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_STAO "STAO" /* Status Override table */
#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
+#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
+#define ACPI_SIG_XENV "XENV" /* Xen Environment table */
#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
@@ -77,7 +80,6 @@
#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
-#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -117,6 +119,8 @@ struct acpi_table_bgrt {
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
+ * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0
+ * Table version 1
*
******************************************************************************/
@@ -133,22 +137,40 @@ struct acpi_table_drtm {
u32 flags;
};
-/* 1) Validated Tables List */
+/* Flag Definitions for above */
+
+#define ACPI_DRTM_ACCESS_ALLOWED (1)
+#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1)
+#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2)
+#define ACPI_DRTM_AUTHORITY_ORDER (1<<3)
-struct acpi_drtm_vtl_list {
- u32 validated_table_list_count;
+/* 1) Validated Tables List (64-bit addresses) */
+
+struct acpi_drtm_vtable_list {
+ u32 validated_table_count;
+ u64 validated_tables[1];
};
-/* 2) Resources List */
+/* 2) Resources List (of Resource Descriptors) */
+
+/* Resource Descriptor */
+
+struct acpi_drtm_resource {
+ u8 size[7];
+ u8 type;
+ u64 address;
+};
struct acpi_drtm_resource_list {
- u32 resource_list_count;
+ u32 resource_count;
+ struct acpi_drtm_resource resources[1];
};
/* 3) Platform-specific Identifiers List */
-struct acpi_drtm_id_list {
- u32 id_list_count;
+struct acpi_drtm_dps_id {
+ u32 dps_id_length;
+ u8 dps_id[16];
};
/*******************************************************************************
@@ -685,6 +707,21 @@ enum acpi_rasf_status {
/*******************************************************************************
*
+ * STAO - Status Override Table (_STA override) - ACPI 6.0
+ * Version 1
+ *
+ * Conforms to "ACPI Specification for Status Override Table"
+ * 6 January 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_stao {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 ignore_uart;
+};
+
+/*******************************************************************************
+ *
* TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
* Version 3
*
@@ -713,6 +750,41 @@ struct acpi_tpm2_control {
u64 response_address;
};
+/*******************************************************************************
+ *
+ * WPBT - Windows Platform Environment Table (ACPI 6.0)
+ * Version 1
+ *
+ * Conforms to "Windows Platform Binary Table (WPBT)" 29 November 2011
+ *
+ ******************************************************************************/
+
+struct acpi_table_wpbt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 handoff_size;
+ u64 handoff_address;
+ u8 layout;
+ u8 type;
+ u16 arguments_length;
+};
+
+/*******************************************************************************
+ *
+ * XENV - Xen Environment Table (ACPI 6.0)
+ * Version 1
+ *
+ * Conforms to "ACPI Specification for Xen Environment Table" 4 January 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_xenv {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u64 grant_table_address;
+ u64 grant_table_size;
+ u32 event_interrupt;
+ u8 event_flags;
+};
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 1c3002e1db20..63fd7f5e9fb3 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -471,11 +471,6 @@ typedef u8 acpi_owner_id;
#define ACPI_INTEGER_BIT_SIZE 64
#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
-
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */
-#endif
-
#define ACPI_MAX64_DECIMAL_DIGITS 20
#define ACPI_MAX32_DECIMAL_DIGITS 10
#define ACPI_MAX16_DECIMAL_DIGITS 5
@@ -530,6 +525,7 @@ typedef u64 acpi_integer;
#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
+#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
/* Pointer/Integer type conversions */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
new file mode 100644
index 000000000000..80fe8cf74d7a
--- /dev/null
+++ b/include/acpi/acuuid.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Name: acuuid.h - ACPI-related UUID/GUID definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACUUID_H__
+#define __ACUUID_H__
+
+/*
+ * Note1: UUIDs and GUIDs are defined to be identical in ACPI.
+ *
+ * Note2: This file is standalone and should remain that way.
+ */
+
+/* Controllers */
+
+#define UUID_GPIO_CONTROLLER "4f248f40-d5e2-499f-834c-27758ea1cd3f"
+#define UUID_USB_CONTROLLER "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define UUID_SATA_CONTROLLER "e4db149b-fcfe-425b-a6d8-92357d78fc7f"
+
+/* Devices */
+
+#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766"
+#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de"
+#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
+
+/* Interfaces */
+
+#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
+#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653"
+
+/* NVDIMM - NFIT table */
+
+#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0"
+#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
+#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c"
+#define UUID_DATA_REGION "91af0530-5d86-470e-a6b0-0a2db9408249"
+#define UUID_VOLATILE_VIRTUAL_DISK "77ab535a-45fc-624b-5560-f7b281d1f96e"
+#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
+#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9"
+#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d"
+
+/* Miscellaneous */
+
+#define UUID_PLATFORM_CAPABILITIES "0811b06e-4a27-44f9-8d60-3cbbc22e7b48"
+#define UUID_DYNAMIC_ENUMERATION "d8c1a3a6-be9b-4c9b-91bf-c3cb81fc5daf"
+#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e"
+#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
+#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
+
+#endif /* __ACUUID_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ecdf9405dd3a..073997d729e9 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -175,6 +175,9 @@
#elif defined(_APPLE) || defined(__APPLE__)
#include "acmacosx.h"
+#elif defined(__DragonFly__)
+#include "acdragonfly.h"
+
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include "acfreebsd.h"
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 71e5ec5b07a3..14dc6f68ca18 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,9 @@
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinuxex.h>
+#elif defined(__DragonFly__)
+#include "acdragonflyex.h"
+
#endif
/*! [End] no source code translation !*/
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 843ef1adfbfa..a7d7f1043e9c 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -16,23 +16,36 @@ struct acpi_device;
#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
+enum acpi_backlight_type {
+ acpi_backlight_undef = -1,
+ acpi_backlight_none = 0,
+ acpi_backlight_video,
+ acpi_backlight_vendor,
+ acpi_backlight_native,
+};
+
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
-extern void acpi_video_unregister_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
-extern bool acpi_video_verify_backlight_support(void);
+extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
+extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
-static inline void acpi_video_unregister_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
return -ENODEV;
}
-static inline bool acpi_video_verify_backlight_support(void) { return false; }
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+ return acpi_backlight_vendor;
+}
+static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+{
+}
#endif
#endif
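
A minimal sketch (not part of this diff) of how a vendor platform driver might use the new helper to decide whether to register its own backlight interface; the function name is hypothetical:

#include <acpi/video.h>

/* Hypothetical check: only register a vendor backlight device when
 * acpi_video_get_backlight_type() says the vendor driver should own it. */
static bool example_should_register_vendor_backlight(void)
{
	return acpi_video_get_backlight_type() == acpi_backlight_vendor;
}
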
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index f5c40b0fadc2..e6a83d712ef6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
#define smp_read_barrier_depends() do { } while (0)
#endif
-#ifndef set_mb
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#endif
#ifndef smp_mb__before_atomic
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 811fb1e9b061..3766ab34aa45 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -86,9 +86,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
/*
* Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#include <asm-generic/cmpxchg-local.h>
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 9bb0d11729c9..40ec1433f05d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -128,11 +128,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return gpiod_export_link(dev, name, gpio_to_desc(gpio));
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
-}
-
static inline void gpio_unexport(unsigned gpio)
{
gpiod_unexport(gpio_to_desc(gpio));
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 9db042304df3..f56094cfdeff 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -769,6 +769,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
@@ -777,8 +785,17 @@ static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef iounmap
#define iounmap iounmap
+
static inline void iounmap(void __iomem *addr)
{
}
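
A minimal sketch (not part of this diff) showing the intended use of the new ioremap_wt()/ioremap_uc() fallbacks; on architectures that do not override them they degrade to ioremap_nocache(), as the generic definitions above show. The mapping helper is hypothetical:

#include <linux/io.h>

/* Hypothetical frame-buffer mapping: ask for a write-through mapping,
 * accepting an uncached one on architectures without a real ioremap_wt(). */
static void __iomem *example_map_fb(phys_addr_t base, size_t len)
{
	return ioremap_wt(base, len);
}
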
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 1b41011643a5..d8f8622fa044 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -66,6 +66,10 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wc ioremap_nocache
#endif
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index e80a0495e5b0..f24bc519bf31 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -6,19 +6,6 @@
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 39f1d6a2b04d..bd910ceaccfa 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -262,6 +262,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000000..83bfb87f5bf1
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,139 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ * locked with respect to the lockref code, so that the lockref code cannot
+ * steal the lock and change things underneath it. This also allows some
+ * optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+ return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ if (!atomic_read(&lock->val) &&
+ (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val;
+
+ val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+ if (likely(val == 0))
+ return;
+ queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /*
+ * smp_mb__before_atomic() in order to guarantee release semantics
+ */
+ smp_mb__before_atomic();
+ atomic_sub(_Q_LOCKED_VAL, &lock->val);
+}
+#endif
+
+/**
+ * queued_spin_unlock_wait - wait until current lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+}
+
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
+
+/*
+ * Initializer
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l) queued_spin_is_locked(l)
+#define arch_spin_is_contended(l) queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
+#define arch_spin_lock(l) queued_spin_lock(l)
+#define arch_spin_trylock(l) queued_spin_trylock(l)
+#define arch_spin_unlock(l) queued_spin_unlock(l)
+#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
+#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
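
For illustration only, a standalone C11 model (userspace code, not part of this diff) of the uncontended fast path above: trylock is a 0 -> _Q_LOCKED_VAL compare-and-swap and unlock subtracts the locked byte back out; the queueing slow path is omitted entirely:

#include <stdatomic.h>
#include <stdio.h>

#define MODEL_Q_LOCKED_VAL 1U

struct model_qspinlock { atomic_uint val; };

/* Mirror of queued_spin_trylock(): acquire only if the word is zero. */
static int model_trylock(struct model_qspinlock *lock)
{
	unsigned int expected = 0;
	return atomic_compare_exchange_strong(&lock->val, &expected,
					      MODEL_Q_LOCKED_VAL);
}

/* Mirror of queued_spin_unlock(): drop the locked byte, keep tail bits. */
static void model_unlock(struct model_qspinlock *lock)
{
	atomic_fetch_sub(&lock->val, MODEL_Q_LOCKED_VAL);
}

int main(void)
{
	struct model_qspinlock lock = { ATOMIC_VAR_INIT(0) };

	printf("first trylock:  %d\n", model_trylock(&lock)); /* 1: acquired */
	printf("second trylock: %d\n", model_trylock(&lock)); /* 0: already held */
	model_unlock(&lock);
	printf("after unlock:   %d\n", model_trylock(&lock)); /* 1: acquired again */
	return 0;
}
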
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000000..85f888e86761
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,79 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+ atomic_t val;
+} arch_spinlock_t;
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS 8
+#else
+#define _Q_PENDING_BITS 1
+#endif
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
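
A small standalone program (illustrative only, not part of this diff) that reproduces the bitfield layout documented above for the NR_CPUS < 16K case, mirroring the _Q_SET_MASK() arithmetic:

#include <stdio.h>

/* Same shape as _Q_SET_MASK(): a run of 'bits' ones shifted to 'offset'. */
#define SET_MASK(bits, offset) (((1U << (bits)) - 1) << (offset))

int main(void)
{
	unsigned int locked  = SET_MASK(8, 0);        /* 0x000000ff: locked byte  */
	unsigned int pending = SET_MASK(8, 8);        /* 0x0000ff00: pending byte */
	unsigned int tailidx = SET_MASK(2, 16);       /* 0x00030000: tail index   */
	unsigned int tailcpu = SET_MASK(32 - 18, 18); /* 0xfffc0000: tail cpu + 1 */

	printf("locked  %08x\n", locked);
	printf("pending %08x\n", pending);
	printf("tailidx %08x\n", tailidx);
	printf("tailcpu %08x\n", tailcpu);
	return 0;
}
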
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 94b19be67574..7169ad04acc0 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -18,6 +18,65 @@
#include <linux/slab.h>
/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples of this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ * authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
+ * for the *aead* pendants discussed in the following. In addition, for the AEAD
+ * operation, the aead_request_set_assoc function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG of the
+ * crypto_aead_decrypt. That error indicates an authentication error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
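
As a rough usage sketch (not part of this diff), AES-GCM encryption through this API might look as follows. The function is hypothetical, the scatterlists are assumed to be prepared by the caller, and a synchronous-only implementation is requested so no completion callback is needed; an asynchronous user would additionally have to supply a callback and handle -EINPROGRESS/-EBUSY:

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical AES-GCM encryption using the AEAD API described above. */
static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
			       struct scatterlist *assoc, unsigned int assoclen,
			       struct scatterlist *src, struct scatterlist *dst,
			       unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	/* Masking CRYPTO_ALG_ASYNC restricts us to synchronous implementations. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);	/* full 128-bit GCM tag */
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_assoc(req, assoc, assoclen);	/* associated data */
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	err = crypto_aead_encrypt(req);	/* dst must also hold the 16-byte tag */

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
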
+
+/**
+ * struct aead_request - AEAD request
+ * @base: Common attributes for async crypto requests
+ * @old: Boolean whether the old or new AEAD API is used
+ * @assoclen: Length in bytes of associated data for authentication
+ * @cryptlen: Length of data to be encrypted or decrypted
+ * @iv: Initialisation vector
+ * @assoc: Associated data
+ * @src: Source data
+ * @dst: Destination data
+ * @__ctx: Start of private context data
+ */
+struct aead_request {
+ struct crypto_async_request base;
+
+ bool old;
+
+ unsigned int assoclen;
+ unsigned int cryptlen;
+
+ u8 *iv;
+
+ struct scatterlist *assoc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
* struct aead_givcrypt_request - AEAD request with IV generation
* @seq: Sequence number for IV generation
* @giv: Space for generated IV
@@ -30,6 +89,474 @@ struct aead_givcrypt_request {
struct aead_request areq;
};
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ * transformation. A transformation may support smaller tag sizes.
+ * As the authentication tag is a message digest to ensure the
+ * integrity of the encrypted data, a consumer typically wants the
+ * largest authentication tag possible as defined by this
+ * variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ * function is used to specify the consumer requested size of the
+ * authentication tag to be either generated by the transformation
+ * during encryption or the size of the authentication tag to be
+ * supplied during the decryption operation. This function is also
+ * responsible for checking the authentication tag size for
+ * validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * All fields except @ivsize are mandatory and must be filled.
+ */
+struct aead_alg {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*init)(struct crypto_aead *tfm);
+ void (*exit)(struct crypto_aead *tfm);
+
+ const char *geniv;
+
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+
+ struct crypto_alg base;
+};
+
+struct crypto_aead {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*givencrypt)(struct aead_givcrypt_request *req);
+ int (*givdecrypt)(struct aead_givcrypt_request *req);
+
+ struct crypto_aead *child;
+
+ unsigned int authsize;
+ unsigned int reqsize;
+
+ struct crypto_tfm base;
+};
+
+static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_aead, base);
+}
+
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_aead(struct crypto_aead *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
+}
+
+static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
+{
+ return tfm;
+}
+
+static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
+{
+ return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
+}
+
+static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
+{
+ return container_of(crypto_aead_tfm(tfm)->__crt_alg,
+ struct aead_alg, base);
+}
+
+static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
+{
+ return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
+ alg->ivsize;
+}
+
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
+{
+ return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
+}
+
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
+static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
+{
+ return tfm->authsize;
+}
+
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
+}
+
+static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
+}
+
+static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
+{
+ return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
+}
+
+static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
+}
+
+static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
+}
+
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setkey(struct crypto_aead *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+
+static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
+{
+ return __crypto_aead_cast(req->base.tfm);
+}
+
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The encryption operation creates the authentication data /
+ * tag. That data is concatenated with the created ciphertext.
+ * The ciphertext memory size is therefore the given number of
+ * block cipher blocks + the size defined by the
+ * crypto_aead_setauthsize invocation. The caller must ensure
+ * that sufficient memory is available for the ciphertext and
+ * the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_aead_encrypt(struct aead_request *req)
+{
+ return crypto_aead_reqtfm(req)->encrypt(req);
+}
+
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE: The caller must concatenate the ciphertext followed by the
+ * authentication data / tag. That authentication data / tag
+ * must have the size defined by the crypto_aead_setauthsize
+ * invocation.
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ * cipher operation performs the authentication of the data during the
+ * decryption operation. Therefore, the function returns this error if
+ * the authentication of the ciphertext was unsuccessful (i.e. the
+ * integrity of the ciphertext or the associated data was violated);
+ * < 0 if an error occurred.
+ */
+static inline int crypto_aead_decrypt(struct aead_request *req)
+{
+ if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ return -EINVAL;
+
+ return crypto_aead_reqtfm(req)->decrypt(req);
+}
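
A rough caller-side sketch of acting on the -EBADMSG convention described above, assuming req was set up for decryption as documented:

	err = crypto_aead_decrypt(req);
	if (err == -EBADMSG)
		pr_debug("AEAD authentication failed, discarding data\n");
	else if (err)
		pr_err("AEAD decryption error %d\n", err);
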
+
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as the AEAD handle is to the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
+static inline void aead_request_set_tfm(struct aead_request *req,
+ struct crypto_aead *tfm)
+{
+ req->base.tfm = crypto_aead_tfm(tfm->child);
+}
+
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
+static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
+ gfp_t gfp)
+{
+ struct aead_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
+
+ if (likely(req))
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void aead_request_free(struct aead_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * Setting the callback function that is triggered once the cipher operation
+ * completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template:
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void aead_request_set_callback(struct aead_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
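
A rough sketch of a callback matching the documented template, assuming the caller waits on a completion (my_aead_result and the <linux/completion.h> usage are assumptions, not part of this API):

	struct my_aead_result {
		struct completion completion;
		int err;
	};

	static void my_aead_done(struct crypto_async_request *base, int error)
	{
		struct my_aead_result *res = base->data;	/* the @data pointer set above */

		if (error == -EINPROGRESS)
			return;			/* backlogged request has now been started */
		res->err = error;
		complete(&res->completion);
	}
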
+
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_aead_ivsize()
+ *
+ * Setting the source data and destination data scatter / gather lists which
+ * hold the associated data concatenated with the plaintext or ciphertext. See
+ * below for the authentication tag.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ *
+ * For both src/dst the layout is associated data, plain/cipher text,
+ * authentication tag.
+ *
+ * The content of the AD in the destination buffer after processing
+ * will either be untouched, or it will contain a copy of the AD
+ * from the source buffer. In order to ensure that it always has
+ * a copy of the AD, the user must copy the AD over either before
+ * or after processing. Of course this is not relevant if the user
+ * is doing in-place processing where src == dst.
+ *
+ * IMPORTANT NOTE: AEAD requires an authentication tag (MAC). For decryption,
+ * the caller must concatenate the ciphertext followed by the
+ * authentication tag and provide the entire data stream to the
+ * decryption operation (i.e. the data length used for the
+ * initialization of the scatterlist and the data length for the
+ * decryption operation is identical). For encryption, however,
+ * the authentication tag is created while encrypting the data.
+ * The destination buffer must hold sufficient space for the
+ * ciphertext and the authentication tag while the encryption
+ * invocation must only point to the plaintext data size. The
+ * following code snippet illustrates the memory usage:
+ * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
+ * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
+static inline void aead_request_set_crypt(struct aead_request *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen, u8 *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->cryptlen = cryptlen;
+ req->iv = iv;
+}
+
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * Obsolete, do not use.
+ */
+static inline void aead_request_set_assoc(struct aead_request *req,
+ struct scatterlist *assoc,
+ unsigned int assoclen)
+{
+ req->assoc = assoc;
+ req->assoclen = assoclen;
+ req->old = true;
+}
+
+/**
+ * aead_request_set_ad - set associated data information
+ * @req: request handle
+ * @assoclen: number of bytes in associated data
+ *
+ * Setting the AD information. This function sets the length of
+ * the associated data.
+ */
+static inline void aead_request_set_ad(struct aead_request *req,
+ unsigned int assoclen)
+{
+ req->assoclen = assoclen;
+ req->old = false;
+}
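
A rough consumer-side sketch of the request flow documented above, assuming a synchronous "gcm(aes)" transform, a caller-supplied 16-byte key and IV, and assoclen/ptlen describing associated data and plaintext already copied into one buffer (hypothetical names, not mandated by the API; allocation error handling trimmed for brevity):

	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	u8 *buf;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 16);		/* AES-128, assumed key buffer */
	err = err ?: crypto_aead_setauthsize(tfm, 16);	/* 16-byte tag */

	/* AD || plaintext, plus room for the tag appended by encryption */
	buf = kmalloc(assoclen + ptlen + 16, GFP_KERNEL);
	sg_init_one(&sg, buf, assoclen + ptlen + 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_encrypt(req);		/* async transforms may return -EINPROGRESS */

	aead_request_free(req);
	crypto_free_aead(tfm);
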
+
static inline struct crypto_aead *aead_givcrypt_reqtfm(
struct aead_givcrypt_request *req)
{
@@ -38,14 +565,12 @@ static inline struct crypto_aead *aead_givcrypt_reqtfm(
static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
{
- struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
- return crt->givencrypt(req);
+ return aead_givcrypt_reqtfm(req)->givencrypt(req);
};
static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
{
- struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
- return crt->givdecrypt(req);
+ return aead_givcrypt_reqtfm(req)->givdecrypt(req);
};
static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
new file mode 100644
index 000000000000..69d163e39101
--- /dev/null
+++ b/include/crypto/akcipher.h
@@ -0,0 +1,340 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_H
+#define _CRYPTO_AKCIPHER_H
+#include <linux/crypto.h>
+
+/**
+ * struct akcipher_request - public key request
+ *
+ * @base: Common attributes for async crypto requests
+ * @src: Pointer to memory containing the input parameters
+ * The format of the parameter(s) is expected to be Octet String
+ * @dst: Pointer to memory where the result will be stored
+ * @src_len: Size of the input parameter
+ * @dst_len: Size of the output buffer. It needs to be at least
+ * as big as the expected result, depending on the operation.
+ * After the operation it will be updated with the actual size of the
+ * result. In case of an error where dst_len was insufficient,
+ * it will be updated to the size required for the operation.
+ * @__ctx: Start of private context data
+ */
+struct akcipher_request {
+ struct crypto_async_request base;
+ void *src;
+ void *dst;
+ unsigned int src_len;
+ unsigned int dst_len;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_akcipher - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_akcipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct akcipher_alg - generic public key algorithm
+ *
+ * @sign: Function performs a sign operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @verify: Function performs a verify operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @encrypt: Function performs an encrypt operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @decrypt: Function performs a decrypt operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @setkey: Function invokes the algorithm specific set key function, which
+ * knows how to decode and interpret the BER encoded key
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Request context size required by algorithm implementation
+ * @base: Common crypto API algorithm data structure
+ */
+struct akcipher_alg {
+ int (*sign)(struct akcipher_request *req);
+ int (*verify)(struct akcipher_request *req);
+ int (*encrypt)(struct akcipher_request *req);
+ int (*decrypt)(struct akcipher_request *req);
+ int (*setkey)(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen);
+ int (*init)(struct crypto_akcipher *tfm);
+ void (*exit)(struct crypto_akcipher *tfm);
+
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Public Key API
+ *
+ * The Public Key API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_akcipher() -- allocate AKCIPHER tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * public key algorithm e.g. "rsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for public key algorithm. The returned struct
+ * crypto_akcipher is the handle that is required for any subsequent
+ * API invocation for the public key operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_akcipher_tfm(
+ struct crypto_akcipher *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct akcipher_alg, base);
+}
+
+static inline struct crypto_akcipher *__crypto_akcipher_tfm(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_akcipher, base);
+}
+
+static inline struct akcipher_alg *crypto_akcipher_alg(
+ struct crypto_akcipher *tfm)
+{
+ return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_alg(tfm)->reqsize;
+}
+
+static inline void akcipher_request_set_tfm(struct akcipher_request *req,
+ struct crypto_akcipher *tfm)
+{
+ req->base.tfm = crypto_akcipher_tfm(tfm);
+}
+
+static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
+ struct akcipher_request *req)
+{
+ return __crypto_akcipher_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_akcipher() -- free AKCIPHER tfm handle
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ */
+static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm));
+}
+
+/**
+ * akcipher_request_alloc() -- allocates public key request
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @gfp: allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */
+static inline struct akcipher_request *akcipher_request_alloc(
+ struct crypto_akcipher *tfm, gfp_t gfp)
+{
+ struct akcipher_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp);
+ if (likely(req))
+ akcipher_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * akcipher_request_free() -- zeroize and free public key request
+ *
+ * @req: request to free
+ */
+static inline void akcipher_request_free(struct akcipher_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * akcipher_request_set_callback() -- Sets an asynchronous callback.
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req: request that the callback will be set for
+ * @flgs: specify for instance if the operation may backlog
+ * @cmpl: callback which will be called
+ * @data: private data used by the caller
+ */
+static inline void akcipher_request_set_callback(struct akcipher_request *req,
+ u32 flgs,
+ crypto_completion_t cmpl,
+ void *data)
+{
+ req->base.complete = cmpl;
+ req->base.data = data;
+ req->base.flags = flgs;
+}
+
+/**
+ * akcipher_request_set_crypt() -- Sets request parameters
+ *
+ * Sets parameters required by crypto operation
+ *
+ * @req: public key request
+ * @src: ptr to input parameter
+ * @dst: ptr of output parameter
+ * @src_len: size of the input buffer
+ * @dst_len: size of the output buffer. It will be updated by the
+ * implementation to reflect the actual size of the result
+ */
+static inline void akcipher_request_set_crypt(struct akcipher_request *req,
+ void *src, void *dst,
+ unsigned int src_len,
+ unsigned int dst_len)
+{
+ req->src = src;
+ req->dst = dst;
+ req->src_len = src_len;
+ req->dst_len = dst_len;
+}
+
+/**
+ * crypto_akcipher_encrypt() -- Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->encrypt(req);
+}
+
+/**
+ * crypto_akcipher_decrypt() -- Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->decrypt(req);
+}
+
+/**
+ * crypto_akcipher_sign() -- Invoke public key sign operation
+ *
+ * Function invokes the specific public key sign operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->sign(req);
+}
+
+/**
+ * crypto_akcipher_verify() -- Invoke public key verify operation
+ *
+ * Function invokes the specific public key verify operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->verify(req);
+}
+
+/**
+ * crypto_akcipher_setkey() -- Invoke public key setkey operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private or public key
+ * @keylen: length of the key
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_setkey(struct crypto_akcipher *tfm, void *key,
+ unsigned int keylen)
+{
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->setkey(tfm, key, keylen);
+}
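
A rough sketch of driving these helpers for an "rsa" encrypt, assuming a caller-supplied BER-encoded key and message buffer (ber_key, msg and their lengths are hypothetical names), a synchronous implementation, and allocation error handling trimmed for brevity:

	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	u8 out[256];				/* assumed to fit the modulus size */
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_setkey(tfm, ber_key, ber_key_len);

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	akcipher_request_set_crypt(req, msg, out, msg_len, sizeof(out));

	err = crypto_akcipher_encrypt(req);	/* on success req->dst_len holds the result size */

	akcipher_request_free(req);
	crypto_free_akcipher(tfm);
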
+#endif
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 0ecb7688af71..d4ebf6e9af6a 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
+struct crypto_aead;
struct module;
struct rtattr;
struct seq_file;
@@ -126,7 +127,6 @@ struct ablkcipher_walk {
};
extern const struct crypto_type crypto_ablkcipher_type;
-extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;
void crypto_mod_put(struct crypto_alg *alg);
@@ -144,6 +144,8 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst,
const struct crypto_type *frontend);
+int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
+ u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@@ -239,22 +241,6 @@ static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
return crypto_tfm_ctx_aligned(&tfm->base);
}
-static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
-static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
-{
- return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline struct crypto_instance *crypto_aead_alg_instance(
- struct crypto_aead *aead)
-{
- return crypto_tfm_alg_instance(&aead->base);
-}
-
static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
struct crypto_spawn *spawn)
{
@@ -363,21 +349,6 @@ static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}
-static inline void *aead_request_ctx(struct aead_request *req)
-{
- return req->__ctx;
-}
-
-static inline void aead_request_complete(struct aead_request *req, int err)
-{
- req->base.complete(&req->base, err);
-}
-
-static inline u32 aead_request_flags(struct aead_request *req)
-{
- return req->base.flags;
-}
-
static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
u32 type, u32 mask)
{
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
index 86163ef24219..5b67af834d83 100644
--- a/include/crypto/compress.h
+++ b/include/crypto/compress.h
@@ -55,14 +55,14 @@ struct crypto_pcomp {
};
struct pcomp_alg {
- int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
+ int (*compress_setup)(struct crypto_pcomp *tfm, const void *params,
unsigned int len);
int (*compress_init)(struct crypto_pcomp *tfm);
int (*compress_update)(struct crypto_pcomp *tfm,
struct comp_request *req);
int (*compress_final)(struct crypto_pcomp *tfm,
struct comp_request *req);
- int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
+ int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params,
unsigned int len);
int (*decompress_init)(struct crypto_pcomp *tfm);
int (*decompress_update)(struct crypto_pcomp *tfm,
@@ -97,7 +97,7 @@ static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
}
static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
- void *params, unsigned int len)
+ const void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
}
@@ -120,7 +120,7 @@ static inline int crypto_compress_final(struct crypto_pcomp *tfm,
}
static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
- void *params, unsigned int len)
+ const void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
}
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index ba98918bbd9b..1547f540c920 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -14,6 +14,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
+#include <crypto/aead.h>
#include <crypto/hash.h>
struct cryptd_ablkcipher {
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 5186f750c713..9756c70899d8 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -49,8 +49,9 @@
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/workqueue.h>
/*
* Concatenation Helper and string operation helper
@@ -104,12 +105,13 @@ struct drbg_test_data {
};
struct drbg_state {
- spinlock_t drbg_lock; /* lock around DRBG */
+ struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
unsigned char *C;
/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
size_t reseed_ctr;
+ size_t reseed_threshold;
/* some memory the DRBG can use for its operation */
unsigned char *scratchpad;
void *priv_data; /* Cipher handle */
@@ -119,9 +121,12 @@ struct drbg_state {
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
#endif
+ struct work_struct seed_work; /* asynchronous seeding support */
+ struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
- struct drbg_test_data *test_data;
+ struct drbg_string test_data;
+ struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
@@ -177,19 +182,8 @@ static inline size_t drbg_max_requests(struct drbg_state *drbg)
}
/*
- * kernel crypto API input data structure for DRBG generate in case dlen
- * is set to 0
- */
-struct drbg_gen {
- unsigned char *outbuf; /* output buffer for random numbers */
- unsigned int outlen; /* size of output buffer */
- struct drbg_string *addtl; /* additional information string */
- struct drbg_test_data *test_data; /* test data */
-};
-
-/*
* This is a wrapper to the kernel crypto API function of
- * crypto_rng_get_bytes() to allow the caller to provide additional data.
+ * crypto_rng_generate() to allow the caller to provide additional data.
*
* @drng DRBG handle -- see crypto_rng_get_bytes
* @outbuf output buffer -- see crypto_rng_get_bytes
@@ -204,21 +198,15 @@ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
unsigned char *outbuf, unsigned int outlen,
struct drbg_string *addtl)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = outbuf;
- genbuf.outlen = outlen;
- genbuf.addtl = addtl;
- genbuf.test_data = NULL;
- ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
- return ret;
+ return crypto_rng_generate(drng, addtl->buf, addtl->len,
+ outbuf, outlen);
}
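
A rough sketch of calling the rewritten wrapper with additional input, assuming drng is an already-allocated DRBG handle and the additional data is caller-defined:

	struct drbg_string addtl;
	u8 addtl_data[] = "assumed additional input";
	u8 out[64];
	int err;

	drbg_string_fill(&addtl, addtl_data, sizeof(addtl_data) - 1);
	err = crypto_drbg_get_bytes_addtl(drng, out, sizeof(out), &addtl);
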
/*
* TEST code
*
* This is a wrapper to the kernel crypto API function of
- * crypto_rng_get_bytes() to allow the caller to provide additional data and
+ * crypto_rng_generate() to allow the caller to provide additional data and
* allow furnishing of test_data
*
* @drng DRBG handle -- see crypto_rng_get_bytes
@@ -236,14 +224,10 @@ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
struct drbg_string *addtl,
struct drbg_test_data *test_data)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = outbuf;
- genbuf.outlen = outlen;
- genbuf.addtl = addtl;
- genbuf.test_data = test_data;
- ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
- return ret;
+ crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+ test_data->testentropy->len);
+ return crypto_rng_generate(drng, addtl->buf, addtl->len,
+ outbuf, outlen);
}
/*
@@ -264,14 +248,9 @@ static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
struct drbg_string *pers,
struct drbg_test_data *test_data)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = NULL;
- genbuf.outlen = 0;
- genbuf.addtl = pers;
- genbuf.test_data = test_data;
- ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
- return ret;
+ crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+ test_data->testentropy->len);
+ return crypto_rng_reset(drng, pers->buf, pers->len);
}
/* DRBG type flags */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 98abda9ed3aa..57c8a6ee33c2 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -66,7 +66,7 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
* @init: Initialize the transformation context. Intended only to initialize the
- * state of the HASH transformation at the begining. This shall fill in
+ * state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
* @update: Push a chunk of data into the driver for transformation. This
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 2eba340230a7..4b2547186519 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -15,16 +15,75 @@
#include <crypto/aead.h>
#include <crypto/algapi.h>
+#include <linux/stddef.h>
#include <linux/types.h>
struct rtattr;
+struct aead_instance {
+ union {
+ struct {
+ char head[offsetof(struct aead_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct aead_alg alg;
+ };
+};
+
struct crypto_aead_spawn {
struct crypto_spawn base;
};
+extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_nivaead_type;
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *crypto_aead_alg_instance(
+ struct crypto_aead *aead)
+{
+ return crypto_tfm_alg_instance(&aead->base);
+}
+
+static inline struct crypto_instance *aead_crypto_instance(
+ struct aead_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct aead_instance, alg.base);
+}
+
+static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
+{
+ return aead_instance(crypto_aead_alg_instance(aead));
+}
+
+static inline void *aead_instance_ctx(struct aead_instance *inst)
+{
+ return crypto_instance_ctx(aead_crypto_instance(inst));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+ return req->base.flags;
+}
+
static inline void crypto_set_aead_spawn(
struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
{
@@ -47,24 +106,27 @@ static inline struct crypto_alg *crypto_aead_spawn_alg(
return spawn->base.alg;
}
+static inline struct aead_alg *crypto_spawn_aead_alg(
+ struct crypto_aead_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct aead_alg, base);
+}
+
static inline struct crypto_aead *crypto_spawn_aead(
struct crypto_aead_spawn *spawn)
{
- return __crypto_aead_cast(
- crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD,
- CRYPTO_ALG_TYPE_MASK));
+ return crypto_spawn_tfm2(&spawn->base);
}
-struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type,
- u32 mask);
-void aead_geniv_free(struct crypto_instance *inst);
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
int aead_geniv_init(struct crypto_tfm *tfm);
void aead_geniv_exit(struct crypto_tfm *tfm);
static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
{
- return crypto_aead_crt(geniv)->base;
+ return geniv->child;
}
static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
@@ -78,5 +140,29 @@ static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
aead_request_complete(&req->areq, err);
}
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ crypto_aead_crt(aead)->reqsize = reqsize;
+}
+
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+ return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
+ alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
+int crypto_register_aead(struct aead_alg *alg);
+void crypto_unregister_aead(struct aead_alg *alg);
+int crypto_register_aeads(struct aead_alg *algs, int count);
+void crypto_unregister_aeads(struct aead_alg *algs, int count);
+int aead_register_instance(struct crypto_template *tmpl,
+ struct aead_instance *inst);
+
#endif /* _CRYPTO_INTERNAL_AEAD_H */
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
new file mode 100644
index 000000000000..9a2bda15e454
--- /dev/null
+++ b/include/crypto/internal/akcipher.h
@@ -0,0 +1,60 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_INT_H
+#define _CRYPTO_AKCIPHER_INT_H
+#include <crypto/akcipher.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *akcipher_request_ctx(struct akcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void akcipher_request_complete(struct akcipher_request *req,
+ int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/**
+ * crypto_register_akcipher() -- Register public key algorithm
+ *
+ * Function registers an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+#endif
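
A rough driver-side sketch of an akcipher_alg definition registered through the helpers above; every my_* identifier is a placeholder for driver code, not something defined by this header:

	static struct akcipher_alg my_rsa_alg = {
		.encrypt = my_rsa_encrypt,
		.decrypt = my_rsa_decrypt,
		.sign	 = my_rsa_sign,
		.verify	 = my_rsa_verify,
		.setkey	 = my_rsa_setkey,
		.reqsize = sizeof(struct my_rsa_reqctx),
		.base	 = {
			.cra_name	 = "rsa",
			.cra_driver_name = "rsa-mydrv",
			.cra_priority	 = 100,
			.cra_module	 = THIS_MODULE,
			.cra_ctxsize	 = sizeof(struct my_rsa_ctx),
		},
	};

	err = crypto_register_akcipher(&my_rsa_alg);	/* on module init */
	/* ... */
	crypto_unregister_akcipher(&my_rsa_alg);	/* on module exit */
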
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
new file mode 100644
index 000000000000..9ca9b871aba5
--- /dev/null
+++ b/include/crypto/internal/geniv.h
@@ -0,0 +1,24 @@
+/*
+ * geniv: IV generation
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_GENIV_H
+#define _CRYPTO_INTERNAL_GENIV_H
+
+#include <crypto/internal/aead.h>
+#include <linux/spinlock.h>
+
+struct aead_geniv_ctx {
+ spinlock_t lock;
+ struct crypto_aead *child;
+};
+
+#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
index 896973369573..a52ef3483dd7 100644
--- a/include/crypto/internal/rng.h
+++ b/include/crypto/internal/rng.h
@@ -2,6 +2,7 @@
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -16,11 +17,29 @@
#include <crypto/algapi.h>
#include <crypto/rng.h>
-extern const struct crypto_type crypto_rng_type;
+int crypto_register_rng(struct rng_alg *alg);
+void crypto_unregister_rng(struct rng_alg *alg);
+int crypto_register_rngs(struct rng_alg *algs, int count);
+void crypto_unregister_rngs(struct rng_alg *algs, int count);
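
A rough driver-side sketch of an rng_alg registered with the helpers above; the my_* callbacks and context structure are placeholders for driver code:

	static struct rng_alg my_rng_alg = {
		.generate = my_rng_generate,
		.seed	  = my_rng_seed,
		.seedsize = 48,			/* assumed seed requirement */
		.base	  = {
			.cra_name	 = "stdrng",
			.cra_driver_name = "my_rng",
			.cra_priority	 = 100,
			.cra_module	 = THIS_MODULE,
			.cra_ctxsize	 = sizeof(struct my_rng_ctx),
		},
	};

	err = crypto_register_rng(&my_rng_alg);		/* on module init */
	crypto_unregister_rng(&my_rng_alg);		/* on module exit */
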
+
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void);
+#else
+static inline int crypto_del_default_rng(void)
+{
+ return 0;
+}
+#endif
static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
+static inline void crypto_rng_set_entropy(struct crypto_rng *tfm,
+ const u8 *data, unsigned int len)
+{
+ crypto_rng_alg(tfm)->set_ent(tfm, data, len);
+}
+
#endif
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
new file mode 100644
index 000000000000..a8c86365439f
--- /dev/null
+++ b/include/crypto/internal/rsa.h
@@ -0,0 +1,27 @@
+/*
+ * RSA internal helpers
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _RSA_HELPER_
+#define _RSA_HELPER_
+#include <linux/mpi.h>
+
+struct rsa_key {
+ MPI n;
+ MPI e;
+ MPI d;
+};
+
+int rsa_parse_key(struct rsa_key *rsa_key, const void *key,
+ unsigned int key_len);
+
+void rsa_free_key(struct rsa_key *rsa_key);
+#endif
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index 65f299b08b0d..146af825eedb 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -8,6 +8,11 @@
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_H0 0x67452301UL
+#define MD5_H1 0xefcdab89UL
+#define MD5_H2 0x98badcfeUL
+#define MD5_H3 0x10325476UL
+
struct md5_state {
u32 hash[MD5_HASH_WORDS];
u32 block[MD5_BLOCK_WORDS];
diff --git a/include/crypto/null.h b/include/crypto/null.h
index b7c864cc70df..06dc30d9f56e 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -8,4 +8,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
+struct crypto_blkcipher *crypto_get_default_null_skcipher(void);
+void crypto_put_default_null_skcipher(void);
+
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 6e28ea5be9f1..b95ede354a66 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -2,6 +2,7 @@
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -15,6 +16,50 @@
#include <linux/crypto.h>
+struct crypto_rng;
+
+/**
+ * struct rng_alg - random number generator definition
+ *
+ * @generate: The function defined by this variable obtains a
+ * random number. The random number generator transform
+ * must generate the random number out of the context
+ * provided with this call, plus any additional data
+ * if provided to the call.
+ * @seed: Seed or reseed the random number generator. With the
+ * invocation of this function call, the random number
+ * generator shall become ready for generation. If the
+ * random number generator requires a seed for setting
+ * up a new state, the seed must be provided by the
+ * consumer while invoking this function. The required
+ * size of the seed is defined with @seedsize.
+ * @set_ent: Set entropy that would otherwise be obtained from
+ * entropy source. Internal use only.
+ * @seedsize: The seed size required for a random number generator
+ * initialization is defined with this variable. Some
+ * random number generators do not require a seed,
+ * as the seeding is implemented internally without
+ * the need for support by the consumer. In this case,
+ * the seed size is set to zero.
+ * @base: Common crypto API algorithm data structure.
+ */
+struct rng_alg {
+ int (*generate)(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen);
+ int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+ void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
+ unsigned int len);
+
+ unsigned int seedsize;
+
+ struct crypto_alg base;
+};
+
+struct crypto_rng {
+ struct crypto_tfm base;
+};
+
extern struct crypto_rng *crypto_default_rng;
int crypto_get_default_rng(void);
@@ -27,11 +72,6 @@ void crypto_put_default_rng(void);
* CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
*/
-static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_rng *)tfm;
-}
-
/**
* crypto_alloc_rng() -- allocate RNG handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -52,15 +92,7 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
-static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_RNG;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask));
-}
+struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
{
@@ -77,12 +109,8 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
-}
-
-static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
-{
- return &crypto_rng_tfm(tfm)->crt_rng;
+ return container_of(crypto_rng_tfm(tfm)->__crt_alg,
+ struct rng_alg, base);
}
/**
@@ -91,7 +119,28 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
- crypto_free_tfm(crypto_rng_tfm(tfm));
+ crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
+}
+
+/**
+ * crypto_rng_generate() - get random number
+ * @tfm: cipher handle
+ * @src: Input buffer holding additional data, may be NULL
+ * @slen: Length of additional data
+ * @dst: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random
+ * numbers using the random number generator referenced by the
+ * cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
+{
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
@@ -108,7 +157,7 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
{
- return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
+ return crypto_rng_generate(tfm, NULL, 0, rdata, dlen);
}
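
A rough consumer sketch built on crypto_rng_get_bytes() and the crypto_rng_reset() declared below; the "stdrng" name, the 48-byte seed length and the get_random_bytes() seed source are assumptions:

	struct crypto_rng *rng;
	u8 seed[48], buf[32];
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	get_random_bytes(seed, sizeof(seed));	/* assumed seed source */
	err = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, sizeof(buf));

	crypto_free_rng(rng);
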
/**
@@ -128,11 +177,8 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_rng_reset(struct crypto_rng *tfm,
- u8 *seed, unsigned int slen)
-{
- return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
-}
+int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen);
/**
* crypto_rng_seedsize() - obtain seed size of RNG
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 20e4226a2e14..96670e7e7c14 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -102,4 +102,8 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
+struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+ struct scatterlist *src,
+ unsigned int len);
+
#endif /* _CRYPTO_SCATTERWALK_H */
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index c7af7c7ef793..555609910acb 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -90,4 +90,18 @@
#define ARIZONA_INMODE_SE 1
#define ARIZONA_INMODE_DMIC 2
+#define ARIZONA_MICD_TIME_CONTINUOUS 0
+#define ARIZONA_MICD_TIME_250US 1
+#define ARIZONA_MICD_TIME_500US 2
+#define ARIZONA_MICD_TIME_1MS 3
+#define ARIZONA_MICD_TIME_2MS 4
+#define ARIZONA_MICD_TIME_4MS 5
+#define ARIZONA_MICD_TIME_8MS 6
+#define ARIZONA_MICD_TIME_16MS 7
+#define ARIZONA_MICD_TIME_32MS 8
+#define ARIZONA_MICD_TIME_64MS 9
+#define ARIZONA_MICD_TIME_128MS 10
+#define ARIZONA_MICD_TIME_256MS 11
+#define ARIZONA_MICD_TIME_512MS 12
+
#endif
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
new file mode 100644
index 000000000000..e3e6c75d8822
--- /dev/null
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -0,0 +1,15 @@
+/*
+ * This header provides shared DT/Driver defines for ST's LPC device
+ *
+ * Copyright (C) 2014 STMicroelectronics -- All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for STMicroelectronics
+ */
+
+#ifndef __DT_BINDINGS_ST_LPC_H__
+#define __DT_BINDINGS_ST_LPC_H__
+
+#define ST_LPC_MODE_RTC 0
+#define ST_LPC_MODE_WDT 1
+
+#endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index e4da5e35e29c..c187817471fb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -158,6 +158,16 @@ typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
#endif
+static inline bool invalid_logical_cpuid(u32 cpuid)
+{
+ return (int)cpuid < 0;
+}
+
+static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
+{
+ return phys_id == PHYS_CPUID_INVALID;
+}
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
@@ -243,50 +253,12 @@ extern bool wmi_has_guid(const char *guid);
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
-#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-
-extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern void acpi_video_dmi_promote_vendor(void);
-extern void acpi_video_dmi_demote_vendor(void);
-extern int acpi_video_backlight_support(void);
-extern int acpi_video_display_switch_support(void);
-
-#else
-
-static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
-{
- return 0;
-}
-
-static inline long acpi_is_video_device(acpi_handle handle)
-{
- return 0;
-}
-
-static inline void acpi_video_dmi_promote_vendor(void)
-{
-}
-
-static inline void acpi_video_dmi_demote_vendor(void)
-{
-}
-
-static inline int acpi_video_backlight_support(void)
-{
- return 0;
-}
-
-static inline int acpi_video_display_switch_support(void)
-{
- return 0;
-}
-
-#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
-
extern int acpi_blacklisted(void);
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern void acpi_osi_setup(char *str);
+extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
int acpi_get_node(acpi_handle handle);
@@ -332,6 +304,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_resources_are_enforced(void);
+int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+ unsigned long flags, char *desc);
+
#ifdef CONFIG_HIBERNATION
void __init acpi_no_s4_hw_signature(void);
#endif
@@ -440,6 +415,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
@@ -494,6 +470,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
}
static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
static inline int early_acpi_boot_init(void)
{
@@ -525,6 +502,13 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
return 0;
}
+static inline int acpi_reserve_region(u64 start, unsigned int length,
+ u8 space_id, unsigned long flags,
+ char *desc)
+{
+ return -ENXIO;
+}
+
struct acpi_table_header;
static inline int acpi_table_parse(char *id,
int (*handler)(struct acpi_table_header *))
@@ -569,6 +553,11 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ return false;
+}
+
#define ACPI_PTR(_ptr) (NULL)
#endif /* !CONFIG_ACPI */
@@ -721,6 +710,8 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
if (adev)
adev->driver_gpios = NULL;
}
+
+int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
@@ -728,6 +719,11 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
return -ENXIO;
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+ return -ENXIO;
+}
#endif
/* Device properties */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index aff923ae8c4b..d87d8eced064 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -116,7 +116,6 @@ __printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
enum wb_reason reason);
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 0e97856b2cff..14eea946e640 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -74,5 +74,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 68c16a6bedb3..0df4a51e1a78 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * This answers the question "if I were to pass @rate to clk_set_rate(),
+ * what clock rate would I end up with?" without changing the hardware
+ * in any way. In other words:
+ *
+ * rate = clk_round_rate(clk, r);
+ *
+ * and:
+ *
+ * clk_set_rate(clk, r);
+ * rate = clk_get_rate(clk);
+ *
+ * are equivalent except the former does not modify the clock hardware
+ * in any way.
+ *
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
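
A rough sketch of the pattern the comment above describes, querying the achievable rate before touching the hardware; the 48 MHz target and the acceptance threshold are assumptions:

	long rounded = clk_round_rate(clk, 48000000);

	if (rounded < 0)
		return rounded;
	if (rounded < 40000000)
		return -EINVAL;		/* rate not acceptable, hardware untouched */

	return clk_set_rate(clk, rounded);
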
@@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-/**
- * clk_add_alias - add a new clock alias
- * @alias: name for clock alias
- * @alias_dev_name: device name
- * @id: platform specific clock name
- * @dev: device
- *
- * Allows using generic clock names for drivers by adding a new alias.
- * Assumes clkdev, see clkdev.h for more info.
- */
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
- struct device *dev);
-
struct device_node;
struct of_phandle_args;
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 94bad77eeb4a..a240b18e86fa 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
const char *dev_id;
const char *con_id;
struct clk *clk;
+ struct clk_hw *clk_hw;
};
#define CLKDEV_INIT(d, n, c) \
@@ -37,8 +38,11 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
+struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...);
+
void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *, ...);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 867722591be2..05be2352fef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -250,7 +250,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
- ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+ ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+/**
+ * READ_ONCE_CTRL - Read a value heading a control dependency
+ * @x: The value to be read, heading the control dependency
+ *
+ * Control dependencies are tricky. See Documentation/memory-barriers.txt
+ * for important information on how to use them. Note that in many cases,
+ * use of smp_load_acquire() will be much simpler. Control dependencies
+ * should be avoided except on the hottest of hotpaths.
+ */
+#define READ_ONCE_CTRL(x) \
+({ \
+ typeof(x) __val = READ_ONCE(x); \
+ smp_read_barrier_depends(); /* Enforce control dependency. */ \
+ __val; \
+})
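
A rough sketch of the pattern the comment above refers to: a marked load heading a control dependency, ordering it before a dependent store; flag and other_var are placeholder variables:

	if (READ_ONCE_CTRL(flag))		/* load ordered before the store below */
		WRITE_ONCE(other_var, 1);
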
#endif /* __KERNEL__ */
@@ -450,7 +466,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
- * If possible use READ_ONCE/ASSIGN_ONCE instead.
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 2821838256b4..b96bd299966f 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -14,8 +14,6 @@ extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
-extern void __context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next);
static inline void user_enter(void)
{
@@ -51,19 +49,11 @@ static inline void exception_exit(enum ctx_state prev_ctx)
}
}
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next)
-{
- if (context_tracking_is_enabled())
- __context_tracking_task_switch(prev, next);
-}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 6b7b96a32b75..678ecdf90cf6 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -12,6 +12,7 @@ struct context_tracking {
* may be further optimized using static keys.
*/
bool active;
+ int recursion;
enum ctx_state {
CONTEXT_KERNEL = 0,
CONTEXT_USER,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 2ee4888c1f47..29ad97c34fd5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -65,7 +65,9 @@ struct cpufreq_policy {
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
- unsigned int cpu; /* cpu nr of CPU managing this policy */
+ unsigned int cpu; /* cpu managing this policy, must be online */
+ unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */
+
struct clk *clk;
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -80,6 +82,7 @@ struct cpufreq_policy {
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
+ char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..d075d34279df 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -151,10 +151,6 @@ extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
-extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
@@ -190,16 +186,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+ struct cpuidle_device *dev) {return NULL; }
+#endif
+
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
- struct cpuidle_device *dev) {return NULL; }
#endif
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
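
A hedged caller sketch for the relocated cpuidle declarations (function name illustrative): cpuidle_find_deepest_state() and cpuidle_enter_freeze() are now only provided when both CONFIG_CPU_IDLE and CONFIG_SUSPEND are set, and the stubs return -ENODEV otherwise, so a suspend-to-idle path can fall back to the default idle call declared above.

static void s2idle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	/* With !CONFIG_SUSPEND the stub returns -ENODEV and we fall back. */
	if (cpuidle_enter_freeze(drv, dev) < 0)
		default_idle_call();
}
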
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index 84920f3cc83e..a9953c762eee 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -3,7 +3,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly  0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
*
* This source code is licensed under the GNU General Public License,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 10df5d2d093a..81ef938b0a8e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -53,6 +53,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
@@ -101,6 +102,12 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Temporary flag used to prevent legacy AEAD implementations from
+ * being used by user-space.
+ */
+#define CRYPTO_ALG_AEAD_NEW 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -138,9 +145,9 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
-struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
+struct aead_request;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
@@ -175,32 +182,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct aead_request - AEAD request
- * @base: Common attributes for async crypto requests
- * @assoclen: Length in bytes of associated data for authentication
- * @cryptlen: Length of data to be encrypted or decrypted
- * @iv: Initialisation vector
- * @assoc: Associated data
- * @src: Source data
- * @dst: Destination data
- * @__ctx: Start of private context data
- */
-struct aead_request {
- struct crypto_async_request base;
-
- unsigned int assoclen;
- unsigned int cryptlen;
-
- u8 *iv;
-
- struct scatterlist *assoc;
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
struct blkcipher_desc {
struct crypto_blkcipher *tfm;
void *info;
@@ -294,7 +275,7 @@ struct ablkcipher_alg {
};
/**
- * struct aead_alg - AEAD cipher definition
+ * struct old_aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
* As the authentication tag is a message digest to ensure the
@@ -319,7 +300,7 @@ struct ablkcipher_alg {
* All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
* mandatory and must be filled.
*/
-struct aead_alg {
+struct old_aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
@@ -426,40 +407,12 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-/**
- * struct rng_alg - random number generator definition
- * @rng_make_random: The function defined by this variable obtains a random
- * number. The random number generator transform must generate
- * the random number out of the context provided with this
- * call.
- * @rng_reset: Reset of the random number generator by clearing the entire state.
- * With the invocation of this function call, the random number
- * generator shall completely reinitialize its state. If the random
- * number generator requires a seed for setting up a new state,
- * the seed must be provided by the consumer while invoking this
- * function. The required size of the seed is defined with
- * @seedsize .
- * @seedsize: The seed size required for a random number generator
- * initialization defined with this variable. Some random number
- * generators like the SP800-90A DRBG does not require a seed as the
- * seeding is implemented internally without the need of support by
- * the consumer. In this case, the seed size is set to zero.
- */
-struct rng_alg {
- int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-
- unsigned int seedsize;
-};
-
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
-#define cra_rng cra_u.rng
/**
* struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -505,7 +458,7 @@ struct rng_alg {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * trasnformation types. There are multiple options:
+ * transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
* &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
@@ -555,11 +508,10 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
+ struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
- struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -567,7 +519,7 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-};
+} CRYPTO_MINALIGN_ATTR;
/*
* Algorithm registration interface.
@@ -602,21 +554,6 @@ struct ablkcipher_tfm {
unsigned int reqsize;
};
-struct aead_tfm {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- struct crypto_aead *base;
-
- unsigned int ivsize;
- unsigned int authsize;
- unsigned int reqsize;
-};
-
struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -655,19 +592,11 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
-struct rng_tfm {
- int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
-
#define crt_ablkcipher crt_u.ablkcipher
-#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
-#define crt_rng crt_u.rng
struct crypto_tfm {
@@ -675,12 +604,10 @@ struct crypto_tfm {
union {
struct ablkcipher_tfm ablkcipher;
- struct aead_tfm aead;
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
struct compress_tfm compress;
- struct rng_tfm rng;
} crt_u;
void (*exit)(struct crypto_tfm *tfm);
@@ -694,10 +621,6 @@ struct crypto_ablkcipher {
struct crypto_tfm base;
};
-struct crypto_aead {
- struct crypto_tfm base;
-};
-
struct crypto_blkcipher {
struct crypto_tfm base;
};
@@ -714,10 +637,6 @@ struct crypto_hash {
struct crypto_tfm base;
};
-struct crypto_rng {
- struct crypto_tfm base;
-};
-
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -1194,400 +1113,6 @@ static inline void ablkcipher_request_set_crypt(
}
/**
- * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
- *
- * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
- * (listed as type "aead" in /proc/crypto)
- *
- * The most prominent examples for this type of encryption is GCM and CCM.
- * However, the kernel supports other types of AEAD ciphers which are defined
- * with the following cipher string:
- *
- * authenc(keyed message digest, block cipher)
- *
- * For example: authenc(hmac(sha256), cbc(aes))
- *
- * The example code provided for the asynchronous block cipher operation
- * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addtion, for the AEAD
- * operation, the aead_request_set_assoc function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
- * operation is that the caller should explicitly check for -EBADMSG of the
- * crypto_aead_decrypt. That error indicates an authentication error, i.e.
- * a breach in the integrity of the message. In essence, that -EBADMSG error
- * code is the key bonus an AEAD cipher has over "standard" block chaining
- * modes.
- */
-
-static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_aead *)tfm;
-}
-
-/**
- * crypto_alloc_aead() - allocate AEAD cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * AEAD cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an AEAD. The returned struct
- * crypto_aead is the cipher handle that is required for any subsequent
- * API invocation for that AEAD.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
-
-static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_aead() - zeroize and free aead handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_aead(struct crypto_aead *tfm)
-{
- crypto_free_tfm(crypto_aead_tfm(tfm));
-}
-
-static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->crt_aead;
-}
-
-/**
- * crypto_aead_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the aead referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_aead_authsize() - obtain maximum authentication data size
- * @tfm: cipher handle
- *
- * The maximum size of the authentication data for the AEAD cipher referenced
- * by the AEAD cipher handle is returned. The authentication data size may be
- * zero if the cipher implements a hard-coded maximum.
- *
- * The authentication data may also be known as "tag value".
- *
- * Return: authentication data size / tag size in bytes
- */
-static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->authsize;
-}
-
-/**
- * crypto_aead_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the AEAD referenced with the cipher handle is returned.
- * The caller may use that information to allocate appropriate memory for the
- * data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
-}
-
-static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
-}
-
-static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
-{
- return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
-}
-
-static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
-}
-
-static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
-}
-
-/**
- * crypto_aead_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the AEAD referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct aead_tfm *crt = crypto_aead_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_aead_setauthsize() - set authentication data size
- * @tfm: cipher handle
- * @authsize: size of the authentication data / tag in bytes
- *
- * Set the authentication data size / tag size. AEAD requires an authentication
- * tag (or MAC) in addition to the associated data.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
-
-static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
-{
- return __crypto_aead_cast(req->base.tfm);
-}
-
-/**
- * crypto_aead_encrypt() - encrypt plaintext
- * @req: reference to the aead_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The encryption operation creates the authentication data /
- * tag. That data is concatenated with the created ciphertext.
- * The ciphertext memory size is therefore the given number of
- * block cipher blocks + the size defined by the
- * crypto_aead_setauthsize invocation. The caller must ensure
- * that sufficient memory is available for the ciphertext and
- * the authentication tag.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
- return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
-}
-
-/**
- * crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
- * authentication data / tag. That authentication data / tag
- * must have the size defined by the crypto_aead_setauthsize
- * invocation.
- *
- *
- * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
- * cipher operation performs the authentication of the data during the
- * decryption operation. Therefore, the function returns this error if
- * the authentication of the ciphertext was unsuccessful (i.e. the
- * integrity of the ciphertext or the associated data was violated);
- * < 0 if an error occurred.
- */
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
- if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
- return -EINVAL;
-
- return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous AEAD Request Handle
- *
- * The aead_request data structure contains all pointers to data required for
- * the AEAD cipher operation. This includes the cipher handle (which can be
- * used by multiple aead_request instances), pointer to plaintext and
- * ciphertext, asynchronous callback function, etc. It acts as a handle to the
- * aead_request_* API calls in a similar way as AEAD handle to the
- * crypto_aead_* API calls.
- */
-
-/**
- * crypto_aead_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->reqsize;
-}
-
-/**
- * aead_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing aead handle in the request
- * data structure with a different one.
- */
-static inline void aead_request_set_tfm(struct aead_request *req,
- struct crypto_aead *tfm)
-{
- req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
-}
-
-/**
- * aead_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the AEAD
- * encrypt and decrypt API calls. During the allocation, the provided aead
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
- gfp_t gfp)
-{
- struct aead_request *req;
-
- req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
-
- if (likely(req))
- aead_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * aead_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void aead_request_free(struct aead_request *req)
-{
- kzfree(req);
-}
-
-/**
- * aead_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * Setting the callback function that is triggered once the cipher operation
- * completes
- *
- * The callback function is registered with the aead_request handle and
- * must comply with the following template
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void aead_request_set_callback(struct aead_request *req,
- u32 flags,
- crypto_completion_t compl,
- void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * aead_request_set_crypt - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @cryptlen: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_aead_ivsize()
- *
- * Setting the source data and destination data scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- * the caller must concatenate the ciphertext followed by the
- * authentication tag and provide the entire data stream to the
- * decryption operation (i.e. the data length used for the
- * initialization of the scatterlist and the data length for the
- * decryption operation is identical). For encryption, however,
- * the authentication tag is created while encrypting the data.
- * The destination buffer must hold sufficient space for the
- * ciphertext and the authentication tag while the encryption
- * invocation must only point to the plaintext data size. The
- * following code snippet illustrates the memory usage
- * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
- */
-static inline void aead_request_set_crypt(struct aead_request *req,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int cryptlen, u8 *iv)
-{
- req->src = src;
- req->dst = dst;
- req->cryptlen = cryptlen;
- req->iv = iv;
-}
-
-/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * For encryption, the memory is filled with the associated data. For
- * decryption, the memory must point to the associated data.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- req->assoc = assoc;
- req->assoclen = assoclen;
-}
-
-/**
* DOC: Synchronous Block Cipher API
*
* The synchronous block cipher API is used with the ciphers of type
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index cb25af461054..420311bcee38 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,6 @@ extern struct dentry *arch_debugfs_dir;
/* declared over in file.c */
extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 30624954dec5..e9bc9292bd3a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
struct irte {
union {
+ /* Shared between remapped and posted mode */
struct {
- __u64 present : 1,
- fpd : 1,
- dst_mode : 1,
- redir_hint : 1,
- trigger_mode : 1,
- dlvry_mode : 3,
- avail : 4,
- __reserved_1 : 4,
- vector : 8,
- __reserved_2 : 8,
- dest_id : 32;
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 7 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
};
__u64 low;
};
union {
+ /* Shared between remapped and posted mode */
struct {
- __u64 sid : 16,
- sq : 2,
- svt : 2,
- __reserved_3 : 44;
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
};
__u64 high;
};
};
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+ dst->present = src->present;
+ dst->fpd = src->fpd;
+ dst->avail = src->avail;
+ dst->pst = src->pst;
+ dst->vector = src->vector;
+ dst->sid = src->sid;
+ dst->sq = src->sq;
+ dst->svt = src->svt;
+}
+
+#define PDA_LOW_BIT 26
+#define PDA_HIGH_BIT 32
+
enum {
IRQ_REMAP_XAPIC_MODE,
IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
#endif /* __DMAR_H__ */
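
An illustrative sketch (not from this patch) of how the shared-field helper above might be used when building a posted-format entry from a remapped one; the copied fields are exactly those named in dmar_copy_shared_irte().

static void irte_make_posted(struct irte *dst, struct irte *src)
{
	struct irte tmp;

	memset(&tmp, 0, sizeof(tmp));
	dmar_copy_shared_irte(&tmp, src);	/* present, fpd, avail, pst, vector, sid, sq, svt */
	tmp.p_pst = 1;				/* mark the entry as posted */
	*dst = tmp;
}
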
diff --git a/include/linux/efi.h b/include/linux/efi.h
index af5be0368dec..2092965afca3 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -583,6 +583,9 @@ void efi_native_runtime_setup(void);
#define EFI_FILE_INFO_ID \
EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+ EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
#define EFI_FILE_SYSTEM_GUID \
EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
@@ -823,6 +826,7 @@ extern struct efi {
unsigned long fw_vendor; /* fw_vendor */
unsigned long runtime; /* runtime table */
unsigned long config_table; /* config tables */
+ unsigned long esrt; /* ESRT table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -875,6 +879,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
extern int efi_config_parse_tables(void *config_tables, int count, int sz,
efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
@@ -882,12 +891,15 @@ extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
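
A hedged sketch of how the new esrt pointer is expected to be consumed (the wrapper name is illustrative): like the other table addresses in struct efi, it is assumed to read EFI_INVALID_TABLE_ADDR when no ESRT was found in the config tables.

void __init esrt_setup(void)
{
	if (efi.esrt != EFI_INVALID_TABLE_ADDR)
		efi_esrt_init();	/* no-op stub when CONFIG_EFI_ESRT is off */
}
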
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35ec87e490b1..b577e801b4af 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -38,7 +38,6 @@ struct backing_dev_info;
struct export_operations;
struct hd_geometry;
struct iovec;
-struct nameidata;
struct kiocb;
struct kobject;
struct pipe_inode_info;
@@ -656,6 +655,7 @@ struct inode {
struct pipe_inode_info *i_pipe;
struct block_device *i_bdev;
struct cdev *i_cdev;
+ char *i_link;
};
__u32 i_generation;
@@ -1607,12 +1607,12 @@ struct file_operations {
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
- void * (*follow_link) (struct dentry *, struct nameidata *);
+ const char * (*follow_link) (struct dentry *, void **);
int (*permission) (struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
int (*readlink) (struct dentry *, char __user *,int);
- void (*put_link) (struct dentry *, struct nameidata *, void *);
+ void (*put_link) (struct inode *, void *);
int (*create) (struct inode *,struct dentry *, umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1879,6 +1879,7 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -2704,13 +2705,14 @@ extern const struct file_operations generic_ro_fops;
extern int readlink_copy(char __user *, int, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern const char *page_follow_link_light(struct dentry *, void **);
+extern void page_put_link(struct inode *, void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
-extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern void kfree_put_link(struct inode *, void *);
+extern void free_page_put_link(struct inode *, void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2721,6 +2723,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
+const char *simple_follow_link(struct dentry *, void **);
+extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
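
An illustrative sketch of the new symlink helpers (filesystem name hypothetical): an in-memory filesystem can now point i_link at its target string and reuse simple_symlink_inode_operations instead of supplying its own follow_link/put_link pair.

static void myfs_init_symlink(struct inode *inode, char *target)
{
	inode->i_link = target;			/* new union member added above */
	inode->i_op = &simple_symlink_inode_operations;
}
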
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index ab81339a8590..d12b5d566e4b 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -196,13 +196,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- /* GPIO can never have been requested */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline void gpio_unexport(unsigned gpio)
{
/* GPIO can never have been exported */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 3a7c9ffd5ab9..fd098169fe87 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -100,24 +100,25 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
@@ -304,9 +305,9 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -322,9 +323,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -341,7 +342,7 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -360,7 +361,7 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -449,7 +450,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
-int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
@@ -466,11 +466,6 @@ static inline int gpiod_export_link(struct device *dev, const char *name,
return -ENOSYS;
}
-static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
-{
- return -ENOSYS;
-}
-
static inline void gpiod_unexport(struct gpio_desc *desc)
{
}
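
A short consumer sketch for the renamed array setters (the descriptor array, count and bound are assumed to come from the caller): former gpiod_set_array() users become gpiod_set_array_value() users with the same arguments.

#define MAX_LEDS 8	/* illustrative bound, not from this patch */

static void leds_set_all(struct gpio_desc **leds, unsigned int count, int on)
{
	int values[MAX_LEDS];
	unsigned int i;

	for (i = 0; i < count && i < MAX_LEDS; i++)
		values[i] = on;
	gpiod_set_array_value(count, leds, values);
}
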
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index f1b36593ec9f..cc7ec129b329 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,7 @@ struct seq_file;
* struct gpio_chip - abstract a GPIO controller
* @label: for diagnostics
* @dev: optional device providing the GPIOs
+ * @cdev: class device used by sysfs interface (may be NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @list: links gpio_chips together for traversal
* @request: optional hook for chip-specific activation, such as
@@ -41,8 +42,12 @@ struct seq_file;
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
- * @base: identifies the first GPIO number handled by this chip; or, if
- * negative during registration, requests dynamic ID allocation.
+ * @base: identifies the first GPIO number handled by this chip;
+ * or, if negative during registration, requests dynamic ID allocation.
+ * DEPRECATION: providing anything non-negative and nailing the base
+ * offset of GPIO chips is deprecated. Please pass -1 as base to
+ * let gpiolib select the chip base in all possible cases. We want to
+ * get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
* @desc: array of ngpio descriptors. Private.
@@ -57,7 +62,6 @@ struct seq_file;
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @exported: flags if the gpiochip is exported for use from sysfs. Private.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
*
@@ -74,6 +78,7 @@ struct seq_file;
struct gpio_chip {
const char *label;
struct device *dev;
+ struct device *cdev;
struct module *owner;
struct list_head list;
@@ -109,7 +114,6 @@ struct gpio_chip {
const char *const *names;
bool can_sleep;
bool irq_not_threaded;
- bool exported;
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
@@ -121,6 +125,7 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
+ int irq_parent;
#endif
#if defined(CONFIG_OF_GPIO)
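
A minimal registration sketch following the deprecation note above (chip fields illustrative): pass -1 as base so gpiolib assigns the GPIO numbers dynamically.

static struct gpio_chip my_chip = {
	.label	= "my-gpio",
	.base	= -1,		/* let gpiolib pick the base */
	.ngpio	= 8,
};

/* gpiochip_add(&my_chip) during probe; error handling omitted. */
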
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 176b43670e5d..f17980de2662 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -815,6 +815,8 @@ void hid_disconnect(struct hid_device *hid);
const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id);
s32 hid_snto32(__u32 value, unsigned n);
+__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n);
/**
* hid_device_io_start - enable HID input during probe, remove
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5db055821ef3..76dd4f0da5ca 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -163,6 +163,8 @@ enum hrtimer_base_type {
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @next_timer: Pointer to the first expiring timer
@@ -186,6 +188,8 @@ struct hrtimer_cpu_base {
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int in_hrtirq : 1,
hres_active : 1,
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
index 70a1dbbf2093..d4a527e58434 100644
--- a/include/linux/htirq.h
+++ b/include/linux/htirq.h
@@ -1,24 +1,38 @@
#ifndef LINUX_HTIRQ_H
#define LINUX_HTIRQ_H
+struct pci_dev;
+struct irq_data;
+
struct ht_irq_msg {
u32 address_lo; /* low 32 bits of the ht irq message */
u32 address_hi; /* high 32 bits of the ht irq message */
};
+typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+ struct ht_irq_msg *msg);
+
+struct ht_irq_cfg {
+ struct pci_dev *dev;
+ /* Update callback used to cope with buggy hardware */
+ ht_irq_update_t *update;
+ unsigned pos;
+ unsigned idx;
+ struct ht_irq_msg msg;
+};
+
/* Helper functions.. */
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-struct irq_data;
void mask_ht_irq(struct irq_data *data);
void unmask_ht_irq(struct irq_data *data);
/* The arch hook for getting things started */
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
+int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
+ ht_irq_update_t *update);
+void arch_teardown_ht_irq(unsigned int irq);
/* For drivers of buggy hardware */
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 0bc03f100d04..9ad7828d9d34 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -675,6 +675,7 @@ struct twl4030_power_data {
struct twl4030_resconfig *board_config;
#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
bool use_poweroff; /* Board is wired for TWL poweroff */
+ bool ac_charger_quirk; /* Disable AC charger on board */
};
extern int twl4030_remove_script(u8 flags);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 796ef9645827..d9a366d24e3b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -87,6 +87,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/*
* Decoding Capability Register
*/
+#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
@@ -115,13 +116,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
+#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
#define ecap_nwfs(e) ((e >> 33) & 0x1)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_pasid(e) ((e >> 28) & 0x1)
+/* PASID support used to be on bit 28 */
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
@@ -295,9 +297,12 @@ struct q_inval {
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
#define INTR_REMAP_TABLE_ENTRIES 65536
+struct irq_domain;
+
struct ir_table {
struct irte *base;
unsigned long *bitmap;
@@ -319,6 +324,9 @@ enum {
MAX_SR_DMAR_REGS
};
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -347,9 +355,12 @@ struct intel_iommu {
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
int node;
+ u32 flags; /* Software defined flags */
};
static inline void __iommu_flush_cache(
diff --git a/include/linux/io.h b/include/linux/io.h
index 986f2bffea1e..fb5a99800e77 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -19,6 +19,7 @@
#define _LINUX_IO_H
#include <linux/types.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -111,6 +112,13 @@ static inline void arch_phys_wc_del(int handle)
}
#define arch_phys_wc_add arch_phys_wc_add
+#ifndef arch_phys_wc_index
+static inline int arch_phys_wc_index(int handle)
+{
+ return -1;
+}
+#define arch_phys_wc_index arch_phys_wc_index
+#endif
#endif
#endif /* _LINUX_IO_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0546b8710ce3..dc767f7c3704 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -114,6 +114,20 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/**
+ * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * @list: Linked list pointers
+ * @start: System physical start address of the region
+ * @length: Length of the region in bytes
+ * @prot: IOMMU Protection flags (READ/WRITE/...)
+ */
+struct iommu_dm_region {
+ struct list_head list;
+ phys_addr_t start;
+ size_t length;
+ int prot;
+};
+
#ifdef CONFIG_IOMMU_API
/**
@@ -159,6 +173,10 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
+ /* Request/Free a list of direct mapping requirements for a device */
+ void (*get_dm_regions)(struct device *dev, struct list_head *list);
+ void (*put_dm_regions)(struct device *dev, struct list_head *list);
+
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
@@ -193,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -204,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
+extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern int iommu_request_dm_for_dev(struct device *dev);
+
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
@@ -227,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
@@ -332,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
{
}
+static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
+{
+ return NULL;
+}
+
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
@@ -373,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
+static inline void iommu_get_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline void iommu_put_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline int iommu_request_dm_for_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
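
A hedged consumer sketch for the direct-mapping interface added above (function name illustrative): fetch the regions a device requires, identity-map them into a domain, then release the list.

static int map_direct_regions(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_dm_region *region;
	LIST_HEAD(regions);
	int ret = 0;

	iommu_get_dm_regions(dev, &regions);
	list_for_each_entry(region, &regions, list) {
		ret = iommu_map(domain, region->start, region->start,
				region->length, region->prot);
		if (ret)
			break;
	}
	iommu_put_dm_regions(dev, &regions);
	return ret;
}
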
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62c6901cab55..812149160d3b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -126,13 +126,21 @@ struct msi_desc;
struct irq_domain;
/**
- * struct irq_data - per irq and irq chip data passed down to chip functions
+ * struct irq_common_data - per irq data shared by all irqchips
+ * @state_use_accessors: status information for irq chip functions.
+ * Use accessor functions to deal with it
+ */
+struct irq_common_data {
+ unsigned int state_use_accessors;
+};
+
+/**
+ * struct irq_data - per irq chip data passed down to chip functions
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
* @node: node index useful for balancing
- * @state_use_accessors: status information for irq chip functions.
- * Use accessor functions to deal with it
+ * @common: points to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
@@ -153,7 +161,7 @@ struct irq_data {
unsigned int irq;
unsigned long hwirq;
unsigned int node;
- unsigned int state_use_accessors;
+ struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -166,7 +174,7 @@ struct irq_data {
};
/*
- * Bit masks for irq_data.state
+ * Bit masks for irq_common_data.state_use_accessors
*
* IRQD_TRIGGER_MASK - Mask for the trigger type bits
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
@@ -198,34 +206,36 @@ enum {
IRQD_WAKEUP_ARMED = (1 << 19),
};
+#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+ return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_PER_CPU;
+ return __irqd_to_state(d) & IRQD_PER_CPU;
}
static inline bool irqd_can_balance(struct irq_data *d)
{
- return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+ return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_AFFINITY_SET;
+ return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_AFFINITY_SET;
+ __irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_TRIGGER_MASK;
+ return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
@@ -233,43 +243,43 @@ static inline u32 irqd_get_trigger_type(struct irq_data *d)
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
- d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
- d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
}
static inline bool irqd_is_level_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_LEVEL;
+ return __irqd_to_state(d) & IRQD_LEVEL;
}
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_STATE;
+ return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+ return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}
static inline bool irqd_irq_disabled(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_DISABLED;
+ return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}
static inline bool irqd_irq_masked(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_MASKED;
+ return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}
static inline bool irqd_irq_inprogress(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+ return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+ return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
@@ -280,12 +290,12 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
*/
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
}
static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
- d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
}
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -327,6 +337,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_write_msi_msg: optional to write message content for MSI
* @irq_get_irqchip_state: return the internal state of an interrupt
* @irq_set_irqchip_state: set the internal state of an interrupt
+ * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @flags: chip specific flags
*/
struct irq_chip {
@@ -369,6 +380,8 @@ struct irq_chip {
int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
+ int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+
unsigned long flags;
};
@@ -422,6 +435,7 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
+extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
@@ -458,6 +472,8 @@ extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_enable_parent(struct irq_data *data);
+extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
@@ -467,6 +483,8 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
+ void *vcpu_info);
#endif
/* Handling of unhandled and spurious interrupts: */
@@ -517,6 +535,15 @@ irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
__irq_set_handler(irq, handle, 1, NULL);
}
+/*
+ * Set a highlevel chained flow handler and its data for a given IRQ.
+ * (a chained handler is automatically enabled and set to
+ * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
+ */
+void
+irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
+ void *data);
+
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -624,6 +651,23 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
+static inline int irq_data_get_node(struct irq_data *d)
+{
+ return d->node;
+}
+
+static inline struct cpumask *irq_get_affinity_mask(int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? d->affinity : NULL;
+}
+
+static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+ return d->affinity;
+}
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
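
A brief sketch of the combined helper declared above (driver names hypothetical): the chained flow handler and its data are installed in a single call, so the handler never runs before its data is set.

static void bank_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	/* demultiplex the bank's child interrupts here */
}

static void bank_init(unsigned int parent_irq, void *bank)
{
	irq_set_chained_handler_and_data(parent_irq, bank_demux_handler, bank);
}
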
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dd1109fb241e..c52d1480f272 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -17,7 +17,7 @@ struct pt_regs;
/**
* struct irq_desc - interrupt descriptor
- * @irq_data: per irq and chip data passed down to chip functions
+ * @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
* @preflow_handler: handler called before the flow handler (currently used by sparc)
@@ -47,6 +47,7 @@ struct pt_regs;
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
+ struct irq_common_data irq_common_data;
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
@@ -93,6 +94,15 @@ struct irq_desc {
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ return irq_to_desc(data->irq);
+#else
+ return container_of(data, struct irq_desc, irq_data);
+#endif
+}
+
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 676d7306a360..744ac0ec98eb 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -258,6 +258,10 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
@@ -281,10 +285,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_chip *chip,
void *chip_data);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ad45054309a0..9564fd78c547 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -44,6 +44,10 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
+#ifndef KVM_ADDRESS_SPACE_NUM
+#define KVM_ADDRESS_SPACE_NUM 1
+#endif
+
/*
* For the normal pfn, the highest 12 bits should be zero,
* so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -134,6 +138,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_ENABLE_IBS 23
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
+#define KVM_REQ_SMI 26
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -230,6 +235,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
+ unsigned char fpu_counter;
wait_queue_head_t wq;
struct pid *pid;
int sigset_active;
@@ -329,6 +335,13 @@ struct kvm_kernel_irq_routing_entry {
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
+#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif
+
/*
* Note:
* memslots are not sorted by id anymore, please use id_to_memslot()
@@ -347,7 +360,7 @@ struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots *memslots;
+ struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
@@ -462,13 +475,25 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- return rcu_dereference_check(kvm->memslots,
+ return rcu_dereference_check(kvm->memslots[as_id],
srcu_read_lock_held(&kvm->srcu)
|| lockdep_is_held(&kvm->slots_lock));
}
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+ return __kvm_memslots(kvm, 0);
+}
+
+static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
+{
+ int as_id = kvm_arch_vcpu_memslots_id(vcpu);
+
+ return __kvm_memslots(vcpu->kvm, as_id);
+}
+
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
@@ -500,21 +525,22 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm);
+void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
@@ -524,8 +550,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
- int nr_pages);
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -538,13 +564,13 @@ void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+ bool *async, bool write_fault, bool *writable);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -573,6 +599,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+ int len);
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+ int offset, int len);
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+ unsigned long len);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
@@ -762,16 +807,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
}
#endif
-static inline void kvm_guest_enter(void)
+/* must be called with irqs disabled */
+static inline void __kvm_guest_enter(void)
{
- unsigned long flags;
-
- BUG_ON(preemptible());
-
- local_irq_save(flags);
guest_enter();
- local_irq_restore(flags);
-
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
* is very similar to exiting to userspace from rcu point of view. In
@@ -783,12 +822,27 @@ static inline void kvm_guest_enter(void)
rcu_virt_note_context_switch(smp_processor_id());
}
+/* must be called with irqs disabled */
+static inline void __kvm_guest_exit(void)
+{
+ guest_exit();
+}
+
+static inline void kvm_guest_enter(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __kvm_guest_enter();
+ local_irq_restore(flags);
+}
+
static inline void kvm_guest_exit(void)
{
unsigned long flags;
local_irq_save(flags);
- guest_exit();
+ __kvm_guest_exit();
local_irq_restore(flags);
}
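kvm_guest_enter()/kvm_guest_exit() are now thin irq-save wrappers around the new __kvm_guest_enter()/__kvm_guest_exit(), so an architecture whose vcpu run loop already executes with interrupts disabled can call the double-underscore variants directly and skip the redundant save/restore. A minimal sketch under that assumption (arch_vcpu_run() is hypothetical):

static int arch_vcpu_run(struct kvm_vcpu *vcpu)
{
	int exit_reason = 0;

	local_irq_disable();
	__kvm_guest_enter();		/* interrupts are already off here */

	/* ... the hardware-specific world switch would happen here ... */

	__kvm_guest_exit();
	local_irq_enable();

	return exit_reason;
}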
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 931da7e917cf..1b47a185c2f0 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -28,6 +28,7 @@ struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
+struct kvm_memslots;
enum kvm_mr_change;
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0081f000e34b..c92ebd100d9b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -52,10 +52,15 @@ struct lglock {
static struct lglock name = { .lock = &name ## _lock }
void lg_lock_init(struct lglock *lg, char *name);
+
void lg_local_lock(struct lglock *lg);
void lg_local_unlock(struct lglock *lg);
void lg_local_lock_cpu(struct lglock *lg, int cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
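lg_double_lock()/lg_double_unlock() take the per-CPU locks of two CPUs so that state can be moved between them atomically. An illustrative sketch; my_lglock and my_pcpu_count are made-up names, not part of the patch:

DEFINE_LGLOCK(my_lglock);
static DEFINE_PER_CPU(unsigned long, my_pcpu_count);

static void my_move_count(int src_cpu, int dst_cpu)
{
	lg_double_lock(&my_lglock, src_cpu, dst_cpu);
	per_cpu(my_pcpu_count, dst_cpu) += per_cpu(my_pcpu_count, src_cpu);
	per_cpu(my_pcpu_count, src_cpu) = 0;
	lg_double_unlock(&my_lglock, src_cpu, dst_cpu);
}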
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 28aeae46f355..51cb312d9bb9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -134,7 +134,6 @@ enum {
ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
ATA_SHT_EMULATED = 1,
- ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
@@ -1364,7 +1363,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
.emulated = ATA_SHT_EMULATED, \
.use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index ee6dbb39a809..31db7a05dd36 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
struct klp_func *funcs;
/* internal */
- struct kobject *kobj;
+ struct kobject kobj;
struct module *mod;
enum klp_state state;
};
@@ -123,6 +123,12 @@ struct klp_patch {
enum klp_state state;
};
+#define klp_for_each_object(patch, obj) \
+ for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+ for (func = obj->funcs; func->old_name; func++)
+
int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
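klp_for_each_object() and klp_for_each_func() walk the terminated objs[]/funcs[] arrays of a patch, replacing open-coded loops in the livepatch core. A small illustrative consumer (klp_count_funcs() is hypothetical):

static int klp_count_funcs(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;
	int count = 0;

	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			count++;

	return count;
}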
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5b6b5830acf..70400dc7660f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -130,8 +130,8 @@ enum bounce_type {
};
struct lock_class_stats {
- unsigned long contention_point[4];
- unsigned long contending_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 611b69fa8594..1f7bc630d225 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -54,11 +54,16 @@ struct mbus_dram_target_info
*/
#ifdef CONFIG_PLAT_ORION
extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
+extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
#else
static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
return NULL;
}
+static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+ return NULL;
+}
#endif
int mvebu_mbus_save_cpu_target(u32 *store_addr);
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 16a498f48169..2f434f4f79a1 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -117,6 +117,7 @@ struct arizona {
int num_core_supplies;
struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
struct regulator *dcvdd;
+ bool has_fully_powered_off;
struct arizona_pdata pdata;
@@ -153,7 +154,15 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name,
void arizona_free_irq(struct arizona *arizona, int irq, void *data);
int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
+#ifdef CONFIG_MFD_WM5102
int wm5102_patch(struct arizona *arizona);
+#else
+static inline int wm5102_patch(struct arizona *arizona)
+{
+ return 0;
+}
+#endif
+
int wm5110_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 1789cb0f4f17..f6722677e6d0 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -156,7 +156,10 @@ struct arizona_pdata {
/** MICBIAS configurations */
struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
- /** Mode of input structures */
+ /**
+ * Mode of input structures
+ * One of the ARIZONA_INMODE_xxx values
+ */
int inmode[ARIZONA_MAX_INPUT];
/** Mode for outputs */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index aacc10d7789c..3499d36e6067 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2515,9 +2515,12 @@
#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
-#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */
#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
@@ -2588,9 +2591,12 @@
#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
-#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */
#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
@@ -2661,9 +2667,12 @@
#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
-#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */
#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index dfabd6db7ddf..02f97dc568ac 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,7 @@
enum {
AXP202_ID = 0,
AXP209_ID,
+ AXP221_ID,
AXP288_ID,
NR_AXP20X_VARIANTS,
};
@@ -45,6 +46,28 @@ enum {
#define AXP20X_V_LTF_DISCHRG 0x3c
#define AXP20X_V_HTF_DISCHRG 0x3d
+#define AXP22X_PWR_OUT_CTRL1 0x10
+#define AXP22X_PWR_OUT_CTRL2 0x12
+#define AXP22X_PWR_OUT_CTRL3 0x13
+#define AXP22X_DLDO1_V_OUT 0x15
+#define AXP22X_DLDO2_V_OUT 0x16
+#define AXP22X_DLDO3_V_OUT 0x17
+#define AXP22X_DLDO4_V_OUT 0x18
+#define AXP22X_ELDO1_V_OUT 0x19
+#define AXP22X_ELDO2_V_OUT 0x1a
+#define AXP22X_ELDO3_V_OUT 0x1b
+#define AXP22X_DC5LDO_V_OUT 0x1c
+#define AXP22X_DCDC1_V_OUT 0x21
+#define AXP22X_DCDC2_V_OUT 0x22
+#define AXP22X_DCDC3_V_OUT 0x23
+#define AXP22X_DCDC4_V_OUT 0x24
+#define AXP22X_DCDC5_V_OUT 0x25
+#define AXP22X_DCDC23_V_RAMP_CTRL 0x27
+#define AXP22X_ALDO1_V_OUT 0x28
+#define AXP22X_ALDO2_V_OUT 0x29
+#define AXP22X_ALDO3_V_OUT 0x2a
+#define AXP22X_CHRG_CTRL3 0x35
+
/* Interrupt */
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
@@ -100,6 +123,9 @@ enum {
#define AXP20X_VBUS_MON 0x8b
#define AXP20X_OVER_TMP 0x8f
+#define AXP22X_PWREN_CTRL1 0x8c
+#define AXP22X_PWREN_CTRL2 0x8d
+
/* GPIO */
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
@@ -108,6 +134,11 @@ enum {
#define AXP20X_GPIO20_SS 0x94
#define AXP20X_GPIO3_CTRL 0x95
+#define AXP22X_LDO_IO0_V_OUT 0x91
+#define AXP22X_LDO_IO1_V_OUT 0x93
+#define AXP22X_GPIO_STATE 0x94
+#define AXP22X_GPIO_PULL_DOWN 0x95
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -120,6 +151,9 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* AXP22X specific registers */
+#define AXP22X_BATLOW_THRES1 0xe6
+
/* AXP288 specific registers */
#define AXP288_PMIC_ADC_H 0x56
#define AXP288_PMIC_ADC_L 0x57
@@ -158,6 +192,30 @@ enum {
AXP20X_REG_ID_MAX,
};
+enum {
+ AXP22X_DCDC1 = 0,
+ AXP22X_DCDC2,
+ AXP22X_DCDC3,
+ AXP22X_DCDC4,
+ AXP22X_DCDC5,
+ AXP22X_DC1SW,
+ AXP22X_DC5LDO,
+ AXP22X_ALDO1,
+ AXP22X_ALDO2,
+ AXP22X_ALDO3,
+ AXP22X_ELDO1,
+ AXP22X_ELDO2,
+ AXP22X_ELDO3,
+ AXP22X_DLDO1,
+ AXP22X_DLDO2,
+ AXP22X_DLDO3,
+ AXP22X_DLDO4,
+ AXP22X_RTC_LDO,
+ AXP22X_LDO_IO0,
+ AXP22X_LDO_IO1,
+ AXP22X_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
@@ -199,6 +257,34 @@ enum {
AXP20X_IRQ_GPIO0_INPUT,
};
+enum axp22x_irqs {
+ AXP22X_IRQ_ACIN_OVER_V = 1,
+ AXP22X_IRQ_ACIN_PLUGIN,
+ AXP22X_IRQ_ACIN_REMOVAL,
+ AXP22X_IRQ_VBUS_OVER_V,
+ AXP22X_IRQ_VBUS_PLUGIN,
+ AXP22X_IRQ_VBUS_REMOVAL,
+ AXP22X_IRQ_VBUS_V_LOW,
+ AXP22X_IRQ_BATT_PLUGIN,
+ AXP22X_IRQ_BATT_REMOVAL,
+ AXP22X_IRQ_BATT_ENT_ACT_MODE,
+ AXP22X_IRQ_BATT_EXIT_ACT_MODE,
+ AXP22X_IRQ_CHARG,
+ AXP22X_IRQ_CHARG_DONE,
+ AXP22X_IRQ_BATT_TEMP_HIGH,
+ AXP22X_IRQ_BATT_TEMP_LOW,
+ AXP22X_IRQ_DIE_TEMP_HIGH,
+ AXP22X_IRQ_PEK_SHORT,
+ AXP22X_IRQ_PEK_LONG,
+ AXP22X_IRQ_LOW_PWR_LVL1,
+ AXP22X_IRQ_LOW_PWR_LVL2,
+ AXP22X_IRQ_TIMER,
+ AXP22X_IRQ_PEK_RIS_EDGE,
+ AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_GPIO1_INPUT,
+ AXP22X_IRQ_GPIO0_INPUT,
+};
+
enum axp288_irqs {
AXP288_IRQ_VBUS_FALL = 2,
AXP288_IRQ_VBUS_RISE,
@@ -275,4 +361,11 @@ struct axp20x_fg_pdata {
int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
};
+struct axp20x_chrg_pdata {
+ int max_cc;
+ int max_cv;
+ int def_cc;
+ int def_cv;
+};
+
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 324a34683971..da72671a42fa 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -17,10 +17,29 @@
#define __LINUX_MFD_CROS_EC_H
#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/mutex.h>
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by request/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ */
+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 2
+
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
*/
@@ -42,8 +61,7 @@ enum {
* @outsize: Outgoing length in bytes
* @insize: Max number of bytes to accept from EC
* @result: EC's response to the command (separate from communication failure)
- * @outdata: Outgoing data to EC
- * @indata: Where to put the incoming data from EC
+ * @data: Where to put the incoming data from EC and outgoing data to EC
*/
struct cros_ec_command {
uint32_t version;
@@ -51,18 +69,14 @@ struct cros_ec_command {
uint32_t outsize;
uint32_t insize;
uint32_t result;
- uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE];
- uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE];
+ uint8_t data[0];
};
/**
* struct cros_ec_device - Information about a ChromeOS EC device
*
- * @ec_name: name of EC device (e.g. 'chromeos-ec')
* @phys_name: name of physical comms layer (e.g. 'i2c-4')
* @dev: Device pointer for physical comms device
- * @vdev: Device pointer for virtual comms device
- * @cdev: Character device structure for virtual comms device
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
* @cmd_readmem: direct read of the EC memory-mapped region, if supported
@@ -74,6 +88,7 @@ struct cros_ec_command {
*
* @priv: Private data
* @irq: Interrupt to use
+ * @id: Device id
* @din: input buffer (for data from EC)
* @dout: output buffer (for data to EC)
* \note
@@ -85,41 +100,72 @@ struct cros_ec_command {
* to using dword.
* @din_size: size of din buffer to allocate (zero to use static din)
* @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @parent: pointer to parent device (e.g. i2c or spi device)
* @wake_enabled: true if this device can wake the system from sleep
* @cmd_xfer: send command to EC and get response
* Returns the number of bytes received if the communication succeeded, but
* that doesn't mean the EC was happy with the command. The caller
* should check msg.result for the EC's result code.
+ * @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
- const char *ec_name;
const char *phys_name;
struct device *dev;
- struct device *vdev;
- struct cdev cdev;
bool was_wake_device;
struct class *cros_class;
int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest);
/* These are used to implement the platform-specific interface */
+ u16 max_request;
+ u16 max_response;
+ u16 max_passthru;
+ u16 proto_version;
void *priv;
int irq;
- uint8_t *din;
- uint8_t *dout;
+ u8 *din;
+ u8 *dout;
int din_size;
int dout_size;
- struct device *parent;
bool wake_enabled;
int (*cmd_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
+ int (*pkt_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
struct mutex lock;
};
+/* struct cros_ec_platform - ChromeOS EC platform information
+ *
+ * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: offset to apply for each command. Set when
+ * registering a device behind another one.
+ */
+struct cros_ec_platform {
+ const char *ec_name;
+ u16 cmd_offset;
+};
+
+/*
+ * struct cros_ec_dev - ChromeOS EC device entry point
+ *
+ * @class_dev: Device structure used in sysfs
+ * @cdev: Character device structure in /dev
+ * @ec_dev: cros_ec_device structure to talk to the physical device
+ * @dev: pointer to the platform device
+ * @cmd_offset: offset to apply for each command.
+ */
+struct cros_ec_dev {
+ struct device class_dev;
+ struct cdev cdev;
+ struct cros_ec_device *ec_dev;
+ struct device *dev;
+ u16 cmd_offset;
+};
+
/**
* cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
*
@@ -198,4 +244,16 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
+/**
+ * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
+ *
+ * @ec_dev: Device to query
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
+
+/* sysfs stuff */
+extern struct attribute_group cros_ec_attr_group;
+extern struct attribute_group cros_ec_lightbar_attr_group;
+
#endif /* __LINUX_MFD_CROS_EC_H */
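With the fixed outdata[]/indata[] buffers replaced by a flexible data[] array, callers now allocate struct cros_ec_command together with the payload it carries and read the reply back out of the same buffer. A hedged sketch of one way a caller might issue a command (names and sizes illustrative, error paths trimmed):

static int cros_ec_cmd_read(struct cros_ec_device *ec, uint16_t command,
			    void *resp, int insize)
{
	struct cros_ec_command *msg;
	int ret;

	/* message header and payload live in one allocation */
	msg = kzalloc(sizeof(*msg) + insize, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = command;
	msg->insize = insize;

	ret = ec->cmd_xfer(ec, msg);
	if (ret >= 0)
		memcpy(resp, msg->data, insize);

	kfree(msg);
	return ret;
}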
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index a49cd41feea7..13b630c10d4c 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -515,7 +515,7 @@ struct ec_host_response {
/*
* Notes on commands:
*
- * Each command is an 8-byte command value. Commands which take params or
+ * Each command is a 16-bit command value. Commands which take params or
* return response data specify structs for that data. If no struct is
* specified, the command does not input or output data, respectively.
* Parameter/response length is implicit in the structs. Some underlying
@@ -966,7 +966,7 @@ struct rgb_s {
/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
-struct lightbar_params {
+struct lightbar_params_v0 {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1000,32 +1000,81 @@ struct lightbar_params {
struct rgb_s color[8]; /* 0-3 are Google colors */
} __packed;
+struct lightbar_params_v1 {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+ int32_t tap_tick_delay;
+ int32_t tap_display_time;
+
+ /* Tap-for-battery params */
+ uint8_t tap_pct_red;
+ uint8_t tap_pct_green;
+ uint8_t tap_seg_min_on;
+ uint8_t tap_seg_max_on;
+ uint8_t tap_seg_osc;
+ uint8_t tap_idx[3];
+
+ /* Oscillation */
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __packed;
+
struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
struct {
/* no args */
- } dump, off, on, init, get_seq, get_params, version;
+ } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
+ version, get_brightness, get_demo;
- struct num {
+ struct {
uint8_t num;
- } brightness, seq, demo;
+ } set_brightness, seq, demo;
- struct reg {
+ struct {
uint8_t ctrl, reg, value;
} reg;
- struct rgb {
+ struct {
uint8_t led, red, green, blue;
- } rgb;
+ } set_rgb;
+
+ struct {
+ uint8_t led;
+ } get_rgb;
- struct lightbar_params set_params;
+ struct lightbar_params_v0 set_params_v0;
+ struct lightbar_params_v1 set_params_v1;
};
} __packed;
struct ec_response_lightbar {
union {
- struct dump {
+ struct {
struct {
uint8_t reg;
uint8_t ic0;
@@ -1033,20 +1082,26 @@ struct ec_response_lightbar {
} vals[23];
} dump;
- struct get_seq {
+ struct {
uint8_t num;
- } get_seq;
+ } get_seq, get_brightness, get_demo;
- struct lightbar_params get_params;
+ struct lightbar_params_v0 get_params_v0;
+ struct lightbar_params_v1 get_params_v1;
- struct version {
+ struct {
uint32_t num;
uint32_t flags;
} version;
struct {
+ uint8_t red, green, blue;
+ } get_rgb;
+
+ struct {
/* no return params */
- } off, on, init, brightness, seq, reg, rgb, demo, set_params;
+ } off, on, init, set_brightness, seq, reg, set_rgb,
+ demo, set_params_v0, set_params_v1;
};
} __packed;
@@ -1056,15 +1111,20 @@ enum lightbar_command {
LIGHTBAR_CMD_OFF = 1,
LIGHTBAR_CMD_ON = 2,
LIGHTBAR_CMD_INIT = 3,
- LIGHTBAR_CMD_BRIGHTNESS = 4,
+ LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
LIGHTBAR_CMD_SEQ = 5,
LIGHTBAR_CMD_REG = 6,
- LIGHTBAR_CMD_RGB = 7,
+ LIGHTBAR_CMD_SET_RGB = 7,
LIGHTBAR_CMD_GET_SEQ = 8,
LIGHTBAR_CMD_DEMO = 9,
- LIGHTBAR_CMD_GET_PARAMS = 10,
- LIGHTBAR_CMD_SET_PARAMS = 11,
+ LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
+ LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
LIGHTBAR_CMD_VERSION = 12,
+ LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
+ LIGHTBAR_CMD_GET_RGB = 14,
+ LIGHTBAR_CMD_GET_DEMO = 15,
+ LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
+ LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
LIGHTBAR_NUM_CMDS
};
@@ -1421,8 +1481,40 @@ struct ec_response_rtc {
/*****************************************************************************/
/* Port80 log access */
+/* Maximum entries that can be read/written in a single command */
+#define EC_PORT80_SIZE_MAX 32
+
/* Get last port80 code from previous boot */
#define EC_CMD_PORT80_LAST_BOOT 0x48
+#define EC_CMD_PORT80_READ 0x48
+
+enum ec_port80_subcmd {
+ EC_PORT80_GET_INFO = 0,
+ EC_PORT80_READ_BUFFER,
+};
+
+struct ec_params_port80_read {
+ uint16_t subcmd;
+ union {
+ struct {
+ uint32_t offset;
+ uint32_t num_entries;
+ } read_buffer;
+ };
+} __packed;
+
+struct ec_response_port80_read {
+ union {
+ struct {
+ uint32_t writes;
+ uint32_t history_size;
+ uint32_t last_boot;
+ } get_info;
+ struct {
+ uint16_t codes[EC_PORT80_SIZE_MAX];
+ } data;
+ };
+} __packed;
struct ec_response_port80_last_boot {
uint16_t code;
@@ -1782,6 +1874,7 @@ struct ec_params_gpio_set {
/* Get GPIO value */
#define EC_CMD_GPIO_GET 0x93
+/* Version 0 of input params and response */
struct ec_params_gpio_get {
char name[32];
} __packed;
@@ -1789,6 +1882,38 @@ struct ec_response_gpio_get {
uint8_t val;
} __packed;
+/* Version 1 of input params and response */
+struct ec_params_gpio_get_v1 {
+ uint8_t subcmd;
+ union {
+ struct {
+ char name[32];
+ } get_value_by_name;
+ struct {
+ uint8_t index;
+ } get_info;
+ };
+} __packed;
+
+struct ec_response_gpio_get_v1 {
+ union {
+ struct {
+ uint8_t val;
+ } get_value_by_name, get_count;
+ struct {
+ uint8_t val;
+ char name[32];
+ uint32_t flags;
+ } get_info;
+ };
+} __packed;
+
+enum gpio_get_subcmd {
+ EC_GPIO_GET_BY_NAME = 0,
+ EC_GPIO_GET_COUNT = 1,
+ EC_GPIO_GET_INFO = 2,
+};
+
/*****************************************************************************/
/* I2C commands. Only available when flash write protect is unlocked. */
@@ -1857,13 +1982,21 @@ struct ec_params_charge_control {
/*****************************************************************************/
/*
- * Cut off battery power output if the battery supports.
+ * Cut off battery power immediately or after the host has shut down.
*
- * For unsupported battery, just don't implement this command and lets EC
- * return EC_RES_INVALID_COMMAND.
+ * Return EC_RES_INVALID_COMMAND if unsupported by a board/battery.
+ * EC_RES_SUCCESS if the command was successful.
+ * EC_RES_ERROR if the cut off command failed.
*/
+
#define EC_CMD_BATTERY_CUT_OFF 0x99
+#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
+
+struct ec_params_battery_cutoff {
+ uint8_t flags;
+} __packed;
+
/*****************************************************************************/
/* USB port mux control. */
@@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block {
} __packed;
/*****************************************************************************/
+/* Battery vendor parameters
+ *
+ * Get or set vendor-specific parameters in the battery. Implementations may
+ * differ between boards or batteries. On a set operation, the response
+ * contains the actual value set, which may be rounded or clipped from the
+ * requested value.
+ */
+
+#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
+
+enum ec_battery_vendor_param_mode {
+ BATTERY_VENDOR_PARAM_MODE_GET = 0,
+ BATTERY_VENDOR_PARAM_MODE_SET,
+};
+
+struct ec_params_battery_vendor_param {
+ uint32_t param;
+ uint32_t value;
+ uint8_t mode;
+} __packed;
+
+struct ec_response_battery_vendor_param {
+ uint32_t value;
+} __packed;
+
+/*****************************************************************************/
/* System commands */
/*
@@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec {
/*****************************************************************************/
/*
+ * PD commands
+ *
+ * These commands are for PD MCU communication.
+ */
+
+/* EC to PD MCU exchange status command */
+#define EC_CMD_PD_EXCHANGE_STATUS 0x100
+
+/* Status of EC being sent to PD */
+struct ec_params_pd_status {
+ int8_t batt_soc; /* battery state of charge */
+} __packed;
+
+/* Status of PD being sent back to EC */
+struct ec_response_pd_status {
+ int8_t status; /* PD MCU status */
+ uint32_t curr_lim_ma; /* input current limit */
+} __packed;
+
+/* Set USB type-C port role and muxes */
+#define EC_CMD_USB_PD_CONTROL 0x101
+
+enum usb_pd_control_role {
+ USB_PD_CTRL_ROLE_NO_CHANGE = 0,
+ USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
+ USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
+ USB_PD_CTRL_ROLE_FORCE_SINK = 3,
+ USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
+};
+
+enum usb_pd_control_mux {
+ USB_PD_CTRL_MUX_NO_CHANGE = 0,
+ USB_PD_CTRL_MUX_NONE = 1,
+ USB_PD_CTRL_MUX_USB = 2,
+ USB_PD_CTRL_MUX_DP = 3,
+ USB_PD_CTRL_MUX_DOCK = 4,
+ USB_PD_CTRL_MUX_AUTO = 5,
+};
+
+struct ec_params_usb_pd_control {
+ uint8_t port;
+ uint8_t role;
+ uint8_t mux;
+} __packed;
+
+/*****************************************************************************/
+/*
+ * Passthru commands
+ *
+ * Some platforms have sub-processors chained to each other. For example:
+ *
+ * AP <--> EC <--> PD MCU
+ *
+ * The top 2 bits of the command number are used to indicate which device the
+ * command is intended for. Device 0 is always the device receiving the
+ * command; other device mapping is board-specific.
+ *
+ * When a device receives a command to be passed to a sub-processor, it passes
+ * it on with the device number set back to 0. This allows the sub-processor
+ * to remain blissfully unaware of whether the command originated on the next
+ * device up the chain, or was passed through from the AP.
+ *
+ * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
+ * AP sends command 0x4002 to the EC
+ * EC sends command 0x0002 to the PD MCU
+ * EC forwards PD MCU response back to the AP
+ */
+
+/* Offset and max command number for sub-device n */
+#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
+#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
+
+/*****************************************************************************/
+/*
* Deprecated constants. These constants have been renamed for clarity. The
* meaning and size has not changed. Programs that use the old names should
* switch to the new names soon, as the old names may not be carried forward
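The passthru scheme described above is plain arithmetic on the command number: EC_CMD_PASSTHRU_OFFSET(n) shifts a command into sub-device n's window, and the cmd_offset field of struct cros_ec_platform carries that offset for a device registered behind another one. For instance:

/* Route command 0x0002 to sub-device 1 (the PD MCU in the example above) */
uint16_t cmd = EC_CMD_PASSTHRU_OFFSET(1) + 0x0002;	/* 0x4000 + 0x0002 = 0x4002 */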
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 956afa445998..5dc743fd63a6 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
int da9055_device_init(struct da9055 *da9055);
void da9055_device_exit(struct da9055 *da9055);
-extern struct regmap_config da9055_regmap_config;
+extern const struct regmap_config da9055_regmap_config;
#endif /* __DA9055_CORE_H */
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index bb995ab9a575..d4b72d519115 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -125,9 +125,4 @@ enum max77686_opmode {
MAX77686_OPMODE_STANDBY,
};
-struct max77686_opmode_data {
- int id;
- int mode;
-};
-
#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 83e80ab94500..f94984fb8bb2 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -829,6 +829,12 @@ struct mlx4_dev {
struct mlx4_vf_dev *dev_vfs;
};
+struct mlx4_clock_params {
+ u64 offset;
+ u8 bar;
+ u8 size;
+};
+
struct mlx4_eqe {
u8 reserved1;
u8 type;
@@ -1485,4 +1491,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg);
+int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ struct mlx4_clock_params *params);
+
#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9a90e7523dc2..9ec7c93d6fa3 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -696,7 +696,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 19f0175c0afa..4d3776d25925 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -97,6 +97,7 @@ struct mmc_ext_csd {
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
+ u8 raw_driver_strength; /* 197 */
u8 out_of_int_time; /* 198 */
u8 raw_pwr_cl_52_195; /* 200 */
u8 raw_pwr_cl_26_195; /* 201 */
@@ -305,6 +306,7 @@ struct mmc_card {
unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
unsigned int mmc_avail_type; /* supported device type by both host and card */
+ unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index de722d4e9d61..258daf914c6d 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -121,6 +121,7 @@ struct mmc_data {
struct mmc_request *mrq; /* associated request */
unsigned int sg_len; /* size of scatter list */
+ int sg_count; /* mapped sg entries */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
};
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 12111993a317..5be97676f1fa 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -226,12 +226,6 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
-/* No write protect */
-#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
-
-/* Slot level quirks */
-/* This slot has no write protect */
-#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
struct dma_pdata;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index b5bedaec6223..1369e54faeb7 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -12,6 +12,7 @@
#include <linux/leds.h>
#include <linux/mutex.h>
+#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
@@ -131,7 +132,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
- int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
+ int (*select_drive_strength)(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
@@ -285,6 +288,7 @@ struct mmc_host {
MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
+#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -321,10 +325,18 @@ struct mmc_host {
#ifdef CONFIG_MMC_DEBUG
unsigned int removed:1; /* host is being removed */
#endif
+ unsigned int can_retune:1; /* re-tuning can be used */
+ unsigned int doing_retune:1; /* re-tuning in progress */
+ unsigned int retune_now:1; /* do re-tuning at next req */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
+ int need_retune; /* re-tuning is needed */
+ int hold_retune; /* hold off re-tuning */
+ unsigned int retune_period; /* re-tuning period in secs */
+ struct timer_list retune_timer; /* for periodic re-tuning */
+
bool trigger_card_event; /* card_event necessary */
struct mmc_card *card; /* device attached to this host */
@@ -513,4 +525,18 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
return card->host->ios.timing == MMC_TIMING_MMC_HS400;
}
+void mmc_retune_timer_stop(struct mmc_host *host);
+
+static inline void mmc_retune_needed(struct mmc_host *host)
+{
+ if (host->can_retune)
+ host->need_retune = 1;
+}
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
#endif /* LINUX_MMC_HOST_H */
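mmc_retune_needed() only latches a flag, and only when the host has set can_retune; the core performs the actual re-tune before a later request, while mmc_retune_recheck() arms retune_now once at most one holder remains. A hedged example of a host driver flagging re-tuning after a CRC error (foo_handle_crc_error() is hypothetical):

static void foo_handle_crc_error(struct mmc_host *mmc)
{
	/* CRC errors often mean the sampling point has drifted */
	mmc_retune_needed(mmc);
}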
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 124f562118b8..15f2c4a0a62c 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -302,6 +302,7 @@ struct _mmc_csd {
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_STRUCTURE 194 /* RO */
#define EXT_CSD_CARD_TYPE 196 /* RO */
+#define EXT_CSD_DRIVER_STRENGTH 197 /* RO */
#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
#define EXT_CSD_PWR_CL_52_195 200 /* RO */
@@ -390,6 +391,7 @@ struct _mmc_csd {
#define EXT_CSD_TIMING_HS 1 /* High speed */
#define EXT_CSD_TIMING_HS200 2 /* HS200 */
#define EXT_CSD_TIMING_HS400 3 /* HS400 */
+#define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */
#define EXT_CSD_SEC_ER_EN BIT(0)
#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
@@ -441,4 +443,6 @@ struct _mmc_csd {
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+#define mmc_driver_type_mask(n) (1 << (n))
+
#endif /* LINUX_MMC_MMC_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 8959604a13d3..fda15b6d4135 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -15,4 +15,6 @@ struct sdhci_pci_data {
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
+extern int sdhci_pci_spt_drive_strength;
+
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index c883b86ea964..1e5436042eb0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -655,4 +655,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_MODULE_SIG
+static inline bool module_sig_ok(struct module *module)
+{
+ return module->sig_ok;
+}
+#else /* !CONFIG_MODULE_SIG */
+static inline bool module_sig_ok(struct module *module)
+{
+ return true;
+}
+#endif /* CONFIG_MODULE_SIG */
+
#endif /* _LINUX_MODULE_H */
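module_sig_ok() lets code that should only trust signed modules test mod->sig_ok without sprinkling CONFIG_MODULE_SIG ifdefs; with signature checking disabled it simply reports success. A hedged sketch (foo_register_owner() is a hypothetical consumer):

static int foo_register_owner(struct module *owner)
{
	if (!module_sig_ok(owner))
		return -EKEYREJECTED;

	/* ... hand the module-owned object to the hypothetical subsystem ... */
	return 0;
}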
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 5af1b81def49..641b7d6fd096 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -81,6 +81,8 @@ MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
int mpi_fromstr(MPI val, const char *str);
u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
+int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ int *sign);
void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
@@ -142,4 +144,17 @@ int mpi_rshift(MPI x, MPI a, unsigned n);
/*-- mpi-inv.c --*/
int mpi_invm(MPI x, MPI u, MPI v);
+/* inline functions */
+
+/**
+ * mpi_get_size() - returns max size required to store the number
+ *
+ * @a: A multi precision integer for which we want to allocate a buffer
+ *
+ * Return: size required to store the number
+ */
+static inline unsigned int mpi_get_size(MPI a)
+{
+ return a->nlimbs * BYTES_PER_MPI_LIMB;
+}
#endif /*G10_MPI_H */
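mpi_get_size() gives the buffer size bound (nlimbs * BYTES_PER_MPI_LIMB) needed by the new mpi_read_buffer(), which serializes into a caller-supplied buffer instead of allocating one the way mpi_get_buffer() does. A hedged sketch (dump_mpi() is illustrative):

static int dump_mpi(MPI a)
{
	unsigned int buf_len = mpi_get_size(a);
	unsigned int nbytes;
	int sign, ret;
	uint8_t *buf;

	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = mpi_read_buffer(a, buf, buf_len, &nbytes, &sign);
	if (!ret)
		print_hex_dump_bytes("mpi: ", DUMP_PREFIX_OFFSET, buf, nbytes);

	kfree(buf);
	return ret;
}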
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 299d7d31fe53..9b57a9b1b081 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -296,183 +296,19 @@ struct cfi_private {
struct flchip chips[0]; /* per-chip data structure for each chip */
};
-/*
- * Returns the command address according to the given geometry.
- */
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
- struct map_info *map, struct cfi_private *cfi)
-{
- unsigned bankwidth = map_bankwidth(map);
- unsigned interleave = cfi_interleave(cfi);
- unsigned type = cfi->device_type;
- uint32_t addr;
-
- addr = (cmd_ofs * type) * interleave;
-
- /* Modify the unlock address if we are in compatibility mode.
- * For 16bit devices on 8 bit busses
- * and 32bit devices on 16 bit busses
- * set the low bit of the alternating bit sequence of the address.
- */
- if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
- addr |= (type >> 1)*interleave;
-
- return addr;
-}
-
-/*
- * Transforms the CFI command for the given geometry (bus width & interleave).
- * It looks too long to be inline, but in the common case it should almost all
- * get optimised away.
- */
-static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
-{
- map_word val = { {0} };
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onecmd;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- /* First, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- default: BUG();
- case 1:
- onecmd = cmd;
- break;
- case 2:
- onecmd = cpu_to_cfi16(map, cmd);
- break;
- case 4:
- onecmd = cpu_to_cfi32(map, cmd);
- break;
- }
-
- /* Now replicate it across the size of an unsigned long, or
- just to the bus width as appropriate */
- switch (chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- onecmd |= (onecmd << (chip_mode * 32));
-#endif
- case 4:
- onecmd |= (onecmd << (chip_mode * 16));
- case 2:
- onecmd |= (onecmd << (chip_mode * 8));
- case 1:
- ;
- }
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi);
- /* And finally, for the multi-word case, replicate it
- in all words in the structure */
- for (i=0; i < words_per_bus; i++) {
- val.x[i] = onecmd;
- }
-
- return val;
-}
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
#define CMD(x) cfi_build_cmd((x), map, cfi)
-
-static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
- struct cfi_private *cfi)
-{
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onestat, res = 0;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- onestat = val.x[0];
- /* Or all status words together */
- for (i=1; i < words_per_bus; i++) {
- onestat |= val.x[i];
- }
-
- res = onestat;
- switch(chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- res |= (onestat >> (chip_mode * 32));
-#endif
- case 4:
- res |= (onestat >> (chip_mode * 16));
- case 2:
- res |= (onestat >> (chip_mode * 8));
- case 1:
- ;
- }
-
- /* Last, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- case 1:
- break;
- case 2:
- res = cfi16_to_cpu(map, res);
- break;
- case 4:
- res = cfi32_to_cpu(map, res);
- break;
- default: BUG();
- }
- return res;
-}
-
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi);
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
-
-/*
- * Sends a CFI command to a bank of flash for the given geometry.
- *
- * Returns the offset in flash where the command was written.
- * If prev_val is non-null, it will be set to the value at the command address,
- * before the command was written.
- */
-static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, map_word *prev_val)
-{
- map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
- val = cfi_build_cmd(cmd, map, cfi);
-
- if (prev_val)
- *prev_val = map_read(map, addr);
-
- map_write(map, val, addr);
-
- return addr - base;
-}
+ int type, map_word *prev_val);
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
@@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
}
}
-static inline void cfi_udelay(int us)
-{
- if (us >= 1000) {
- msleep((us+999)/1000);
- } else {
- udelay(us);
- cond_resched();
- }
-}
+void cfi_udelay(int us);
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3d4ea7eb2b68..f25e2bdd188c 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -26,6 +26,8 @@
struct mtd_info;
struct nand_flash_dev;
+struct device_node;
+
/* Scan and identify a NAND device */
extern int nand_scan(struct mtd_info *mtd, int max_chips);
/*
@@ -542,6 +544,7 @@ struct nand_buffers {
* flash device
* @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
* flash device.
+ * @dn: [BOARDSPECIFIC] device node describing this instance
* @read_byte: [REPLACEABLE] read one byte from the chip
* @read_word: [REPLACEABLE] read one word from the chip
* @write_byte: [REPLACEABLE] write a single byte to the chip on the
@@ -644,6 +647,8 @@ struct nand_chip {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ struct device_node *dn;
+
uint8_t (*read_byte)(struct mtd_info *mtd);
u16 (*read_word)(struct mtd_info *mtd);
void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
@@ -833,7 +838,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_default_bbt(struct mtd_info *mtd);
extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index c8990779f0c3..d8c6334cd150 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -1,16 +1,15 @@
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
-#include <linux/dcache.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/path.h>
-
-struct vfsmount;
-struct nameidata;
+#include <linux/fcntl.h>
+#include <linux/errno.h>
enum { MAX_NESTED_LINKS = 8 };
+#define MAXSYMLINKS 40
+
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
-extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
-#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
-#define user_path_dir(name, path) \
- user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
+ struct path *path)
+{
+ return user_path_at_empty(dfd, name, flags, path, NULL);
+}
+
+static inline int user_path(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
+}
+
+static inline int user_lpath(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
+}
+
+static inline int user_path_dir(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
+}
extern int kern_path(const char *, unsigned, struct path *);
@@ -70,9 +85,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct nameidata *nd, struct path *path);
-extern void nd_set_link(struct nameidata *nd, char *path);
-extern char *nd_get_link(struct nameidata *nd);
+extern void nd_jump_link(struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index f1bd3962e6b6..8ca6d6464ea3 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -6,7 +6,7 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#ifndef __LINUX_BRIDGE_EFF_H
diff --git a/include/linux/nx842.h b/include/linux/nx842.h
deleted file mode 100644
index a4d324c6406a..000000000000
--- a/include/linux/nx842.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NX842_H__
-#define __NX842_H__
-
-int nx842_get_workmem_size(void);
-int nx842_get_workmem_size_aligned(void);
-int nx842_compress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-
-#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index ddeaae6d2083..b871ff9d81d7 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -121,6 +121,8 @@ extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
#ifdef CONFIG_OF
+void of_core_init(void);
+
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_OF;
@@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
#else /* CONFIG_OF */
+static inline void of_core_init(void)
+{
+}
+
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return false;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 587ee507965d..fd627a58068f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 3a6490e81b28..703ea5c30a33 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
+static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
+{
+ return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
+}
+
#endif
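
The new osq_is_locked() helper only reports whether the MCS-style optimistic spin queue currently has a holder; a sleeping-lock implementation might consult it before deciding to spin. A minimal sketch, not from the patch; struct foo_sem and its ->osq field are hypothetical:

#include <linux/osq_lock.h>

struct foo_sem {
        struct optimistic_spin_queue osq;
        /* ... other waiter bookkeeping ... */
};

static bool foo_sem_can_spin(struct foo_sem *sem)
{
        /* Another task is already spinning on the queue: don't pile on. */
        return !osq_is_locked(&sem->osq);
}
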
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 353db8dc4c6e..8a0321a8fb59 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -355,6 +355,7 @@ struct pci_dev {
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -577,9 +578,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val);
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+typedef u64 pci_bus_addr_t;
+#else
+typedef u32 pci_bus_addr_t;
+#endif
+
struct pci_bus_region {
- dma_addr_t start;
- dma_addr_t end;
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
};
struct pci_dynids {
@@ -773,8 +780,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
-struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
@@ -974,7 +979,6 @@ void pci_intx(struct pci_dev *dev, int enable);
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
-void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1006,6 +1010,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
+void pci_ignore_hotplug(struct pci_dev *dev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1043,11 +1048,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-static inline void pci_ignore_hotplug(struct pci_dev *dev)
-{
- dev->ignore_hotplug = 1;
-}
-
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
{
@@ -1128,7 +1128,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
-static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
+static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
struct pci_bus_region region;
@@ -1197,15 +1197,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-enum pci_dma_burst_strategy {
- PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
- strategy_parameter is N/A */
- PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
- byte boundaries */
- PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
- strategy_parameter byte boundaries */
-};
-
struct msix_entry {
u32 vector; /* kernel uses to write allocated vector */
u16 entry; /* driver uses to specify entry, OS writes */
@@ -1430,8 +1421,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
-
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
@@ -1905,4 +1894,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
+
+/**
+ * pci_ari_enabled - query ARI forwarding status
+ * @bus: the PCI bus
+ *
+ * Returns true if ARI forwarding is enabled.
+ */
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+ return bus->self && bus->self->ari_enabled;
+}
#endif /* LINUX_PCI_H */
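
With the new pci_bus_addr_t, pci_bus_address() can return bus addresses wider than dma_addr_t when CONFIG_PCI_BUS_ADDR_T_64BIT is set. An illustrative sketch (the foo_ name is made up) that logs both the CPU and bus views of BAR 0:

#include <linux/pci.h>

static void foo_log_bar0(struct pci_dev *pdev)
{
        resource_size_t cpu_addr = pci_resource_start(pdev, 0);
        pci_bus_addr_t bus_addr = pci_bus_address(pdev, 0);

        /* Cast both values: their widths differ per configuration. */
        dev_info(&pdev->dev, "BAR0 cpu %#llx bus %#llx\n",
                 (unsigned long long)cpu_addr,
                 (unsigned long long)bus_addr);
}
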
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2f7b9a40f627..cb63a7b522ef 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -579,6 +579,7 @@
#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index cf3342a8ad80..1b82d44b0a02 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -92,8 +92,6 @@ struct hw_perf_event_extra {
int idx; /* index in shared_regs->regs[] */
};
-struct event_constraint;
-
/**
* struct hw_perf_event - performance event hardware details:
*/
@@ -112,8 +110,6 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
-
- struct event_constraint *constraint;
};
struct { /* software */
struct hrtimer hrtimer;
@@ -124,7 +120,7 @@ struct hw_perf_event {
};
struct { /* intel_cqm */
int cqm_state;
- int cqm_rmid;
+ u32 cqm_rmid;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
@@ -738,6 +734,22 @@ extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
+extern void perf_event_output(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+extern void
+perf_event_header__init_id(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void
+perf_event__output_id_sample(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *sample);
+
+extern void
+perf_log_lost_samples(struct perf_event *event, u64 lost);
+
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
@@ -802,11 +814,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
extern struct static_key_deferred perf_sched_events;
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+ if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+ return true;
+ return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+ if (perf_sw_migrate_enabled())
+ task->sched_migrated = 1;
+}
+
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
+
+ if (perf_sw_migrate_enabled() && task->sched_migrated) {
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ task->sched_migrated = 0;
+ }
}
static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -929,6 +963,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
+perf_event_task_migrate(struct task_struct *task) { }
+static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
static inline void
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 5d50b25a73d7..cb2618147c34 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -208,9 +208,17 @@ struct omap_gpio_platform_data {
int (*get_context_loss_count)(struct device *dev);
};
+#if IS_BUILTIN(CONFIG_GPIO_OMAP)
extern void omap2_gpio_prepare_for_idle(int off_mode);
extern void omap2_gpio_resume_after_idle(void);
-extern void omap_set_gpio_debounce(int gpio, int enable);
-extern void omap_set_gpio_debounce_time(int gpio, int enable);
+#else
+static inline void omap2_gpio_prepare_for_idle(int off_mode)
+{
+}
+
+static inline void omap2_gpio_resume_after_idle(void)
+{
+}
+#endif
#endif
diff --git a/include/linux/platform_data/irq-renesas-irqc.h b/include/linux/platform_data/irq-renesas-irqc.h
deleted file mode 100644
index 3ae17b3e00ed..000000000000
--- a/include/linux/platform_data/irq-renesas-irqc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Renesas IRQC Driver
- *
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __IRQ_RENESAS_IRQC_H__
-#define __IRQ_RENESAS_IRQC_H__
-
-struct renesas_irqc_config {
- unsigned int irq_base;
-};
-
-#endif /* __IRQ_RENESAS_IRQC_H__ */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index 0a6de4ca4930..aed170588b74 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -27,6 +27,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWB473,
TYPE_NCPXXWL333,
TYPE_B57330V2103,
+ TYPE_NCPXXWF104,
};
struct ntc_thermistor_platform_data {
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h
deleted file mode 100644
index 31449be3eadb..000000000000
--- a/include/linux/platform_data/video-msm_fb.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Internal shared definitions for various MSM framebuffer parts.
- *
- * Copyright (C) 2007 Google Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_FB_H_
-#define _MSM_FB_H_
-
-#include <linux/device.h>
-
-struct mddi_info;
-
-struct msm_fb_data {
- int xres; /* x resolution in pixels */
- int yres; /* y resolution in pixels */
- int width; /* disply width in mm */
- int height; /* display height in mm */
- unsigned output_format;
-};
-
-struct msmfb_callback {
- void (*func)(struct msmfb_callback *);
-};
-
-enum {
- MSM_MDDI_PMDH_INTERFACE,
- MSM_MDDI_EMDH_INTERFACE,
- MSM_EBI2_INTERFACE,
-};
-
-#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
-
-struct msm_panel_data {
- /* turns off the fb memory */
- int (*suspend)(struct msm_panel_data *);
- /* turns on the fb memory */
- int (*resume)(struct msm_panel_data *);
- /* turns off the panel */
- int (*blank)(struct msm_panel_data *);
- /* turns on the panel */
- int (*unblank)(struct msm_panel_data *);
- void (*wait_vsync)(struct msm_panel_data *);
- void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
- void (*clear_vsync)(struct msm_panel_data *);
- /* from the enum above */
- unsigned interface_type;
- /* data to be passed to the fb driver */
- struct msm_fb_data *fb_data;
-
- /* capabilities supported by the panel */
- uint32_t caps;
-};
-
-struct msm_mddi_client_data {
- void (*suspend)(struct msm_mddi_client_data *);
- void (*resume)(struct msm_mddi_client_data *);
- void (*activate_link)(struct msm_mddi_client_data *);
- void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
- uint32_t reg);
- uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
- void (*auto_hibernate)(struct msm_mddi_client_data *, int);
- /* custom data that needs to be passed from the board file to a
- * particular client */
- void *private_client_data;
- struct resource *fb_resource;
- /* from the list above */
- unsigned interface_type;
-};
-
-struct msm_mddi_platform_data {
- unsigned int clk_rate;
- void (*power_client)(struct msm_mddi_client_data *, int on);
-
- /* fixup the mfr name, product id */
- void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
-
- struct resource *fb_resource; /*optional*/
- /* number of clients in the list that follows */
- int num_clients;
- /* array of client information of clients */
- struct {
- unsigned product_id; /* mfr id in top 16 bits, product id
- * in lower 16 bits
- */
- char *name; /* the device name will be the platform
- * device name registered for the client,
- * it should match the name of the associated
- * driver
- */
- unsigned id; /* id for mddi client device node, will also
- * be used as device id of panel devices, if
- * the client device will have multiple panels
- * space must be left here for them
- */
- void *client_data; /* required private client data */
- unsigned int clk_rate; /* optional: if the client requires a
- * different mddi clk rate
- */
- } client_platform_data[];
-};
-
-struct mdp_blit_req;
-struct fb_info;
-struct mdp_device {
- struct device dev;
- void (*dma)(struct mdp_device *mpd, uint32_t addr,
- uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
- uint32_t y, struct msmfb_callback *callback, int interface);
- void (*dma_wait)(struct mdp_device *mdp);
- int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
- struct mdp_blit_req *req);
- void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
-};
-
-struct class_interface;
-int register_mdp_client(struct class_interface *class_intf);
-
-/**** private client data structs go below this line ***/
-
-struct msm_mddi_bridge_platform_data {
- /* from board file */
- int (*init)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*uninit)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- /* passed to panel for use by the fb driver */
- int (*blank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*unblank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- struct msm_fb_data fb_data;
-};
-
-
-
-#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 2d29c64f8fb1..35d599e7250d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,6 +342,18 @@ struct dev_pm_ops {
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
+#ifdef CONFIG_PM_SLEEP
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = suspend_fn, \
+ .resume_noirq = resume_fn, \
+ .freeze_noirq = suspend_fn, \
+ .thaw_noirq = resume_fn, \
+ .poweroff_noirq = suspend_fn, \
+ .restore_noirq = resume_fn,
+#else
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
+#endif
+
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
@@ -529,6 +541,7 @@ enum rpm_request {
};
struct wakeup_source;
+struct wake_irq;
struct pm_domain_data;
struct pm_subsys_data {
@@ -568,6 +581,7 @@ struct dev_pm_info {
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
+ struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
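
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills in all six _noirq sleep callbacks from a single suspend/resume pair, mirroring SET_SYSTEM_SLEEP_PM_OPS(). A sketch of how a driver might use it (the foo_* callbacks are hypothetical):

#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
        /* Quiesce the hardware; interrupts are already disabled here. */
        return 0;
}

static int foo_resume_noirq(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};
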
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 0b0039634410..25266c600021 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
struct clk;
+#ifdef CONFIG_PM
+extern int pm_clk_runtime_suspend(struct device *dev);
+extern int pm_clk_runtime_resume(struct device *dev);
+#define USE_PM_CLK_RUNTIME_OPS \
+ .runtime_suspend = pm_clk_runtime_suspend, \
+ .runtime_resume = pm_clk_runtime_resume,
+#else
+#define USE_PM_CLK_RUNTIME_OPS
+#endif
+
#ifdef CONFIG_PM_CLK
static inline bool pm_clk_no_clocks(struct device *dev)
{
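
USE_PM_CLK_RUNTIME_OPS expands to runtime_suspend/runtime_resume entries backed by pm_clk_runtime_suspend()/pm_clk_runtime_resume(), so a bus or platform driver that only needs its clocks gated on idle can avoid writing those callbacks. A minimal sketch:

#include <linux/pm.h>
#include <linux/pm_clock.h>

static const struct dev_pm_ops foo_bus_pm_ops = {
        /* Gate/ungate the clocks registered via pm_clk_add() on runtime PM. */
        USE_PM_CLK_RUNTIME_OPS
};
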
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
new file mode 100644
index 000000000000..cd5b62db9084
--- /dev/null
+++ b/include/linux/pm_wakeirq.h
@@ -0,0 +1,51 @@
+/*
+ * pm_wakeirq.h - Device wakeirq helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PM_WAKEIRQ_H
+#define _LINUX_PM_WAKEIRQ_H
+
+#ifdef CONFIG_PM
+
+extern int dev_pm_set_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
+ int irq);
+extern void dev_pm_clear_wake_irq(struct device *dev);
+extern void dev_pm_enable_wake_irq(struct device *dev);
+extern void dev_pm_disable_wake_irq(struct device *dev);
+
+#else /* !CONFIG_PM */
+
+static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline void dev_pm_clear_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_enable_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_disable_wake_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM */
+#endif /* _LINUX_PM_WAKEIRQ_H */
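
A driver would typically hand its wake-capable interrupt to these helpers at probe time and drop it on removal, letting the PM core arm it only across suspend. A hedged, hypothetical probe/remove pair for a platform device:

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);
        int ret;

        if (irq < 0)
                return irq;

        device_init_wakeup(&pdev->dev, true);
        /* Dedicated wake IRQ: not shared with the normal I/O interrupt. */
        ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
        if (ret)
                device_init_wakeup(&pdev->dev, false);
        return ret;
}

static int foo_remove(struct platform_device *pdev)
{
        dev_pm_clear_wake_irq(&pdev->dev);
        device_init_wakeup(&pdev->dev, false);
        return 0;
}
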
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a0f70808d7f4..a3447932df1f 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -28,9 +28,17 @@
#include <linux/types.h>
+struct wake_irq;
+
/**
* struct wakeup_source - Representation of wakeup sources
*
+ * @name: Name of the wakeup source
+ * @entry: Wakeup source list entry
+ * @lock: Wakeup source lock
+ * @wakeirq: Optional device specific wakeirq
+ * @timer: Wakeup timer list
+ * @timer_expires: Wakeup timer expiration
* @total_time: Total time this wakeup source has been active.
* @max_time: Maximum time this wakeup source has been continuously active.
* @last_time: Monotonic clock when the wakeup source's was touched last time.
@@ -47,6 +55,7 @@ struct wakeup_source {
const char *name;
struct list_head entry;
spinlock_t lock;
+ struct wake_irq *wakeirq;
struct timer_list timer;
unsigned long timer_expires;
ktime_t total_time;
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index cf112b4075c8..522757ac9cd4 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -215,6 +215,10 @@ struct max17042_platform_data {
* the datasheet although it can be changed by board designers.
*/
unsigned int r_sns;
+ int vmin; /* in millivolts */
+ int vmax; /* in millivolts */
+ int temp_min; /* in tenths of degree Celsius */
+ int temp_max; /* in tenths of degree Celsius */
};
#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 75a1dd8dc56e..ef9f1592185d 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -206,6 +206,11 @@ struct power_supply_desc {
int (*set_property)(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+ /*
+ * property_is_writeable() will be called during power supply
+ * registration. If this happens during device probe, it must not
+ * access the device's internal data (probe has not finished yet).
+ */
int (*property_is_writeable)(struct power_supply *psy,
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
@@ -237,6 +242,7 @@ struct power_supply {
/* private */
struct device dev;
struct work_struct changed_work;
+ struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
atomic_t use_cnt;
@@ -286,10 +292,15 @@ extern void power_supply_put(struct power_supply *psy);
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
+extern struct power_supply *devm_power_supply_get_by_phandle(
+ struct device *dev, const char *property);
#else /* !CONFIG_OF */
static inline struct power_supply *
power_supply_get_by_phandle(struct device_node *np, const char *property)
{ return NULL; }
+static inline struct power_supply *
+devm_power_supply_get_by_phandle(struct device *dev, const char *property)
+{ return NULL; }
#endif /* CONFIG_OF */
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
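
The devm_ variant ties the supply reference to the consumer's lifetime, so no explicit power_supply_put() is needed. A sketch of a consumer probe; the "battery" phandle name is illustrative, and the NULL/ERR_PTR handling is an assumption that should be checked against the implementation:

#include <linux/platform_device.h>
#include <linux/power_supply.h>

static int foo_charger_probe(struct platform_device *pdev)
{
        struct power_supply *batt;

        batt = devm_power_supply_get_by_phandle(&pdev->dev, "battery");
        if (IS_ERR(batt))
                return PTR_ERR(batt);
        if (!batt)
                return -EPROBE_DEFER;   /* assumed: supply not registered yet */

        /* use batt; the reference is dropped automatically on detach */
        return 0;
}
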
diff --git a/include/linux/property.h b/include/linux/property.h
index de8bdf417a35..76ebde9c11d4 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -164,4 +164,6 @@ struct property_set {
void device_add_property_set(struct device *dev, struct property_set *pset);
+bool device_dma_is_coherent(struct device *dev);
+
#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index e90628cac8fa..36262d08a9da 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -182,6 +182,8 @@ struct pwm_chip {
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
+int pwmchip_add_with_polarity(struct pwm_chip *chip,
+ enum pwm_polarity polarity);
int pwmchip_add(struct pwm_chip *chip);
int pwmchip_remove(struct pwm_chip *chip);
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
@@ -217,6 +219,11 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
+static inline int pwmchip_add_inversed(struct pwm_chip *chip)
+{
+ return -EINVAL;
+}
+
static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
@@ -290,10 +297,15 @@ struct pwm_lookup {
#if IS_ENABLED(CONFIG_PWM)
void pwm_add_table(struct pwm_lookup *table, size_t num);
+void pwm_remove_table(struct pwm_lookup *table, size_t num);
#else
static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
{
}
+
+static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
+{
+}
#endif
#ifdef CONFIG_PWM_SYSFS
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index dab545bb66b3..0485bab061fd 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -194,8 +194,9 @@ enum pxa_ssp_type {
PXA168_SSP,
PXA910_SSP,
CE4100_SSP,
- LPSS_SSP,
QUARK_X1000_SSP,
+ LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_BYT_SSP,
};
struct ssp_device {
diff --git a/include/linux/random.h b/include/linux/random.h
index b05856e16b75..e651874df2c9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,14 +6,23 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/list.h>
#include <uapi/linux/random.h>
+struct random_ready_callback {
+ struct list_head list;
+ void (*func)(struct random_ready_callback *rdy);
+ struct module *owner;
+};
+
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
+extern int add_random_ready_callback(struct random_ready_callback *rdy);
+extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);
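
The random-ready callback lets a module defer work until the nonblocking pool is initialized instead of consuming possibly weak early output. A sketch under the assumption that a failed registration means the pool is already (or about to be) ready, in which case the caller can just proceed:

#include <linux/module.h>
#include <linux/random.h>

static bool foo_registered;

static void foo_seed_ready(struct random_ready_callback *rdy)
{
        /* Safe to call get_random_bytes() for long-lived secrets now. */
}

static struct random_ready_callback foo_rdy = {
        .func   = foo_seed_ready,
        .owner  = THIS_MODULE,
};

static int __init foo_init(void)
{
        if (add_random_ready_callback(&foo_rdy))
                foo_seed_ready(&foo_rdy);       /* assumed: pool already ready */
        else
                foo_registered = true;
        return 0;
}

static void __exit foo_exit(void)
{
        if (foo_registered)
                del_random_ready_callback(&foo_rdy);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
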
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index a18b16f1dc0e..17c6b1f84a77 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -29,8 +29,8 @@
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
- ACCESS_ONCE(list->next) = list;
- ACCESS_ONCE(list->prev) = list;
+ WRITE_ONCE(list->next, list);
+ WRITE_ONCE(list->prev, list);
}
/*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
- struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
@@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
- pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 0627a447c589..33a056bb886f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -294,10 +294,6 @@ void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
@@ -366,8 +362,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ if (READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
@@ -611,7 +607,7 @@ static inline void rcu_preempt_sleep_check(void)
#define __rcu_access_pointer(p, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
@@ -630,21 +626,6 @@ static inline void rcu_preempt_sleep_check(void)
((typeof(*p) __force __kernel *)(p)); \
})
-#define __rcu_access_index(p, space) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- (_________p1); \
-})
-#define __rcu_dereference_index_check(p, c) \
-({ \
- /* Dependency order vs. p above. */ \
- typeof(p) _________p1 = lockless_dereference(p); \
- rcu_lockdep_assert(c, \
- "suspicious rcu_dereference_index_check() usage"); \
- (_________p1); \
-})
-
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
@@ -661,7 +642,7 @@ static inline void rcu_preempt_sleep_check(void)
*/
#define lockless_dereference(p) \
({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
+ typeof(p) _________p1 = READ_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
@@ -704,7 +685,7 @@ static inline void rcu_preempt_sleep_check(void)
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
@@ -789,47 +770,12 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
/**
- * rcu_access_index() - fetch RCU index with no dereferencing
- * @p: The index to read
- *
- * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this index is accessed, but the index is not
- * dereferenced, for example, when testing an RCU-protected index against
- * -1. Although rcu_access_index() may also be used in cases where
- * update-side locks prevent the value of the index from changing, you
- * should instead use rcu_dereference_index_protected() for this use case.
- */
-#define rcu_access_index(p) __rcu_access_index((p), __rcu)
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
-/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
@@ -1155,13 +1101,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
-#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
+#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
*nextevt = KTIME_MAX;
return 0;
}
-#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+#endif /* #ifdef CONFIG_TINY_RCU */
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 937edaeb150d..3df6c1ec4e25 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -159,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void)
{
}
+static inline void rcu_idle_enter(void)
+{
+}
+
+static inline void rcu_idle_exit(void)
+{
+}
+
+static inline void rcu_irq_enter(void)
+{
+}
+
+static inline void rcu_irq_exit(void)
+{
+}
+
static inline void exit_rcu(void)
{
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index db2e31beaae7..456879143f89 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -31,9 +31,7 @@
#define __LINUX_RCUTREE_H
void rcu_note_context_switch(void);
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(u64 basem, u64 *nextevt);
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
void rcu_cpu_stall_reset(void);
/*
@@ -93,6 +91,11 @@ void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
+
void exit_rcu(void);
void rcu_scheduler_starting(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 116655d92269..59c55ea0f0b5 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -433,6 +433,8 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
int regmap_get_val_bytes(struct regmap *map);
+int regmap_get_max_register(struct regmap *map);
+int regmap_get_reg_stride(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
@@ -676,6 +678,18 @@ static inline int regmap_get_val_bytes(struct regmap *map)
return -EINVAL;
}
+static inline int regmap_get_max_register(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_reg_stride(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
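
The two new accessors let generic code walk a map without knowing the underlying regmap_config. A hedged register-dump sketch; it assumes every stride-aligned register up to the maximum may be read, which real hardware does not always allow:

#include <linux/device.h>
#include <linux/regmap.h>

static void foo_dump_regs(struct device *dev, struct regmap *map)
{
        int max = regmap_get_max_register(map);
        int stride = regmap_get_reg_stride(map);
        unsigned int reg, val;

        if (max < 0 || stride <= 0)
                return;         /* information not available */

        for (reg = 0; reg <= max; reg += stride) {
                if (regmap_read(map, reg, &val))
                        continue;       /* skip unreadable registers */
                dev_dbg(dev, "0x%04x = 0x%08x\n", reg, val);
        }
}
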
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fffa688ac3a7..4db9fbe4889d 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -91,6 +91,7 @@ struct regulator_linear_range {
* @set_current_limit: Configure a limit for a current-limited regulator.
* The driver should select the current closest to max_uA.
* @get_current_limit: Get the configured limit for a current-limited regulator.
+ * @set_input_current_limit: Configure an input limit.
*
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
@@ -111,6 +112,7 @@ struct regulator_linear_range {
* to stabilise after being set to a new value, in microseconds.
* The function provides the from and to voltage selector, the
* function should return the worst case.
+ * @set_soft_start: Enable soft start for the regulator.
*
* @set_suspend_voltage: Set the voltage for the regulator when the system
* is suspended.
@@ -121,6 +123,9 @@ struct regulator_linear_range {
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
*
+ * @set_pull_down: Configure the regulator to pull down when the regulator
+ * is disabled.
+ *
* This struct describes regulator operations which can be implemented by
* regulator chip drivers.
*/
@@ -142,6 +147,8 @@ struct regulator_ops {
int min_uA, int max_uA);
int (*get_current_limit) (struct regulator_dev *);
+ int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
int (*disable) (struct regulator_dev *);
@@ -158,6 +165,8 @@ struct regulator_ops {
unsigned int old_selector,
unsigned int new_selector);
+ int (*set_soft_start) (struct regulator_dev *);
+
/* report regulator status ... most other accessors report
* control inputs, this reports results of combining inputs
* from Linux (and other sources) with the actual load.
@@ -187,6 +196,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+
+ int (*set_pull_down) (struct regulator_dev *);
};
/*
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b07562e082c4..b11be1260129 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -75,6 +75,8 @@ struct regulator_state {
*
* @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
+ * @ilim_uA: Maximum input current.
+ * @system_load: Load that isn't captured by any consumer requests.
*
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
@@ -86,6 +88,8 @@ struct regulator_state {
* applied.
* @apply_uV: Apply the voltage constraint when initialising.
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
+ * @soft_start: Enable soft start so that voltage ramps slowly.
+ * @pull_down: Enable pull down when regulator is disabled.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
@@ -111,6 +115,9 @@ struct regulation_constraints {
/* current output range (inclusive) - for current control */
int min_uA;
int max_uA;
+ int ilim_uA;
+
+ int system_load;
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
@@ -138,6 +145,8 @@ struct regulation_constraints {
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
unsigned apply_uV:1; /* apply uV constraint if min == max */
unsigned ramp_disable:1; /* disable ramp delay */
+ unsigned soft_start:1; /* ramp voltage slowly */
+ unsigned pull_down:1; /* pull down resistor when regulator off */
};
/**
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f8acc052e353..f6a8a16a0d4d 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -58,6 +58,9 @@
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
* device register.
+ * @enable_gpio: Enable GPIO. If the EN pin is controlled through a GPIO from
+ * the host, that GPIO number can be provided here. If the pin is not
+ * GPIO-controlled, it should be -1.
* @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
* @dvs_def_state: Default state of dvs. 1 if it is high else 0.
*/
@@ -65,6 +68,7 @@ struct max8973_regulator_platform_data {
struct regulator_init_data *reg_init_data;
unsigned long control_flags;
bool enable_ext_control;
+ int enable_gpio;
int dvs_gpio;
unsigned dvs_def_state:1;
};
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 6bda06f21930..cde976e86b48 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -298,7 +298,7 @@ struct rio_id_table {
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
- * @switches: List of switches in this netowrk
+ * @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ed8f9e70df9b..a0edb992c9c3 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -221,6 +221,7 @@ static inline void *sg_virt(struct scatterlist *sg)
}
int sg_nents(struct scatterlist *sg);
+int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
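
sg_nents_for_len() returns how many scatterlist entries are needed to cover a given byte count, or a negative error if the list is too short, which is useful before DMA-mapping only the relevant part of a request. A small sketch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_map_request(struct device *dev, struct scatterlist *sgl,
                           u64 len, enum dma_data_direction dir)
{
        int nents = sg_nents_for_len(sgl, len);

        if (nents < 0)
                return nents;   /* scatterlist shorter than len */

        return dma_map_sg(dev, sgl, nents, dir) ? 0 : -EIO;
}
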
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7de815c6fa78..6633e83e608a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -132,6 +132,7 @@ struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
+struct nameidata;
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -261,7 +262,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- set_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
/*
@@ -283,7 +284,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- set_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
@@ -291,7 +292,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
- set_mb((tsk)->state, (state_value))
+ smp_store_mb((tsk)->state, (state_value))
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -307,7 +308,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
- set_mb(current->state, (state_value))
+ smp_store_mb(current->state, (state_value))
#endif
@@ -344,14 +345,10 @@ extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(int pinned);
+extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
-{
- return smp_processor_id();
-}
#endif
/*
@@ -1422,9 +1419,6 @@ struct task_struct {
#endif
struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
- unsigned brk_randomized:1;
-#endif
/* per-thread vma caching */
u32 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1447,10 +1441,14 @@ struct task_struct {
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned sched_migrated:1;
#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
+#ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
@@ -1527,7 +1525,7 @@ struct task_struct {
it with task_lock())
- initialized normally by setup_new_exec */
/* file system info */
- int link_count, total_link_count;
+ struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
@@ -2601,6 +2599,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
}
#endif
+#define tasklist_empty() \
+ list_empty(&init_task.tasks)
+
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 596a0e007c62..c9e4731cf10b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return 1;
-}
-#endif
/*
* control realtime throttling:
diff --git a/include/linux/security.h b/include/linux/security.h
index 18264ea9e314..52febde52479 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -43,7 +43,6 @@ struct file;
struct vfsmount;
struct path;
struct qstr;
-struct nameidata;
struct iattr;
struct fown_struct;
struct file_operations;
@@ -477,7 +476,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode_follow_link:
* Check permission to follow a symbolic link when looking up a pathname.
* @dentry contains the dentry structure for the link.
- * @nd contains the nameidata structure for the parent directory.
+ * @inode contains the inode, which itself is not stable in RCU-walk
+ * @rcu indicates whether we are in RCU-walk mode.
* Return 0 if permission is granted.
* @inode_permission:
* Check permission before accessing an inode. This hook is called by the
@@ -1553,7 +1553,8 @@ struct security_operations {
int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int (*inode_readlink) (struct dentry *dentry);
- int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+ int (*inode_follow_link) (struct dentry *dentry, struct inode *inode,
+ bool rcu);
int (*inode_permission) (struct inode *inode, int mask);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
int (*inode_getattr) (const struct path *path);
@@ -1839,7 +1840,8 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
int security_inode_readlink(struct dentry *dentry);
-int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ bool rcu);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(const struct path *path);
@@ -2242,7 +2244,8 @@ static inline int security_inode_readlink(struct dentry *dentry)
}
static inline int security_inode_follow_link(struct dentry *dentry,
- struct nameidata *nd)
+ struct inode *inode,
+ bool rcu)
{
return 0;
}
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3e18379dfa6f..0063b24b4f36 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -120,7 +120,7 @@ do { \
/*
* Despite its name it doesn't necessarily has to be a full barrier.
* It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
diff --git a/include/linux/sw842.h b/include/linux/sw842.h
new file mode 100644
index 000000000000..109ba041c2ae
--- /dev/null
+++ b/include/linux/sw842.h
@@ -0,0 +1,12 @@
+#ifndef __SW842_H__
+#define __SW842_H__
+
+#define SW842_MEM_COMPRESS (0xf000)
+
+int sw842_compress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen, void *wmem);
+
+int sw842_decompress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen);
+
+#endif
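
The software 842 interface follows the usual in-kernel compressor pattern: the caller supplies an SW842_MEM_COMPRESS-byte scratch buffer for compression and passes *destlen in/out. A minimal round-trip sketch (buffer sizing is illustrative only):

#include <linux/slab.h>
#include <linux/sw842.h>

static int foo_842_roundtrip(const u8 *data, unsigned int len)
{
        unsigned int clen = len * 2, dlen = len;        /* generous bounds */
        u8 *cbuf, *dbuf;
        void *wmem;
        int ret;

        cbuf = kmalloc(clen, GFP_KERNEL);
        dbuf = kmalloc(dlen, GFP_KERNEL);
        wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
        if (!cbuf || !dbuf || !wmem) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sw842_compress(data, len, cbuf, &clen, wmem);
        if (!ret)
                ret = sw842_decompress(cbuf, clen, dbuf, &dlen);
out:
        kfree(wmem);
        kfree(dbuf);
        kfree(cbuf);
        return ret;
}
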
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f8492da57ad3..3741ba1a652c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -13,8 +13,6 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
-extern void tick_freeze(void);
-extern void tick_unfreeze(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
@@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
-static inline void tick_freeze(void) { }
-static inline void tick_unfreeze(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+#else
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+#endif
+
#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
@@ -134,6 +138,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
+{
+ if (tick_nohz_full_enabled())
+ cpumask_or(mask, mask, tick_nohz_full_mask);
+}
+
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
@@ -142,6 +152,7 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
diff --git a/include/linux/timer.h b/include/linux/timer.h
index fbb80e0030bf..61aa61dc410c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -14,27 +14,23 @@ struct timer_list {
* All fields that change during normal runtime grouped to the
* same cacheline
*/
- struct list_head entry;
- unsigned long expires;
- struct tvec_base *base;
-
- void (*function)(unsigned long);
- unsigned long data;
-
- int slack;
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(unsigned long);
+ unsigned long data;
+ u32 flags;
+ int slack;
#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
+ struct lockdep_map lockdep_map;
#endif
};
-extern struct tvec_base boot_tvec_bases;
-
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
#endif
/*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
* A deferrable timer will work normally when the system is busy, but
* will not cause a CPU to come out of idle just to service it; instead,
* the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_DEFERRABLE 0x1LU
-#define TIMER_IRQSAFE 0x2LU
-
-#define TIMER_FLAG_MASK 0x3LU
+#define TIMER_CPUMASK 0x0007FFFF
+#define TIMER_MIGRATING 0x00080000
+#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_IRQSAFE 0x00200000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .prev = TIMER_ENTRY_STATIC }, \
+ .entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
- .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+ .flags = (_flags), \
.slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.next != NULL;
+ return timer->entry.pprev != NULL;
}
extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -194,13 +188,10 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
extern int timer_stats_active;
-#define TIMER_STATS_FLAG_DEFERRABLE 0x1
-
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
+ void *timerf, char *comm, u32 flags);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
void *addr);
@@ -247,6 +238,15 @@ extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
diff --git a/include/linux/types.h b/include/linux/types.h
index 59698be03490..8715287c3b1f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -139,12 +139,20 @@ typedef unsigned long blkcnt_t;
*/
#define pgoff_t unsigned long
-/* A dma_addr_t can hold any valid DMA or bus address for the platform */
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
-#endif /* dma_addr_t */
+#endif
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
new file mode 100644
index 000000000000..5ae962512fb8
--- /dev/null
+++ b/include/misc/cxl-base.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXL_BASE_H
+#define _MISC_CXL_BASE_H
+
+#ifdef CONFIG_CXL_BASE
+
+#define CXL_IRQ_RANGES 4
+
+struct cxl_irq_ranges {
+ irq_hw_number_t offset[CXL_IRQ_RANGES];
+ irq_hw_number_t range[CXL_IRQ_RANGES];
+};
+
+extern atomic_t cxl_use_count;
+
+static inline bool cxl_ctx_in_use(void)
+{
+ return (atomic_read(&cxl_use_count) != 0);
+}
+
+static inline void cxl_ctx_get(void)
+{
+ atomic_inc(&cxl_use_count);
+}
+
+static inline void cxl_ctx_put(void)
+{
+ atomic_dec(&cxl_use_count);
+}
+
+void cxl_slbia(struct mm_struct *mm);
+
+#else /* CONFIG_CXL_BASE */
+
+static inline bool cxl_ctx_in_use(void) { return false; }
+static inline void cxl_slbia(struct mm_struct *mm) {}
+
+#endif /* CONFIG_CXL_BASE */
+
+#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 975cc7861f18..7a6c1d6cc173 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 IBM Corp.
+ * Copyright 2015 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,39 +10,194 @@
#ifndef _MISC_CXL_H
#define _MISC_CXL_H
-#ifdef CONFIG_CXL_BASE
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <uapi/misc/cxl.h>
-#define CXL_IRQ_RANGES 4
+/*
+ * This documents the in-kernel API for drivers to use CXL. It allows kernel
+ * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
+ * configuration record.
+ *
+ * This API enables control over AFUs and contexts which can't be part of the
+ * generic PCI API. This API is agnostic to the actual AFU.
+ */
+
+/* Get the AFU associated with a pci_dev */
+struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
+
+/* Get the AFU conf record number associated with a pci_dev */
+unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
+
+/* Get the physical device (i.e. the PCIe card) to which the AFU is attached */
+struct device *cxl_get_phys_dev(struct pci_dev *dev);
+
+
+/*
+ * Context lifetime overview:
+ *
+ * An AFU context may be inited and then started and stopped multiple times
+ * before it's released, i.e.:
+ * - cxl_dev_context_init()
+ * - cxl_start_context()
+ * - cxl_stop_context()
+ * - cxl_start_context()
+ * - cxl_stop_context()
+ * ...repeat...
+ * - cxl_release_context()
+ * Once released, a context can't be started again.
+ *
+ * One context is inited by the cxl driver for every pci_dev. This is to be
+ * used as a default kernel context. cxl_get_context() will get this
+ * context. This context will be released by PCI hot unplug, so it doesn't
+ * need to be released explicitly by drivers.
+ *
+ * Additional kernel contexts may be inited using cxl_dev_context_init().
+ * These must be released using cxl_release_context().
+ *
+ * Once a context has been inited, IRQs may be configured. Firstly these IRQs
+ * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
+ * specific handlers (cxl_map_afu_irq()).
+ *
+ * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
+ * (cxl_free_afu_irqs()).
+ *
+ * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
+ * hardware to lose track of all contexts. It's up to the caller of
+ * cxl_afu_reset() to restart these contexts.
+ */
+
+/*
+ * On pci_enable_device(), the cxl driver will init a single cxl context for
+ * use by the driver. It doesn't start this context (as that will likely
+ * generate DMA traffic for most AFUs).
+ *
+ * This gets the default context associated with this pci_dev. This context
+ * doesn't need to be released as this will be done by the PCI subsystem on hot
+ * unplug.
+ */
+struct cxl_context *cxl_get_context(struct pci_dev *dev);
+/*
+ * Allocate and initialise a context associated with an AFU PCI device. This
+ * doesn't start the context in the AFU.
+ */
+struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
+/*
+ * Release and free a context. Context should be stopped before calling.
+ */
+int cxl_release_context(struct cxl_context *ctx);
-struct cxl_irq_ranges {
- irq_hw_number_t offset[CXL_IRQ_RANGES];
- irq_hw_number_t range[CXL_IRQ_RANGES];
-};
+/*
+ * Allocate AFU interrupts for this context. num=0 will allocate the default
+ * for this AFU as given in the AFU descriptor. This number doesn't include
+ * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
+ * used must map a handler with cxl_map_afu_irq.
+ */
+int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
+/* Free allocated interrupts */
+void cxl_free_afu_irqs(struct cxl_context *cxl);
+
+/*
+ * Map a handler for an AFU interrupt associated with a particular context. AFU
+ * IRQ numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
+ * is private data that will be provided to the interrupt handler.
+ */
+int cxl_map_afu_irq(struct cxl_context *cxl, int num,
+ irq_handler_t handler, void *cookie, char *name);
+/* unmap mapped IRQ handlers */
+void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
-extern atomic_t cxl_use_count;
+/*
+ * Start work on the AFU. This starts a cxl context and associates it with a
+ * task. task == NULL will make it a kernel context.
+ */
+int cxl_start_context(struct cxl_context *ctx, u64 wed,
+ struct task_struct *task);
+/*
+ * Stop a context and remove it from the PSL
+ */
+int cxl_stop_context(struct cxl_context *ctx);
-static inline bool cxl_ctx_in_use(void)
-{
- return (atomic_read(&cxl_use_count) != 0);
-}
+/* Reset the AFU */
+int cxl_afu_reset(struct cxl_context *ctx);
-static inline void cxl_ctx_get(void)
-{
- atomic_inc(&cxl_use_count);
-}
+/*
+ * Set a context as a master context.
+ * This sets the default problem space area mapped as the full space, rather
+ * than just the per-context area (used by slaves).
+ */
+void cxl_set_master(struct cxl_context *ctx);
-static inline void cxl_ctx_put(void)
-{
- atomic_dec(&cxl_use_count);
-}
+/*
+ * Map and unmap the AFU Problem Space area. The amount and location mapped
+ * depend on whether this context is a master or slave.
+ */
+void __iomem *cxl_psa_map(struct cxl_context *ctx);
+void cxl_psa_unmap(void __iomem *addr);
-void cxl_slbia(struct mm_struct *mm);
+/* Get the process element for this context */
+int cxl_process_element(struct cxl_context *ctx);
-#else /* CONFIG_CXL_BASE */
-static inline bool cxl_ctx_in_use(void) { return false; }
-static inline void cxl_slbia(struct mm_struct *mm) {}
+/*
+ * These calls allow drivers to create their own file descriptors and make them
+ * identical to the cxl file descriptor user API. An example use case:
+ *
+ * struct file_operations cxl_my_fops = {};
+ * ......
+ * // Init the context
+ * ctx = cxl_dev_context_init(dev);
+ * if (IS_ERR(ctx))
+ * return PTR_ERR(ctx);
+ * // Create and attach a new file descriptor to my file ops
+ * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
+ * // Start context
+ * rc = cxl_start_work(ctx, &work.work);
+ * if (rc) {
+ * fput(file);
+ * put_unused_fd(fd);
+ * return -ENODEV;
+ * }
+ * // No error paths after installing the fd
+ * fd_install(fd, file);
+ * return fd;
+ *
+ * This inits a context, gets a file descriptor and associates some file
+ * ops with that file descriptor. If the file ops are blank, the cxl driver will
+ * fill them in with the default ones that mimic the standard user API. Once
+ * completed, the file descriptor can be installed. Once the file descriptor is
+ * installed, it's visible to the user so no errors must occur past this point.
+ *
+ * If the cxl_fd_release() file op is installed, the context will be stopped
+ * and released when the fd is released. Hence the driver won't need to manage
+ * this itself.
+ */
-#endif /* CONFIG_CXL_BASE */
+/*
+ * Take a context and associate it with my file ops. Returns the associated
+ * file and file descriptor. Any file ops which are blank are filled in by the
+ * cxl driver with the default ops to mimic the standard API.
+ */
+struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
+ int *fd);
+/* Get the context associated with this file */
+struct cxl_context *cxl_fops_get_context(struct file *file);
+/*
+ * Start a context associated with a struct cxl_ioctl_start_work, as used by the
+ * standard cxl user API.
+ */
+int cxl_start_work(struct cxl_context *ctx,
+ struct cxl_ioctl_start_work *work);
+/*
+ * Export all the existing fops so drivers can use them
+ */
+int cxl_fd_open(struct inode *inode, struct file *file);
+int cxl_fd_release(struct inode *inode, struct file *file);
+long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
+unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *off);
-#endif
+#endif /* _MISC_CXL_H */
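
Tying the declarations above back to the context lifetime described in the header comment, a possible in-kernel user might look like the hypothetical sketch below; the WED value, IRQ count and handler are placeholders and error handling is abbreviated:

static irqreturn_t example_afu_irq(int irq, void *cookie)
{
	/* cookie is the cxl_context passed to cxl_map_afu_irq() below */
	return IRQ_HANDLED;
}

static int example_afu_setup(struct pci_dev *pdev)
{
	static char irq_name[] = "example_afu";
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	rc = cxl_allocate_afu_irqs(ctx, 1);
	if (rc)
		goto err_release;

	rc = cxl_map_afu_irq(ctx, 1, example_afu_irq, ctx, irq_name);
	if (rc)
		goto err_free_irqs;

	/* wed == 0 and task == NULL (kernel context) are placeholders */
	rc = cxl_start_context(ctx, 0, NULL);
	if (rc)
		goto err_unmap;

	return 0;

err_unmap:
	cxl_unmap_afu_irq(ctx, 1, ctx);
err_free_irqs:
	cxl_free_afu_irqs(ctx);
err_release:
	cxl_release_context(ctx);
	return rc;
}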
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 36ac102c97c7..f0ee97eec24d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -168,6 +168,7 @@ struct xfrm_state {
struct xfrm_algo *ealg;
struct xfrm_algo *calg;
struct xfrm_algo_aead *aead;
+ const char *geniv;
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
@@ -1314,6 +1315,7 @@ static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
* xfrm algorithm information
*/
struct xfrm_algo_aead_info {
+ char *geniv;
u16 icv_truncbits;
};
@@ -1323,6 +1325,7 @@ struct xfrm_algo_auth_info {
};
struct xfrm_algo_encr_info {
+ char *geniv;
u16 blockbits;
u16 defkeybits;
};
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ac54c27a2bfd..fde33ac6b58a 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -111,8 +111,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
int rdma_addr_size(struct sockaddr *addr);
int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
-int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
- u16 *vlan_id);
+int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
+ u8 *smac, u16 *vlan_id);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
}
/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
-static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
{
if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
struct sockaddr_in *out_in = (struct sockaddr_in *)out;
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index ad9a3c280944..bd92130f4ac5 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -64,10 +64,10 @@ int ib_get_cached_gid(struct ib_device *device,
* ib_find_cached_gid() searches for the specified GID value in
* the local software cache.
*/
-int ib_find_cached_gid(struct ib_device *device,
- union ib_gid *gid,
- u8 *port_num,
- u16 *index);
+int ib_find_cached_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 *port_num,
+ u16 *index);
/**
* ib_get_cached_pkey - Returns a cached PKey table entry
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 9bb99e983f58..c8422d5a5a91 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -42,8 +42,11 @@
#include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>
-/* Management base version */
+/* Management base versions */
#define IB_MGMT_BASE_VERSION 1
+#define OPA_MGMT_BASE_VERSION 0x80
+
+#define OPA_SMP_CLASS_VERSION 0x80
/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
@@ -135,6 +138,10 @@ enum {
IB_MGMT_SA_DATA = 200,
IB_MGMT_DEVICE_HDR = 64,
IB_MGMT_DEVICE_DATA = 192,
+ IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,
+ OPA_MGMT_MAD_DATA = 2024,
+ OPA_MGMT_RMPP_DATA = 2012,
+ OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA,
};
struct ib_mad_hdr {
@@ -181,12 +188,23 @@ struct ib_mad {
u8 data[IB_MGMT_MAD_DATA];
};
+struct opa_mad {
+ struct ib_mad_hdr mad_hdr;
+ u8 data[OPA_MGMT_MAD_DATA];
+};
+
struct ib_rmpp_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
u8 data[IB_MGMT_RMPP_DATA];
};
+struct opa_rmpp_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 data[OPA_MGMT_RMPP_DATA];
+};
+
struct ib_sa_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
@@ -235,7 +253,10 @@ struct ib_class_port_info {
* includes the common MAD, RMPP, and class specific headers.
* @data_len: Indicates the total size of user-transferred data.
* @seg_count: The number of RMPP segments allocated for this send.
- * @seg_size: Size of each RMPP segment.
+ * @seg_size: Size of the data in each RMPP segment. This does not include
+ * class specific headers.
+ * @seg_rmpp_size: Size of each RMPP segment including the class specific
+ * headers.
* @timeout_ms: Time to wait for a response.
* @retries: Number of times to retry a request for a response. For MADs
* using RMPP, this applies per window. On completion, returns the number
@@ -255,6 +276,7 @@ struct ib_mad_send_buf {
int data_len;
int seg_count;
int seg_size;
+ int seg_rmpp_size;
int timeout_ms;
int retries;
};
@@ -263,7 +285,7 @@ struct ib_mad_send_buf {
* ib_response_mad - Returns if the specified MAD has been generated in
* response to a sent request or trap.
*/
-int ib_response_mad(struct ib_mad *mad);
+int ib_response_mad(const struct ib_mad_hdr *hdr);
/**
* ib_get_rmpp_resptime - Returns the RMPP response time.
@@ -401,7 +423,10 @@ struct ib_mad_send_wc {
struct ib_mad_recv_buf {
struct list_head list;
struct ib_grh *grh;
- struct ib_mad *mad;
+ union {
+ struct ib_mad *mad;
+ struct opa_mad *opa_mad;
+ };
};
/**
@@ -410,6 +435,7 @@ struct ib_mad_recv_buf {
* @recv_buf: Specifies the location of the received data buffer(s).
* @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
* @mad_len: The length of the received MAD, without duplicated headers.
+ * @mad_seg_size: The size of individual MAD segments
*
* For received response, the wr_id contains a pointer to the ib_mad_send_buf
* for the corresponding send request.
@@ -419,6 +445,7 @@ struct ib_mad_recv_wc {
struct ib_mad_recv_buf recv_buf;
struct list_head rmpp_list;
int mad_len;
+ size_t mad_seg_size;
};
/**
@@ -618,6 +645,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
* automatically adjust the allocated buffer size to account for any
* additional padding that may be necessary.
* @gfp_mask: GFP mask used for the memory allocation.
+ * @base_version: Base Version of this MAD
*
* This routine allocates a MAD for sending. The returned MAD send buffer
* will reference a data buffer usable for sending a MAD, along
@@ -633,7 +661,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index,
int rmpp_active,
int hdr_len, int data_len,
- gfp_t gfp_mask);
+ gfp_t gfp_mask,
+ u8 base_version);
/**
* ib_is_mad_class_rmpp - returns whether given management class
@@ -675,6 +704,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
* @agent: the agent in question
* @return: true if agent is performing rmpp, false otherwise.
*/
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
#endif /* IB_MAD_H */
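
Since ib_create_send_mad() now takes a base version, a caller supporting both IBTA and OPA MADs might size its buffers like the hypothetical helper below (agent, QPN and P_Key index come from the caller; RMPP is left disabled for brevity):

static struct ib_mad_send_buf *example_alloc_mad(struct ib_mad_agent *agent,
						 u32 remote_qpn, u16 pkey_index,
						 bool opa)
{
	u8 base = opa ? OPA_MGMT_BASE_VERSION : IB_MGMT_BASE_VERSION;
	int data_len = opa ? OPA_MGMT_MAD_DATA : IB_MGMT_MAD_DATA;

	/* Fixed MAD header, no RMPP; the payload size follows the base version */
	return ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
				  IB_MGMT_MAD_HDR, data_len,
				  GFP_KERNEL, base);
}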
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 65994a19e840..986fddb08579 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -81,6 +81,13 @@ enum rdma_transport_type {
RDMA_TRANSPORT_USNIC_UDP
};
+enum rdma_protocol_type {
+ RDMA_PROTOCOL_IB,
+ RDMA_PROTOCOL_IBOE,
+ RDMA_PROTOCOL_IWARP,
+ RDMA_PROTOCOL_USNIC_UDP
+};
+
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);
@@ -166,6 +173,16 @@ struct ib_odp_caps {
} per_transport_caps;
};
+enum ib_cq_creation_flags {
+ IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+};
+
+struct ib_cq_init_attr {
+ unsigned int cqe;
+ int comp_vector;
+ u32 flags;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -210,6 +227,8 @@ struct ib_device_attr {
int sig_prot_cap;
int sig_guard_cap;
struct ib_odp_caps odp_caps;
+ uint64_t timestamp_mask;
+ uint64_t hca_core_clock; /* in KHZ */
};
enum ib_mtu {
@@ -346,6 +365,42 @@ union rdma_protocol_stats {
struct iw_protocol_stats iw;
};
+/* Define bits for the various functionality this port needs to be supported by
+ * the core.
+ */
+/* Management 0x00000FFF */
+#define RDMA_CORE_CAP_IB_MAD 0x00000001
+#define RDMA_CORE_CAP_IB_SMI 0x00000002
+#define RDMA_CORE_CAP_IB_CM 0x00000004
+#define RDMA_CORE_CAP_IW_CM 0x00000008
+#define RDMA_CORE_CAP_IB_SA 0x00000010
+#define RDMA_CORE_CAP_OPA_MAD 0x00000020
+
+/* Address format 0x000FF000 */
+#define RDMA_CORE_CAP_AF_IB 0x00001000
+#define RDMA_CORE_CAP_ETH_AH 0x00002000
+
+/* Protocol 0xFFF00000 */
+#define RDMA_CORE_CAP_PROT_IB 0x00100000
+#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
+#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
+
+#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
+ | RDMA_CORE_CAP_IB_MAD \
+ | RDMA_CORE_CAP_IB_SMI \
+ | RDMA_CORE_CAP_IB_CM \
+ | RDMA_CORE_CAP_IB_SA \
+ | RDMA_CORE_CAP_AF_IB)
+#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
+ | RDMA_CORE_CAP_IB_MAD \
+ | RDMA_CORE_CAP_IB_CM \
+ | RDMA_CORE_CAP_AF_IB \
+ | RDMA_CORE_CAP_ETH_AH)
+#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
+ | RDMA_CORE_CAP_IW_CM)
+#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
+ | RDMA_CORE_CAP_OPA_MAD)
+
struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
@@ -412,6 +467,8 @@ enum ib_event_type {
IB_EVENT_GID_CHANGE,
};
+__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+
struct ib_event {
struct ib_device *device;
union {
@@ -663,6 +720,8 @@ enum ib_wc_status {
IB_WC_GENERAL_ERR
};
+__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+
enum ib_wc_opcode {
IB_WC_SEND,
IB_WC_RDMA_WRITE,
@@ -1407,7 +1466,7 @@ struct ib_flow {
struct ib_uobject *uobject;
};
-struct ib_mad;
+struct ib_mad_hdr;
struct ib_grh;
enum ib_process_mad_flags {
@@ -1474,6 +1533,13 @@ struct ib_dma_mapping_ops {
struct iw_cm_verbs;
+struct ib_port_immutable {
+ int pkey_tbl_len;
+ int gid_tbl_len;
+ u32 core_cap_flags;
+ u32 max_mad_size;
+};
+
struct ib_device {
struct device *dma_device;
@@ -1487,8 +1553,10 @@ struct ib_device {
struct list_head client_data_list;
struct ib_cache cache;
- int *pkey_tbl_len;
- int *gid_tbl_len;
+ /**
+ * port_immutable is indexed by port number
+ */
+ struct ib_port_immutable *port_immutable;
int num_comp_vectors;
@@ -1497,7 +1565,8 @@ struct ib_device {
int (*get_protocol_stats)(struct ib_device *device,
union rdma_protocol_stats *stats);
int (*query_device)(struct ib_device *device,
- struct ib_device_attr *device_attr);
+ struct ib_device_attr *device_attr,
+ struct ib_udata *udata);
int (*query_port)(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr);
@@ -1561,8 +1630,8 @@ struct ib_device {
int (*post_recv)(struct ib_qp *qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
- struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
- int comp_vector,
+ struct ib_cq * (*create_cq)(struct ib_device *device,
+ const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
@@ -1637,10 +1706,13 @@ struct ib_device {
int (*process_mad)(struct ib_device *device,
int process_mad_flags,
u8 port_num,
- struct ib_wc *in_wc,
- struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
@@ -1675,6 +1747,14 @@ struct ib_device {
u32 local_dma_lkey;
u8 node_type;
u8 phys_port_cnt;
+
+ /**
+ * The following mandatory functions are used only at device
+ * registration. Keep functions such as these at the end of this
+ * structure to avoid cache line misses when accessing struct ib_device
+ * in fast paths.
+ */
+ int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
};
struct ib_client {
@@ -1743,6 +1823,284 @@ int ib_query_port(struct ib_device *device,
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
u8 port_num);
+/**
+ * rdma_start_port - Return the first valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return start port number
+ */
+static inline u8 rdma_start_port(const struct ib_device *device)
+{
+ return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+}
+
+/**
+ * rdma_end_port - Return the last valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return last port number
+ */
+static inline u8 rdma_end_port(const struct ib_device *device)
+{
+ return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+ 0 : device->phys_port_cnt;
+}
+
+static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+}
+
+static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+}
+
+static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+}
+
+static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags &
+ (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
+}
+
+/**
+ * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Management Datagrams (MAD) are a required part of the InfiniBand
+ * specification and are supported on all InfiniBand devices. A slightly
+ * extended version is also supported on OPA interfaces.
+ *
+ * Return: true if the port supports sending/receiving of MAD packets.
+ */
+static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+}
+
+/**
+ * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Intel OmniPath devices extend and/or replace the InfiniBand Management
+ * datagrams with their own versions. These OPA MADs share many but not all of
+ * the characteristics of InfiniBand MADs.
+ *
+ * OPA MADs differ in the following ways:
+ *
+ * 1) MADs are variable size up to 2K
+ * IBTA defined MADs remain fixed at 256 bytes
+ * 2) OPA SMPs must carry valid PKeys
+ * 3) OPA SMP packets are a different format
+ *
+ * Return: true if the port supports OPA MAD packet formats.
+ */
+static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
+{
+ return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
+ == RDMA_CORE_CAP_OPA_MAD;
+}
+
+/**
+ * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
+ * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Each InfiniBand node is required to provide a Subnet Management Agent
+ * that the subnet manager can access. Prior to the fabric being fully
+ * configured by the subnet manager, the SMA is accessed via a well known
+ * interface called the Subnet Management Interface (SMI). This interface
+ * uses directed route packets to communicate with the SM to get around the
+ * chicken and egg problem of the SM needing to know what's on the fabric
+ * in order to configure the fabric, and needing to configure the fabric in
+ * order to send packets to the devices on the fabric. These directed
+ * route packets do not need the fabric fully configured in order to reach
+ * their destination. The SMI is the only method allowed to send
+ * directed route packets on an InfiniBand fabric.
+ *
+ * Return: true if the port provides an SMI.
+ */
+static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+}
+
+/**
+ * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * The InfiniBand Communication Manager is one of many pre-defined General
+ * Service Agents (GSA) that are accessed via the General Service
+ * Interface (GSI). Its role is to facilitate establishment of connections
+ * between nodes as well as other management related tasks for established
+ * connections.
+ *
+ * Return: true if the port supports an IB CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+}
+
+/**
+ * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Similar to above, but specific to iWARP connections which have a different
+ * management protocol than InfiniBand.
+ *
+ * Return: true if the port supports an iWARP CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+}
+
+/**
+ * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
+ * Subnet Administration.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * An InfiniBand Subnet Administration (SA) service is a pre-defined General
+ * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
+ * fabrics, devices should resolve routes to other hosts by contacting the
+ * SA to query the proper route.
+ *
+ * Return: true if the port should act as a client to the fabric Subnet
+ * Administration interface. This does not imply that the SA service is
+ * running locally.
+ */
+static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+}
+
+/**
+ * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
+ * Multicast.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand multicast registration is more complex than normal IPv4 or
+ * IPv6 multicast registration. Each Host Channel Adapter must register
+ * with the Subnet Manager when it wishes to join a multicast group. It
+ * should do so only once regardless of how many queue pairs it subscribes
+ * to this group. And it should leave the group only after all queue pairs
+ * attached to the group have been detached.
+ *
+ * Return: true if the port must undertake the additional administrative
+ * overhead of registering/unregistering with the SM and tracking of the
+ * total number of queue pairs attached to the multicast group.
+ */
+static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
+{
+ return rdma_cap_ib_sa(device, port_num);
+}
+
+/**
+ * rdma_cap_af_ib - Check if the port of a device supports
+ * native InfiniBand addressing.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
+ * GID. RoCE uses a different mechanism, but still generates a GID via
+ * a prescribed mechanism and port specific data.
+ *
+ * Return: true if the port uses a GID address to identify devices on the
+ * network.
+ */
+static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+}
+
+/**
+ * rdma_cap_eth_ah - Check if the port of a device supports
+ * Ethernet Address Handles.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
+ * to fabricate GIDs over Ethernet/IP specific addresses native to the
+ * port. Normally, packet headers are generated by the sending host
+ * adapter, but when sending connectionless datagrams, we must manually
+ * inject the proper headers for the fabric we are communicating over.
+ *
+ * Return: true if we are running as a RoCE port and must force the
+ * addition of a Global Route Header built from our Ethernet Address
+ * Handle into our header list for connectionless packets.
+ */
+static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+}
+
+/**
+ * rdma_cap_read_multi_sge - Check if the port of a device supports
+ * multiple Scatter-Gather Entries on RDMA Read requests.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * iWARP has a restriction that RDMA READ requests may only have a single
+ * Scatter/Gather Entry (SGE) in the work request.
+ *
+ * NOTE: although the linux kernel currently assumes all devices are either
+ * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
+ * WRITEs, according to Tom Talpey, this is not accurate. There are some
+ * devices out there that support more than a single SGE on RDMA READ
+ * requests, but do not support the same number of SGEs as they do on
+ * RDMA WRITE requests. The Linux kernel would need rearchitecting to
+ * support devices with such imbalanced READ/WRITE SGE limits. So, for now,
+ * we assume that a device either supports the same number of READ and WRITE
+ * SGEs, or that it only gets one READ SGE.
+ *
+ * Return: true for any device that allows more than one SGE in RDMA READ
+ * requests.
+ */
+static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
+ u8 port_num)
+{
+ return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
+}
+
+/**
+ * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
+ *
+ * @device: Device
+ * @port_num: Port number
+ *
+ * This MAD size includes the MAD headers and MAD payload. No other headers
+ * are included.
+ *
+ * Return the max MAD size required by the Port. Will return 0 if the port
+ * does not support MADs.
+ */
+static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].max_mad_size;
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
@@ -1799,8 +2157,9 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
- struct ib_grh *grh, struct ib_ah_attr *ah_attr);
+int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct ib_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -1814,8 +2173,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
* The address handle is used to reference a local or global destination
* in all UD QP post sends.
*/
-struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
- struct ib_grh *grh, u8 port_num);
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
+ const struct ib_grh *grh, u8 port_num);
/**
* ib_modify_ah - Modifies the address vector associated with an address
@@ -2011,16 +2370,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
* asynchronous event not associated with a completion occurs on the CQ.
* @cq_context: Context associated with the CQ returned to the user via
* the associated completion and event handlers.
- * @cqe: The minimum size of the CQ.
- * @comp_vector - Completion vector used to signal completion events.
- * Must be >= 0 and < context->num_comp_vectors.
+ * @cq_attr: The attributes with which the CQ should be created.
*
* Users can examine the cq structure to determine the actual CQ size.
*/
struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *),
- void *cq_context, int cqe, int comp_vector);
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr);
/**
* ib_resize_cq - Modifies the capacity of the CQ.
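
For illustration only, the per-port helpers and the new ib_cq_init_attr introduced above could be used roughly as in the sketch below; the printout, CQ depth and callbacks are placeholders, not part of any real consumer:

static void example_scan_ports(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); port++) {
		if (!rdma_cap_ib_mad(device, port))
			continue;
		pr_info("%s port %u: MAD capable, max MAD size %zu\n",
			device->name, port, rdma_max_mad_size(device, port));
	}
}

static struct ib_cq *example_create_cq(struct ib_device *device,
				       ib_comp_handler comp_handler,
				       void *cq_context)
{
	struct ib_cq_init_attr attr = {
		.cqe = 256,		/* placeholder CQ depth */
		.comp_vector = 0,
		.flags = 0,
	};

	return ib_create_cq(device, comp_handler, NULL, cq_context, &attr);
}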
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 1017e0bdf8ba..036bd2772662 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -91,6 +91,7 @@ struct iw_cm_id {
/* Used by provider to add and remove refs on IW cm_id */
void (*add_ref)(struct iw_cm_id *);
void (*rem_ref)(struct iw_cm_id *);
+ u8 tos;
};
struct iw_cm_conn_param {
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
new file mode 100644
index 000000000000..29063e84c253
--- /dev/null
+++ b/include/rdma/opa_smi.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(OPA_SMI_H)
+#define OPA_SMI_H
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+
+#define OPA_SMP_LID_DATA_SIZE 2016
+#define OPA_SMP_DR_DATA_SIZE 1872
+#define OPA_SMP_MAX_PATH_HOPS 64
+
+#define OPA_SMI_CLASS_VERSION 0x80
+
+#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
+
+struct opa_smp {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ u8 hop_ptr;
+ u8 hop_cnt;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ union {
+ struct {
+ uint8_t data[OPA_SMP_LID_DATA_SIZE];
+ } lid;
+ struct {
+ __be32 dr_slid;
+ __be32 dr_dlid;
+ u8 initial_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 return_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 reserved[8];
+ u8 data[OPA_SMP_DR_DATA_SIZE];
+ } dr;
+ } route;
+} __packed;
+
+
+static inline u8
+opa_get_smp_direction(struct opa_smp *smp)
+{
+ return ib_get_smp_direction((struct ib_smp *)smp);
+}
+
+static inline u8 *opa_get_smp_data(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return smp->route.dr.data;
+
+ return smp->route.lid.data;
+}
+
+static inline size_t opa_get_smp_data_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(smp->route.dr.data);
+
+ return sizeof(smp->route.lid.data);
+}
+
+static inline size_t opa_get_smp_header_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(*smp) - sizeof(smp->route.dr.data);
+
+ return sizeof(*smp) - sizeof(smp->route.lid.data);
+}
+
+#endif /* OPA_SMI_H */
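
A small, hypothetical consumer of the accessors above, which keep callers independent of whether the SMP is LID-routed or directed-route; the destination buffer and its length are assumed to be supplied by the caller:

static void example_copy_smp_data(struct opa_smp *smp, u8 *dst, size_t dst_len)
{
	size_t avail = opa_get_smp_data_size(smp);

	/* Copy no more than the attribute data the SMP actually carries */
	memcpy(dst, opa_get_smp_data(smp), min(dst_len, avail));
}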
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 1ed2088dc9f5..c92522c192d2 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -62,6 +62,8 @@ enum rdma_cm_event_type {
RDMA_CM_EVENT_TIMEWAIT_EXIT
};
+__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event);
+
enum rdma_port_space {
RDMA_PS_SDP = 0x0001,
RDMA_PS_IPOIB = 0x0002,
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index d0a66aa1868d..e0a3398b1547 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -1,9 +1,6 @@
/*
* This header file contains public constants and structures used by
- * the scsi code for linux.
- *
- * For documentation on the OPCODES, MESSAGES, and SENSE values,
- * please consult the SCSI standard.
+ * the SCSI initiator code.
*/
#ifndef _SCSI_SCSI_H
#define _SCSI_SCSI_H
@@ -11,6 +8,8 @@
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
struct scsi_cmnd;
@@ -49,187 +48,6 @@ enum scsi_timeouts {
*/
#define SCAN_WILD_CARD ~0
-/*
- * SCSI opcodes
- */
-
-#define TEST_UNIT_READY 0x00
-#define REZERO_UNIT 0x01
-#define REQUEST_SENSE 0x03
-#define FORMAT_UNIT 0x04
-#define READ_BLOCK_LIMITS 0x05
-#define REASSIGN_BLOCKS 0x07
-#define INITIALIZE_ELEMENT_STATUS 0x07
-#define READ_6 0x08
-#define WRITE_6 0x0a
-#define SEEK_6 0x0b
-#define READ_REVERSE 0x0f
-#define WRITE_FILEMARKS 0x10
-#define SPACE 0x11
-#define INQUIRY 0x12
-#define RECOVER_BUFFERED_DATA 0x14
-#define MODE_SELECT 0x15
-#define RESERVE 0x16
-#define RELEASE 0x17
-#define COPY 0x18
-#define ERASE 0x19
-#define MODE_SENSE 0x1a
-#define START_STOP 0x1b
-#define RECEIVE_DIAGNOSTIC 0x1c
-#define SEND_DIAGNOSTIC 0x1d
-#define ALLOW_MEDIUM_REMOVAL 0x1e
-
-#define READ_FORMAT_CAPACITIES 0x23
-#define SET_WINDOW 0x24
-#define READ_CAPACITY 0x25
-#define READ_10 0x28
-#define WRITE_10 0x2a
-#define SEEK_10 0x2b
-#define POSITION_TO_ELEMENT 0x2b
-#define WRITE_VERIFY 0x2e
-#define VERIFY 0x2f
-#define SEARCH_HIGH 0x30
-#define SEARCH_EQUAL 0x31
-#define SEARCH_LOW 0x32
-#define SET_LIMITS 0x33
-#define PRE_FETCH 0x34
-#define READ_POSITION 0x34
-#define SYNCHRONIZE_CACHE 0x35
-#define LOCK_UNLOCK_CACHE 0x36
-#define READ_DEFECT_DATA 0x37
-#define MEDIUM_SCAN 0x38
-#define COMPARE 0x39
-#define COPY_VERIFY 0x3a
-#define WRITE_BUFFER 0x3b
-#define READ_BUFFER 0x3c
-#define UPDATE_BLOCK 0x3d
-#define READ_LONG 0x3e
-#define WRITE_LONG 0x3f
-#define CHANGE_DEFINITION 0x40
-#define WRITE_SAME 0x41
-#define UNMAP 0x42
-#define READ_TOC 0x43
-#define READ_HEADER 0x44
-#define GET_EVENT_STATUS_NOTIFICATION 0x4a
-#define LOG_SELECT 0x4c
-#define LOG_SENSE 0x4d
-#define XDWRITEREAD_10 0x53
-#define MODE_SELECT_10 0x55
-#define RESERVE_10 0x56
-#define RELEASE_10 0x57
-#define MODE_SENSE_10 0x5a
-#define PERSISTENT_RESERVE_IN 0x5e
-#define PERSISTENT_RESERVE_OUT 0x5f
-#define VARIABLE_LENGTH_CMD 0x7f
-#define REPORT_LUNS 0xa0
-#define SECURITY_PROTOCOL_IN 0xa2
-#define MAINTENANCE_IN 0xa3
-#define MAINTENANCE_OUT 0xa4
-#define MOVE_MEDIUM 0xa5
-#define EXCHANGE_MEDIUM 0xa6
-#define READ_12 0xa8
-#define SERVICE_ACTION_OUT_12 0xa9
-#define WRITE_12 0xaa
-#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
-#define SERVICE_ACTION_IN_12 0xab
-#define WRITE_VERIFY_12 0xae
-#define VERIFY_12 0xaf
-#define SEARCH_HIGH_12 0xb0
-#define SEARCH_EQUAL_12 0xb1
-#define SEARCH_LOW_12 0xb2
-#define SECURITY_PROTOCOL_OUT 0xb5
-#define READ_ELEMENT_STATUS 0xb8
-#define SEND_VOLUME_TAG 0xb6
-#define WRITE_LONG_2 0xea
-#define EXTENDED_COPY 0x83
-#define RECEIVE_COPY_RESULTS 0x84
-#define ACCESS_CONTROL_IN 0x86
-#define ACCESS_CONTROL_OUT 0x87
-#define READ_16 0x88
-#define COMPARE_AND_WRITE 0x89
-#define WRITE_16 0x8a
-#define READ_ATTRIBUTE 0x8c
-#define WRITE_ATTRIBUTE 0x8d
-#define VERIFY_16 0x8f
-#define SYNCHRONIZE_CACHE_16 0x91
-#define WRITE_SAME_16 0x93
-#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
-#define SERVICE_ACTION_IN_16 0x9e
-#define SERVICE_ACTION_OUT_16 0x9f
-/* values for service action in */
-#define SAI_READ_CAPACITY_16 0x10
-#define SAI_GET_LBA_STATUS 0x12
-#define SAI_REPORT_REFERRALS 0x13
-/* values for VARIABLE_LENGTH_CMD service action codes
- * see spc4r17 Section D.3.5, table D.7 and D.8 */
-#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
-/* values for maintenance in */
-#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
-#define MI_REPORT_TARGET_PGS 0x0a
-#define MI_REPORT_ALIASES 0x0b
-#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
-#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
-#define MI_REPORT_PRIORITY 0x0e
-#define MI_REPORT_TIMESTAMP 0x0f
-#define MI_MANAGEMENT_PROTOCOL_IN 0x10
-/* value for MI_REPORT_TARGET_PGS ext header */
-#define MI_EXT_HDR_PARAM_FMT 0x20
-/* values for maintenance out */
-#define MO_SET_IDENTIFYING_INFORMATION 0x06
-#define MO_SET_TARGET_PGS 0x0a
-#define MO_CHANGE_ALIASES 0x0b
-#define MO_SET_PRIORITY 0x0e
-#define MO_SET_TIMESTAMP 0x0f
-#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
-/* values for variable length command */
-#define XDREAD_32 0x03
-#define XDWRITE_32 0x04
-#define XPWRITE_32 0x06
-#define XDWRITEREAD_32 0x07
-#define READ_32 0x09
-#define VERIFY_32 0x0a
-#define WRITE_32 0x0b
-#define WRITE_SAME_32 0x0d
-
-/* Values for T10/04-262r7 */
-#define ATA_16 0x85 /* 16-byte pass-thru */
-#define ATA_12 0xa1 /* 12-byte pass-thru */
-
-/* Vendor specific CDBs start here */
-#define VENDOR_SPECIFIC_CDB 0xc0
-
-/*
- * SCSI command lengths
- */
-
-#define SCSI_MAX_VARLEN_CDB_SIZE 260
-
-/* defined in T10 SCSI Primary Commands-2 (SPC2) */
-struct scsi_varlen_cdb_hdr {
- __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
- __u8 control;
- __u8 misc[5];
- __u8 additional_cdb_length; /* total cdb length - 8 */
- __be16 service_action;
- /* service specific data follows */
-};
-
-static inline unsigned
-scsi_varlen_cdb_length(const void *hdr)
-{
- return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
-}
-
-extern const unsigned char scsi_command_size_tbl[8];
-#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
-
-static inline unsigned
-scsi_command_size(const unsigned char *cmnd)
-{
- return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
- scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
-}
-
#ifdef CONFIG_ACPI
struct acpi_bus_type;
@@ -240,22 +58,6 @@ extern void
scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus);
#endif
-/*
- * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
- * T10/1561-D Revision 4 Draft dated 7th November 2002.
- */
-#define SAM_STAT_GOOD 0x00
-#define SAM_STAT_CHECK_CONDITION 0x02
-#define SAM_STAT_CONDITION_MET 0x04
-#define SAM_STAT_BUSY 0x08
-#define SAM_STAT_INTERMEDIATE 0x10
-#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-#define SAM_STAT_TASK_SET_FULL 0x28
-#define SAM_STAT_ACA_ACTIVE 0x30
-#define SAM_STAT_TASK_ABORTED 0x40
-
/** scsi_status_is_good - check the status return.
*
* @status: the status passed up from the driver (including host and
@@ -279,86 +81,6 @@ static inline int scsi_status_is_good(int status)
(status == SAM_STAT_COMMAND_TERMINATED));
}
-/*
- * Status codes. These are deprecated as they are shifted 1 bit right
- * from those found in the SCSI standards. This causes confusion for
- * applications that are ported to several OSes. Prefer SAM Status codes
- * above.
- */
-
-#define GOOD 0x00
-#define CHECK_CONDITION 0x01
-#define CONDITION_GOOD 0x02
-#define BUSY 0x04
-#define INTERMEDIATE_GOOD 0x08
-#define INTERMEDIATE_C_GOOD 0x0a
-#define RESERVATION_CONFLICT 0x0c
-#define COMMAND_TERMINATED 0x11
-#define QUEUE_FULL 0x14
-#define ACA_ACTIVE 0x18
-#define TASK_ABORTED 0x20
-
-#define STATUS_MASK 0xfe
-
-/*
- * SENSE KEYS
- */
-
-#define NO_SENSE 0x00
-#define RECOVERED_ERROR 0x01
-#define NOT_READY 0x02
-#define MEDIUM_ERROR 0x03
-#define HARDWARE_ERROR 0x04
-#define ILLEGAL_REQUEST 0x05
-#define UNIT_ATTENTION 0x06
-#define DATA_PROTECT 0x07
-#define BLANK_CHECK 0x08
-#define COPY_ABORTED 0x0a
-#define ABORTED_COMMAND 0x0b
-#define VOLUME_OVERFLOW 0x0d
-#define MISCOMPARE 0x0e
-
-
-/*
- * DEVICE TYPES
- * Please keep them in 0x%02x format for $MODALIAS to work
- */
-
-#define TYPE_DISK 0x00
-#define TYPE_TAPE 0x01
-#define TYPE_PRINTER 0x02
-#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
-#define TYPE_WORM 0x04 /* Treated as ROM by our system */
-#define TYPE_ROM 0x05
-#define TYPE_SCANNER 0x06
-#define TYPE_MOD 0x07 /* Magneto-optical disk -
- * - treated as TYPE_DISK */
-#define TYPE_MEDIUM_CHANGER 0x08
-#define TYPE_COMM 0x09 /* Communications device */
-#define TYPE_RAID 0x0c
-#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
-#define TYPE_RBC 0x0e
-#define TYPE_OSD 0x11
-#define TYPE_ZBC 0x14
-#define TYPE_WLUN 0x1e /* well-known logical unit */
-#define TYPE_NO_LUN 0x7f
-
-/* SCSI protocols; these are taken from SPC-3 section 7.5 */
-enum scsi_protocol {
- SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
- SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
- SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
- SCSI_PROTOCOL_SBP = 3, /* firewire */
- SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
- SCSI_PROTOCOL_ISCSI = 5,
- SCSI_PROTOCOL_SAS = 6,
- SCSI_PROTOCOL_ADT = 7, /* Media Changers */
- SCSI_PROTOCOL_ATA = 8,
- SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
-};
-
-/* Returns a human-readable name for the device */
-extern const char * scsi_device_type(unsigned type);
/*
* standard mode-select header prepended to all mode-select commands
@@ -380,13 +102,6 @@ struct ccs_modesel_head {
};
/*
- * ScsiLun: 8 byte LUN.
- */
-struct scsi_lun {
- __u8 scsi_lun[8];
-};
-
-/*
* The Well Known LUNS (SAM-3) in our int representation of a LUN
*/
#define SCSI_W_LUN_BASE 0xc100
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
new file mode 100644
index 000000000000..676b03b78e57
--- /dev/null
+++ b/include/scsi/scsi_common.h
@@ -0,0 +1,64 @@
+/*
+ * Functions used by both the SCSI initiator code and the SCSI target code.
+ */
+
+#ifndef _SCSI_COMMON_H_
+#define _SCSI_COMMON_H_
+
+#include <linux/types.h>
+#include <scsi/scsi_proto.h>
+
+static inline unsigned
+scsi_varlen_cdb_length(const void *hdr)
+{
+ return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
+}
+
+extern const unsigned char scsi_command_size_tbl[8];
+#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
+
+static inline unsigned
+scsi_command_size(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+}
+
+/* Returns a human-readable name for the device */
+extern const char *scsi_device_type(unsigned type);
+
+extern void int_to_scsilun(u64, struct scsi_lun *);
+extern u64 scsilun_to_int(struct scsi_lun *);
+
+/*
+ * This is a slightly modified SCSI sense "descriptor" format header.
+ * The addition is to allow the 0x70 and 0x71 response codes. The idea
+ * is to place the salient data from either "fixed" or "descriptor" sense
+ * format into one structure to ease application processing.
+ *
+ * The original sense buffer should be kept around for those cases
+ * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
+ */
+struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
+ u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ u8 additional_length; /* always 0 for fixed sense format */
+};
+
+static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
+{
+ if (!sshdr)
+ return false;
+
+ return (sshdr->response_code & 0x70) == 0x70;
+}
+
+extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
+ struct scsi_sense_hdr *sshdr);
+
+#endif /* _SCSI_COMMON_H_ */
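
As a rough usage sketch of the shared helpers collected here, code common to initiator and target could derive the CDB length from the opcode and normalize a sense buffer as below (the CDB and sense buffers are assumed to come from the caller):

static void example_inspect(const unsigned char *cdb,
			    const u8 *sense, int sense_len)
{
	struct scsi_sense_hdr sshdr;
	unsigned int cdb_len = scsi_command_size(cdb);

	if (scsi_normalize_sense(sense, sense_len, &sshdr) &&
	    scsi_sense_valid(&sshdr))
		pr_debug("CDB length %u, sense %#x/%#x/%#x\n",
			 cdb_len, sshdr.sense_key, sshdr.asc, sshdr.ascq);
}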
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a4c9336811d1..ae84b2214d40 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -413,8 +413,6 @@ extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
-extern void int_to_scsilun(u64, struct scsi_lun *);
-extern u64 scsilun_to_int(struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 5a4bb5bb66b3..4942710ef720 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -7,43 +7,12 @@
struct scsi_device;
struct Scsi_Host;
-/*
- * This is a slightly modified SCSI sense "descriptor" format header.
- * The addition is to allow the 0x70 and 0x71 response codes. The idea
- * is to place the salient data from either "fixed" or "descriptor" sense
- * format into one structure to ease application processing.
- *
- * The original sense buffer should be kept around for those cases
- * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
- */
-struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
- u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
- u8 sense_key;
- u8 asc;
- u8 ascq;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- u8 additional_length; /* always 0 for fixed sense format */
-};
-
-static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
-{
- if (!sshdr)
- return false;
-
- return (sshdr->response_code & 0x70) == 0x70;
-}
-
-
extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
struct list_head *done_q);
extern void scsi_eh_flush_done_q(struct list_head *done_q);
extern void scsi_report_bus_reset(struct Scsi_Host *, int);
extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
-extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
- struct scsi_sense_hdr *sshdr);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
new file mode 100644
index 000000000000..a9fbf1b38e71
--- /dev/null
+++ b/include/scsi/scsi_proto.h
@@ -0,0 +1,281 @@
+/*
+ * This header file contains public constants and structures used by
+ * both the SCSI initiator and the SCSI target code.
+ *
+ * For documentation on the OPCODES, MESSAGES, and SENSE values,
+ * please consult the SCSI standard.
+ */
+
+#ifndef _SCSI_PROTO_H_
+#define _SCSI_PROTO_H_
+
+#include <linux/types.h>
+
+/*
+ * SCSI opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define INITIALIZE_ELEMENT_STATUS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define READ_FORMAT_CAPACITIES 0x23
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define POSITION_TO_ELEMENT 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define UNMAP 0x42
+#define READ_TOC 0x43
+#define READ_HEADER 0x44
+#define GET_EVENT_STATUS_NOTIFICATION 0x4a
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define XDWRITEREAD_10 0x53
+#define MODE_SELECT_10 0x55
+#define RESERVE_10 0x56
+#define RELEASE_10 0x57
+#define MODE_SENSE_10 0x5a
+#define PERSISTENT_RESERVE_IN 0x5e
+#define PERSISTENT_RESERVE_OUT 0x5f
+#define VARIABLE_LENGTH_CMD 0x7f
+#define REPORT_LUNS 0xa0
+#define SECURITY_PROTOCOL_IN 0xa2
+#define MAINTENANCE_IN 0xa3
+#define MAINTENANCE_OUT 0xa4
+#define MOVE_MEDIUM 0xa5
+#define EXCHANGE_MEDIUM 0xa6
+#define READ_12 0xa8
+#define SERVICE_ACTION_OUT_12 0xa9
+#define WRITE_12 0xaa
+#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
+#define SERVICE_ACTION_IN_12 0xab
+#define WRITE_VERIFY_12 0xae
+#define VERIFY_12 0xaf
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define SECURITY_PROTOCOL_OUT 0xb5
+#define READ_ELEMENT_STATUS 0xb8
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+#define EXTENDED_COPY 0x83
+#define RECEIVE_COPY_RESULTS 0x84
+#define ACCESS_CONTROL_IN 0x86
+#define ACCESS_CONTROL_OUT 0x87
+#define READ_16 0x88
+#define COMPARE_AND_WRITE 0x89
+#define WRITE_16 0x8a
+#define READ_ATTRIBUTE 0x8c
+#define WRITE_ATTRIBUTE 0x8d
+#define VERIFY_16 0x8f
+#define SYNCHRONIZE_CACHE_16 0x91
+#define WRITE_SAME_16 0x93
+#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
+#define SERVICE_ACTION_IN_16 0x9e
+#define SERVICE_ACTION_OUT_16 0x9f
+/* values for service action in */
+#define SAI_READ_CAPACITY_16 0x10
+#define SAI_GET_LBA_STATUS 0x12
+#define SAI_REPORT_REFERRALS 0x13
+/* values for VARIABLE_LENGTH_CMD service action codes
+ * see spc4r17 Section D.3.5, table D.7 and D.8 */
+#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
+/* values for maintenance in */
+#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
+#define MI_REPORT_TARGET_PGS 0x0a
+#define MI_REPORT_ALIASES 0x0b
+#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
+#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
+#define MI_REPORT_PRIORITY 0x0e
+#define MI_REPORT_TIMESTAMP 0x0f
+#define MI_MANAGEMENT_PROTOCOL_IN 0x10
+/* value for MI_REPORT_TARGET_PGS ext header */
+#define MI_EXT_HDR_PARAM_FMT 0x20
+/* values for maintenance out */
+#define MO_SET_IDENTIFYING_INFORMATION 0x06
+#define MO_SET_TARGET_PGS 0x0a
+#define MO_CHANGE_ALIASES 0x0b
+#define MO_SET_PRIORITY 0x0e
+#define MO_SET_TIMESTAMP 0x0f
+#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for variable length command */
+#define XDREAD_32 0x03
+#define XDWRITE_32 0x04
+#define XPWRITE_32 0x06
+#define XDWRITEREAD_32 0x07
+#define READ_32 0x09
+#define VERIFY_32 0x0a
+#define WRITE_32 0x0b
+#define WRITE_SAME_32 0x0d
+
+/* Values for T10/04-262r7 */
+#define ATA_16 0x85 /* 16-byte pass-thru */
+#define ATA_12 0xa1 /* 12-byte pass-thru */
+
+/* Vendor specific CDBs start here */
+#define VENDOR_SPECIFIC_CDB 0xc0
+
+/*
+ * SCSI command lengths
+ */
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+
+/* defined in T10 SCSI Primary Commands-2 (SPC2) */
+struct scsi_varlen_cdb_hdr {
+ __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
+ __u8 control;
+ __u8 misc[5];
+ __u8 additional_cdb_length; /* total cdb length - 8 */
+ __be16 service_action;
+ /* service specific data follows */
+};
+
+/*
+ * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
+ * T10/1561-D Revision 4 Draft dated 7th November 2002.
+ */
+#define SAM_STAT_GOOD 0x00
+#define SAM_STAT_CHECK_CONDITION 0x02
+#define SAM_STAT_CONDITION_MET 0x04
+#define SAM_STAT_BUSY 0x08
+#define SAM_STAT_INTERMEDIATE 0x10
+#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
+#define SAM_STAT_RESERVATION_CONFLICT 0x18
+#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
+#define SAM_STAT_TASK_SET_FULL 0x28
+#define SAM_STAT_ACA_ACTIVE 0x30
+#define SAM_STAT_TASK_ABORTED 0x40
+
+/*
+ * Status codes. These are deprecated as they are shifted 1 bit right
+ * from those found in the SCSI standards. This causes confusion for
+ * applications that are ported to several OSes. Prefer SAM Status codes
+ * above.
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+#define ACA_ACTIVE 0x18
+#define TASK_ABORTED 0x20
+
+#define STATUS_MASK 0xfe
+
+/*
+ * SENSE KEYS
+ */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define BLANK_CHECK 0x08
+#define COPY_ABORTED 0x0a
+#define ABORTED_COMMAND 0x0b
+#define VOLUME_OVERFLOW 0x0d
+#define MISCOMPARE 0x0e
+
+
+/*
+ * DEVICE TYPES
+ * Please keep them in 0x%02x format for $MODALIAS to work
+ */
+
+#define TYPE_DISK 0x00
+#define TYPE_TAPE 0x01
+#define TYPE_PRINTER 0x02
+#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
+#define TYPE_WORM 0x04 /* Treated as ROM by our system */
+#define TYPE_ROM 0x05
+#define TYPE_SCANNER 0x06
+#define TYPE_MOD 0x07 /* Magneto-optical disk -
+ * - treated as TYPE_DISK */
+#define TYPE_MEDIUM_CHANGER 0x08
+#define TYPE_COMM 0x09 /* Communications device */
+#define TYPE_RAID 0x0c
+#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
+#define TYPE_RBC 0x0e
+#define TYPE_OSD 0x11
+#define TYPE_ZBC 0x14
+#define TYPE_WLUN 0x1e /* well-known logical unit */
+#define TYPE_NO_LUN 0x7f
+
+/* SCSI protocols; these are taken from SPC-3 section 7.5 */
+enum scsi_protocol {
+ SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
+ SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
+ SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
+ SCSI_PROTOCOL_SBP = 3, /* firewire */
+ SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
+ SCSI_PROTOCOL_ISCSI = 5,
+ SCSI_PROTOCOL_SAS = 6,
+ SCSI_PROTOCOL_ADT = 7, /* Media Changers */
+ SCSI_PROTOCOL_ATA = 8,
+ SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
+};
+
+/*
+ * ScsiLun: 8 byte LUN.
+ */
+struct scsi_lun {
+ __u8 scsi_lun[8];
+};
+
+
+#endif /* _SCSI_PROTO_H_ */
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 1ae84db4c9fb..5be834de491a 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -42,6 +42,7 @@
*/
#include <linux/types.h>
+#include <scsi/scsi.h>
enum {
SRP_LOGIN_REQ = 0x00,
@@ -179,7 +180,7 @@ struct srp_tsk_mgmt {
u8 reserved1[6];
u64 tag;
u8 reserved2[4];
- __be64 lun __attribute__((packed));
+ struct scsi_lun lun;
u8 reserved3[2];
u8 tsk_mgmt_func;
u8 reserved4;
@@ -200,7 +201,7 @@ struct srp_cmd {
u8 data_in_desc_cnt;
u64 tag;
u8 reserved2[4];
- __be64 lun __attribute__((packed));
+ struct scsi_lun lun;
u8 reserved3;
u8 task_attr;
u8 reserved4;
@@ -265,7 +266,7 @@ struct srp_aer_req {
__be32 req_lim_delta;
u64 tag;
u32 reserved2;
- __be64 lun;
+ struct scsi_lun lun;
__be32 sense_data_len;
u32 reserved3;
u8 sense_data[0];
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 53a18b3635e2..df705908480a 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -9,6 +9,8 @@
#include <sound/core.h>
#include <sound/hdaudio.h>
+#define AC_AMP_FAKE_MUTE 0x10 /* fake mute bit set to amp verbs */
+
int snd_hdac_regmap_init(struct hdac_device *codec);
void snd_hdac_regmap_exit(struct hdac_device *codec);
int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 54e7af301888..006983b296dd 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -5,7 +5,6 @@
#include <linux/configfs.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 480e9f82dfea..aec6f6a4477c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,7 +6,6 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/percpu_ida.h>
-#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -17,23 +16,15 @@
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
- * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
- *
- * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
- * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
- * 16-byte CDBs by default and require an extra allocation for
- * 32-byte CDBs to because of legacy issues.
- *
- * Within TCM Core there are no such legacy limitiations, so we go ahead
- * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
- * within all TCM Core and subsystem plugin code.
+ * Maximum size of a CDB that can be stored in se_cmd without allocating
+ * memory dynamically for the CDB.
*/
#define TCM_MAX_COMMAND_SIZE 32
/*
* From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
* defined 96, but the real limit is 252 (or 260 including the header)
*/
-#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
+#define TRANSPORT_SENSE_BUFFER 96
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ADD_SENSE_LEN_OFFSET 7
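A hedged sketch of how the two offsets above line up with fixed-format sense data; the 0x70 response code and the ASC/ASCQ positions (bytes 12/13) come from SPC itself, not from this header, and the helper is hypothetical:

/* Illustrative only: build minimal fixed-format sense data */
static void fill_fixed_sense(unsigned char *buf, u8 key, u8 asc, u8 ascq)
{
	buf[0] = 0x70;				/* current error, fixed format */
	buf[SPC_SENSE_KEY_OFFSET] = key;	/* byte 2 */
	buf[SPC_ADD_SENSE_LEN_OFFSET] = 10;	/* byte 7: additional length */
	buf[12] = asc;
	buf[13] = ascq;
}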
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d19840b0cac8..630d1e5e4de0 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,45 +42,54 @@ TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
u32 scaled_busy,
- u32 state,
+ u32 from,
+ u32 to,
u64 mperf,
u64 aperf,
+ u64 tsc,
u32 freq
),
TP_ARGS(core_busy,
scaled_busy,
- state,
+ from,
+ to,
mperf,
aperf,
+ tsc,
freq
),
TP_STRUCT__entry(
__field(u32, core_busy)
__field(u32, scaled_busy)
- __field(u32, state)
+ __field(u32, from)
+ __field(u32, to)
__field(u64, mperf)
__field(u64, aperf)
+ __field(u64, tsc)
__field(u32, freq)
-
- ),
+ ),
TP_fast_assign(
__entry->core_busy = core_busy;
__entry->scaled_busy = scaled_busy;
- __entry->state = state;
+ __entry->from = from;
+ __entry->to = to;
__entry->mperf = mperf;
__entry->aperf = aperf;
+ __entry->tsc = tsc;
__entry->freq = freq;
),
- TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
+ TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
- (unsigned long)__entry->state,
+ (unsigned long)__entry->from,
+ (unsigned long)__entry->to,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
+ (unsigned long long)__entry->tsc,
(unsigned long)__entry->freq
)
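With the extra from/to and tsc arguments, a caller (intel_pstate is the expected user, though its code is outside this hunk) would now pass both P-states and the TSC sample. A hedged sketch of such a call site, with the local variable names assumed:

/* Hypothetical call site reflecting the new argument list */
trace_pstate_sample(core_busy, scaled_busy,
		    from_pstate, to_pstate,
		    mperf, aperf, tsc, freq);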
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 04c3c6efdcc2..50fea660c0f8 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -6,7 +6,7 @@
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index d7abef1fe6e0..073b9ac245ba 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -45,16 +45,16 @@ TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
unsigned long expires,
- unsigned int deferrable),
+ unsigned int flags),
- TP_ARGS(timer, expires, deferrable),
+ TP_ARGS(timer, expires, flags),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
__field( unsigned long, now )
- __field( unsigned int, deferrable )
+ __field( unsigned int, flags )
),
TP_fast_assign(
@@ -62,13 +62,12 @@ TRACE_EVENT(timer_start,
__entry->function = timer->function;
__entry->expires = expires;
__entry->now = jiffies;
- __entry->deferrable = deferrable;
+ __entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] defer=%c",
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
__entry->timer, __entry->function, __entry->expires,
- (long)__entry->expires - __entry->now,
- __entry->deferrable > 0 ? 'y':'n')
+ (long)__entry->expires - __entry->now, __entry->flags)
);
/**
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 880dd7437172..c178d13d6f4c 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 871e73f99a4d..94d44ab2fda1 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1038,6 +1038,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
#define RADEON_INFO_READ_REG 0x24
+#define RADEON_INFO_VA_UNMAP_WORKING 0x25
struct drm_radeon_info {
uint32_t request;
diff --git a/include/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 4abf2ea6a887..2e67bb64c1da 100644
--- a/include/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -25,6 +25,7 @@ enum {
CRYPTO_MSG_DELALG,
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
+ CRYPTO_MSG_DELRNG,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -43,6 +44,7 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
+ CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -101,5 +103,9 @@ struct crypto_report_rng {
unsigned int seedsize;
};
+struct crypto_report_akcipher {
+ char type[CRYPTO_MAX_NAME];
+};
+
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))
diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h
index 4957bba57cbe..f153d6ea7c62 100644
--- a/include/uapi/linux/hsi/cs-protocol.h
+++ b/include/uapi/linux/hsi/cs-protocol.h
@@ -76,6 +76,15 @@ struct cs_buffer_config {
};
/*
+ * Structure for the monotonic timestamp taken when the
+ * last control command was received
+ */
+struct cs_timestamp {
+ __u32 tv_sec; /* seconds */
+ __u32 tv_nsec; /* nanoseconds */
+};
+
+/*
* Struct describing the layout and contents of the driver mmap area.
* This information is meant as read-only information for the application.
*/
@@ -91,11 +100,8 @@ struct cs_mmap_config_block {
__u32 rx_ptr;
__u32 rx_ptr_boundary;
__u32 reserved3[2];
- /*
- * if enabled with CS_FEAT_TSTAMP_RX_CTRL, monotonic
- * timestamp taken when the last control command was received
- */
- struct timespec tstamp_rx_ctrl;
+ /* enabled with CS_FEAT_TSTAMP_RX_CTRL */
+ struct cs_timestamp tstamp_rx_ctrl;
};
#define CS_IO_MAGIC 'C'
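The new cs_timestamp keeps the mmap layout independent of the kernel's internal struct timespec definition. A hedged sketch of how the driver side might fill it from the monotonic clock; mmap_cfg is an assumed pointer to struct cs_mmap_config_block, and the surrounding driver code is not part of this header:

/* Illustrative only: record the control-command timestamp in the mmap block */
struct timespec64 ts;

ktime_get_ts64(&ts);			/* monotonic time */
mmap_cfg->tstamp_rx_ctrl.tv_sec  = (__u32)ts.tv_sec;
mmap_cfg->tstamp_rx_ctrl.tv_nsec = (__u32)ts.tv_nsec;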
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4b60056776d1..716ad4ae4d4b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -202,7 +202,7 @@ struct kvm_run {
__u32 exit_reason;
__u8 ready_for_interrupt_injection;
__u8 if_flag;
- __u8 padding2[2];
+ __u16 flags;
/* in (pre_kvm_run), out (post_kvm_run) */
__u64 cr8;
@@ -814,6 +814,9 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_S390_INJECT_IRQ 113
#define KVM_CAP_S390_IRQ_STATE 114
#define KVM_CAP_PPC_HWRNG 115
+#define KVM_CAP_DISABLE_QUIRKS 116
+#define KVM_CAP_X86_SMM 117
+#define KVM_CAP_MULTI_ADDRESS_SPACE 118
#ifdef KVM_CAP_IRQ_ROUTING
@@ -894,7 +897,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emlation. See Documentation/virtual/kvm/api.txt.
+ * emulation. See Documentation/virtual/kvm/api.txt.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
@@ -1199,6 +1202,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_IRQ_STATE */
#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
+/* Available with KVM_CAP_X86_SMM */
+#define KVM_SMI _IO(KVMIO, 0xb7)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
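A hedged userspace sketch of the new KVM_SMI ioctl; vm_fd and vcpu_fd are assumed to be descriptors obtained through the usual KVM_CREATE_VM / KVM_CREATE_VCPU sequence:

/* Illustrative only: inject a System Management Interrupt into one vCPU */
if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) > 0)
	ioctl(vcpu_fd, KVM_SMI, 0);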
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 773dfe8924c7..fd2ee501726d 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -6,7 +6,7 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 309211b3eb67..d97f84c080da 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -167,6 +167,7 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -186,6 +187,7 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
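A hedged sketch of how a profiler would request the new indirect-jump filter through perf_event_attr; the remaining fields needed for a complete perf_event_open() call are omitted:

/* Illustrative only: sample user-space indirect jumps in the branch stack */
struct perf_event_attr attr = { 0 };

attr.sample_type        = PERF_SAMPLE_BRANCH_STACK;
attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
			  PERF_SAMPLE_BRANCH_IND_JUMP;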
@@ -564,6 +566,10 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
/*
+ * Indicates that /proc/PID/maps parsing was truncated due to a timeout.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
+/*
* PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
* different events so can reuse the same bit position.
*/
@@ -800,6 +806,18 @@ enum perf_event_type {
*/
PERF_RECORD_ITRACE_START = 12,
+ /*
+ * Records the number of dropped/lost samples.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST_SAMPLES = 13,
+
PERF_RECORD_MAX, /* non-ABI */
};
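On the tooling side, the record documented above would be read back with a matching layout; a hedged sketch (the struct name is hypothetical, the fields follow the comment):

/* Illustrative only: tool-side view of a PERF_RECORD_LOST_SAMPLES record */
struct lost_samples_event {
	struct perf_event_header header;
	__u64 lost;			/* number of dropped/lost samples */
	/* struct sample_id follows */
};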
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index b57b750c222f..9fd7b5d8df2f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -36,6 +36,8 @@
/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+#define VFIO_SPAPR_TCE_v2_IOMMU 7
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -443,6 +445,23 @@ struct vfio_iommu_type1_dma_unmap {
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
+ * The SPAPR TCE DDW info struct describes the dynamic DMA window (DDW)
+ * capability in detail.
+ *
+ * @pgsizes contains a bitmask of the supported page sizes (4K/64K/16M).
+ * @max_dynamic_windows_supported tells the maximum number of windows
+ * which the platform can create.
+ * @levels tells the maximum number of levels in multi-level IOMMU tables;
+ * this allows splitting a table into smaller chunks which reduces
+ * the amount of physically contiguous memory required for the table.
+ */
+struct vfio_iommu_spapr_tce_ddw_info {
+ __u64 pgsizes; /* Bitmap of supported page sizes */
+ __u32 max_dynamic_windows_supported;
+ __u32 levels;
+};
+
+/*
* The SPAPR TCE info struct provides the information about the PCI bus
* address ranges available for DMA, these values are programmed into
* the hardware so the guest has to know that information.
@@ -452,14 +471,17 @@ struct vfio_iommu_type1_dma_unmap {
* addresses too so the window works as a filter rather than an offset
* for IOVA addresses.
*
- * A flag will need to be added if other page sizes are supported,
- * so as defined here, it is always 4k.
+ * Flags supported:
+ * - VFIO_IOMMU_SPAPR_INFO_DDW: informs userspace that dynamic DMA window
+ *   (DDW) support is present; @ddw is only valid when this flag is set.
*/
struct vfio_iommu_spapr_tce_info {
__u32 argsz;
- __u32 flags; /* reserved for future use */
+ __u32 flags;
+#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0) /* DDW supported */
__u32 dma32_window_start; /* 32 bit window start (bytes) */
__u32 dma32_window_size; /* 32 bit window size (bytes) */
+ struct vfio_iommu_spapr_tce_ddw_info ddw;
};
#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
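A hedged userspace sketch of reading the extended info; container_fd is assumed to be a VFIO container already set up for the SPAPR TCE IOMMU, and error handling is omitted:

/* Illustrative only: query the window info and report DDW capabilities */
struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };

if (!ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info) &&
    (info.flags & VFIO_IOMMU_SPAPR_INFO_DDW))
	printf("DDW: up to %u windows, %u table levels\n",
	       info.ddw.max_dynamic_windows_supported, info.ddw.levels);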
@@ -470,12 +492,23 @@ struct vfio_iommu_spapr_tce_info {
* - unfreeze IO/DMA for frozen PE;
* - read PE state;
* - reset PE;
- * - configure PE.
+ * - configure PE;
+ * - inject EEH error.
*/
+struct vfio_eeh_pe_err {
+ __u32 type;
+ __u32 func;
+ __u64 addr;
+ __u64 mask;
+};
+
struct vfio_eeh_pe_op {
__u32 argsz;
__u32 flags;
__u32 op;
+ union {
+ struct vfio_eeh_pe_err err;
+ };
};
#define VFIO_EEH_PE_DISABLE 0 /* Disable EEH functionality */
@@ -492,9 +525,70 @@ struct vfio_eeh_pe_op {
#define VFIO_EEH_PE_RESET_HOT 6 /* Assert hot reset */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */
#define VFIO_EEH_PE_CONFIGURE 8 /* PE configuration */
+#define VFIO_EEH_PE_INJECT_ERR 9 /* Inject EEH error */
#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
+/**
+ * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
+ *
+ * Registers user space memory where DMA is allowed. It pins
+ * user pages and does the locked memory accounting so
+ * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
+ * get faster.
+ */
+struct vfio_iommu_spapr_register_memory {
+ __u32 argsz;
+ __u32 flags;
+ __u64 vaddr; /* Process virtual address */
+ __u64 size; /* Size of mapping (bytes) */
+};
+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+
+/**
+ * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
+ *
+ * Unregisters user space memory registered with
+ * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
+ * Uses vfio_iommu_spapr_register_memory for parameters.
+ */
+#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/**
+ * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
+ *
+ * Creates an additional TCE table and programs it (sets a new DMA window)
+ * to every IOMMU group in the container. It receives page shift, window
+ * size and number of levels in the TCE table being created.
+ *
+ * It allocates the new DMA window and returns its offset on the PCI bus.
+ */
+struct vfio_iommu_spapr_tce_create {
+ __u32 argsz;
+ __u32 flags;
+ /* in */
+ __u32 page_shift;
+ __u64 window_size;
+ __u32 levels;
+ /* out */
+ __u64 start_addr;
+};
+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+
+/**
+ * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
+ *
+ * Unprograms a TCE table from all groups in the container and destroys it.
+ * It receives a PCI bus offset as a window id.
+ */
+struct vfio_iommu_spapr_tce_remove {
+ __u32 argsz;
+ __u32 flags;
+ /* in */
+ __u64 start_addr;
+};
+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
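A hedged userspace sketch that exercises the two new ioctls back to back; the page shift, window size and container_fd are illustrative assumptions rather than required values:

/* Illustrative only: create a 64K-page DMA window, then remove it again */
struct vfio_iommu_spapr_tce_create create = {
	.argsz       = sizeof(create),
	.page_shift  = 16,		/* 64K pages */
	.window_size = 1ULL << 30,	/* 1 GiB of IOVA space */
	.levels      = 1,
};
struct vfio_iommu_spapr_tce_remove rm = { .argsz = sizeof(rm) };

if (!ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create)) {
	rm.start_addr = create.start_addr;
	ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &rm);
}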
+
/* ***************************************************************** */
#endif /* _UAPIVFIO_H */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index cd6d789b73ec..99a8ca15fe64 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -32,10 +32,32 @@ struct cxl_ioctl_start_work {
#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
CXL_START_WORK_NUM_IRQS)
+
+/* Possible modes that an afu can be in */
+#define CXL_MODE_DEDICATED 0x1
+#define CXL_MODE_DIRECTED 0x2
+
+/* possible flags for the cxl_afu_id flags field */
+#define CXL_AFUID_FLAG_SLAVE 0x1 /* In directed mode, the AFU is in slave mode */
+
+struct cxl_afu_id {
+ __u64 flags; /* One of CXL_AFUID_FLAG_X */
+ __u32 card_id;
+ __u32 afu_offset;
+ __u32 afu_mode; /* one of the CXL_MODE_X */
+ __u32 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
/* ioctl numbers */
#define CXL_MAGIC 0xCA
#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
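A hedged userspace sketch of the new CXL_IOCTL_GET_AFU_ID ioctl; afu_fd is assumed to be an open cxl AFU device descriptor, and the printed labels are illustrative:

/* Illustrative only: identify which AFU backs this file descriptor */
struct cxl_afu_id afuid = { 0 };

if (!ioctl(afu_fd, CXL_IOCTL_GET_AFU_ID, &afuid))
	printf("card %u, afu %u, %s mode\n",
	       afuid.card_id, afuid.afu_offset,
	       afuid.afu_mode == CXL_MODE_DEDICATED ? "dedicated" : "directed");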
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index b513e662d8e4..978841eeaff1 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -91,6 +91,7 @@ enum {
enum {
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
+ IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
@@ -222,6 +223,8 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
+ __u64 timestamp_mask;
+ __u64 hca_core_clock; /* in KHZ */
};
struct ib_uverbs_query_port {
@@ -353,11 +356,27 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+struct ib_uverbs_ex_create_cq {
+ __u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 comp_mask;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct ib_uverbs_create_cq_resp {
__u32 cq_handle;
__u32 cqe;
};
+struct ib_uverbs_ex_create_cq_resp {
+ struct ib_uverbs_create_cq_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
struct ib_uverbs_resize_cq {
__u64 response;
__u32 cq_handle;
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index bc5013e8059d..91e225a6107d 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -159,10 +159,7 @@ struct neofb_par {
unsigned char VCLK3NumeratorHigh;
unsigned char VCLK3Denominator;
unsigned char VerticalExt;
-
-#ifdef CONFIG_MTRR
- int mtrr;
-#endif
+ int wc_cookie;
u8 __iomem *mmio_vbase;
u8 cursorOff;
u8 *cursorPad; /* Must die !! */
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index befbaf0a92d8..69674b94bb68 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -196,7 +196,7 @@ struct tdfx_par {
u32 palette[16];
void __iomem *regbase_virt;
unsigned long iobase;
- int mtrr_handle;
+ int wc_cookie;
#ifdef CONFIG_FB_3DFX_I2C
struct tdfxfb_i2c_chan chan[2];
#endif