Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/arm_ffa.h                       |   1
-rw-r--r--  include/linux/cpu.h                           |   1
-rw-r--r--  include/linux/fs.h                            |   2
-rw-r--r--  include/linux/ieee80211.h                     |  88
-rw-r--r--  include/linux/mtd/nand-qpic-common.h          |   8
-rw-r--r--  include/linux/netfilter/nf_conntrack_dccp.h   |  38
-rw-r--r--  include/linux/netfs.h                         |  21
-rw-r--r--  include/linux/platform_data/x86/amd-fch.h     |  13
-rw-r--r--  include/linux/psp-sev.h                       |   2
-rw-r--r--  include/linux/spi/spi.h                       |   2
-rw-r--r--  include/linux/suspend.h                       |   5
-rw-r--r--  include/linux/usb.h                           |   2
-rw-r--r--  include/linux/usb/typec_dp.h                  |   1
-rw-r--r--  include/linux/virtio.h                        |   9
-rw-r--r--  include/linux/virtio_config.h                 |  43
-rw-r--r--  include/linux/virtio_features.h               |  88
-rw-r--r--  include/linux/virtio_net.h                    | 197
-rw-r--r--  include/linux/virtio_pci_modern.h             |  43
18 files changed, 458 insertions(+), 106 deletions(-)
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 5bded24dc24f..e1634897e159 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -283,6 +283,7 @@ struct ffa_indirect_msg_hdr {
u32 offset;
u32 send_recv_id;
u32 size;
+ u32 res1;
uuid_t uuid;
};
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 96a3a0d6a60e..6378370a952f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -82,6 +82,7 @@ extern ssize_t cpu_show_old_microcode(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b085f161ed22..040c0036320f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3608,6 +3608,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
+struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
+ const struct inode *context_inode);
extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern const struct dentry_operations simple_dentry_operations;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 120de474a8bf..e5a2096e022e 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2024 Intel Corporation
+ * Copyright (c) 2018 - 2025 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -663,18 +663,6 @@ static inline bool ieee80211_s1g_has_cssid(__le16 fc)
}
/**
- * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
- * @fc: frame control bytes in little-endian byteorder
- * Return: whether or not the frame is an S1G short beacon,
- * i.e. it is an S1G beacon with 'next TBTT' flag set
- */
-static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
-{
- return ieee80211_is_s1g_beacon(fc) &&
- (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
-}
-
-/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
* Return: whether or not the frame is an ATIM frame
@@ -2837,11 +2825,12 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
-#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
-#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
-#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
-#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
-#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD 4
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 8
/**
* struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
@@ -2859,13 +2848,31 @@ struct ieee80211_he_6ghz_oper {
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
-#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x78
u8 control;
u8 ccfs0;
u8 ccfs1;
u8 minrate;
} __packed;
+/**
+ * enum ieee80211_reg_conn_bits - represents Regulatory connectivity field bits.
+ *
+ * This enumeration defines bit flags used to represent regulatory connectivity
+ * field bits.
+ *
+ * @IEEE80211_REG_CONN_LPI_VALID: Indicates whether the LPI bit is valid.
+ * @IEEE80211_REG_CONN_LPI_VALUE: Represents the value of the LPI bit.
+ * @IEEE80211_REG_CONN_SP_VALID: Indicates whether the SP bit is valid.
+ * @IEEE80211_REG_CONN_SP_VALUE: Represents the value of the SP bit.
+ */
+enum ieee80211_reg_conn_bits {
+ IEEE80211_REG_CONN_LPI_VALID = BIT(0),
+ IEEE80211_REG_CONN_LPI_VALUE = BIT(1),
+ IEEE80211_REG_CONN_SP_VALID = BIT(2),
+ IEEE80211_REG_CONN_SP_VALUE = BIT(3),
+};
+
/* transmit power interpretation type of transmit power envelope element */
enum ieee80211_tx_power_intrpt_type {
IEEE80211_TPE_LOCAL_EIRP,
@@ -3847,6 +3854,7 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_DH_PARAMETER = 32,
WLAN_EID_EXT_HE_CAPABILITY = 35,
WLAN_EID_EXT_HE_OPERATION = 36,
WLAN_EID_EXT_UORA = 37,
@@ -3870,6 +3878,8 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_EHT_CAPABILITY = 108,
WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
+ WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
+ WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
};
/* Action category code */
@@ -4911,6 +4921,39 @@ static inline bool ieee80211_is_ftm(struct sk_buff *skb)
return false;
}
+/**
+ * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
+ * @fc: frame control bytes in little-endian byteorder
+ * @variable: pointer to the beacon frame elements
+ * @variable_len: length of the frame elements
+ * Return: whether or not the frame is an S1G short beacon. As per
+ * IEEE80211-2024 11.1.3.10.1, The S1G beacon compatibility element shall
+ * always be present as the first element in beacon frames generated at a
+ * TBTT (Target Beacon Transmission Time), so any frame not containing
+ * this element must have been generated at a TSBTT (Target Short Beacon
+ * Transmission Time) that is not a TBTT. Additionally, short beacons are
+ * prohibited from containing the S1G beacon compatibility element as per
+ * IEEE80211-2024 9.3.4.3 Table 9-76, so if we have an S1G beacon with
+ * either no elements or the first element is not the beacon compatibility
+ * element, we have a short beacon.
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable,
+ size_t variable_len)
+{
+ if (!ieee80211_is_s1g_beacon(fc))
+ return false;
+
+ /*
+ * If the frame does not contain at least 1 element (this is perfectly
+ * valid in a short beacon) and is an S1G beacon, we have a short
+ * beacon.
+ */
+ if (variable_len < 2)
+ return true;
+
+ return variable[0] != WLAN_EID_S1G_BCN_COMPAT;
+}
+
struct element {
u8 id;
u8 datalen;
@@ -5333,6 +5376,13 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
return get_unaligned_le16(common);
}
+/* Defined in Figure 9-1074t in P802.11be_D7.0 */
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE 0x0001
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_RECO_MAX_LINKS_MASK 0x001e
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE 0x0020
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK 0x0040
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_BTM_MLD_RECO_MULTI_AP 0x0080
+
/**
* ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
* and operations.
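[Editor's note] Illustrative sketch (not part of the patch): decoding the new Regulatory Connectivity bits defined by enum ieee80211_reg_conn_bits above. "reg_conn" is an assumed u8 taken from the element payload by the caller.

	static bool reg_conn_lpi_allowed(u8 reg_conn)
	{
		/* The value bit is only meaningful when the matching valid bit is set. */
		return (reg_conn & IEEE80211_REG_CONN_LPI_VALID) &&
		       (reg_conn & IEEE80211_REG_CONN_LPI_VALUE);
	}

	static bool reg_conn_sp_allowed(u8 reg_conn)
	{
		return (reg_conn & IEEE80211_REG_CONN_SP_VALID) &&
		       (reg_conn & IEEE80211_REG_CONN_SP_VALUE);
	}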
diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
index e8462deda6db..f0aa098a395f 100644
--- a/include/linux/mtd/nand-qpic-common.h
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -237,6 +237,9 @@
* @last_data_desc - last DMA desc in data channel (tx/rx).
* @last_cmd_desc - last DMA desc in command channel.
* @txn_done - completion for NAND transfer.
+ * @bam_ce_nitems - the number of elements in the @bam_ce array
+ * @cmd_sgl_nitems - the number of elements in the @cmd_sgl array
+ * @data_sgl_nitems - the number of elements in the @data_sgl array
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start position ce
* for current sgl. It will be used for size calculation
@@ -255,6 +258,11 @@ struct bam_transaction {
struct dma_async_tx_descriptor *last_data_desc;
struct dma_async_tx_descriptor *last_cmd_desc;
struct completion txn_done;
+
+ unsigned int bam_ce_nitems;
+ unsigned int cmd_sgl_nitems;
+ unsigned int data_sgl_nitems;
+
struct_group(bam_positions,
u32 bam_ce_pos;
u32 bam_ce_start;
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
deleted file mode 100644
index c509ed76e714..000000000000
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_CONNTRACK_DCCP_H
-#define _NF_CONNTRACK_DCCP_H
-
-/* Exposed to userspace over nfnetlink */
-enum ct_dccp_states {
- CT_DCCP_NONE,
- CT_DCCP_REQUEST,
- CT_DCCP_RESPOND,
- CT_DCCP_PARTOPEN,
- CT_DCCP_OPEN,
- CT_DCCP_CLOSEREQ,
- CT_DCCP_CLOSING,
- CT_DCCP_TIMEWAIT,
- CT_DCCP_IGNORE,
- CT_DCCP_INVALID,
- __CT_DCCP_MAX
-};
-#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
-
-enum ct_dccp_roles {
- CT_DCCP_ROLE_CLIENT,
- CT_DCCP_ROLE_SERVER,
- __CT_DCCP_ROLE_MAX
-};
-#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
-
-#include <linux/netfilter/nf_conntrack_tuple_common.h>
-
-struct nf_ct_dccp {
- u_int8_t role[IP_CT_DIR_MAX];
- u_int8_t state;
- u_int8_t last_pkt;
- u_int8_t last_dir;
- u_int64_t handshake_seq;
-};
-
-#endif /* _NF_CONNTRACK_DCCP_H */
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 065c17385e53..f43f075852c0 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -265,21 +265,20 @@ struct netfs_io_request {
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
refcount_t ref;
unsigned long flags;
-#define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */
-#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
-#define NETFS_RREQ_FAILED 4 /* The request failed */
-#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes (has ref) */
-#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */
-#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
-#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
+#define NETFS_RREQ_IN_PROGRESS 0 /* Unlocked when the request completes (has ref) */
+#define NETFS_RREQ_ALL_QUEUED 1 /* All subreqs are now queued */
+#define NETFS_RREQ_PAUSE 2 /* Pause subrequest generation */
+#define NETFS_RREQ_FAILED 3 /* The request failed */
+#define NETFS_RREQ_RETRYING 4 /* Set if we're in the retry path */
+#define NETFS_RREQ_SHORT_TRANSFER 5 /* Set if we have a short transfer */
+#define NETFS_RREQ_OFFLOAD_COLLECTION 8 /* Offload collection to workqueue */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 9 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 10 /* Copy current folio to cache from read */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 11 /* Need to write to the server */
#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
-#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
-#define NETFS_RREQ_RETRYING 14 /* Set if we're in the retry path */
-#define NETFS_RREQ_SHORT_TRANSFER 15 /* Set if we have a short transfer */
#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
* write to cache on read */
const struct netfs_request_ops *netfs_ops;
- void (*cleanup)(struct netfs_io_request *req);
};
/*
diff --git a/include/linux/platform_data/x86/amd-fch.h b/include/linux/platform_data/x86/amd-fch.h
new file mode 100644
index 000000000000..2cf5153edbc2
--- /dev/null
+++ b/include/linux/platform_data/x86/amd-fch.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AMD_FCH_H_
+#define _ASM_X86_AMD_FCH_H_
+
+#define FCH_PM_BASE 0xFED80300
+
+/* Register offsets from PM base: */
+#define FCH_PM_DECODEEN 0x00
+#define FCH_PM_DECODEEN_SMBUS0SEL GENMASK(20, 19)
+#define FCH_PM_SCRATCH 0x80
+#define FCH_PM_S5_RESET_STATUS 0xC0
+
+#endif /* _ASM_X86_AMD_FCH_H_ */
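[Editor's note] A minimal sketch (not part of the patch) of how a driver might use the new FCH constants. The 0x100 mapping size and the bare ioremap()/readl() access pattern are assumptions for illustration only.

	#include <linux/io.h>
	#include <linux/platform_data/x86/amd-fch.h>

	static u32 fch_read_s5_reset_status(void)
	{
		void __iomem *base = ioremap(FCH_PM_BASE, 0x100); /* size is an assumption */
		u32 val;

		if (!base)
			return 0;
		val = readl(base + FCH_PM_S5_RESET_STATUS);
		iounmap(base);
		return val;
	}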
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 0b3a36bdaa90..0f5f94137f6d 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -594,6 +594,7 @@ struct sev_data_snp_addr {
* @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the
* purpose of guest-assisted migration.
* @rsvd: reserved
+ * @desired_tsc_khz: hypervisor desired mean TSC freq in kHz of the guest
* @gosvw: guest OS-visible workarounds, as defined by hypervisor
*/
struct sev_data_snp_launch_start {
@@ -603,6 +604,7 @@ struct sev_data_snp_launch_start {
u32 ma_en:1; /* In */
u32 imi_en:1; /* In */
u32 rsvd:30;
+ u32 desired_tsc_khz; /* In */
u8 gosvw[16]; /* In */
} __packed;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4789f91dae94..e9ea43234d9a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -21,7 +21,7 @@
#include <uapi/linux/spi/spi.h>
/* Max no. of CS supported per spi device */
-#define SPI_CS_CNT_MAX 16
+#define SPI_CS_CNT_MAX 24
struct dma_chan;
struct software_node;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b1c76c8f2c82..6a3f92098872 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -446,6 +446,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
extern void ksys_sync_helper(void);
extern void pm_report_hw_sleep_time(u64 t);
extern void pm_report_max_hw_sleep(u64 t);
+void pm_restrict_gfp_mask(void);
+void pm_restore_gfp_mask(void);
#define pm_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
@@ -492,6 +494,9 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline void pm_report_hw_sleep_time(u64 t) {};
static inline void pm_report_max_hw_sleep(u64 t) {};
+static inline void pm_restrict_gfp_mask(void) {}
+static inline void pm_restore_gfp_mask(void) {}
+
static inline void ksys_sync_helper(void) {}
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 1b2545b4363b..92c752f5446f 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -614,6 +614,7 @@ struct usb3_lpm_parameters {
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
* @tunnel_mode: Connection native or tunneled over USB4
+ * @usb4_link: device link to the USB4 host interface
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -724,6 +725,7 @@ struct usb_device {
unsigned reset_resume:1;
unsigned port_is_suspended:1;
enum usb_link_tunnel_mode tunnel_mode;
+ struct device_link *usb4_link;
int slot_id;
struct usb2_lpm_parameters l1_params;
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index f2da264d9c14..acb0ad03bdac 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -57,6 +57,7 @@ enum {
DP_PIN_ASSIGN_D,
DP_PIN_ASSIGN_E,
DP_PIN_ASSIGN_F, /* Not supported after v1.0b */
+ DP_PIN_ASSIGN_MAX,
};
/* DisplayPort alt mode specific commands */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 64cb4b04be7a..04b90c88d164 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -11,6 +11,7 @@
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
+#include <linux/virtio_features.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -141,7 +142,9 @@ struct virtio_admin_cmd {
* @config: the configuration ops for this device.
* @vringh_config: configuration ops for host vrings.
* @vqs: the list of virtqueues for this device.
- * @features: the features supported by both driver and device.
+ * @features: the 64 lower features supported by both driver and device.
+ * @features_array: the full features space supported by both driver and
+ * device.
* @priv: private pointer for the driver's use.
* @debugfs_dir: debugfs directory entry.
* @debugfs_filter_features: features to be filtered set by debugfs.
@@ -159,11 +162,11 @@ struct virtio_device {
const struct virtio_config_ops *config;
const struct vringh_config_ops *vringh_config;
struct list_head vqs;
- u64 features;
+ VIRTIO_DECLARE_FEATURES(features);
void *priv;
#ifdef CONFIG_VIRTIO_DEBUG
struct dentry *debugfs_dir;
- u64 debugfs_filter_features;
+ u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS];
#endif
};
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index b3e1d30c765b..918cf25cd3c6 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -77,7 +77,11 @@ struct virtqueue_info {
* vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 64 feature bits (all we currently need).
+ * Returns the first 64 feature bits.
+ * @get_extended_features:
+ * vdev: the virtio_device
+ * Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently
+ * need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This sends the driver feature bits to the device: it can change
@@ -121,6 +125,8 @@ struct virtio_config_ops {
void (*del_vqs)(struct virtio_device *);
void (*synchronize_cbs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
+ void (*get_extended_features)(struct virtio_device *vdev,
+ u64 *features);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
int (*set_vq_affinity)(struct virtqueue *vq,
@@ -147,13 +153,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- return vdev->features & BIT_ULL(fbit);
+ return virtio_features_test_bit(vdev->features_array, fbit);
}
/**
@@ -164,13 +164,7 @@ static inline bool __virtio_test_bit(const struct virtio_device *vdev,
static inline void __virtio_set_bit(struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- vdev->features |= BIT_ULL(fbit);
+ virtio_features_set_bit(vdev->features_array, fbit);
}
/**
@@ -181,13 +175,7 @@ static inline void __virtio_set_bit(struct virtio_device *vdev,
static inline void __virtio_clear_bit(struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- vdev->features &= ~BIT_ULL(fbit);
+ virtio_features_clear_bit(vdev->features_array, fbit);
}
/**
@@ -204,6 +192,17 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return __virtio_test_bit(vdev, fbit);
}
+static inline void virtio_get_features(struct virtio_device *vdev,
+ u64 *features)
+{
+ if (vdev->config->get_extended_features) {
+ vdev->config->get_extended_features(vdev, features);
+ return;
+ }
+
+ virtio_features_from_u64(features, vdev->config->get_features(vdev));
+}
+
/**
* virtio_has_dma_quirk - determine whether this device has the DMA quirk
* @vdev: the device
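[Editor's note] Illustrative sketch (not part of the patch): a transport that only knows 64 feature bits can keep implementing get_features(), and virtio_get_features() falls back to it through virtio_features_from_u64(). A transport with a wider feature space would provide get_extended_features() along these lines; foo_read_low_features()/foo_read_high_features() are made-up helpers.

	static void foo_get_extended_features(struct virtio_device *vdev, u64 *features)
	{
		virtio_features_zero(features);
		features[0] = foo_read_low_features(vdev);  /* assumed transport helpers */
		features[1] = foo_read_high_features(vdev);
	}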
diff --git a/include/linux/virtio_features.h b/include/linux/virtio_features.h
new file mode 100644
index 000000000000..f748f2f87de8
--- /dev/null
+++ b/include/linux/virtio_features.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_FEATURES_H
+#define _LINUX_VIRTIO_FEATURES_H
+
+#include <linux/bits.h>
+
+#define VIRTIO_FEATURES_DWORDS 2
+#define VIRTIO_FEATURES_MAX (VIRTIO_FEATURES_DWORDS * 64)
+#define VIRTIO_FEATURES_WORDS (VIRTIO_FEATURES_DWORDS * 2)
+#define VIRTIO_BIT(b) BIT_ULL((b) & 0x3f)
+#define VIRTIO_DWORD(b) ((b) >> 6)
+#define VIRTIO_DECLARE_FEATURES(name) \
+ union { \
+ u64 name; \
+ u64 name##_array[VIRTIO_FEATURES_DWORDS];\
+ }
+
+static inline bool virtio_features_chk_bit(unsigned int bit)
+{
+ if (__builtin_constant_p(bit)) {
+ /*
+ * Don't care returning the correct value: the build
+ * will fail before any bad features access
+ */
+ BUILD_BUG_ON(bit >= VIRTIO_FEATURES_MAX);
+ } else {
+ if (WARN_ON_ONCE(bit >= VIRTIO_FEATURES_MAX))
+ return false;
+ }
+ return true;
+}
+
+static inline bool virtio_features_test_bit(const u64 *features,
+ unsigned int bit)
+{
+ return virtio_features_chk_bit(bit) &&
+ !!(features[VIRTIO_DWORD(bit)] & VIRTIO_BIT(bit));
+}
+
+static inline void virtio_features_set_bit(u64 *features,
+ unsigned int bit)
+{
+ if (virtio_features_chk_bit(bit))
+ features[VIRTIO_DWORD(bit)] |= VIRTIO_BIT(bit);
+}
+
+static inline void virtio_features_clear_bit(u64 *features,
+ unsigned int bit)
+{
+ if (virtio_features_chk_bit(bit))
+ features[VIRTIO_DWORD(bit)] &= ~VIRTIO_BIT(bit);
+}
+
+static inline void virtio_features_zero(u64 *features)
+{
+ memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_DWORDS);
+}
+
+static inline void virtio_features_from_u64(u64 *features, u64 from)
+{
+ virtio_features_zero(features);
+ features[0] = from;
+}
+
+static inline bool virtio_features_equal(const u64 *f1, const u64 *f2)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_DWORDS; ++i)
+ if (f1[i] != f2[i])
+ return false;
+ return true;
+}
+
+static inline void virtio_features_copy(u64 *to, const u64 *from)
+{
+ memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_DWORDS);
+}
+
+static inline void virtio_features_andnot(u64 *to, const u64 *f1, const u64 *f2)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_DWORDS; i++)
+ to[i] = f1[i] & ~f2[i];
+}
+
+#endif
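[Editor's note] A short sketch (not part of the patch) exercising the helpers above on the 128-bit feature space; bit 65 is an arbitrary example and simply lands in the second u64 of the array.

	u64 feats[VIRTIO_FEATURES_DWORDS];
	bool set;

	virtio_features_zero(feats);
	virtio_features_set_bit(feats, 65);         /* sets BIT_ULL(1) in feats[1] */
	set = virtio_features_test_bit(feats, 65);  /* true */
	virtio_features_clear_bit(feats, 65);       /* feats is all-zero again */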
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 02a9f4dc594d..20e0584db1dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -47,9 +47,9 @@ static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
return 0;
}
-static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
- const struct virtio_net_hdr *hdr,
- bool little_endian)
+static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian, u8 hdr_gso_type)
{
unsigned int nh_min_len = sizeof(struct iphdr);
unsigned int gso_type = 0;
@@ -57,8 +57,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
unsigned int p_off = 0;
unsigned int ip_proto;
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4;
ip_proto = IPPROTO_TCP;
@@ -84,7 +84,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
- if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN)
gso_type |= SKB_GSO_TCP_ECN;
if (hdr->gso_size == 0)
@@ -122,7 +122,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!protocol)
virtio_net_hdr_set_proto(skb, hdr);
- else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+ else if (!virtio_net_hdr_match_proto(protocol,
+ hdr_gso_type))
return -EINVAL;
else
skb->protocol = protocol;
@@ -153,7 +154,7 @@ retry:
}
}
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -199,6 +200,13 @@ retry:
return 0;
}
+static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian)
+{
+ return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type);
+}
+
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian,
@@ -242,4 +250,177 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
return 0;
}
+static inline unsigned int virtio_l3min(bool is_ipv6)
+{
+ return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+}
+
+static inline int
+virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool tnl_csum_negotiated,
+ bool little_endian)
+{
+ const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th, inner_th;
+ unsigned int inner_l3min, outer_l3min;
+ u8 gso_inner_type, gso_tunnel_type;
+ bool outer_isv6, inner_isv6;
+ int ret;
+
+ gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
+ if (!gso_tunnel_type)
+ return virtio_net_hdr_to_skb(skb, hdr, little_endian);
+
+ /* Tunnel not supported/negotiated, but the hdr asks for it. */
+ if (!tnl_hdr_negotiated)
+ return -EINVAL;
+
+ /* Either ipv4 or ipv6. */
+ if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL)
+ return -EINVAL;
+
+ /* The UDP tunnel must carry a GSO packet, but no UFO. */
+ gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN |
+ VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
+ if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP)
+ return -EINVAL;
+
+ /* Rely on csum being present. */
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+
+ /* Validate offsets. */
+ outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6;
+ inner_l3min = virtio_l3min(inner_isv6);
+ outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);
+
+ inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ inner_nh = le16_to_cpu(vhdr->inner_nh_offset);
+ outer_th = le16_to_cpu(vhdr->outer_th_offset);
+ if (outer_th < outer_l3min ||
+ inner_nh < outer_th + sizeof(struct udphdr) ||
+ inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ ret = __virtio_net_hdr_to_skb(skb, hdr, true,
+ hdr->gso_type & ~gso_tunnel_type);
+ if (ret)
+ return ret;
+
+ /* In case of USO, the inner protocol is still unknown and
+ * `inner_isv6` is just a guess, additional parsing is needed.
+ * The previous validation ensures that accessing an ipv4 inner
+ * network header is safe.
+ */
+ if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) {
+ struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh);
+
+ inner_isv6 = iphdr->version == 6;
+ inner_l3min = virtio_l3min(inner_isv6);
+ if (inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+ }
+
+ skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) :
+ htons(ETH_P_IP));
+ if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
+ if (!tnl_csum_negotiated)
+ return -EINVAL;
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ } else {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+
+ skb->inner_transport_header = inner_th + skb_headroom(skb);
+ skb->inner_network_header = inner_nh + skb_headroom(skb);
+ skb->inner_mac_header = inner_nh + skb_headroom(skb);
+ skb->transport_header = outer_th + skb_headroom(skb);
+ skb->encapsulation = 1;
+ return 0;
+}
+
+/* Checksum-related fields validation for the driver */
+static inline int virtio_net_handle_csum_offload(struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool tnl_csum_negotiated)
+{
+ if (!(hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL)) {
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID))
+ return 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM))
+ return 0;
+
+ /* tunnel csum packets are invalid when the related
+ * feature has not been negotiated
+ */
+ if (!tnl_csum_negotiated)
+ return -EINVAL;
+ skb->csum_level = 1;
+ return 0;
+ }
+
+ /* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO
+ * over UDP tunnel requires the latter
+ */
+ if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * vlan_hlen always refers to the outermost MAC header. That also
+ * means it refers to the only MAC header, if the packet does not carry
+ * any encapsulation.
+ */
+static inline int
+virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool little_endian,
+ int vlan_hlen)
+{
+ struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th;
+ int tnl_gso_type;
+ int ret;
+
+ tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM);
+ if (!tnl_gso_type)
+ return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
+ vlan_hlen);
+
+ /* Tunnel support not negotiated but skb ask for it. */
+ if (!tnl_hdr_negotiated)
+ return -EINVAL;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
+ ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
+ skb_shinfo(skb)->gso_type |= tnl_gso_type;
+ if (ret)
+ return ret;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ else
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM;
+
+ inner_nh = skb->inner_network_header - skb_headroom(skb);
+ outer_th = skb->transport_header - skb_headroom(skb);
+ vhdr->inner_nh_offset = cpu_to_le16(inner_nh);
+ vhdr->outer_th_offset = cpu_to_le16(outer_th);
+ return 0;
+}
+
#endif /* _LINUX_VIRTIO_NET_H */
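[Editor's note] Illustrative sketch (not part of the patch): a receive path handing a tunnel-aware header to the new helper. The two "negotiated" booleans are assumed to reflect the driver's UDP-tunnel GSO feature negotiation, done elsewhere.

	static int rx_parse_tnl_hdr(struct virtio_device *vdev, struct sk_buff *skb,
				    const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
				    bool tnl_negotiated, bool tnl_csum_negotiated)
	{
		/* Falls back to virtio_net_hdr_to_skb() when the header carries
		 * no UDP-tunnel GSO type.
		 */
		return virtio_net_hdr_tnl_to_skb(skb, vhdr, tnl_negotiated,
						 tnl_csum_negotiated,
						 virtio_is_little_endian(vdev));
	}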
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index c0b1b1ca1163..48bc12d1045b 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -3,6 +3,7 @@
#define _LINUX_VIRTIO_PCI_MODERN_H
#include <linux/pci.h>
+#include <linux/virtio_config.h>
#include <linux/virtio_pci.h>
/**
@@ -95,10 +96,44 @@ static inline void vp_iowrite64_twopart(u64 val,
vp_iowrite32(val >> 32, hi);
}
-u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
-u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
-void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
- u64 features);
+void
+vp_modern_get_driver_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features);
+void vp_modern_get_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features);
+void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev,
+ const u64 *features);
+
+static inline u64
+vp_modern_get_features(struct virtio_pci_modern_device *mdev)
+{
+ u64 features_array[VIRTIO_FEATURES_DWORDS];
+
+ vp_modern_get_extended_features(mdev, features_array);
+ return features_array[0];
+}
+
+static inline u64
+vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
+{
+ u64 features_array[VIRTIO_FEATURES_DWORDS];
+ int i;
+
+ vp_modern_get_driver_extended_features(mdev, features_array);
+ for (i = 1; i < VIRTIO_FEATURES_DWORDS; ++i)
+ WARN_ON_ONCE(features_array[i]);
+ return features_array[0];
+}
+
+static inline void
+vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features)
+{
+ u64 features_array[VIRTIO_FEATURES_DWORDS];
+
+ virtio_features_from_u64(features_array, features);
+ vp_modern_set_extended_features(mdev, features_array);
+}
+
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,