Diffstat (limited to 'drivers'): 1061 files changed, 18664 insertions, 13112 deletions
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig index be85336107f9..1919fbb169c7 100644 --- a/drivers/accel/habanalabs/Kconfig +++ b/drivers/accel/habanalabs/Kconfig @@ -6,7 +6,7 @@ config DRM_ACCEL_HABANALABS tristate "HabanaLabs AI accelerators" depends on DRM_ACCEL - depends on X86_64 + depends on X86 && X86_64 depends on PCI && HAS_IOMEM select GENERIC_ALLOCATOR select HWMON diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c index 8729a0c57d78..dc80ca921d90 100644 --- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c @@ -17,8 +17,6 @@ #include <linux/uaccess.h> #include <linux/vmalloc.h> -#include <asm/msr.h> - /* make sure there is space for all the signed info */ static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ); diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index 0825851656a2..cd24ccd20ba6 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -332,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si return -EINVAL; ret = ivpu_rpm_get(vdev); - if (ret) + if (ret < 0) return ret; ivpu_pm_trigger_recovery(vdev, "debugfs"); @@ -383,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent) return -EINVAL; ret = ivpu_rpm_get(vdev); - if (ret) + if (ret < 0) return ret; if (active_percent) @@ -455,7 +455,7 @@ priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t if (ret < 0) return ret; - buf[size] = '\0'; + buf[ret] = '\0'; ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period, &process_quantum); if (ret != 4) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 4fa73189502e..eff1d3ca075f 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #include <linux/firmware.h> @@ -164,7 +164,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f args->value = vdev->platform; break; case DRM_IVPU_PARAM_CORE_CLOCK_RATE: - args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio); + args->value = ivpu_hw_dpu_max_freq_get(vdev); break; case DRM_IVPU_PARAM_NUM_CONTEXTS: args->value = ivpu_get_context_count(vdev); @@ -421,9 +421,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev) { ivpu_hw_irq_disable(vdev); disable_irq(vdev->irq); - cancel_work_sync(&vdev->irq_ipc_work); - cancel_work_sync(&vdev->irq_dct_work); - cancel_work_sync(&vdev->context_abort_work); + flush_work(&vdev->irq_ipc_work); + flush_work(&vdev->irq_dct_work); + flush_work(&vdev->context_abort_work); ivpu_ipc_disable(vdev); ivpu_mmu_disable(vdev); } diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 7a1bb92d8c81..ccaaf6c100c0 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #include <linux/firmware.h> @@ -233,10 +233,20 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->dvfs_mode = 0; fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr); - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; - 
fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS"); + if (fw_hdr->preemption_buffer_1_max_size) + fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; + else + fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + + if (fw_hdr->preemption_buffer_2_max_size) + fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; + else + fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; + ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n", + fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size); + if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size, fw_hdr->image_load_address, @@ -534,7 +544,7 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->d0i3_entry_vpu_ts); ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n", boot_params->system_time_us); - ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n", + ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", boot_params->power_profile); } @@ -566,7 +576,6 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->magic = VPU_BOOT_PARAMS_MAGIC; boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number; - boot_params->frequency = ivpu_hw_pll_freq_get(vdev); /* * This param is a debug firmware feature. It switches default clock @@ -637,7 +646,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->d0i3_residency_time_us = 0; boot_params->d0i3_entry_vpu_ts = 0; if (IVPU_WA(disable_d0i2)) - boot_params->power_profile = 1; + boot_params->power_profile |= BIT(1); boot_params->system_time_us = ktime_to_us(ktime_get_real()); wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index ec9a3629da3a..633160470c93 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev) else vdev->timeout.autosuspend = 100; vdev->timeout.d0i3_entry_msg = 5; - vdev->timeout.state_dump_msg = 10; + vdev->timeout.state_dump_msg = 100; } } diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index 16435f2756d0..d79668fe1609 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_HW_H__ @@ -82,19 +82,19 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range) return range->end - range->start; } -static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) +static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev) { - return ivpu_hw_btrs_ratio_to_freq(vdev, ratio); + return ivpu_hw_btrs_dpu_max_freq_get(vdev); } -static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev) +static inline u32 ivpu_hw_dpu_freq_get(struct ivpu_device *vdev) { - ivpu_hw_ip_irq_clear(vdev); + return ivpu_hw_btrs_dpu_freq_get(vdev); } -static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev) +static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev) { - return ivpu_hw_btrs_pll_freq_get(vdev); + ivpu_hw_ip_irq_clear(vdev); } static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev) 
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c index 56c56012b980..b236c7234daa 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.c +++ b/drivers/accel/ivpu/ivpu_hw_btrs.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ +#include <linux/units.h> + #include "ivpu_drv.h" #include "ivpu_hw.h" #include "ivpu_hw_btrs.h" @@ -28,17 +30,13 @@ #define BTRS_LNL_ALL_IRQ_MASK ((u32)-1) -#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3) -#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3) -#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3) -#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3) -#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0) #define PLL_CDYN_DEFAULT 0x80 #define PLL_EPP_DEFAULT 0x80 #define PLL_CONFIG_DEFAULT 0x0 -#define PLL_SIMULATION_FREQ 10000000 -#define PLL_REF_CLK_FREQ 50000000 +#define PLL_REF_CLK_FREQ 50000000ull +#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) + #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) #define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) #define TIMEOUT_US (150 * USEC_PER_MSEC) @@ -62,6 +60,8 @@ #define DCT_ENABLE 0x1 #define DCT_DISABLE 0x0 +static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio); + int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev) { REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK); @@ -156,7 +156,7 @@ static int info_init_mtl(struct ivpu_device *vdev) hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH; hw->sku = BTRS_MTL_TILE_SKU_BOTH; - hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO; + hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3); return 0; } @@ -334,8 +334,8 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) prepare_wp_request(vdev, &wp, enable); - ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n", - PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn); + ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n", + pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn); ret = wp_request_send(vdev, &wp); if (ret) { @@ -573,6 +573,47 @@ int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev) return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); } +static u32 pll_config_get_mtl(struct ivpu_device *vdev) +{ + return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL); +} + +static u32 pll_config_get_lnl(struct ivpu_device *vdev) +{ + return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ); +} + +static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio) +{ + return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3; +} + +static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio) +{ + return PLL_RATIO_TO_FREQ(ratio) / 2; +} + +static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio) +{ + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) + return pll_ratio_to_dpu_freq_mtl(ratio); + else + return pll_ratio_to_dpu_freq_lnl(ratio); +} + +u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev) +{ + return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio); +} + +u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev) +{ + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) + return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev)); + else + return 
pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev)); +} + /* Handler for IRQs from Buttress core (irqB) */ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq) { @@ -582,9 +623,12 @@ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq) if (!status) return false; - if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) - ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", - REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL)); + if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) { + u32 pll = pll_config_get_mtl(vdev); + + ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz", + pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ); + } if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) { ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); @@ -633,8 +677,12 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) queue_work(system_wq, &vdev->irq_dct_work); } - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) - ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ)); + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) { + u32 pll = pll_config_get_lnl(vdev); + + ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz", + pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ); + } if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) { ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", @@ -717,60 +765,6 @@ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 acti REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val); } -static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config) -{ - u32 pll_clock = PLL_REF_CLK_FREQ * ratio; - u32 cpu_clock; - - if ((config & 0xff) == MTL_PLL_RATIO_4_3) - cpu_clock = pll_clock * 2 / 4; - else - cpu_clock = pll_clock * 2 / 5; - - return cpu_clock; -} - -u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) -{ - struct ivpu_hw_info *hw = vdev->hw; - - if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) - return pll_ratio_to_freq_mtl(ratio, hw->config); - else - return PLL_RATIO_TO_FREQ(ratio); -} - -static u32 pll_freq_get_mtl(struct ivpu_device *vdev) -{ - u32 pll_curr_ratio; - - pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL); - pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK; - - if (!ivpu_is_silicon(vdev)) - return PLL_SIMULATION_FREQ; - - return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config); -} - -static u32 pll_freq_get_lnl(struct ivpu_device *vdev) -{ - u32 pll_curr_ratio; - - pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ); - pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK; - - return PLL_RATIO_TO_FREQ(pll_curr_ratio); -} - -u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev) -{ - if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) - return pll_freq_get_mtl(vdev); - else - return pll_freq_get_lnl(vdev); -} - u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev) { if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h index 1fd71b4d4ab0..d2d82651976d 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_HW_BTRS_H__ @@ -13,9 +13,8 @@ #define PLL_PROFILING_FREQ_DEFAULT 38400000 #define PLL_PROFILING_FREQ_HIGH 
400000000 -#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) -#define DCT_DEFAULT_ACTIVE_PERCENT 15u +#define DCT_DEFAULT_ACTIVE_PERCENT 30u #define DCT_PERIOD_US 35300u int ivpu_hw_btrs_info_init(struct ivpu_device *vdev); @@ -32,12 +31,12 @@ int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev); void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev); void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev); void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev); +u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev); +u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev); bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent); -u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev); -u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio); u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 0e096fd9b95d..39f83225c181 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -302,7 +302,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req struct ivpu_ipc_consumer cons; int ret; - drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev)); + drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) && + pm_runtime_enabled(vdev->drm.dev)); ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 004059e4f1e8..b28da35c30b6 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -470,8 +470,8 @@ static void ivpu_job_destroy(struct ivpu_job *job) struct ivpu_device *vdev = job->vdev; u32 i; - ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d", - job->job_id, job->file_priv->ctx.id, job->engine_idx); + ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d cmdq_id %u engine %d", + job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx); for (i = 0; i < job->bo_count; i++) if (job->bos[i]) @@ -564,8 +564,8 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 dma_fence_signal(job->done_fence); trace_job("done", job); - ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n", - job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status); + ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n", + job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status); ivpu_job_destroy(job); ivpu_stop_job_timeout_detection(vdev); @@ -664,8 +664,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) } trace_job("submit", job); - ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n", - job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->priority, + ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d cmdq_id %u engine %d prio %d addr 0x%llx next %d\n", + job->job_id, file_priv->ctx.id, cmdq->id, job->engine_idx, cmdq->priority, job->cmd_buf_vpu_addr, cmdq->jobq->header.tail); mutex_unlock(&file_priv->lock); @@ -681,8 +681,8 @@ static 
int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) err_erase_xa: xa_erase(&vdev->submitted_jobs_xa, job->job_id); err_unlock: - mutex_unlock(&vdev->submitted_jobs_lock); mutex_unlock(&file_priv->lock); + mutex_unlock(&vdev->submitted_jobs_lock); ivpu_rpm_put(vdev); return ret; } @@ -777,7 +777,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, goto err_free_handles; } - ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", file_priv->ctx.id, buffer_count); + ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u cmdq_id %u buf_count %u\n", + file_priv->ctx.id, cmdq_id, buffer_count); job = ivpu_job_create(file_priv, engine, buffer_count); if (!job) { @@ -873,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file * int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_create *args = data; struct ivpu_cmdq *cmdq; + int ret; - if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) return -ENODEV; if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->lock); cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false); @@ -890,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file * mutex_unlock(&file_priv->lock); + ivpu_rpm_put(vdev); + return cmdq ? 0 : -ENOMEM; } @@ -899,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_destroy *args = data; struct ivpu_cmdq *cmdq; - u32 cmdq_id; + u32 cmdq_id = 0; int ret; if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) return -ENODEV; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->lock); cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id); if (!cmdq || cmdq->is_legacy) { ret = -ENOENT; - goto err_unlock; + } else { + cmdq_id = cmdq->id; + ivpu_cmdq_destroy(file_priv, cmdq); + ret = 0; } - cmdq_id = cmdq->id; - ivpu_cmdq_destroy(file_priv, cmdq); mutex_unlock(&file_priv->lock); - ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id); - return 0; -err_unlock: - mutex_unlock(&file_priv->lock); + /* Abort any pending jobs only if cmdq was destroyed */ + if (!ret) + ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id); + + ivpu_rpm_put(vdev); + return ret; } diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c index ffe7b10f8a76..2a043baf10ca 100644 --- a/drivers/accel/ivpu/ivpu_ms.c +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -4,6 +4,7 @@ */ #include <drm/drm_file.h> +#include <linux/pm_runtime.h> #include "ivpu_drv.h" #include "ivpu_gem.h" @@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->ms_lock); if (get_instance_by_mask(file_priv, args->metric_group_mask)) { @@ -96,6 +101,8 @@ err_free_ms: kfree(ms); unlock: mutex_unlock(&file_priv->ms_lock); + + ivpu_rpm_put(vdev); return ret; } @@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file * if (!args->metric_group_mask) return -EINVAL; + ret = 
ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->ms_lock); ms = get_instance_by_mask(file_priv, args->metric_group_mask); @@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file * unlock: mutex_unlock(&file_priv->ms_lock); + ivpu_rpm_put(vdev); return ret; } @@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file { struct ivpu_file_priv *file_priv = file->driver_priv; struct drm_ivpu_metric_streamer_stop *args = data; + struct ivpu_device *vdev = file_priv->vdev; struct ivpu_ms_instance *ms; + int ret; if (!args->metric_group_mask) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->ms_lock); ms = get_instance_by_mask(file_priv, args->metric_group_mask); @@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file mutex_unlock(&file_priv->ms_lock); + ivpu_rpm_put(vdev); return ms ? 0 : -EINVAL; } @@ -281,6 +300,9 @@ unlock: void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv) { struct ivpu_ms_instance *ms, *tmp; + struct ivpu_device *vdev = file_priv->vdev; + + pm_runtime_get_sync(vdev->drm.dev); mutex_lock(&file_priv->ms_lock); @@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv) free_instance(file_priv, ms); mutex_unlock(&file_priv->ms_lock); + + pm_runtime_put_autosuspend(vdev->drm.dev); } void ivpu_ms_cleanup_all(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index b5891e91f7ab..ac0e22454596 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -428,16 +428,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent) active_us = (DCT_PERIOD_US * active_percent) / 100; inactive_us = DCT_PERIOD_US - active_us; + vdev->pm->dct_active_percent = active_percent; + + ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n", + active_percent, active_us, inactive_us); + ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us); if (ret) { ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret); return ret; } - vdev->pm->dct_active_percent = active_percent; - - ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n", - active_percent, active_us, inactive_us); return 0; } @@ -445,15 +446,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev) { int ret; + vdev->pm->dct_active_percent = 0; + + ivpu_dbg(vdev, PM, "DCT requested to be disabled\n"); + ret = ivpu_jsm_dct_disable(vdev); if (ret) { ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret); return ret; } - vdev->pm->dct_active_percent = 0; - - ivpu_dbg(vdev, PM, "DCT disabled\n"); return 0; } @@ -466,7 +468,7 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work) if (ivpu_hw_btrs_dct_get_request(vdev, &enable)) return; - if (vdev->pm->dct_active_percent) + if (enable) ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT); else ret = ivpu_pm_dct_disable(vdev); diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c index 97102feaf8dd..268ab7744a8b 100644 --- a/drivers/accel/ivpu/ivpu_sysfs.c +++ b/drivers/accel/ivpu/ivpu_sysfs.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation */ #include <linux/device.h> #include <linux/err.h> +#include <linux/pm_runtime.h> +#include <linux/units.h> #include "ivpu_drv.h" #include "ivpu_gem.h" @@ -90,10 +92,55 @@ sched_mode_show(struct device 
*dev, struct device_attribute *attr, char *buf) static DEVICE_ATTR_RO(sched_mode); +/** + * DOC: npu_max_frequency + * + * The npu_max_frequency shows maximum frequency in MHz of the NPU's data + * processing unit + */ +static ssize_t +npu_max_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device *vdev = to_ivpu_device(drm); + u32 freq = ivpu_hw_dpu_max_freq_get(vdev); + + return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ); +} + +static DEVICE_ATTR_RO(npu_max_frequency_mhz); + +/** + * DOC: npu_current_frequency_mhz + * + * The npu_current_frequency_mhz shows current frequency in MHz of the NPU's + * data processing unit + */ +static ssize_t +npu_current_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device *vdev = to_ivpu_device(drm); + u32 freq = 0; + + /* Read frequency only if device is active, otherwise frequency is 0 */ + if (pm_runtime_get_if_active(vdev->drm.dev) > 0) { + freq = ivpu_hw_dpu_freq_get(vdev); + + pm_runtime_put_autosuspend(vdev->drm.dev); + } + + return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ); +} + +static DEVICE_ATTR_RO(npu_current_frequency_mhz); + static struct attribute *ivpu_dev_attrs[] = { &dev_attr_npu_busy_time_us.attr, &dev_attr_npu_memory_utilization.attr, &dev_attr_sched_mode.attr, + &dev_attr_npu_max_frequency_mhz.attr, + &dev_attr_npu_current_frequency_mhz.attr, NULL, }; diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h index 908e68ea1c39..218468bbbcad 100644 --- a/drivers/accel/ivpu/vpu_boot_api.h +++ b/drivers/accel/ivpu/vpu_boot_api.h @@ -26,7 +26,7 @@ * Minor version changes when API backward compatibility is preserved. * Resets to 0 if Major version is incremented. */ -#define VPU_BOOT_API_VER_MINOR 26 +#define VPU_BOOT_API_VER_MINOR 28 /* * API header changed (field names, documentation, formatting) but API itself has not been changed @@ -76,8 +76,15 @@ struct vpu_firmware_header { * submission queue size and device capabilities. */ u32 preemption_buffer_2_size; + /* + * Maximum preemption buffer size that the FW can use: no need for the host + * driver to allocate more space than that specified by these fields. + * A value of 0 means no declared limit. + */ + u32 preemption_buffer_1_max_size; + u32 preemption_buffer_2_max_size; /* Space reserved for future preemption-related fields. */ - u32 preemption_reserved[6]; + u32 preemption_reserved[4]; /* FW image read only section start address, 4KB aligned */ u64 ro_section_start_address; /* FW image read only section size, 4KB aligned */ @@ -134,7 +141,7 @@ enum vpu_trace_destination { /* * Processor bit shifts (for loggable HW components). */ -#define VPU_TRACE_PROC_BIT_ARM 0 +#define VPU_TRACE_PROC_BIT_RESERVED 0 #define VPU_TRACE_PROC_BIT_LRT 1 #define VPU_TRACE_PROC_BIT_LNN 2 #define VPU_TRACE_PROC_BIT_SHV_0 3 diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 7215c144158c..4b6b2b3d2583 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -22,7 +22,7 @@ /* * Minor version changes when API backward compatibility is preserved. */ -#define VPU_JSM_API_VER_MINOR 25 +#define VPU_JSM_API_VER_MINOR 29 /* * API header changed (field names, documentation, formatting) but API itself has not been changed @@ -53,8 +53,7 @@ * Engine indexes. 
*/ #define VPU_ENGINE_COMPUTE 0 -#define VPU_ENGINE_COPY 1 -#define VPU_ENGINE_NB 2 +#define VPU_ENGINE_NB 1 /* * VPU status values. @@ -126,11 +125,13 @@ enum { * When set, indicates that job queue uses native fences (as inline commands * in job queue). Such queues may also use legacy fences (as commands in batch buffers). * When cleared, indicates the job queue only uses legacy fences. - * NOTE: For queues using native fences, VPU expects that all jobs in the queue - * are immediately followed by an inline command object. This object is expected - * to be a fence signal command in most cases, but can also be a NOP in case the host - * does not need per-job fence signalling. Other inline commands objects can be - * inserted between "job and inline command" pairs. + * NOTES: + * 1. For queues using native fences, VPU expects that all jobs in the queue + * are immediately followed by an inline command object. This object is expected + * to be a fence signal command in most cases, but can also be a NOP in case the host + * does not need per-job fence signalling. Other inline commands objects can be + * inserted between "job and inline command" pairs. + * 2. Native fence queues are only supported on VPU 40xx onwards. */ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), @@ -275,6 +276,8 @@ struct vpu_inline_cmd { u64 value; /* User VA of the log buffer in which to add log entry on completion. */ u64 log_buffer_va; + /* NPU private data. */ + u64 npu_private_data; } fence; /* Other commands do not have a payload. */ /* Payload definition for future inline commands can be inserted here. */ @@ -791,12 +794,22 @@ struct vpu_jsm_metric_streamer_update { /** Metric group mask that identifies metric streamer instance. */ u64 metric_group_mask; /** - * Address and size of the buffer where the VPU will write metric data. If - * the buffer address is 0 or same as the currently used buffer the VPU will - * continue writing metric data to the current buffer. In this case the - * buffer size is ignored and the size of the current buffer is unchanged. - * If the address is non-zero and differs from the current buffer address the - * VPU will immediately switch data collection to the new buffer. + * Address and size of the buffer where the VPU will write metric data. + * This member dictates how the update operation should perform: + * 1. client needs information about the number of collected samples and the + * amount of data written to the current buffer + * 2. client wants to switch to a new buffer + * + * Case 1. is identified by the buffer address being 0 or the same as the + * currently used buffer address. In this case the buffer size is ignored and + * the size of the current buffer is unchanged. The VPU will return an update + * in the vpu_jsm_metric_streamer_done structure. The internal writing position + * into the buffer is not changed. + * + * Case 2. is identified by the address being non-zero and differs from the + * current buffer address. The VPU will immediately switch data collection to + * the new buffer. Then the VPU will return an update in the + * vpu_jsm_metric_streamer_done structure. */ u64 buffer_addr; u64 buffer_size; @@ -934,6 +947,7 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { /* * Default quantum in 100ns units for scheduling across processes * within a priority band + * Minimum value supported by NPU is 1ms (10000 in 100ns units). 
*/ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS]; /* @@ -946,8 +960,10 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { * in situations when it's starved by the focus band. */ u32 normal_band_percentage; - /* Reserved */ - u32 reserved_0; + /* + * TDR timeout value in milliseconds. Default value of 0 meaning no timeout. + */ + u32 tdr_timeout; }; /* @@ -1024,7 +1040,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { s32 in_process_priority; /* Zero padding / Reserved */ u32 reserved_1; - /* Context quantum relative to other contexts of same priority in the same process */ + /* + * Context quantum relative to other contexts of same priority in the same process + * Minimum value supported by NPU is 1ms (10000 in 100ns units). + */ u64 context_quantum; /* Grace period when preempting context of the same priority within the same process */ u64 grace_period_same_priority; diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index f7fb7205028d..f6b9562779de 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -15,6 +15,7 @@ #include <acpi/ghes.h> #include <asm/cpu.h> #include <asm/mce.h> +#include <asm/msr.h> #include "apei/apei-internal.h" #include <ras/ras_event.h> @@ -234,7 +235,7 @@ static int __init extlog_init(void) u64 cap; int rc; - if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) || + if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) || !(cap & MCG_ELOG_P) || !extlog_get_l1addr()) return -ENODEV; diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index 794962c5c88e..b8d98b1b48ae 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem) return 0; } - err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter); + err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter); if (!err) { u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset + residency_info_ffh.gaddr. 
bit_width - 1, diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 3fde4496f8a2..6f8bbe1247a5 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -19,7 +19,7 @@ #include <linux/acpi.h> #include <linux/perf_event.h> #include <linux/platform_device.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/mwait.h> #include <xen/xen.h> diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 6f4fe47c955b..6481c48c22bb 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state); * expected_return_btypes - Allowed type(s) for the return value */ struct acpi_name_info { - char name[ACPI_NAMESEG_SIZE]; + char name[ACPI_NAMESEG_SIZE] __nonstring; u16 argument_list; u8 expected_btypes; }; @@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node * converted_object); struct acpi_simple_repair_info { - char name[ACPI_NAMESEG_SIZE]; + char name[ACPI_NAMESEG_SIZE] __nonstring; u32 unexpected_btypes; u32 package_index; acpi_object_converter object_converter; diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 1bb7b71f07f1..330b5e4711da 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info, return_object_ptr); typedef struct acpi_repair_info { - char name[ACPI_NAMESEG_SIZE]; + char name[ACPI_NAMESEG_SIZE] __nonstring; acpi_repair_function repair_function; } acpi_repair_info; diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 90b09840536d..0a7026040188 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -458,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data) acpi_pm_wakeup_event(&device->dev); button = acpi_driver_data(device); - if (button->suspended) + if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE) return; input = button->input; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 8db09d81918f..3c5f34892734 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"), }, }, + /* + * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC + * https://gitlab.freedesktop.org/drm/amd/-/issues/3929 + */ + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83L3"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N6"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, { }, }; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index a35dd0e41c27..54676e3d82dd 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -229,18 +229,20 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, node_entry = ACPI_PTR_DIFF(node, table_hdr); entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, sizeof(struct acpi_table_pptt)); - proc_sz = sizeof(struct acpi_pptt_processor *); + proc_sz = sizeof(struct acpi_pptt_processor); - while ((unsigned long)entry + proc_sz < table_end) { + /* ignore subtable types that are smaller than a processor node */ + 
while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && cpu_node->parent == node_entry) return 0; if (entry->length == 0) return 0; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, entry->length); - } return 1; } @@ -270,18 +272,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he table_end = (unsigned long)table_hdr + table_hdr->length; entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, sizeof(struct acpi_table_pptt)); - proc_sz = sizeof(struct acpi_pptt_processor *); + proc_sz = sizeof(struct acpi_pptt_processor); /* find the processor structure associated with this cpuid */ - while ((unsigned long)entry + proc_sz < table_end) { + while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; if (entry->length == 0) { pr_warn("Invalid zero length subtable\n"); break; } + /* entry->length may not equal proc_sz, revalidate the processor structure length */ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && acpi_cpu_id == cpu_node->acpi_processor_id && + (unsigned long)entry + entry->length <= table_end && + entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) && acpi_pptt_leaf_node(table_hdr, cpu_node)) { return (struct acpi_pptt_processor *)entry; } diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 53996f1a2d80..64b8d1e19594 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -20,6 +20,7 @@ #include <acpi/processor.h> #ifdef CONFIG_X86 #include <asm/cpufeature.h> +#include <asm/msr.h> #endif #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 00d045e5f524..d1541a386fbc 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -18,9 +18,12 @@ #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/acpi.h> +#include <linux/uaccess.h> #include <acpi/processor.h> #include <asm/io.h> -#include <linux/uaccess.h> +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif /* ignore_tpc: * 0 -> acpi processor driver doesn't ignore _TPC values diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 76052006bd87..5fc2c8ee61b1 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -6373,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, seq_printf(m, " node %d", buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd offset %lx\n", buffer->data_size, buffer->offsets_size, - proc->alloc.vm_start - buffer->user_data); + buffer->user_data - proc->alloc.vm_start); } static void print_binder_work_ilocked(struct seq_file *m, diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 94c6446604fc..98da8c4eea59 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -187,7 +187,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode, inode_lock(d_inode(root)); /* look it up */ - dentry = lookup_one_len(name, root, name_len); + dentry = lookup_noperm(&QSTR(name), root); if (IS_ERR(dentry)) { inode_unlock(d_inode(root)); ret = PTR_ERR(dentry); @@ -487,7 +487,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent, { struct dentry *dentry; - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) return dentry; diff 
--git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index ba300cc0a3a3..2e4463d3a356 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -1510,6 +1510,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link) unsigned int err_mask, tag; u8 *sense, sk = 0, asc = 0, ascq = 0; u64 sense_valid, val; + u16 extended_sense; + bool aux_icc_valid; int ret = 0; err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2); @@ -1529,6 +1531,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link) sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) | ((u64)buf[10] << 16) | ((u64)buf[11] << 24); + extended_sense = get_unaligned_le16(&buf[14]); + aux_icc_valid = extended_sense & BIT(15); ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_EH) || @@ -1556,6 +1560,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link) continue; } + qc->result_tf.nsect = sense[6]; + qc->result_tf.hob_nsect = sense[7]; + qc->result_tf.lbal = sense[8]; + qc->result_tf.lbam = sense[9]; + qc->result_tf.lbah = sense[10]; + qc->result_tf.hob_lbal = sense[11]; + qc->result_tf.hob_lbam = sense[12]; + qc->result_tf.hob_lbah = sense[13]; + if (aux_icc_valid) + qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]); + /* Set sense without also setting scsicmd->result */ scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, qc->scsicmd->sense_buffer, sk, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2796c0da8257..c0eb8c67a9ff 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2453,8 +2453,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev, */ put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); - if (dev->flags & ATA_DFLAG_CDL) - buf[4] = 0x02; /* Support T2A and T2B pages */ + if (dev->flags & ATA_DFLAG_CDL_ENABLED) + buf[4] = 0x02; /* T2A and T2B pages enabled */ else buf[4] = 0; @@ -3886,12 +3886,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc, } /* - * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode + * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode * page) into a SET FEATURES command. */ -static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, - const u8 *buf, int len, - u16 *fp) +static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, + const u8 *buf, int len, u16 *fp) { struct ata_device *dev = qc->dev; struct ata_taskfile *tf = &qc->tf; @@ -3909,17 +3908,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, /* Check cdl_ctrl */ switch (buf[0] & 0x03) { case 0: - /* Disable CDL */ + /* Disable CDL if it is enabled */ + if (!(dev->flags & ATA_DFLAG_CDL_ENABLED)) + return 0; + ata_dev_dbg(dev, "Disabling CDL\n"); cdl_action = 0; dev->flags &= ~ATA_DFLAG_CDL_ENABLED; break; case 0x02: - /* Enable CDL T2A/T2B: NCQ priority must be disabled */ + /* + * Enable CDL if not already enabled. Since this is mutually + * exclusive with NCQ priority, allow this only if NCQ priority + * is disabled. 
+ */ + if (dev->flags & ATA_DFLAG_CDL_ENABLED) + return 0; if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { ata_dev_err(dev, "NCQ priority must be disabled to enable CDL\n"); return -EINVAL; } + ata_dev_dbg(dev, "Enabling CDL\n"); cdl_action = 1; dev->flags |= ATA_DFLAG_CDL_ENABLED; break; diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c index 434f380114af..03dbaf4a13a7 100644 --- a/drivers/ata/pata_pxa.c +++ b/drivers/ata/pata_pxa.c @@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev) ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, resource_size(cmd_res)); + if (!ap->ioaddr.cmd_addr) + return -ENOMEM; ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, resource_size(ctl_res)); + if (!ap->ioaddr.ctl_addr) + return -ENOMEM; ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, resource_size(dma_res)); + if (!ap->ioaddr.bmdma_addr) + return -ENOMEM; /* * Adjust register offsets diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index a482741eb181..c3042eca6332 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host) mmio += PDC_CHIP0_OFS; for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) - pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, - pdc_i2c_read_data[i].reg, - &spd0[pdc_i2c_read_data[i].ofs]); + if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, + pdc_i2c_read_data[i].reg, + &spd0[pdc_i2c_read_data[i].ofs])) { + dev_err(host->dev, + "Failed in i2c read at index %d: device=%#x, reg=%#x\n", + i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg); + return -EIO; + } data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | @@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) /* Programming DIMM0 Module Control Register (index_CID0:80h) */ size = pdc20621_prog_dimm0(host); + if (size < 0) + return size; dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size); /* Programming DIMM Module Global Control Register (index_CID0:88h) */ diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index afa4df4c5a3f..95717d509ca9 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -156,6 +156,16 @@ * }, * .ops = my_custom_ops, * }; + * + * Please note that such custom ops approach is valid, but it is hard to implement + * it right without global locks per-device to protect from auxiliary_drv removal + * during call to that ops. In addition, this implementation lacks proper module + * dependency, which causes to load/unload races between auxiliary parent and devices + * modules. + * + * The most easiest way to provide these ops reliably without needing to + * have a lock is to EXPORT_SYMBOL*() them and rely on already existing + * modules infrastructure for validity and correct dependencies chains. 
*/ static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, diff --git a/drivers/base/base.h b/drivers/base/base.h index 0042e4774b0c..123031a757d9 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp) kset_put(&sp->subsys); } +struct subsys_private *bus_to_subsys(const struct bus_type *bus); struct subsys_private *class_to_subsys(const struct class *class); struct driver_private { @@ -180,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups); void device_driver_detach(struct device *dev); +static inline void device_set_driver(struct device *dev, const struct device_driver *drv) +{ + /* + * Majority (all?) read accesses to dev->driver happens either + * while holding device lock or in bus/driver code that is only + * invoked when the device is bound to a driver and there is no + * concern of the pointer being changed while it is being read. + * However when reading device's uevent file we read driver pointer + * without taking device lock (so we do not block there for + * arbitrary amount of time). We use WRITE_ONCE() here to prevent + * tearing so that READ_ONCE() can safely be used in uevent code. + */ + // FIXME - this cast should not be needed "soon" + WRITE_ONCE(dev->driver, (struct device_driver *)drv); +} + int devres_release_all(struct device *dev); void device_block_probing(void); void device_unblock_probing(void); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 5ea3b03af9ba..5e75e1bce551 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, * NULL. A call to subsys_put() must be done when finished with the pointer in * order for it to be properly freed. */ -static struct subsys_private *bus_to_subsys(const struct bus_type *bus) +struct subsys_private *bus_to_subsys(const struct bus_type *bus) { struct subsys_private *sp = NULL; struct kobject *kobj; diff --git a/drivers/base/core.c b/drivers/base/core.c index d2f9d3a59d6b..cbc0099d8ef2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj) return NULL; } +/* + * Try filling "DRIVER=<name>" uevent variable for a device. Because this + * function may race with binding and unbinding the device from a driver, + * we need to be careful. Binding is generally safe, at worst we miss the + * fact that the device is already bound to a driver (but the driver + * information that is delivered through uevents is best-effort, it may + * become obsolete as soon as it is generated anyways). Unbinding is more + * risky as driver pointer is transitioning to NULL, so READ_ONCE() should + * be used to make sure we are dealing with the same pointer, and to + * ensure that driver structure is not going to disappear from under us + * we take bus' drivers klist lock. The assumption that only registered + * driver can be bound to a device, and to unregister a driver bus code + * will take the same lock. 
+ */ +static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + struct subsys_private *sp = bus_to_subsys(dev->bus); + + if (sp) { + scoped_guard(spinlock, &sp->klist_drivers.k_lock) { + struct device_driver *drv = READ_ONCE(dev->driver); + if (drv) + add_uevent_var(env, "DRIVER=%s", drv->name); + } + + subsys_put(sp); + } +} + static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); @@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - if (dev->driver) - add_uevent_var(env, "DRIVER=%s", dev->driver->name); + /* Add "DRIVER=%s" variable if the device is bound to a driver */ + dev_driver_uevent(dev, env); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; - /* Synchronize with really_probe() */ - device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); - device_unlock(dev); if (retval) goto out; @@ -3700,7 +3726,7 @@ done: device_pm_remove(dev); dpm_sysfs_remove(dev); DPMError: - dev->driver = NULL; + device_set_driver(dev, NULL); bus_remove_device(dev); BusError: device_remove_attrs(dev); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index a7e511849875..7779ab0ca7ce 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -600,6 +600,8 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(ghostwrite); +CPU_SHOW_VULN_FALLBACK(old_microcode); +CPU_SHOW_VULN_FALLBACK(indirect_target_selection); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -616,6 +618,8 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); +static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); +static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -633,6 +637,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_gather_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr, &dev_attr_ghostwrite.attr, + &dev_attr_old_microcode.attr, + &dev_attr_indirect_target_selection.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index f0e4b4aba885..b526e0e0f52d 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev) arch_teardown_dma_ops(dev); kfree(dev->dma_range_map); dev->dma_range_map = NULL; - dev->driver = NULL; + device_set_driver(dev, NULL); dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); @@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv) } re_probe: - // FIXME - this cast should not be needed "soon" - dev->driver = (struct device_driver *)drv; + device_set_driver(dev, drv); /* If using pinctrl, 
bind pins now before probing */ ret = pinctrl_bind_pins(dev); @@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async) if (ret == 0) ret = 1; else { - dev->driver = NULL; + device_set_driver(dev, NULL); ret = 0; } } else { diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 6dd1a8860f1c..31bfb3194b4c 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -296,7 +296,7 @@ static int delete_path(const char *nodepath) return err; } -static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat) +static int dev_mynode(struct device *dev, struct inode *inode) { /* did we create it */ if (inode->i_private != &thread) @@ -304,13 +304,13 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta /* does the dev_t match */ if (is_blockdev(dev)) { - if (!S_ISBLK(stat->mode)) + if (!S_ISBLK(inode->i_mode)) return 0; } else { - if (!S_ISCHR(stat->mode)) + if (!S_ISCHR(inode->i_mode)) return 0; } - if (stat->rdev != dev->devt) + if (inode->i_rdev != dev->devt) return 0; /* ours */ @@ -321,20 +321,16 @@ static int handle_remove(const char *nodename, struct device *dev) { struct path parent; struct dentry *dentry; - struct kstat stat; - struct path p; + struct inode *inode; int deleted = 0; - int err; + int err = 0; dentry = kern_path_locked(nodename, &parent); if (IS_ERR(dentry)) return PTR_ERR(dentry); - p.mnt = parent.mnt; - p.dentry = dentry; - err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, - AT_STATX_SYNC_AS_STAT); - if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { + inode = d_inode(dentry); + if (dev_mynode(dev, inode)) { struct iattr newattrs; /* * before unlinking this node, reset permissions @@ -342,7 +338,7 @@ static int handle_remove(const char *nodename, struct device *dev) */ newattrs.ia_uid = GLOBAL_ROOT_UID; newattrs.ia_gid = GLOBAL_ROOT_GID; - newattrs.ia_mode = stat.mode & ~0777; + newattrs.ia_mode = inode->i_mode & ~0777; newattrs.ia_valid = ATTR_UID|ATTR_GID|ATTR_MODE; inode_lock(d_inode(dentry)); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 8f3a41d9bfaa..19469e7f88c2 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -816,21 +816,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state, return 0; } -static int __init add_boot_memory_block(unsigned long base_section_nr) -{ - unsigned long nr; - - for_each_present_section_nr(base_section_nr, nr) { - if (nr >= (base_section_nr + sections_per_block)) - break; - - return add_memory_block(memory_block_id(base_section_nr), - MEM_ONLINE, NULL, NULL); - } - - return 0; -} - static int add_hotplug_memory_block(unsigned long block_id, struct vmem_altmap *altmap, struct memory_group *group) @@ -957,7 +942,7 @@ static const struct attribute_group *memory_root_attr_groups[] = { void __init memory_dev_init(void) { int ret; - unsigned long block_sz, nr; + unsigned long block_sz, block_id, nr; /* Validate the configured memory block size */ block_sz = memory_block_size_bytes(); @@ -970,15 +955,23 @@ void __init memory_dev_init(void) panic("%s() failed to register subsystem: %d\n", __func__, ret); /* - * Create entries for memory sections that were found - * during boot and have been initialized + * Create entries for memory sections that were found during boot + * and have been initialized. 
Use @block_id to track the last + * handled block and initialize it to an invalid value (ULONG_MAX) + * to bypass the block ID matching check for the first present + * block so that it can be covered. */ - for (nr = 0; nr <= __highest_present_section_nr; - nr += sections_per_block) { - ret = add_boot_memory_block(nr); - if (ret) - panic("%s() failed to add memory block: %d\n", __func__, - ret); + block_id = ULONG_MAX; + for_each_present_section_nr(0, nr) { + if (block_id != ULONG_MAX && memory_block_id(nr) == block_id) + continue; + + block_id = memory_block_id(nr); + ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL); + if (ret) { + panic("%s() failed to add memory block: %d\n", + __func__, ret); + } } } diff --git a/drivers/base/module.c b/drivers/base/module.c index 5bc71bea883a..218aaa096455 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv) if (mod) mk = &mod->mkobj; else if (drv->mod_name) { - struct kobject *mkobj; - - /* Lookup built-in module entry in /sys/modules */ - mkobj = kset_find_obj(module_kset, drv->mod_name); - if (mkobj) { - mk = container_of(mkobj, struct module_kobject, kobj); + /* Lookup or create built-in module entry in /sys/modules */ + mk = lookup_or_create_module_kobject(drv->mod_name); + if (mk) { /* remember our module structure */ drv->p->mkobj = mk; - /* kset_find_obj took a reference */ - kobject_put(mkobj); + /* lookup_or_create_module_kobject took a reference */ + kobject_put(&mk->kobj); } } diff --git a/drivers/base/node.c b/drivers/base/node.c index cd13ef287011..618712071a1e 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -468,7 +468,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(pgdat, NR_PAGETABLE)), nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), nid, 0UL, - nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), + nid, 0UL, nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), nid, K(sreclaimable + node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)), diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 1813cfd0c4bd..cfccf3ff36e7 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev) static int platform_dma_configure(struct device *dev) { - struct platform_driver *drv = to_platform_driver(dev->driver); + struct device_driver *drv = READ_ONCE(dev->driver); struct fwnode_handle *fwnode = dev_fwnode(dev); enum dev_dma_attr attr; int ret = 0; @@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev) attr = acpi_get_dma_attr(to_acpi_device_node(fwnode)); ret = acpi_dma_configure(dev, attr); } - /* @drv may not be valid when we're called from the IOMMU layer */ - if (ret || !dev->driver || drv->driver_managed_dma) + /* @dev->driver may not be valid when we're called from the IOMMU layer */ + if (ret || !drv || to_platform_driver(drv)->driver_managed_dma) return ret; ret = iommu_device_use_default_domain(dev); diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index b1726a3515f6..5c78fa6ae772 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -1080,6 +1080,7 @@ void software_node_notify(struct device *dev) if (!swnode) return; + kobject_get(&swnode->kobj); ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node"); if (ret) return; @@ -1089,8 +1090,6 @@ void software_node_notify(struct device *dev) sysfs_remove_link(&dev->kobj, "software_node"); 
return; } - - kobject_get(&swnode->kobj); } void software_node_notify_remove(struct device *dev) diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index a97f2c40c640..0f70e2374e7f 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -367,7 +367,7 @@ config BLK_DEV_RBD tristate "Rados block device (RBD)" depends on INET && BLOCK select CEPH_LIB - select LIBCRC32C + select CRC32 select CRYPTO_AES select CRYPTO help @@ -388,12 +388,6 @@ config BLK_DEV_UBLK definition isn't finalized yet, and might change according to future requirement, so mark is as experimental now. - Say Y if you want to get better performance because task_work_add() - can be used in IO path for replacing io_uring cmd, which will become - shared between IO tasks and ubq daemon, meantime task_work_add() can - can handle batch more effectively, but task_work_add() isn't exported - for module, so ublk has to be built to kernel. - config BLKDEV_UBLK_LEGACY_OPCODES bool "Support legacy command opcode" depends on BLK_DEV_UBLK @@ -413,4 +407,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES source "drivers/block/rnbd/Kconfig" +config BLK_DEV_ZONED_LOOP + tristate "Zoned loopback device support" + depends on BLK_DEV_ZONED + help + Saying Y here will allow you to create a zoned block device using + regular files for zones (one file per zone). This is useful to test + file systems, device mapper and applications that support zoned block + devices. To create a zoned loop device, no user utility is needed, a + zoned loop device can be created (or re-started) using a command + like: + + echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \ + /dev/zloop-control + + See Documentation/admin-guide/blockdev/zoned_loop.rst for usage + details. + + If unsure, say N. + endif # BLK_DEV diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 1105a2d4fdcb..097707aca725 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -41,5 +41,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/ obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/ obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o +obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 292f127cae0a..b1be6c510372 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -54,32 +54,33 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) /* * Insert a new page for a given sector, if one does not already exist. */ -static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) +static struct page *brd_insert_page(struct brd_device *brd, sector_t sector, + blk_opf_t opf) + __releases(rcu) + __acquires(rcu) { - pgoff_t idx = sector >> PAGE_SECTORS_SHIFT; - struct page *page; - int ret = 0; - - page = brd_lookup_page(brd, sector); - if (page) - return 0; + gfp_t gfp = (opf & REQ_NOWAIT) ? 
GFP_NOWAIT : GFP_NOIO; + struct page *page, *ret; + rcu_read_unlock(); page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM); + rcu_read_lock(); if (!page) - return -ENOMEM; + return ERR_PTR(-ENOMEM); xa_lock(&brd->brd_pages); - ret = __xa_insert(&brd->brd_pages, idx, page, gfp); - if (!ret) - brd->brd_nr_pages++; - xa_unlock(&brd->brd_pages); - - if (ret < 0) { + ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL, + page, gfp); + if (ret) { + xa_unlock(&brd->brd_pages); __free_page(page); - if (ret == -EBUSY) - ret = 0; + if (xa_is_err(ret)) + return ERR_PTR(xa_err(ret)); + return ret; } - return ret; + brd->brd_nr_pages++; + xa_unlock(&brd->brd_pages); + return page; } /* @@ -100,143 +101,77 @@ static void brd_free_pages(struct brd_device *brd) } /* - * copy_to_brd_setup must be called before copy_to_brd. It may sleep. + * Process a single segment. The segment is capped to not cross page boundaries + * in both the bio and the brd backing memory. */ -static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n, - gfp_t gfp) -{ - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; - int ret; - - copy = min_t(size_t, n, PAGE_SIZE - offset); - ret = brd_insert_page(brd, sector, gfp); - if (ret) - return ret; - if (copy < n) { - sector += copy >> SECTOR_SHIFT; - ret = brd_insert_page(brd, sector, gfp); - } - return ret; -} - -/* - * Copy n bytes from src to the brd starting at sector. Does not sleep. - */ -static void copy_to_brd(struct brd_device *brd, const void *src, - sector_t sector, size_t n) +static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio) { + struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter); + sector_t sector = bio->bi_iter.bi_sector; + u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT; + blk_opf_t opf = bio->bi_opf; struct page *page; - void *dst; - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; + void *kaddr; - copy = min_t(size_t, n, PAGE_SIZE - offset); - page = brd_lookup_page(brd, sector); - BUG_ON(!page); - - dst = kmap_atomic(page); - memcpy(dst + offset, src, copy); - kunmap_atomic(dst); - - if (copy < n) { - src += copy; - sector += copy >> SECTOR_SHIFT; - copy = n - copy; - page = brd_lookup_page(brd, sector); - BUG_ON(!page); - - dst = kmap_atomic(page); - memcpy(dst, src, copy); - kunmap_atomic(dst); - } -} + bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); -/* - * Copy n bytes to dst from the brd starting at sector. Does not sleep. - */ -static void copy_from_brd(void *dst, struct brd_device *brd, - sector_t sector, size_t n) -{ - struct page *page; - void *src; - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; - - copy = min_t(size_t, n, PAGE_SIZE - offset); + rcu_read_lock(); page = brd_lookup_page(brd, sector); - if (page) { - src = kmap_atomic(page); - memcpy(dst, src + offset, copy); - kunmap_atomic(src); - } else - memset(dst, 0, copy); - - if (copy < n) { - dst += copy; - sector += copy >> SECTOR_SHIFT; - copy = n - copy; - page = brd_lookup_page(brd, sector); - if (page) { - src = kmap_atomic(page); - memcpy(dst, src, copy); - kunmap_atomic(src); - } else - memset(dst, 0, copy); + if (!page && op_is_write(opf)) { + page = brd_insert_page(brd, sector, opf); + if (IS_ERR(page)) + goto out_error; } -} - -/* - * Process a single bvec of a bio. 
- */ -static int brd_do_bvec(struct brd_device *brd, struct page *page, - unsigned int len, unsigned int off, blk_opf_t opf, - sector_t sector) -{ - void *mem; - int err = 0; + kaddr = bvec_kmap_local(&bv); if (op_is_write(opf)) { - /* - * Must use NOIO because we don't want to recurse back into the - * block or filesystem layers from page reclaim. - */ - gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO; - - err = copy_to_brd_setup(brd, sector, len, gfp); - if (err) - goto out; - } - - mem = kmap_atomic(page); - if (!op_is_write(opf)) { - copy_from_brd(mem + off, brd, sector, len); - flush_dcache_page(page); + memcpy_to_page(page, offset, kaddr, bv.bv_len); } else { - flush_dcache_page(page); - copy_to_brd(brd, mem + off, sector, len); + if (page) + memcpy_from_page(kaddr, page, offset, bv.bv_len); + else + memset(kaddr, 0, bv.bv_len); } - kunmap_atomic(mem); + kunmap_local(kaddr); + rcu_read_unlock(); + + bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len); + return true; + +out_error: + rcu_read_unlock(); + if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT)) + bio_wouldblock_error(bio); + else + bio_io_error(bio); + return false; +} -out: - return err; +static void brd_free_one_page(struct rcu_head *head) +{ + struct page *page = container_of(head, struct page, rcu_head); + + __free_page(page); } static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) { - sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS; + sector_t aligned_sector = round_up(sector, PAGE_SECTORS); + sector_t aligned_end = round_down( + sector + (size >> SECTOR_SHIFT), PAGE_SECTORS); struct page *page; - size -= (aligned_sector - sector) * SECTOR_SIZE; + if (aligned_end <= aligned_sector) + return; + xa_lock(&brd->brd_pages); - while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { + while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) { page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); if (page) { - __free_page(page); + call_rcu(&page->rcu_head, brd_free_one_page); brd->brd_nr_pages--; } aligned_sector += PAGE_SECTORS; - size -= PAGE_SIZE; } xa_unlock(&brd->brd_pages); } @@ -244,36 +179,18 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; - sector_t sector = bio->bi_iter.bi_sector; - struct bio_vec bvec; - struct bvec_iter iter; if (unlikely(op_is_discard(bio->bi_opf))) { - brd_do_discard(brd, sector, bio->bi_iter.bi_size); + brd_do_discard(brd, bio->bi_iter.bi_sector, + bio->bi_iter.bi_size); bio_endio(bio); return; } - bio_for_each_segment(bvec, bio, iter) { - unsigned int len = bvec.bv_len; - int err; - - /* Don't support un-aligned buffer */ - WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || - (len & (SECTOR_SIZE - 1))); - - err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, - bio->bi_opf, sector); - if (err) { - if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) { - bio_wouldblock_error(bio); - return; - } - bio_io_error(bio); + do { + if (!brd_rw_bvec(brd, bio)) return; - } - sector += len >> SECTOR_SHIFT; - } + } while (bio->bi_iter.bi_size); bio_endio(bio); } diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig index 6fb4e38fca88..495a72da04c6 100644 --- a/drivers/block/drbd/Kconfig +++ b/drivers/block/drbd/Kconfig @@ -10,7 +10,7 @@ config BLK_DEV_DRBD tristate "DRBD Distributed Replicated Block Device support" depends on PROC_FS && INET select LRU_CACHE - select 
LIBCRC32C + select CRC32 help NOTE: In order to authenticate connections you have to select diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 674527d770dc..e2b1f377f585 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -211,72 +211,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size) kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); } -static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) -{ - struct iov_iter i; - ssize_t bw; - - iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); - - bw = vfs_iter_write(file, &i, ppos, 0); - - if (likely(bw == bvec->bv_len)) - return 0; - - printk_ratelimited(KERN_ERR - "loop: Write error at byte offset %llu, length %i.\n", - (unsigned long long)*ppos, bvec->bv_len); - if (bw >= 0) - bw = -EIO; - return bw; -} - -static int lo_write_simple(struct loop_device *lo, struct request *rq, - loff_t pos) -{ - struct bio_vec bvec; - struct req_iterator iter; - int ret = 0; - - rq_for_each_segment(bvec, rq, iter) { - ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); - if (ret < 0) - break; - cond_resched(); - } - - return ret; -} - -static int lo_read_simple(struct loop_device *lo, struct request *rq, - loff_t pos) -{ - struct bio_vec bvec; - struct req_iterator iter; - struct iov_iter i; - ssize_t len; - - rq_for_each_segment(bvec, rq, iter) { - iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len); - len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); - if (len < 0) - return len; - - flush_dcache_page(bvec.bv_page); - - if (len != bvec.bv_len) { - struct bio *bio; - - __rq_for_each_bio(bio, rq) - zero_fill_bio(bio); - break; - } - cond_resched(); - } - - return 0; -} - static void loop_clear_limits(struct loop_device *lo, int mode) { struct queue_limits lim = queue_limits_start_update(lo->lo_queue); @@ -342,7 +276,7 @@ static void lo_complete_rq(struct request *rq) struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); blk_status_t ret = BLK_STS_OK; - if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || + if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || req_op(rq) != REQ_OP_READ) { if (cmd->ret < 0) ret = errno_to_blk_status(cmd->ret); @@ -358,14 +292,13 @@ static void lo_complete_rq(struct request *rq) cmd->ret = 0; blk_mq_requeue_request(rq, true); } else { - if (cmd->use_aio) { - struct bio *bio = rq->bio; + struct bio *bio = rq->bio; - while (bio) { - zero_fill_bio(bio); - bio = bio->bi_next; - } + while (bio) { + zero_fill_bio(bio); + bio = bio->bi_next; } + ret = BLK_STS_IOERR; end_io: blk_mq_end_request(rq, ret); @@ -445,9 +378,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_pos = pos; cmd->iocb.ki_filp = file; - cmd->iocb.ki_complete = lo_rw_aio_complete; - cmd->iocb.ki_flags = IOCB_DIRECT; - cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + cmd->iocb.ki_ioprio = req_get_ioprio(rq); + if (cmd->use_aio) { + cmd->iocb.ki_complete = lo_rw_aio_complete; + cmd->iocb.ki_flags = IOCB_DIRECT; + } else { + cmd->iocb.ki_complete = NULL; + cmd->iocb.ki_flags = 0; + } if (rw == ITER_SOURCE) ret = file->f_op->write_iter(&cmd->iocb, &iter); @@ -458,7 +396,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, if (ret != -EIOCBQUEUED) lo_rw_aio_complete(&cmd->iocb, ret); - return 0; + return -EIOCBQUEUED; } static int do_req_filebacked(struct loop_device *lo, struct request *rq) @@ -466,15 +404,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) struct loop_cmd *cmd = 
blk_mq_rq_to_pdu(rq); loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; - /* - * lo_write_simple and lo_read_simple should have been covered - * by io submit style function like lo_rw_aio(), one blocker - * is that lo_read_simple() need to call flush_dcache_page after - * the page is written from kernel, and it isn't easy to handle - * this in io submit style function which submits all segments - * of the req at one time. And direct read IO doesn't need to - * run flush_dcache_page(). - */ switch (req_op(rq)) { case REQ_OP_FLUSH: return lo_req_flush(lo, rq); @@ -490,15 +419,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) case REQ_OP_DISCARD: return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); case REQ_OP_WRITE: - if (cmd->use_aio) - return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); - else - return lo_write_simple(lo, rq, pos); + return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); case REQ_OP_READ: - if (cmd->use_aio) - return lo_rw_aio(lo, cmd, pos, ITER_DEST); - else - return lo_read_simple(lo, rq, pos); + return lo_rw_aio(lo, cmd, pos, ITER_DEST); default: WARN_ON_ONCE(1); return -EIO; @@ -582,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file) lo->lo_min_dio_size = loop_query_min_dio_size(lo); } +static int loop_check_backing_file(struct file *file) +{ + if (!file->f_op->read_iter) + return -EINVAL; + + if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter) + return -EINVAL; + + return 0; +} + /* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up @@ -603,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (!file) return -EBADF; + error = loop_check_backing_file(file); + if (error) + return error; + /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); @@ -662,19 +600,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, * dependency. */ fput(old_file); + dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); if (partscan) loop_reread_partitions(lo); error = 0; done: - /* enable and uncork uevent now that we are done */ - dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); return error; out_err: loop_global_unlock(lo, is_loop); out_putf: fput(file); + dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); goto done; } @@ -1039,6 +978,11 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, if (!file) return -EBADF; + + error = loop_check_backing_file(file); + if (error) + return error; + is_loop = is_loop_device(file); /* This is safe, since we have a reference from open(). 
*/ @@ -1129,8 +1073,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); - /* enable and uncork uevent now that we are done */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); loop_global_unlock(lo, is_loop); if (partscan) @@ -1921,7 +1865,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd) struct loop_device *lo = rq->q->queuedata; int ret = 0; struct mem_cgroup *old_memcg = NULL; - const bool use_aio = cmd->use_aio; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; @@ -1951,7 +1894,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd) } failed: /* complete non-aio request */ - if (!use_aio || ret) { + if (ret != -EIOCBQUEUED) { if (ret == -EOPNOTSUPP) cmd->ret = ret; else diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 3bb9cee0a9b5..aa163ae9b2aa 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -2031,7 +2031,7 @@ static int null_add_dev(struct nullb_device *dev) nullb->disk->minors = 1; nullb->disk->fops = &null_ops; nullb->disk->private_data = nullb; - strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + strscpy(nullb->disk->disk_name, nullb->disk_name); if (nullb->dev->zoned) { rv = null_register_zoned_dev(nullb); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 65b96c083b3c..d5cc7bd2875c 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -725,7 +725,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * scmd = blk_mq_rq_to_pdu(rq); if (cgc->buflen) { - ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, + ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen, GFP_NOIO); if (ret) goto out; diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 2ee6e9bd4e28..2df8941a6b14 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -147,12 +147,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1, rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL); - if (bio_add_page(bio, virt_to_page(data), datalen, - offset_in_page(data)) != datalen) { - rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n"); - err = -EINVAL; - goto bio_put; - } + bio_add_virt_nofail(bio, data, datalen); bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw)); if (bio_has_data(bio) && diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 2fd05c1bd30b..6f51072776f1 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -50,6 +50,8 @@ /* private ioctl command mirror */ #define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC) +#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE) +#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV) #define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF) #define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF) @@ -64,7 +66,10 @@ | UBLK_F_CMD_IOCTL_ENCODE \ | UBLK_F_USER_COPY \ | UBLK_F_ZONED \ - | UBLK_F_USER_RECOVERY_FAIL_IO) + | UBLK_F_USER_RECOVERY_FAIL_IO \ + | UBLK_F_UPDATE_SIZE \ + | UBLK_F_AUTO_BUF_REG \ + | UBLK_F_QUIESCE) #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \ | UBLK_F_USER_RECOVERY_REISSUE \ @@ -77,7 +82,11 @@ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT) struct ublk_rq_data { - struct kref ref; + refcount_t ref; + + /* for auto-unregister 
buffer in case of UBLK_F_AUTO_BUF_REG */ + u16 buf_index; + void *buf_ctx_handle; }; struct ublk_uring_cmd_pdu { @@ -99,6 +108,9 @@ struct ublk_uring_cmd_pdu { * setup in ublk uring_cmd handler */ struct ublk_queue *ubq; + + struct ublk_auto_buf_reg buf; + u16 tag; }; @@ -123,15 +135,6 @@ struct ublk_uring_cmd_pdu { #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02 /* - * IO command is aborted, so this flag is set in case of - * !UBLK_IO_FLAG_ACTIVE. - * - * After this flag is observed, any pending or new incoming request - * associated with this io command will be failed immediately - */ -#define UBLK_IO_FLAG_ABORTED 0x04 - -/* * UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires * get data buffer address from ublksrv. * @@ -140,6 +143,14 @@ struct ublk_uring_cmd_pdu { */ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08 +/* + * request buffer is registered automatically, so we have to unregister it + * before completing this request. + * + * io_uring will unregister buffer automatically for us during exiting. + */ +#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10 + /* atomic RW with ubq->cancel_lock */ #define UBLK_IO_FLAG_CANCELED 0x80000000 @@ -149,7 +160,12 @@ struct ublk_io { unsigned int flags; int res; - struct io_uring_cmd *cmd; + union { + /* valid if UBLK_IO_FLAG_ACTIVE is set */ + struct io_uring_cmd *cmd; + /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */ + struct request *req; + }; }; struct ublk_queue { @@ -199,8 +215,6 @@ struct ublk_device { struct completion completion; unsigned int nr_queues_ready; unsigned int nr_privileged_daemon; - - struct work_struct nosrv_work; }; /* header of ublk_params */ @@ -209,16 +223,17 @@ struct ublk_params_header { __u32 types; }; -static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq); - +static void ublk_io_release(void *priv); +static void ublk_stop_dev_unlocked(struct ublk_device *ub); +static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq); static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, - struct ublk_queue *ubq, int tag, size_t offset); + const struct ublk_queue *ubq, int tag, size_t offset); static inline unsigned int ublk_req_build_flags(struct request *req); -static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, - int tag); -static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub) + +static inline struct ublksrv_io_desc * +ublk_get_iod(const struct ublk_queue *ubq, unsigned tag) { - return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY); + return &ubq->io_cmd_buf[tag]; } static inline bool ublk_dev_is_zoned(const struct ublk_device *ub) @@ -372,8 +387,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, if (ret) goto free_req; - ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, - GFP_KERNEL); + ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL); if (ret) goto erase_desc; @@ -493,7 +507,6 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, #endif static inline void __ublk_complete_rq(struct request *req); -static void ublk_complete_rq(struct kref *ref); static dev_t ublk_chr_devt; static const struct class ublk_chr_class = { @@ -620,14 +633,25 @@ static void ublk_apply_params(struct ublk_device *ub) ublk_dev_param_zoned_apply(ub); } +static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq) +{ + return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY; +} + +static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq) +{ + return ubq->flags & 
UBLK_F_AUTO_BUF_REG; +} + static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) { - return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY); + return ubq->flags & UBLK_F_USER_COPY; } static inline bool ublk_need_map_io(const struct ublk_queue *ubq) { - return !ublk_support_user_copy(ubq); + return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) && + !ublk_support_auto_buf_reg(ubq); } static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) @@ -635,8 +659,16 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) /* * read()/write() is involved in user copy, so request reference * has to be grabbed + * + * for zero copy, request buffer need to be registered to io_uring + * buffer table, so reference is needed + * + * For auto buffer register, ublk server still may issue + * UBLK_IO_COMMIT_AND_FETCH_REQ before one registered buffer is used up, + * so reference is required too. */ - return ublk_support_user_copy(ubq); + return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) || + ublk_support_auto_buf_reg(ubq); } static inline void ublk_init_req_ref(const struct ublk_queue *ubq, @@ -645,7 +677,7 @@ static inline void ublk_init_req_ref(const struct ublk_queue *ubq, if (ublk_need_req_ref(ubq)) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - kref_init(&data->ref); + refcount_set(&data->ref, 1); } } @@ -655,7 +687,7 @@ static inline bool ublk_get_req_ref(const struct ublk_queue *ubq, if (ublk_need_req_ref(ubq)) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - return kref_get_unless_zero(&data->ref); + return refcount_inc_not_zero(&data->ref); } return true; @@ -667,7 +699,8 @@ static inline void ublk_put_req_ref(const struct ublk_queue *ubq, if (ublk_need_req_ref(ubq)) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - kref_put(&data->ref, ublk_complete_rq); + if (refcount_dec_and_test(&data->ref)) + __ublk_complete_rq(req); } else { __ublk_complete_rq(req); } @@ -703,12 +736,6 @@ static inline bool ublk_rq_has_data(const struct request *rq) return bio_has_data(rq->bio); } -static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, - int tag) -{ - return &ubq->io_cmd_buf[tag]; -} - static inline struct ublksrv_io_desc * ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) { @@ -1074,7 +1101,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu( static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq) { - return ubq->ubq_daemon->flags & PF_EXITING; + return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING; } /* todo: handle partial completion */ @@ -1085,12 +1112,6 @@ static inline void __ublk_complete_rq(struct request *req) unsigned int unmapped_bytes; blk_status_t res = BLK_STS_OK; - /* called from ublk_abort_queue() code path */ - if (io->flags & UBLK_IO_FLAG_ABORTED) { - res = BLK_STS_IOERR; - goto exit; - } - /* failed read IO if nothing is read */ if (!io->res && req_op(req) == REQ_OP_READ) io->res = -EIO; @@ -1131,37 +1152,12 @@ exit: blk_mq_end_request(req, res); } -static void ublk_complete_rq(struct kref *ref) -{ - struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data, - ref); - struct request *req = blk_mq_rq_from_pdu(data); - - __ublk_complete_rq(req); -} - -/* - * Since ublk_rq_task_work_cb always fails requests immediately during - * exiting, __ublk_fail_req() is only called from abort context during - * exiting. So lock is unnecessary. 
- * - * Also aborting may not be started yet, keep in mind that one failed - * request may be issued by block layer again. - */ -static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, - struct request *req) +static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req, + int res, unsigned issue_flags) { - WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE); + /* read cmd first because req will overwrite it */ + struct io_uring_cmd *cmd = io->cmd; - if (ublk_nosrv_should_reissue_outstanding(ubq->dev)) - blk_mq_requeue_request(req, false); - else - ublk_put_req_ref(ubq, req); -} - -static void ubq_complete_io_cmd(struct ublk_io *io, int res, - unsigned issue_flags) -{ /* mark this cmd owned by ublksrv */ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV; @@ -1171,8 +1167,10 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res, */ io->flags &= ~UBLK_IO_FLAG_ACTIVE; + io->req = req; + /* tell ublksrv one io request is coming */ - io_uring_cmd_done(io->cmd, res, 0, issue_flags); + io_uring_cmd_done(cmd, res, 0, issue_flags); } #define UBLK_REQUEUE_DELAY_MS 3 @@ -1187,16 +1185,91 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq, blk_mq_end_request(rq, BLK_STS_IOERR); } +static void ublk_auto_buf_reg_fallback(struct request *req) +{ + const struct ublk_queue *ubq = req->mq_hctx->driver_data; + struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + iod->op_flags |= UBLK_IO_F_NEED_REG_BUF; + refcount_set(&data->ref, 1); +} + +static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io, + unsigned int issue_flags) +{ + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd); + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + int ret; + + ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release, + pdu->buf.index, issue_flags); + if (ret) { + if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) { + ublk_auto_buf_reg_fallback(req); + return true; + } + blk_mq_end_request(req, BLK_STS_IOERR); + return false; + } + /* one extra reference is dropped by ublk_io_release */ + refcount_set(&data->ref, 2); + + data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd); + /* store buffer index in request payload */ + data->buf_index = pdu->buf.index; + io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG; + return true; +} + +static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq, + struct request *req, struct ublk_io *io, + unsigned int issue_flags) +{ + if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) + return ublk_auto_buf_reg(req, io, issue_flags); + + ublk_init_req_ref(ubq, req); + return true; +} + +static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req, + struct ublk_io *io) +{ + unsigned mapped_bytes = ublk_map_io(ubq, req, io); + + /* partially mapped, update io descriptor */ + if (unlikely(mapped_bytes != blk_rq_bytes(req))) { + /* + * Nothing mapped, retry until we succeed. + * + * We may never succeed in mapping any bytes here because + * of OOM. TODO: reserve one buffer with single page pinned + * for providing forward progress guarantee. 
+ */ + if (unlikely(!mapped_bytes)) { + blk_mq_requeue_request(req, false); + blk_mq_delay_kick_requeue_list(req->q, + UBLK_REQUEUE_DELAY_MS); + return false; + } + + ublk_get_iod(ubq, req->tag)->nr_sectors = + mapped_bytes >> 9; + } + + return true; +} + static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req, unsigned int issue_flags) { int tag = req->tag; struct ublk_io *io = &ubq->ios[tag]; - unsigned int mapped_bytes; - pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n", - __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, + pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n", + __func__, ubq->q_id, req->tag, io->flags, ublk_get_iod(ubq, req->tag)->addr); /* @@ -1216,54 +1289,22 @@ static void ublk_dispatch_req(struct ublk_queue *ubq, if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) { /* * We have not handled UBLK_IO_NEED_GET_DATA command yet, - * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv + * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv * and notify it. */ - if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) { - io->flags |= UBLK_IO_FLAG_NEED_GET_DATA; - pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n", - __func__, io->cmd->cmd_op, ubq->q_id, - req->tag, io->flags); - ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags); - return; - } - /* - * We have handled UBLK_IO_NEED_GET_DATA command, - * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just - * do the copy work. - */ - io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA; - /* update iod->addr because ublksrv may have passed a new io buffer */ - ublk_get_iod(ubq, req->tag)->addr = io->addr; - pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n", - __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, - ublk_get_iod(ubq, req->tag)->addr); + io->flags |= UBLK_IO_FLAG_NEED_GET_DATA; + pr_devel("%s: need get data. qid %d tag %d io_flags %x\n", + __func__, ubq->q_id, req->tag, io->flags); + ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA, + issue_flags); + return; } - mapped_bytes = ublk_map_io(ubq, req, io); - - /* partially mapped, update io descriptor */ - if (unlikely(mapped_bytes != blk_rq_bytes(req))) { - /* - * Nothing mapped, retry until we succeed. - * - * We may never succeed in mapping any bytes here because - * of OOM. TODO: reserve one buffer with single page pinned - * for providing forward progress guarantee. 
- */ - if (unlikely(!mapped_bytes)) { - blk_mq_requeue_request(req, false); - blk_mq_delay_kick_requeue_list(req->q, - UBLK_REQUEUE_DELAY_MS); - return; - } - - ublk_get_iod(ubq, req->tag)->nr_sectors = - mapped_bytes >> 9; - } + if (!ublk_start_io(ubq, req, io)) + return; - ublk_init_req_ref(ubq, req); - ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags); + if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags)) + ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags); } static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd, @@ -1314,8 +1355,6 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l) static enum blk_eh_timer_return ublk_timeout(struct request *rq) { struct ublk_queue *ubq = rq->mq_hctx->driver_data; - unsigned int nr_inflight = 0; - int i; if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) { if (!ubq->timeout) { @@ -1326,30 +1365,11 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq) return BLK_EH_DONE; } - if (!ubq_daemon_is_dying(ubq)) - return BLK_EH_RESET_TIMER; - - for (i = 0; i < ubq->q_depth; i++) { - struct ublk_io *io = &ubq->ios[i]; - - if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) - nr_inflight++; - } - - /* cancelable uring_cmd can't help us if all commands are in-flight */ - if (nr_inflight == ubq->q_depth) { - struct ublk_device *ub = ubq->dev; - - if (ublk_abort_requests(ub, ubq)) { - schedule_work(&ub->nosrv_work); - } - return BLK_EH_DONE; - } - return BLK_EH_RESET_TIMER; } -static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq) +static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, + bool check_cancel) { blk_status_t res; @@ -1368,7 +1388,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq) if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort)) return BLK_STS_IOERR; - if (unlikely(ubq->canceling)) + if (check_cancel && unlikely(ubq->canceling)) return BLK_STS_IOERR; /* fill iod to slot in io cmd buffer */ @@ -1387,7 +1407,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq = bd->rq; blk_status_t res; - res = ublk_prep_req(ubq, rq); + res = ublk_prep_req(ubq, rq, false); if (res != BLK_STS_OK) return res; @@ -1419,7 +1439,7 @@ static void ublk_queue_rqs(struct rq_list *rqlist) ublk_queue_cmd_list(ubq, &submit_list); ubq = this_q; - if (ublk_prep_req(ubq, req) == BLK_STS_OK) + if (ublk_prep_req(ubq, req, true) == BLK_STS_OK) rq_list_add_tail(&submit_list, req); else rq_list_add_tail(&requeue_list, req); @@ -1447,6 +1467,37 @@ static const struct blk_mq_ops ublk_mq_ops = { .timeout = ublk_timeout, }; +static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) +{ + int i; + + /* All old ioucmds have to be completed */ + ubq->nr_io_ready = 0; + + /* + * old daemon is PF_EXITING, put it now + * + * It could be NULL in case of closing one quisced device. 
+ */ + if (ubq->ubq_daemon) + put_task_struct(ubq->ubq_daemon); + /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */ + ubq->ubq_daemon = NULL; + ubq->timeout = false; + + for (i = 0; i < ubq->q_depth; i++) { + struct ublk_io *io = &ubq->ios[i]; + + /* + * UBLK_IO_FLAG_CANCELED is kept for avoiding to touch + * io->cmd + */ + io->flags &= UBLK_IO_FLAG_CANCELED; + io->cmd = NULL; + io->addr = 0; + } +} + static int ublk_ch_open(struct inode *inode, struct file *filp) { struct ublk_device *ub = container_of(inode->i_cdev, @@ -1458,10 +1509,119 @@ static int ublk_ch_open(struct inode *inode, struct file *filp) return 0; } +static void ublk_reset_ch_dev(struct ublk_device *ub) +{ + int i; + + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) + ublk_queue_reinit(ub, ublk_get_queue(ub, i)); + + /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */ + ub->mm = NULL; + ub->nr_queues_ready = 0; + ub->nr_privileged_daemon = 0; +} + +static struct gendisk *ublk_get_disk(struct ublk_device *ub) +{ + struct gendisk *disk; + + spin_lock(&ub->lock); + disk = ub->ub_disk; + if (disk) + get_device(disk_to_dev(disk)); + spin_unlock(&ub->lock); + + return disk; +} + +static void ublk_put_disk(struct gendisk *disk) +{ + if (disk) + put_device(disk_to_dev(disk)); +} + static int ublk_ch_release(struct inode *inode, struct file *filp) { struct ublk_device *ub = filp->private_data; + struct gendisk *disk; + int i; + + /* + * disk isn't attached yet, either device isn't live, or it has + * been removed already, so we needn't to do anything + */ + disk = ublk_get_disk(ub); + if (!disk) + goto out; + + /* + * All uring_cmd are done now, so abort any request outstanding to + * the ublk server + * + * This can be done in lockless way because ublk server has been + * gone + * + * More importantly, we have to provide forward progress guarantee + * without holding ub->mutex, otherwise control task grabbing + * ub->mutex triggers deadlock + * + * All requests may be inflight, so ->canceling may not be set, set + * it now. + */ + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); + + ubq->canceling = true; + ublk_abort_queue(ub, ubq); + } + blk_mq_kick_requeue_list(disk->queue); + /* + * All infligh requests have been completed or requeued and any new + * request will be failed or requeued via `->canceling` now, so it is + * fine to grab ub->mutex now. + */ + mutex_lock(&ub->mutex); + + /* double check after grabbing lock */ + if (!ub->ub_disk) + goto unlock; + + /* + * Transition the device to the nosrv state. What exactly this + * means depends on the recovery flags + */ + blk_mq_quiesce_queue(disk->queue); + if (ublk_nosrv_should_stop_dev(ub)) { + /* + * Allow any pending/future I/O to pass through quickly + * with an error. 
This is needed because del_gendisk + * waits for all pending I/O to complete + */ + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) + ublk_get_queue(ub, i)->force_abort = true; + blk_mq_unquiesce_queue(disk->queue); + + ublk_stop_dev_unlocked(ub); + } else { + if (ublk_nosrv_dev_should_queue_io(ub)) { + /* ->canceling is set and all requests are aborted */ + ub->dev_info.state = UBLK_S_DEV_QUIESCED; + } else { + ub->dev_info.state = UBLK_S_DEV_FAIL_IO; + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) + ublk_get_queue(ub, i)->fail_io = true; + } + blk_mq_unquiesce_queue(disk->queue); + } +unlock: + mutex_unlock(&ub->mutex); + ublk_put_disk(disk); + + /* all uring_cmd has been done now, reset device & ubq */ + ublk_reset_ch_dev(ub); +out: clear_bit(UB_STATE_OPEN, &ub->state); return 0; } @@ -1504,34 +1664,26 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma) return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); } -static void ublk_commit_completion(struct ublk_device *ub, - const struct ublksrv_io_cmd *ub_cmd) +static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, + struct request *req) { - u32 qid = ub_cmd->q_id, tag = ub_cmd->tag; - struct ublk_queue *ubq = ublk_get_queue(ub, qid); - struct ublk_io *io = &ubq->ios[tag]; - struct request *req; - - /* now this cmd slot is owned by nbd driver */ - io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; - io->res = ub_cmd->result; - - /* find the io request and complete */ - req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag); - if (WARN_ON_ONCE(unlikely(!req))) - return; - - if (req_op(req) == REQ_OP_ZONE_APPEND) - req->__sector = ub_cmd->zone_append_lba; + WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE); - if (likely(!blk_should_fake_timeout(req->q))) - ublk_put_req_ref(ubq, req); + if (ublk_nosrv_should_reissue_outstanding(ubq->dev)) + blk_mq_requeue_request(req, false); + else { + io->res = -EIO; + __ublk_complete_rq(req); + } } /* - * Called from ubq_daemon context via cancel fn, meantime quiesce ublk - * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon - * context, so everything is serialized. + * Called from ublk char device release handler, when any uring_cmd is + * done, meantime request queue is "quiesced" since all inflight requests + * can't be completed because ublk server is dead. 
+ * + * So no one can hold our request IO reference any more, simply ignore the + * reference, and complete the request immediately */ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) { @@ -1540,54 +1692,28 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) for (i = 0; i < ubq->q_depth; i++) { struct ublk_io *io = &ubq->ios[i]; - if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) { - struct request *rq; - - /* - * Either we fail the request or ublk_rq_task_work_cb - * will do it - */ - rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i); - if (rq && blk_mq_request_started(rq)) { - io->flags |= UBLK_IO_FLAG_ABORTED; - __ublk_fail_req(ubq, io, rq); - } - } + if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) + __ublk_fail_req(ubq, io, io->req); } } /* Must be called when queue is frozen */ -static bool ublk_mark_queue_canceling(struct ublk_queue *ubq) +static void ublk_mark_queue_canceling(struct ublk_queue *ubq) { - bool canceled; - spin_lock(&ubq->cancel_lock); - canceled = ubq->canceling; - if (!canceled) + if (!ubq->canceling) ubq->canceling = true; spin_unlock(&ubq->cancel_lock); - - return canceled; } -static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) +static void ublk_start_cancel(struct ublk_queue *ubq) { - bool was_canceled = ubq->canceling; - struct gendisk *disk; - - if (was_canceled) - return false; - - spin_lock(&ub->lock); - disk = ub->ub_disk; - if (disk) - get_device(disk_to_dev(disk)); - spin_unlock(&ub->lock); + struct ublk_device *ub = ubq->dev; + struct gendisk *disk = ublk_get_disk(ub); /* Our disk has been dead */ if (!disk) - return false; - + return; /* * Now we are serialized with ublk_queue_rq() * @@ -1596,25 +1722,36 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) * touch completed uring_cmd */ blk_mq_quiesce_queue(disk->queue); - was_canceled = ublk_mark_queue_canceling(ubq); - if (!was_canceled) { - /* abort queue is for making forward progress */ - ublk_abort_queue(ub, ubq); - } + ublk_mark_queue_canceling(ubq); blk_mq_unquiesce_queue(disk->queue); - put_device(disk_to_dev(disk)); - - return !was_canceled; + ublk_put_disk(disk); } -static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io, +static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag, unsigned int issue_flags) { + struct ublk_io *io = &ubq->ios[tag]; + struct ublk_device *ub = ubq->dev; + struct request *req; bool done; if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) return; + /* + * Don't try to cancel this command if the request is started for + * avoiding race between io_uring_cmd_done() and + * io_uring_cmd_complete_in_task(). 
+ * + * Either the started request will be aborted via __ublk_abort_rq(), + * then this uring_cmd is canceled next time, or it will be done in + * task work function ublk_dispatch_req() because io_uring guarantees + * that ublk_dispatch_req() is always called + */ + req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); + if (req && blk_mq_request_started(req) && req->tag == tag) + return; + spin_lock(&ubq->cancel_lock); done = !!(io->flags & UBLK_IO_FLAG_CANCELED); if (!done) @@ -1628,6 +1765,17 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io, /* * The ublk char device won't be closed when calling cancel fn, so both * ublk device and queue are guaranteed to be live + * + * Two-stage cancel: + * + * - make every active uring_cmd done in ->cancel_fn() + * + * - aborting inflight ublk IO requests in ublk char device release handler, + * which depends on 1st stage because device can only be closed iff all + * uring_cmd are done + * + * Do _not_ try to acquire ub->mutex before all inflight requests are + * aborted, otherwise deadlock may be caused. */ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, unsigned int issue_flags) @@ -1635,9 +1783,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); struct ublk_queue *ubq = pdu->ubq; struct task_struct *task; - struct ublk_device *ub; - bool need_schedule; - struct ublk_io *io; if (WARN_ON_ONCE(!ubq)) return; @@ -1649,16 +1794,11 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, if (WARN_ON_ONCE(task && task != ubq->ubq_daemon)) return; - ub = ubq->dev; - need_schedule = ublk_abort_requests(ub, ubq); - - io = &ubq->ios[pdu->tag]; - WARN_ON_ONCE(io->cmd != cmd); - ublk_cancel_cmd(ubq, io, issue_flags); + if (!ubq->canceling) + ublk_start_cancel(ubq); - if (need_schedule) { - schedule_work(&ub->nosrv_work); - } + WARN_ON_ONCE(ubq->ios[pdu->tag].cmd != cmd); + ublk_cancel_cmd(ubq, pdu->tag, issue_flags); } static inline bool ublk_queue_ready(struct ublk_queue *ubq) @@ -1671,7 +1811,7 @@ static void ublk_cancel_queue(struct ublk_queue *ubq) int i; for (i = 0; i < ubq->q_depth; i++) - ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED); + ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED); } /* Cancel all pending commands, must be called after del_gendisk() returns */ @@ -1709,33 +1849,20 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub) } } -static void __ublk_quiesce_dev(struct ublk_device *ub) -{ - pr_devel("%s: quiesce ub: dev_id %d state %s\n", - __func__, ub->dev_info.dev_id, - ub->dev_info.state == UBLK_S_DEV_LIVE ? - "LIVE" : "QUIESCED"); - blk_mq_quiesce_queue(ub->ub_disk->queue); - ublk_wait_tagset_rqs_idle(ub); - ub->dev_info.state = UBLK_S_DEV_QUIESCED; -} - -static void ublk_unquiesce_dev(struct ublk_device *ub) +static void ublk_force_abort_dev(struct ublk_device *ub) { int i; - pr_devel("%s: unquiesce ub: dev_id %d state %s\n", + pr_devel("%s: force abort ub: dev_id %d state %s\n", __func__, ub->dev_info.dev_id, ub->dev_info.state == UBLK_S_DEV_LIVE ? "LIVE" : "QUIESCED"); - /* quiesce_work has run. We let requeued rqs be aborted - * before running fallback_wq. "force_abort" must be seen - * after request queue is unqiuesced. Then del_gendisk() - * can move on. 
- */ + blk_mq_quiesce_queue(ub->ub_disk->queue); + if (ub->dev_info.state == UBLK_S_DEV_LIVE) + ublk_wait_tagset_rqs_idle(ub); + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) ublk_get_queue(ub, i)->force_abort = true; - blk_mq_unquiesce_queue(ub->ub_disk->queue); /* We may have requeued some rqs in ublk_quiesce_queue() */ blk_mq_kick_requeue_list(ub->ub_disk->queue); @@ -1756,61 +1883,51 @@ static struct gendisk *ublk_detach_disk(struct ublk_device *ub) return disk; } -static void ublk_stop_dev(struct ublk_device *ub) +static void ublk_stop_dev_unlocked(struct ublk_device *ub) + __must_hold(&ub->mutex) { struct gendisk *disk; - mutex_lock(&ub->mutex); if (ub->dev_info.state == UBLK_S_DEV_DEAD) - goto unlock; - if (ublk_nosrv_dev_should_queue_io(ub)) { - if (ub->dev_info.state == UBLK_S_DEV_LIVE) - __ublk_quiesce_dev(ub); - ublk_unquiesce_dev(ub); - } + return; + + if (ublk_nosrv_dev_should_queue_io(ub)) + ublk_force_abort_dev(ub); del_gendisk(ub->ub_disk); disk = ublk_detach_disk(ub); put_disk(disk); - unlock: +} + +static void ublk_stop_dev(struct ublk_device *ub) +{ + mutex_lock(&ub->mutex); + ublk_stop_dev_unlocked(ub); mutex_unlock(&ub->mutex); ublk_cancel_dev(ub); } -static void ublk_nosrv_work(struct work_struct *work) +/* reset ublk io_uring queue & io flags */ +static void ublk_reset_io_flags(struct ublk_device *ub) { - struct ublk_device *ub = - container_of(work, struct ublk_device, nosrv_work); - int i; + int i, j; - if (ublk_nosrv_should_stop_dev(ub)) { - ublk_stop_dev(ub); - return; - } - - mutex_lock(&ub->mutex); - if (ub->dev_info.state != UBLK_S_DEV_LIVE) - goto unlock; + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); - if (ublk_nosrv_dev_should_queue_io(ub)) { - __ublk_quiesce_dev(ub); - } else { - blk_mq_quiesce_queue(ub->ub_disk->queue); - ub->dev_info.state = UBLK_S_DEV_FAIL_IO; - for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { - ublk_get_queue(ub, i)->fail_io = true; - } - blk_mq_unquiesce_queue(ub->ub_disk->queue); + /* UBLK_IO_FLAG_CANCELED can be cleared now */ + spin_lock(&ubq->cancel_lock); + for (j = 0; j < ubq->q_depth; j++) + ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED; + spin_unlock(&ubq->cancel_lock); + ubq->canceling = false; + ubq->fail_io = false; } - - unlock: - mutex_unlock(&ub->mutex); - ublk_cancel_dev(ub); } /* device can only be started after all IOs are ready */ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) + __must_hold(&ub->mutex) { - mutex_lock(&ub->mutex); ubq->nr_io_ready++; if (ublk_queue_ready(ubq)) { ubq->ubq_daemon = current; @@ -1820,18 +1937,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) if (capable(CAP_SYS_ADMIN)) ub->nr_privileged_daemon++; } - if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) - complete_all(&ub->completion); - mutex_unlock(&ub->mutex); -} -static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id, - int tag) -{ - struct ublk_queue *ubq = ublk_get_queue(ub, q_id); - struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag); - - ublk_queue_cmd(ubq, req); + if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) { + /* now we are ready for handling ublk io request */ + ublk_reset_io_flags(ub); + complete_all(&ub->completion); + } } static inline int ublk_check_cmd_op(u32 cmd_op) @@ -1870,6 +1981,20 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd, io_uring_cmd_mark_cancelable(cmd, issue_flags); } +static inline int ublk_set_auto_buf_reg(struct io_uring_cmd 
*cmd) +{ + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); + + pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr)); + + if (pdu->buf.reserved0 || pdu->buf.reserved1) + return -EINVAL; + + if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK) + return -EINVAL; + return 0; +} + static void ublk_io_release(void *priv) { struct request *rq = priv; @@ -1879,13 +2004,16 @@ static void ublk_io_release(void *priv) } static int ublk_register_io_buf(struct io_uring_cmd *cmd, - struct ublk_queue *ubq, unsigned int tag, + const struct ublk_queue *ubq, unsigned int tag, unsigned int index, unsigned int issue_flags) { struct ublk_device *ub = cmd->file->private_data; struct request *req; int ret; + if (!ublk_support_zero_copy(ubq)) + return -EINVAL; + req = __ublk_check_and_get_req(ub, ubq, tag, 0); if (!req) return -EINVAL; @@ -1901,11 +2029,151 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd, } static int ublk_unregister_io_buf(struct io_uring_cmd *cmd, + const struct ublk_queue *ubq, unsigned int index, unsigned int issue_flags) { + if (!ublk_support_zero_copy(ubq)) + return -EINVAL; + return io_buffer_unregister_bvec(cmd, index, issue_flags); } +static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq, + struct ublk_io *io, __u64 buf_addr) +{ + struct ublk_device *ub = ubq->dev; + int ret = 0; + + /* + * When handling FETCH command for setting up ublk uring queue, + * ub->mutex is the innermost lock, and we won't block for handling + * FETCH, so it is fine even for IO_URING_F_NONBLOCK. + */ + mutex_lock(&ub->mutex); + /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */ + if (ublk_queue_ready(ubq)) { + ret = -EBUSY; + goto out; + } + + /* allow each command to be FETCHed at most once */ + if (io->flags & UBLK_IO_FLAG_ACTIVE) { + ret = -EINVAL; + goto out; + } + + WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV); + + if (ublk_need_map_io(ubq)) { + /* + * FETCH_RQ has to provide IO buffer if NEED GET + * DATA is not enabled + */ + if (!buf_addr && !ublk_need_get_data(ubq)) + goto out; + } else if (buf_addr) { + /* User copy requires addr to be unset */ + ret = -EINVAL; + goto out; + } + + if (ublk_support_auto_buf_reg(ubq)) { + ret = ublk_set_auto_buf_reg(cmd); + if (ret) + goto out; + } + + ublk_fill_io_cmd(io, cmd, buf_addr); + ublk_mark_io_ready(ub, ubq); +out: + mutex_unlock(&ub->mutex); + return ret; +} + +static int ublk_commit_and_fetch(const struct ublk_queue *ubq, + struct ublk_io *io, struct io_uring_cmd *cmd, + const struct ublksrv_io_cmd *ub_cmd, + unsigned int issue_flags) +{ + struct request *req = io->req; + + if (ublk_need_map_io(ubq)) { + /* + * COMMIT_AND_FETCH_REQ has to provide IO buffer if + * NEED GET DATA is not enabled or it is Read IO. + */ + if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || + req_op(req) == REQ_OP_READ)) + return -EINVAL; + } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) { + /* + * User copy requires addr to be unset when command is + * not zone append + */ + return -EINVAL; + } + + if (ublk_support_auto_buf_reg(ubq)) { + int ret; + + /* + * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ` + * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same + * `io_ring_ctx`. + * + * If this uring_cmd's io_ring_ctx isn't same with the + * one for registering the buffer, it is ublk server's + * responsibility for unregistering the buffer, otherwise + * this ublk request gets stuck. 
+ */ + if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd)) + io_buffer_unregister_bvec(cmd, data->buf_index, + issue_flags); + io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG; + } + + ret = ublk_set_auto_buf_reg(cmd); + if (ret) + return ret; + } + + ublk_fill_io_cmd(io, cmd, ub_cmd->addr); + + /* now this cmd slot is owned by ublk driver */ + io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; + io->res = ub_cmd->result; + + if (req_op(req) == REQ_OP_ZONE_APPEND) + req->__sector = ub_cmd->zone_append_lba; + + if (likely(!blk_should_fake_timeout(req->q))) + ublk_put_req_ref(ubq, req); + + return 0; +} + +static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io) +{ + struct request *req = io->req; + + /* + * We have handled UBLK_IO_NEED_GET_DATA command, + * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just + * do the copy work. + */ + io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA; + /* update iod->addr because ublksrv may have passed a new io buffer */ + ublk_get_iod(ubq, req->tag)->addr = io->addr; + pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n", + __func__, ubq->q_id, req->tag, io->flags, + ublk_get_iod(ubq, req->tag)->addr); + + return ublk_start_io(ubq, req, io); +} + static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags, const struct ublksrv_io_cmd *ub_cmd) @@ -1916,7 +2184,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, u32 cmd_op = cmd->cmd_op; unsigned tag = ub_cmd->tag; int ret = -EINVAL; - struct request *req; pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n", __func__, cmd->cmd_op, ub_cmd->q_id, tag, @@ -1926,9 +2193,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, goto out; ubq = ublk_get_queue(ub, ub_cmd->q_id); - if (!ubq || ub_cmd->q_id != ubq->q_id) - goto out; - if (ubq->ubq_daemon && ubq->ubq_daemon != current) goto out; @@ -1943,6 +2207,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, goto out; } + /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */ + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) && + _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ) + goto out; + /* * ensure that the user issues UBLK_IO_NEED_GET_DATA * iff the driver have set the UBLK_IO_FLAG_NEED_GET_DATA. 
@@ -1960,68 +2229,23 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, case UBLK_IO_REGISTER_IO_BUF: return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); case UBLK_IO_UNREGISTER_IO_BUF: - return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags); + return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags); case UBLK_IO_FETCH_REQ: - /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */ - if (ublk_queue_ready(ubq)) { - ret = -EBUSY; - goto out; - } - /* - * The io is being handled by server, so COMMIT_RQ is expected - * instead of FETCH_REQ - */ - if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) - goto out; - - if (ublk_need_map_io(ubq)) { - /* - * FETCH_RQ has to provide IO buffer if NEED GET - * DATA is not enabled - */ - if (!ub_cmd->addr && !ublk_need_get_data(ubq)) - goto out; - } else if (ub_cmd->addr) { - /* User copy requires addr to be unset */ - ret = -EINVAL; + ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr); + if (ret) goto out; - } - - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - ublk_mark_io_ready(ub, ubq); break; case UBLK_IO_COMMIT_AND_FETCH_REQ: - req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag); - - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) - goto out; - - if (ublk_need_map_io(ubq)) { - /* - * COMMIT_AND_FETCH_REQ has to provide IO buffer if - * NEED GET DATA is not enabled or it is Read IO. - */ - if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || - req_op(req) == REQ_OP_READ)) - goto out; - } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) { - /* - * User copy requires addr to be unset when command is - * not zone append - */ - ret = -EINVAL; + ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags); + if (ret) goto out; - } - - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - ublk_commit_completion(ub, ub_cmd); break; case UBLK_IO_NEED_GET_DATA: - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) - goto out; - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag); - break; + io->addr = ub_cmd->addr; + if (!ublk_get_data(ubq, io)) + return -EIOCBQUEUED; + + return UBLK_IO_RES_OK; default: goto out; } @@ -2035,13 +2259,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, } static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, - struct ublk_queue *ubq, int tag, size_t offset) + const struct ublk_queue *ubq, int tag, size_t offset) { struct request *req; - if (!ublk_need_req_ref(ubq)) - return NULL; - req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); if (!req) return NULL; @@ -2155,6 +2376,9 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb, if (!ubq) return ERR_PTR(-EINVAL); + if (!ublk_support_user_copy(ubq)) + return ERR_PTR(-EACCES); + if (tag >= ubq->q_depth) return ERR_PTR(-EINVAL); @@ -2388,7 +2612,6 @@ static void ublk_remove(struct ublk_device *ub) bool unprivileged; ublk_stop_dev(ub); - cancel_work_sync(&ub->nosrv_work); cdev_device_del(&ub->cdev, &ub->cdev_dev); unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV; ublk_put_device(ub); @@ -2413,9 +2636,9 @@ static struct ublk_device *ublk_get_device_from_id(int idx) return ub; } -static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) +static int ublk_ctrl_start_dev(struct ublk_device *ub, + const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); const struct ublk_param_basic *p = &ub->params.basic; int ublksrv_pid = (int)header->data[0]; struct queue_limits lim = 
{ @@ -2534,9 +2757,8 @@ out_unlock: } static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub, - struct io_uring_cmd *cmd) + const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); void __user *argp = (void __user *)(unsigned long)header->addr; cpumask_var_t cpumask; unsigned long queue; @@ -2585,9 +2807,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info) info->nr_hw_queues, info->queue_depth); } -static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) +static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); void __user *argp = (void __user *)(unsigned long)header->addr; struct ublksrv_ctrl_dev_info info; struct ublk_device *ub; @@ -2622,6 +2843,11 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) return -EINVAL; } + if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) { + pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n"); + return -EINVAL; + } + /* * unprivileged device can't be trusted, but RECOVERY and * RECOVERY_REISSUE still may hang error handling, so can't @@ -2638,8 +2864,11 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) * For USER_COPY, we depends on userspace to fill request * buffer by pwrite() to ublk char device, which can't be * used for unprivileged device + * + * Same with zero copy or auto buffer register. */ - if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)) + if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | + UBLK_F_AUTO_BUF_REG)) return -EINVAL; } @@ -2675,7 +2904,6 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) goto out_unlock; mutex_init(&ub->mutex); spin_lock_init(&ub->lock); - INIT_WORK(&ub->nosrv_work, ublk_nosrv_work); ret = ublk_alloc_dev_number(ub, header->dev_id); if (ret < 0) @@ -2697,13 +2925,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | UBLK_F_URING_CMD_COMP_IN_TASK; - /* GET_DATA isn't needed any more with USER_COPY */ - if (ublk_dev_is_user_copy(ub)) + /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ + if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | + UBLK_F_AUTO_BUF_REG)) ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; - /* Zoned storage support requires user copy feature */ + /* + * Zoned storage support requires reuse `ublksrv_io_cmd->addr` for + * returning write_append_lba, which is only allowed in case of + * user copy or zero copy + */ if (ublk_dev_is_zoned(ub) && - (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) { + (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags & + (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) { ret = -EINVAL; goto out_free_dev_number; } @@ -2807,14 +3041,12 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd) static int ublk_ctrl_stop_dev(struct ublk_device *ub) { ublk_stop_dev(ub); - cancel_work_sync(&ub->nosrv_work); return 0; } static int ublk_ctrl_get_dev_info(struct ublk_device *ub, - struct io_uring_cmd *cmd) + const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); void __user *argp = (void __user *)(unsigned long)header->addr; if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr) @@ -2843,9 +3075,8 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub) } static int ublk_ctrl_get_params(struct ublk_device *ub, - struct io_uring_cmd *cmd) 
+ const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); void __user *argp = (void __user *)(unsigned long)header->addr; struct ublk_params_header ph; int ret; @@ -2874,9 +3105,8 @@ static int ublk_ctrl_get_params(struct ublk_device *ub, } static int ublk_ctrl_set_params(struct ublk_device *ub, - struct io_uring_cmd *cmd) + const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); void __user *argp = (void __user *)(unsigned long)header->addr; struct ublk_params_header ph; int ret = -EFAULT; @@ -2914,43 +3144,14 @@ static int ublk_ctrl_set_params(struct ublk_device *ub, return ret; } -static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) -{ - int i; - - WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq))); - - /* All old ioucmds have to be completed */ - ubq->nr_io_ready = 0; - /* old daemon is PF_EXITING, put it now */ - put_task_struct(ubq->ubq_daemon); - /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */ - ubq->ubq_daemon = NULL; - ubq->timeout = false; - ubq->canceling = false; - - for (i = 0; i < ubq->q_depth; i++) { - struct ublk_io *io = &ubq->ios[i]; - - /* forget everything now and be ready for new FETCH_REQ */ - io->flags = 0; - io->cmd = NULL; - io->addr = 0; - } -} - static int ublk_ctrl_start_recovery(struct ublk_device *ub, - struct io_uring_cmd *cmd) + const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); int ret = -EINVAL; - int i; mutex_lock(&ub->mutex); if (ublk_nosrv_should_stop_dev(ub)) goto out_unlock; - if (!ub->nr_queues_ready) - goto out_unlock; /* * START_RECOVERY is only allowd after: * @@ -2974,12 +3175,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub, goto out_unlock; } pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id); - for (i = 0; i < ub->dev_info.nr_hw_queues; i++) - ublk_queue_reinit(ub, ublk_get_queue(ub, i)); - /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */ - ub->mm = NULL; - ub->nr_queues_ready = 0; - ub->nr_privileged_daemon = 0; init_completion(&ub->completion); ret = 0; out_unlock: @@ -2988,12 +3183,10 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub, } static int ublk_ctrl_end_recovery(struct ublk_device *ub, - struct io_uring_cmd *cmd) + const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); int ublksrv_pid = (int)header->data[0]; int ret = -EINVAL; - int i; pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n", __func__, ub->dev_info.nr_hw_queues, header->dev_id); @@ -3013,33 +3206,18 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub, goto out_unlock; } ub->dev_info.ublksrv_pid = ublksrv_pid; + ub->dev_info.state = UBLK_S_DEV_LIVE; pr_devel("%s: new ublksrv_pid %d, dev id %d\n", __func__, ublksrv_pid, header->dev_id); - - if (ublk_nosrv_dev_should_queue_io(ub)) { - ub->dev_info.state = UBLK_S_DEV_LIVE; - blk_mq_unquiesce_queue(ub->ub_disk->queue); - pr_devel("%s: queue unquiesced, dev id %d.\n", - __func__, header->dev_id); - blk_mq_kick_requeue_list(ub->ub_disk->queue); - } else { - blk_mq_quiesce_queue(ub->ub_disk->queue); - ub->dev_info.state = UBLK_S_DEV_LIVE; - for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { - ublk_get_queue(ub, i)->fail_io = false; - } - blk_mq_unquiesce_queue(ub->ub_disk->queue); - } - + blk_mq_kick_requeue_list(ub->ub_disk->queue); ret = 0; 
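/*
 * Illustrative sketch, not part of the patch: with this rework the
 * per-queue canceling/fail_io flags and the UBLK_IO_FLAG_CANCELED bits
 * are cleared in ublk_reset_io_flags() once every queue has re-fetched
 * its commands (see ublk_mark_io_ready() above), so the user-recovery
 * sequence implied by these hunks reduces to roughly:
 *
 *	UBLK_CMD_START_USER_RECOVERY     // driver re-inits ub->completion
 *	UBLK_IO_FETCH_REQ on every tag   // issued by the new daemon, per queue
 *	UBLK_CMD_END_USER_RECOVERY       // driver marks UBLK_S_DEV_LIVE and
 *	                                 // kicks the requeue list, as below
 *
 * The exact userspace ordering is an assumption drawn from this diff,
 * not a statement of the ublk server's full protocol.
 */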
out_unlock: mutex_unlock(&ub->mutex); return ret; } -static int ublk_ctrl_get_features(struct io_uring_cmd *cmd) +static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header) { - const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); void __user *argp = (void __user *)(unsigned long)header->addr; u64 features = UBLK_F_ALL; @@ -3052,6 +3230,127 @@ static int ublk_ctrl_get_features(struct io_uring_cmd *cmd) return 0; } +static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) +{ + struct ublk_param_basic *p = &ub->params.basic; + u64 new_size = header->data[0]; + + mutex_lock(&ub->mutex); + p->dev_sectors = new_size; + set_capacity_and_notify(ub->ub_disk, p->dev_sectors); + mutex_unlock(&ub->mutex); +} + +struct count_busy { + const struct ublk_queue *ubq; + unsigned int nr_busy; +}; + +static bool ublk_count_busy_req(struct request *rq, void *data) +{ + struct count_busy *idle = data; + + if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq) + idle->nr_busy += 1; + return true; +} + +/* uring_cmd is guaranteed to be active if the associated request is idle */ +static bool ubq_has_idle_io(const struct ublk_queue *ubq) +{ + struct count_busy data = { + .ubq = ubq, + }; + + blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data); + return data.nr_busy < ubq->q_depth; +} + +/* Wait until each hw queue has at least one idle IO */ +static int ublk_wait_for_idle_io(struct ublk_device *ub, + unsigned int timeout_ms) +{ + unsigned int elapsed = 0; + int ret; + + while (elapsed < timeout_ms && !signal_pending(current)) { + unsigned int queues_cancelable = 0; + int i; + + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); + + queues_cancelable += !!ubq_has_idle_io(ubq); + } + + /* + * Each queue needs at least one active command for + * notifying ublk server + */ + if (queues_cancelable == ub->dev_info.nr_hw_queues) + break; + + msleep(UBLK_REQUEUE_DELAY_MS); + elapsed += UBLK_REQUEUE_DELAY_MS; + } + + if (signal_pending(current)) + ret = -EINTR; + else if (elapsed >= timeout_ms) + ret = -EBUSY; + else + ret = 0; + + return ret; +} + +static int ublk_ctrl_quiesce_dev(struct ublk_device *ub, + const struct ublksrv_ctrl_cmd *header) +{ + /* zero means wait forever */ + u64 timeout_ms = header->data[0]; + struct gendisk *disk; + int i, ret = -ENODEV; + + if (!(ub->dev_info.flags & UBLK_F_QUIESCE)) + return -EOPNOTSUPP; + + mutex_lock(&ub->mutex); + disk = ublk_get_disk(ub); + if (!disk) + goto unlock; + if (ub->dev_info.state == UBLK_S_DEV_DEAD) + goto put_disk; + + ret = 0; + /* already in expected state */ + if (ub->dev_info.state != UBLK_S_DEV_LIVE) + goto put_disk; + + /* Mark all queues as canceling */ + blk_mq_quiesce_queue(disk->queue); + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); + + ubq->canceling = true; + } + blk_mq_unquiesce_queue(disk->queue); + + if (!timeout_ms) + timeout_ms = UINT_MAX; + ret = ublk_wait_for_idle_io(ub, timeout_ms); + +put_disk: + ublk_put_disk(disk); +unlock: + mutex_unlock(&ub->mutex); + + /* Cancel pending uring_cmd */ + if (!ret) + ublk_cancel_dev(ub); + return ret; +} + /* * All control commands are sent via /dev/ublk-control, so we have to check * the destination device's permission @@ -3137,6 +3436,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub, case UBLK_CMD_SET_PARAMS: case UBLK_CMD_START_USER_RECOVERY: case UBLK_CMD_END_USER_RECOVERY: + 
case UBLK_CMD_UPDATE_SIZE: + case UBLK_CMD_QUIESCE_DEV: mask = MAY_READ | MAY_WRITE; break; default: @@ -3178,7 +3479,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, goto out; if (cmd_op == UBLK_U_CMD_GET_FEATURES) { - ret = ublk_ctrl_get_features(cmd); + ret = ublk_ctrl_get_features(header); goto out; } @@ -3195,17 +3496,17 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, switch (_IOC_NR(cmd_op)) { case UBLK_CMD_START_DEV: - ret = ublk_ctrl_start_dev(ub, cmd); + ret = ublk_ctrl_start_dev(ub, header); break; case UBLK_CMD_STOP_DEV: ret = ublk_ctrl_stop_dev(ub); break; case UBLK_CMD_GET_DEV_INFO: case UBLK_CMD_GET_DEV_INFO2: - ret = ublk_ctrl_get_dev_info(ub, cmd); + ret = ublk_ctrl_get_dev_info(ub, header); break; case UBLK_CMD_ADD_DEV: - ret = ublk_ctrl_add_dev(cmd); + ret = ublk_ctrl_add_dev(header); break; case UBLK_CMD_DEL_DEV: ret = ublk_ctrl_del_dev(&ub, true); @@ -3214,19 +3515,26 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, ret = ublk_ctrl_del_dev(&ub, false); break; case UBLK_CMD_GET_QUEUE_AFFINITY: - ret = ublk_ctrl_get_queue_affinity(ub, cmd); + ret = ublk_ctrl_get_queue_affinity(ub, header); break; case UBLK_CMD_GET_PARAMS: - ret = ublk_ctrl_get_params(ub, cmd); + ret = ublk_ctrl_get_params(ub, header); break; case UBLK_CMD_SET_PARAMS: - ret = ublk_ctrl_set_params(ub, cmd); + ret = ublk_ctrl_set_params(ub, header); break; case UBLK_CMD_START_USER_RECOVERY: - ret = ublk_ctrl_start_recovery(ub, cmd); + ret = ublk_ctrl_start_recovery(ub, header); break; case UBLK_CMD_END_USER_RECOVERY: - ret = ublk_ctrl_end_recovery(ub, cmd); + ret = ublk_ctrl_end_recovery(ub, header); + break; + case UBLK_CMD_UPDATE_SIZE: + ublk_ctrl_set_size(ub, header); + ret = 0; + break; + case UBLK_CMD_QUIESCE_DEV: + ret = ublk_ctrl_quiesce_dev(ub, header); break; default: ret = -EOPNOTSUPP; @@ -3261,6 +3569,7 @@ static int __init ublk_init(void) BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET + UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET); + BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8); init_waitqueue_head(&ublk_idr_wq); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7cffea01d868..30bca8cb7106 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -571,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk, vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); - err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL); + err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL); if (err) goto out; @@ -817,7 +817,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); vbr->out_hdr.sector = 0; - err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); + err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (err) goto out; diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c new file mode 100644 index 000000000000..553b1a713ab9 --- /dev/null +++ b/drivers/block/zloop.c @@ -0,0 +1,1385 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025, Christoph Hellwig. + * Copyright (c) 2025, Western Digital Corporation or its affiliates. + * + * Zoned Loop Device driver - exports a zoned block device using one file per + * zone as backing storage. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/blk-mq.h> +#include <linux/blkzoned.h> +#include <linux/pagemap.h> +#include <linux/miscdevice.h> +#include <linux/falloc.h> +#include <linux/mutex.h> +#include <linux/parser.h> +#include <linux/seq_file.h> + +/* + * Options for adding (and removing) a device. + */ +enum { + ZLOOP_OPT_ERR = 0, + ZLOOP_OPT_ID = (1 << 0), + ZLOOP_OPT_CAPACITY = (1 << 1), + ZLOOP_OPT_ZONE_SIZE = (1 << 2), + ZLOOP_OPT_ZONE_CAPACITY = (1 << 3), + ZLOOP_OPT_NR_CONV_ZONES = (1 << 4), + ZLOOP_OPT_BASE_DIR = (1 << 5), + ZLOOP_OPT_NR_QUEUES = (1 << 6), + ZLOOP_OPT_QUEUE_DEPTH = (1 << 7), + ZLOOP_OPT_BUFFERED_IO = (1 << 8), +}; + +static const match_table_t zloop_opt_tokens = { + { ZLOOP_OPT_ID, "id=%d" }, + { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" }, + { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" }, + { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" }, + { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" }, + { ZLOOP_OPT_BASE_DIR, "base_dir=%s" }, + { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" }, + { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" }, + { ZLOOP_OPT_BUFFERED_IO, "buffered_io" }, + { ZLOOP_OPT_ERR, NULL } +}; + +/* Default values for the "add" operation. */ +#define ZLOOP_DEF_ID -1 +#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT) +#define ZLOOP_DEF_NR_ZONES 64 +#define ZLOOP_DEF_NR_CONV_ZONES 8 +#define ZLOOP_DEF_BASE_DIR "/var/local/zloop" +#define ZLOOP_DEF_NR_QUEUES 1 +#define ZLOOP_DEF_QUEUE_DEPTH 128 +#define ZLOOP_DEF_BUFFERED_IO false + +/* Arbitrary limit on the zone size (16GB). */ +#define ZLOOP_MAX_ZONE_SIZE_MB 16384 + +struct zloop_options { + unsigned int mask; + int id; + sector_t capacity; + sector_t zone_size; + sector_t zone_capacity; + unsigned int nr_conv_zones; + char *base_dir; + unsigned int nr_queues; + unsigned int queue_depth; + bool buffered_io; +}; + +/* + * Device states. 
+ */ +enum { + Zlo_creating = 0, + Zlo_live, + Zlo_deleting, +}; + +enum zloop_zone_flags { + ZLOOP_ZONE_CONV = 0, + ZLOOP_ZONE_SEQ_ERROR, +}; + +struct zloop_zone { + struct file *file; + + unsigned long flags; + struct mutex lock; + enum blk_zone_cond cond; + sector_t start; + sector_t wp; + + gfp_t old_gfp_mask; +}; + +struct zloop_device { + unsigned int id; + unsigned int state; + + struct blk_mq_tag_set tag_set; + struct gendisk *disk; + + struct workqueue_struct *workqueue; + bool buffered_io; + + const char *base_dir; + struct file *data_dir; + + unsigned int zone_shift; + sector_t zone_size; + sector_t zone_capacity; + unsigned int nr_zones; + unsigned int nr_conv_zones; + unsigned int block_size; + + struct zloop_zone zones[] __counted_by(nr_zones); +}; + +struct zloop_cmd { + struct work_struct work; + atomic_t ref; + sector_t sector; + sector_t nr_sectors; + long ret; + struct kiocb iocb; + struct bio_vec *bvec; +}; + +static DEFINE_IDR(zloop_index_idr); +static DEFINE_MUTEX(zloop_ctl_mutex); + +static unsigned int rq_zone_no(struct request *rq) +{ + struct zloop_device *zlo = rq->q->queuedata; + + return blk_rq_pos(rq) >> zlo->zone_shift; +} + +static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + struct kstat stat; + sector_t file_sectors; + int ret; + + lockdep_assert_held(&zone->lock); + + ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0); + if (ret < 0) { + pr_err("Failed to get zone %u file stat (err=%d)\n", + zone_no, ret); + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + return ret; + } + + file_sectors = stat.size >> SECTOR_SHIFT; + if (file_sectors > zlo->zone_capacity) { + pr_err("Zone %u file too large (%llu sectors > %llu)\n", + zone_no, file_sectors, zlo->zone_capacity); + return -EINVAL; + } + + if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) { + pr_err("Zone %u file size not aligned to block size %u\n", + zone_no, zlo->block_size); + return -EINVAL; + } + + if (!file_sectors) { + zone->cond = BLK_ZONE_COND_EMPTY; + zone->wp = zone->start; + } else if (file_sectors == zlo->zone_capacity) { + zone->cond = BLK_ZONE_COND_FULL; + zone->wp = zone->start + zlo->zone_size; + } else { + zone->cond = BLK_ZONE_COND_CLOSED; + zone->wp = zone->start + file_sectors; + } + + return 0; +} + +static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + ret = zloop_update_seq_zone(zlo, zone_no); + if (ret) + goto unlock; + } + + switch (zone->cond) { + case BLK_ZONE_COND_EXP_OPEN: + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_CLOSED: + case BLK_ZONE_COND_IMP_OPEN: + zone->cond = BLK_ZONE_COND_EXP_OPEN; + break; + case BLK_ZONE_COND_FULL: + default: + ret = -EIO; + break; + } + +unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + ret = zloop_update_seq_zone(zlo, zone_no); + if (ret) + goto unlock; + } + + switch (zone->cond) { + case BLK_ZONE_COND_CLOSED: + break; + case BLK_ZONE_COND_IMP_OPEN: + case 
BLK_ZONE_COND_EXP_OPEN: + if (zone->wp == zone->start) + zone->cond = BLK_ZONE_COND_EMPTY; + else + zone->cond = BLK_ZONE_COND_CLOSED; + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_FULL: + default: + ret = -EIO; + break; + } + +unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) && + zone->cond == BLK_ZONE_COND_EMPTY) + goto unlock; + + if (vfs_truncate(&zone->file->f_path, 0)) { + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + ret = -EIO; + goto unlock; + } + + zone->cond = BLK_ZONE_COND_EMPTY; + zone->wp = zone->start; + clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + +unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static int zloop_reset_all_zones(struct zloop_device *zlo) +{ + unsigned int i; + int ret; + + for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) { + ret = zloop_reset_zone(zlo, i); + if (ret) + return ret; + } + + return 0; +} + +static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) && + zone->cond == BLK_ZONE_COND_FULL) + goto unlock; + + if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) { + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + ret = -EIO; + goto unlock; + } + + zone->cond = BLK_ZONE_COND_FULL; + zone->wp = zone->start + zlo->zone_size; + clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + + unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static void zloop_put_cmd(struct zloop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + + if (!atomic_dec_and_test(&cmd->ref)) + return; + kfree(cmd->bvec); + cmd->bvec = NULL; + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); +} + +static void zloop_rw_complete(struct kiocb *iocb, long ret) +{ + struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb); + + cmd->ret = ret; + zloop_put_cmd(cmd); +} + +static void zloop_rw(struct zloop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + struct zloop_device *zlo = rq->q->queuedata; + unsigned int zone_no = rq_zone_no(rq); + sector_t sector = blk_rq_pos(rq); + sector_t nr_sectors = blk_rq_sectors(rq); + bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND; + bool is_write = req_op(rq) == REQ_OP_WRITE || is_append; + int rw = is_write ? ITER_SOURCE : ITER_DEST; + struct req_iterator rq_iter; + struct zloop_zone *zone; + struct iov_iter iter; + struct bio_vec tmp; + sector_t zone_end; + int nr_bvec = 0; + int ret; + + atomic_set(&cmd->ref, 2); + cmd->sector = sector; + cmd->nr_sectors = nr_sectors; + cmd->ret = 0; + + /* We should never get an I/O beyond the device capacity. */ + if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) { + ret = -EIO; + goto out; + } + zone = &zlo->zones[zone_no]; + zone_end = zone->start + zlo->zone_capacity; + + /* + * The block layer should never send requests that are not fully + * contained within the zone. 
+ */ + if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) { + ret = -EIO; + goto out; + } + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + mutex_lock(&zone->lock); + ret = zloop_update_seq_zone(zlo, zone_no); + mutex_unlock(&zone->lock); + if (ret) + goto out; + } + + if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) { + mutex_lock(&zone->lock); + + if (is_append) { + sector = zone->wp; + cmd->sector = sector; + } + + /* + * Write operations must be aligned to the write pointer and + * fully contained within the zone capacity. + */ + if (sector != zone->wp || zone->wp + nr_sectors > zone_end) { + pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n", + zone_no, sector, zone->wp); + ret = -EIO; + goto unlock; + } + + /* Implicitly open the target zone. */ + if (zone->cond == BLK_ZONE_COND_CLOSED || + zone->cond == BLK_ZONE_COND_EMPTY) + zone->cond = BLK_ZONE_COND_IMP_OPEN; + + /* + * Advance the write pointer of sequential zones. If the write + * fails, the wp position will be corrected when the next I/O + * copmpletes. + */ + zone->wp += nr_sectors; + if (zone->wp == zone_end) + zone->cond = BLK_ZONE_COND_FULL; + } + + rq_for_each_bvec(tmp, rq, rq_iter) + nr_bvec++; + + if (rq->bio != rq->biotail) { + struct bio_vec *bvec; + + cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO); + if (!cmd->bvec) { + ret = -EIO; + goto unlock; + } + + /* + * The bios of the request may be started from the middle of + * the 'bvec' because of bio splitting, so we can't directly + * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec + * API will take care of all details for us. + */ + bvec = cmd->bvec; + rq_for_each_bvec(tmp, rq, rq_iter) { + *bvec = tmp; + bvec++; + } + iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq)); + } else { + /* + * Same here, this bio may be started from the middle of the + * 'bvec' because of bio splitting, so offset from the bvec + * must be passed to iov iterator + */ + iov_iter_bvec(&iter, rw, + __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter), + nr_bvec, blk_rq_bytes(rq)); + iter.iov_offset = rq->bio->bi_iter.bi_bvec_done; + } + + cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT; + cmd->iocb.ki_filp = zone->file; + cmd->iocb.ki_complete = zloop_rw_complete; + if (!zlo->buffered_io) + cmd->iocb.ki_flags = IOCB_DIRECT; + cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + + if (rw == ITER_SOURCE) + ret = zone->file->f_op->write_iter(&cmd->iocb, &iter); + else + ret = zone->file->f_op->read_iter(&cmd->iocb, &iter); +unlock: + if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) + mutex_unlock(&zone->lock); +out: + if (ret != -EIOCBQUEUED) + zloop_rw_complete(&cmd->iocb, ret); + zloop_put_cmd(cmd); +} + +static void zloop_handle_cmd(struct zloop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + struct zloop_device *zlo = rq->q->queuedata; + + switch (req_op(rq)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_ZONE_APPEND: + /* + * zloop_rw() always executes asynchronously or completes + * directly. 
+ */ + zloop_rw(cmd); + return; + case REQ_OP_FLUSH: + /* + * Sync the entire FS containing the zone files instead of + * walking all files + */ + cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb); + break; + case REQ_OP_ZONE_RESET: + cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq)); + break; + case REQ_OP_ZONE_RESET_ALL: + cmd->ret = zloop_reset_all_zones(zlo); + break; + case REQ_OP_ZONE_FINISH: + cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq)); + break; + case REQ_OP_ZONE_OPEN: + cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq)); + break; + case REQ_OP_ZONE_CLOSE: + cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq)); + break; + default: + WARN_ON_ONCE(1); + pr_err("Unsupported operation %d\n", req_op(rq)); + cmd->ret = -EOPNOTSUPP; + break; + } + + blk_mq_complete_request(rq); +} + +static void zloop_cmd_workfn(struct work_struct *work) +{ + struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work); + int orig_flags = current->flags; + + current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO; + zloop_handle_cmd(cmd); + current->flags = orig_flags; +} + +static void zloop_complete_rq(struct request *rq) +{ + struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct zloop_device *zlo = rq->q->queuedata; + unsigned int zone_no = cmd->sector >> zlo->zone_shift; + struct zloop_zone *zone = &zlo->zones[zone_no]; + blk_status_t sts = BLK_STS_OK; + + switch (req_op(rq)) { + case REQ_OP_READ: + if (cmd->ret < 0) + pr_err("Zone %u: failed read sector %llu, %llu sectors\n", + zone_no, cmd->sector, cmd->nr_sectors); + + if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) { + /* short read */ + struct bio *bio; + + __rq_for_each_bio(bio, rq) + zero_fill_bio(bio); + } + break; + case REQ_OP_WRITE: + case REQ_OP_ZONE_APPEND: + if (cmd->ret < 0) + pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n", + zone_no, + req_op(rq) == REQ_OP_WRITE ? "" : "append ", + cmd->sector, cmd->nr_sectors); + + if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) { + pr_err("Zone %u: partial write %ld/%u B\n", + zone_no, cmd->ret, blk_rq_bytes(rq)); + cmd->ret = -EIO; + } + + if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) { + /* + * A write to a sequential zone file failed: mark the + * zone as having an error. This will be corrected and + * cleared when the next IO is submitted. 
+ */ + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + break; + } + if (req_op(rq) == REQ_OP_ZONE_APPEND) + rq->__sector = cmd->sector; + + break; + default: + break; + } + + if (cmd->ret < 0) + sts = errno_to_blk_status(cmd->ret); + blk_mq_end_request(rq, sts); +} + +static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct request *rq = bd->rq; + struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct zloop_device *zlo = rq->q->queuedata; + + if (zlo->state == Zlo_deleting) + return BLK_STS_IOERR; + + blk_mq_start_request(rq); + + INIT_WORK(&cmd->work, zloop_cmd_workfn); + queue_work(zlo->workqueue, &cmd->work); + + return BLK_STS_OK; +} + +static const struct blk_mq_ops zloop_mq_ops = { + .queue_rq = zloop_queue_rq, + .complete = zloop_complete_rq, +}; + +static int zloop_open(struct gendisk *disk, blk_mode_t mode) +{ + struct zloop_device *zlo = disk->private_data; + int ret; + + ret = mutex_lock_killable(&zloop_ctl_mutex); + if (ret) + return ret; + + if (zlo->state != Zlo_live) + ret = -ENXIO; + mutex_unlock(&zloop_ctl_mutex); + return ret; +} + +static int zloop_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct zloop_device *zlo = disk->private_data; + struct blk_zone blkz = {}; + unsigned int first, i; + int ret; + + first = disk_zone_no(disk, sector); + if (first >= zlo->nr_zones) + return 0; + nr_zones = min(nr_zones, zlo->nr_zones - first); + + for (i = 0; i < nr_zones; i++) { + unsigned int zone_no = first + i; + struct zloop_zone *zone = &zlo->zones[zone_no]; + + mutex_lock(&zone->lock); + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + ret = zloop_update_seq_zone(zlo, zone_no); + if (ret) { + mutex_unlock(&zone->lock); + return ret; + } + } + + blkz.start = zone->start; + blkz.len = zlo->zone_size; + blkz.wp = zone->wp; + blkz.cond = zone->cond; + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) { + blkz.type = BLK_ZONE_TYPE_CONVENTIONAL; + blkz.capacity = zlo->zone_size; + } else { + blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ; + blkz.capacity = zlo->zone_capacity; + } + + mutex_unlock(&zone->lock); + + ret = cb(&blkz, i, data); + if (ret) + return ret; + } + + return nr_zones; +} + +static void zloop_free_disk(struct gendisk *disk) +{ + struct zloop_device *zlo = disk->private_data; + unsigned int i; + + for (i = 0; i < zlo->nr_zones; i++) { + struct zloop_zone *zone = &zlo->zones[i]; + + mapping_set_gfp_mask(zone->file->f_mapping, + zone->old_gfp_mask); + fput(zone->file); + } + + fput(zlo->data_dir); + destroy_workqueue(zlo->workqueue); + kfree(zlo->base_dir); + kvfree(zlo); +} + +static const struct block_device_operations zloop_fops = { + .owner = THIS_MODULE, + .open = zloop_open, + .report_zones = zloop_report_zones, + .free_disk = zloop_free_disk, +}; + +__printf(3, 4) +static struct file *zloop_filp_open_fmt(int oflags, umode_t mode, + const char *fmt, ...) +{ + struct file *file; + va_list ap; + char *p; + + va_start(ap, fmt); + p = kvasprintf(GFP_KERNEL, fmt, ap); + va_end(ap); + + if (!p) + return ERR_PTR(-ENOMEM); + file = filp_open(p, oflags, mode); + kfree(p); + return file; +} + +static int zloop_get_block_size(struct zloop_device *zlo, + struct zloop_zone *zone) +{ + struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev; + struct kstat st; + + /* + * If the FS block size is lower than or equal to 4K, use that as the + * device block size. 
Otherwise, fallback to the FS direct IO alignment + * constraint if that is provided, and to the FS underlying device + * physical block size if the direct IO alignment is unknown. + */ + if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K) + zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize; + else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) && + (st.result_mask & STATX_DIOALIGN)) + zlo->block_size = st.dio_offset_align; + else if (sb_bdev) + zlo->block_size = bdev_physical_block_size(sb_bdev); + else + zlo->block_size = SECTOR_SIZE; + + if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) { + pr_err("Zone capacity is not aligned to block size %u\n", + zlo->block_size); + return -EINVAL; + } + + return 0; +} + +static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts, + unsigned int zone_no, bool restore) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int oflags = O_RDWR; + struct kstat stat; + sector_t file_sectors; + int ret; + + mutex_init(&zone->lock); + zone->start = (sector_t)zone_no << zlo->zone_shift; + + if (!restore) + oflags |= O_CREAT; + + if (!opts->buffered_io) + oflags |= O_DIRECT; + + if (zone_no < zlo->nr_conv_zones) { + /* Conventional zone file. */ + set_bit(ZLOOP_ZONE_CONV, &zone->flags); + zone->cond = BLK_ZONE_COND_NOT_WP; + zone->wp = U64_MAX; + + zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u", + zlo->base_dir, zlo->id, zone_no); + if (IS_ERR(zone->file)) { + pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)", + zone_no, zlo->base_dir, zlo->id, zone_no, + PTR_ERR(zone->file)); + return PTR_ERR(zone->file); + } + + if (!zlo->block_size) { + ret = zloop_get_block_size(zlo, zone); + if (ret) + return ret; + } + + ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0); + if (ret < 0) { + pr_err("Failed to get zone %u file stat\n", zone_no); + return ret; + } + file_sectors = stat.size >> SECTOR_SHIFT; + + if (restore && file_sectors != zlo->zone_size) { + pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n", + zone_no, file_sectors, zlo->zone_capacity); + return ret; + } + + ret = vfs_truncate(&zone->file->f_path, + zlo->zone_size << SECTOR_SHIFT); + if (ret < 0) { + pr_err("Failed to truncate zone %u file (err=%d)\n", + zone_no, ret); + return ret; + } + + return 0; + } + + /* Sequential zone file. 
*/ + zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u", + zlo->base_dir, zlo->id, zone_no); + if (IS_ERR(zone->file)) { + pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)", + zone_no, zlo->base_dir, zlo->id, zone_no, + PTR_ERR(zone->file)); + return PTR_ERR(zone->file); + } + + if (!zlo->block_size) { + ret = zloop_get_block_size(zlo, zone); + if (ret) + return ret; + } + + zloop_get_block_size(zlo, zone); + + mutex_lock(&zone->lock); + ret = zloop_update_seq_zone(zlo, zone_no); + mutex_unlock(&zone->lock); + + return ret; +} + +static bool zloop_dev_exists(struct zloop_device *zlo) +{ + struct file *cnv, *seq; + bool exists; + + cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u", + zlo->base_dir, zlo->id, 0); + seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u", + zlo->base_dir, zlo->id, 0); + exists = !IS_ERR(cnv) || !IS_ERR(seq); + + if (!IS_ERR(cnv)) + fput(cnv); + if (!IS_ERR(seq)) + fput(seq); + + return exists; +} + +static int zloop_ctl_add(struct zloop_options *opts) +{ + struct queue_limits lim = { + .max_hw_sectors = SZ_1M >> SECTOR_SHIFT, + .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT, + .chunk_sectors = opts->zone_size, + .features = BLK_FEAT_ZONED, + }; + unsigned int nr_zones, i, j; + struct zloop_device *zlo; + int ret = -EINVAL; + bool restore; + + __module_get(THIS_MODULE); + + nr_zones = opts->capacity >> ilog2(opts->zone_size); + if (opts->nr_conv_zones >= nr_zones) { + pr_err("Invalid number of conventional zones %u\n", + opts->nr_conv_zones); + goto out; + } + + zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL); + if (!zlo) { + ret = -ENOMEM; + goto out; + } + zlo->state = Zlo_creating; + + ret = mutex_lock_killable(&zloop_ctl_mutex); + if (ret) + goto out_free_dev; + + /* Allocate id, if @opts->id >= 0, we're requesting that specific id */ + if (opts->id >= 0) { + ret = idr_alloc(&zloop_index_idr, zlo, + opts->id, opts->id + 1, GFP_KERNEL); + if (ret == -ENOSPC) + ret = -EEXIST; + } else { + ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL); + } + mutex_unlock(&zloop_ctl_mutex); + if (ret < 0) + goto out_free_dev; + + zlo->id = ret; + zlo->zone_shift = ilog2(opts->zone_size); + zlo->zone_size = opts->zone_size; + if (opts->zone_capacity) + zlo->zone_capacity = opts->zone_capacity; + else + zlo->zone_capacity = zlo->zone_size; + zlo->nr_zones = nr_zones; + zlo->nr_conv_zones = opts->nr_conv_zones; + zlo->buffered_io = opts->buffered_io; + + zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE, + opts->nr_queues * opts->queue_depth, zlo->id); + if (!zlo->workqueue) { + ret = -ENOMEM; + goto out_free_idr; + } + + if (opts->base_dir) + zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL); + else + zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL); + if (!zlo->base_dir) { + ret = -ENOMEM; + goto out_destroy_workqueue; + } + + zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u", + zlo->base_dir, zlo->id); + if (IS_ERR(zlo->data_dir)) { + ret = PTR_ERR(zlo->data_dir); + pr_warn("Failed to open directory %s/%u (err=%d)\n", + zlo->base_dir, zlo->id, ret); + goto out_free_base_dir; + } + + /* + * If we already have zone files, we are restoring a device created by a + * previous add operation. In this case, zloop_init_zone() will check + * that the zone files are consistent with the zone configuration given. 
+ */ + restore = zloop_dev_exists(zlo); + for (i = 0; i < nr_zones; i++) { + ret = zloop_init_zone(zlo, opts, i, restore); + if (ret) + goto out_close_files; + } + + lim.physical_block_size = zlo->block_size; + lim.logical_block_size = zlo->block_size; + + zlo->tag_set.ops = &zloop_mq_ops; + zlo->tag_set.nr_hw_queues = opts->nr_queues; + zlo->tag_set.queue_depth = opts->queue_depth; + zlo->tag_set.numa_node = NUMA_NO_NODE; + zlo->tag_set.cmd_size = sizeof(struct zloop_cmd); + zlo->tag_set.driver_data = zlo; + + ret = blk_mq_alloc_tag_set(&zlo->tag_set); + if (ret) { + pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret); + goto out_close_files; + } + + zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo); + if (IS_ERR(zlo->disk)) { + pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret); + ret = PTR_ERR(zlo->disk); + goto out_cleanup_tags; + } + zlo->disk->flags = GENHD_FL_NO_PART; + zlo->disk->fops = &zloop_fops; + zlo->disk->private_data = zlo; + sprintf(zlo->disk->disk_name, "zloop%d", zlo->id); + set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones); + + ret = blk_revalidate_disk_zones(zlo->disk); + if (ret) + goto out_cleanup_disk; + + ret = add_disk(zlo->disk); + if (ret) { + pr_err("add_disk failed (err=%d)\n", ret); + goto out_cleanup_disk; + } + + mutex_lock(&zloop_ctl_mutex); + zlo->state = Zlo_live; + mutex_unlock(&zloop_ctl_mutex); + + pr_info("Added device %d: %u zones of %llu MB, %u B block size\n", + zlo->id, zlo->nr_zones, + ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20, + zlo->block_size); + + return 0; + +out_cleanup_disk: + put_disk(zlo->disk); +out_cleanup_tags: + blk_mq_free_tag_set(&zlo->tag_set); +out_close_files: + for (j = 0; j < i; j++) { + struct zloop_zone *zone = &zlo->zones[j]; + + if (!IS_ERR_OR_NULL(zone->file)) + fput(zone->file); + } + fput(zlo->data_dir); +out_free_base_dir: + kfree(zlo->base_dir); +out_destroy_workqueue: + destroy_workqueue(zlo->workqueue); +out_free_idr: + mutex_lock(&zloop_ctl_mutex); + idr_remove(&zloop_index_idr, zlo->id); + mutex_unlock(&zloop_ctl_mutex); +out_free_dev: + kvfree(zlo); +out: + module_put(THIS_MODULE); + if (ret == -ENOENT) + ret = -EINVAL; + return ret; +} + +static int zloop_ctl_remove(struct zloop_options *opts) +{ + struct zloop_device *zlo; + int ret; + + if (!(opts->mask & ZLOOP_OPT_ID)) { + pr_err("No ID specified\n"); + return -EINVAL; + } + + ret = mutex_lock_killable(&zloop_ctl_mutex); + if (ret) + return ret; + + zlo = idr_find(&zloop_index_idr, opts->id); + if (!zlo || zlo->state == Zlo_creating) { + ret = -ENODEV; + } else if (zlo->state == Zlo_deleting) { + ret = -EINVAL; + } else { + idr_remove(&zloop_index_idr, zlo->id); + zlo->state = Zlo_deleting; + } + + mutex_unlock(&zloop_ctl_mutex); + if (ret) + return ret; + + del_gendisk(zlo->disk); + put_disk(zlo->disk); + blk_mq_free_tag_set(&zlo->tag_set); + + pr_info("Removed device %d\n", opts->id); + + module_put(THIS_MODULE); + + return 0; +} + +static int zloop_parse_options(struct zloop_options *opts, const char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + unsigned int token; + int ret = 0; + + /* Set defaults. */ + opts->mask = 0; + opts->id = ZLOOP_DEF_ID; + opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES; + opts->zone_size = ZLOOP_DEF_ZONE_SIZE; + opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES; + opts->nr_queues = ZLOOP_DEF_NR_QUEUES; + opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH; + opts->buffered_io = ZLOOP_DEF_BUFFERED_IO; + + if (!buf) + return 0; + + /* Skip leading spaces before the options. 
*/ + while (isspace(*buf)) + buf++; + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + /* Parse the options, doing only some light invalid value checks. */ + while ((p = strsep(&o, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, zloop_opt_tokens, args); + opts->mask |= token; + switch (token) { + case ZLOOP_OPT_ID: + if (match_int(args, &opts->id)) { + ret = -EINVAL; + goto out; + } + break; + case ZLOOP_OPT_CAPACITY: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid capacity\n"); + ret = -EINVAL; + goto out; + } + opts->capacity = + ((sector_t)token * SZ_1M) >> SECTOR_SHIFT; + break; + case ZLOOP_OPT_ZONE_SIZE: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB || + !is_power_of_2(token)) { + pr_err("Invalid zone size %u\n", token); + ret = -EINVAL; + goto out; + } + opts->zone_size = + ((sector_t)token * SZ_1M) >> SECTOR_SHIFT; + break; + case ZLOOP_OPT_ZONE_CAPACITY: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid zone capacity\n"); + ret = -EINVAL; + goto out; + } + opts->zone_capacity = + ((sector_t)token * SZ_1M) >> SECTOR_SHIFT; + break; + case ZLOOP_OPT_NR_CONV_ZONES: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + opts->nr_conv_zones = token; + break; + case ZLOOP_OPT_BASE_DIR: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->base_dir); + opts->base_dir = p; + break; + case ZLOOP_OPT_NR_QUEUES: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid number of queues\n"); + ret = -EINVAL; + goto out; + } + opts->nr_queues = min(token, num_online_cpus()); + break; + case ZLOOP_OPT_QUEUE_DEPTH: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid queue depth\n"); + ret = -EINVAL; + goto out; + } + opts->queue_depth = token; + break; + case ZLOOP_OPT_BUFFERED_IO: + opts->buffered_io = true; + break; + case ZLOOP_OPT_ERR: + default: + pr_warn("unknown parameter or missing value '%s'\n", p); + ret = -EINVAL; + goto out; + } + } + + ret = -EINVAL; + if (opts->capacity <= opts->zone_size) { + pr_err("Invalid capacity\n"); + goto out; + } + + if (opts->zone_capacity > opts->zone_size) { + pr_err("Invalid zone capacity\n"); + goto out; + } + + ret = 0; +out: + kfree(options); + return ret; +} + +enum { + ZLOOP_CTL_ADD, + ZLOOP_CTL_REMOVE, +}; + +static struct zloop_ctl_op { + int code; + const char *name; +} zloop_ctl_ops[] = { + { ZLOOP_CTL_ADD, "add" }, + { ZLOOP_CTL_REMOVE, "remove" }, + { -1, NULL }, +}; + +static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *pos) +{ + struct zloop_options opts = { }; + struct zloop_ctl_op *op; + const char *buf, *opts_buf; + int i, ret; + + if (count > PAGE_SIZE) + return -ENOMEM; + + buf = memdup_user_nul(ubuf, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) { + op = &zloop_ctl_ops[i]; + if (!op->name) { + pr_err("Invalid operation\n"); + ret = -EINVAL; + goto out; + } + if (!strncmp(buf, op->name, strlen(op->name))) + break; + } + + if (count <= strlen(op->name)) + opts_buf = NULL; + else + opts_buf = buf + strlen(op->name); + + ret = zloop_parse_options(&opts, opts_buf); + if (ret) { + pr_err("Failed to parse options\n"); + goto out; + } + + switch (op->code) { + case ZLOOP_CTL_ADD: + ret 
= zloop_ctl_add(&opts); + break; + case ZLOOP_CTL_REMOVE: + ret = zloop_ctl_remove(&opts); + break; + default: + pr_err("Invalid operation\n"); + ret = -EINVAL; + goto out; + } + +out: + kfree(opts.base_dir); + kfree(buf); + return ret ? ret : count; +} + +static int zloop_ctl_show(struct seq_file *seq_file, void *private) +{ + const struct match_token *tok; + int i; + + /* Add operation */ + seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name); + for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) { + tok = &zloop_opt_tokens[i]; + if (!tok->pattern) + break; + if (i) + seq_putc(seq_file, ','); + seq_puts(seq_file, tok->pattern); + } + seq_putc(seq_file, '\n'); + + /* Remove operation */ + seq_puts(seq_file, zloop_ctl_ops[1].name); + seq_puts(seq_file, " id=%d\n"); + + return 0; +} + +static int zloop_ctl_open(struct inode *inode, struct file *file) +{ + file->private_data = NULL; + return single_open(file, zloop_ctl_show, NULL); +} + +static int zloop_ctl_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations zloop_ctl_fops = { + .owner = THIS_MODULE, + .open = zloop_ctl_open, + .release = zloop_ctl_release, + .write = zloop_ctl_write, + .read = seq_read, +}; + +static struct miscdevice zloop_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "zloop-control", + .fops = &zloop_ctl_fops, +}; + +static int __init zloop_init(void) +{ + int ret; + + ret = misc_register(&zloop_misc); + if (ret) { + pr_err("Failed to register misc device: %d\n", ret); + return ret; + } + pr_info("Module loaded\n"); + + return 0; +} + +static void __exit zloop_exit(void) +{ + misc_deregister(&zloop_misc); + idr_destroy(&zloop_index_idr); +} + +module_init(zloop_init); +module_exit(zloop_exit); + +MODULE_DESCRIPTION("Zoned loopback device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index c1e69fcc9c4f..0a759ea26fd3 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -957,8 +957,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb) /* This is a debug event that comes from IML and OP image when it * starts execution. There is no need pass this event to stack. 
*/ - if (skb->data[2] == 0x97) + if (skb->data[2] == 0x97) { + hci_recv_diag(hdev, skb); return 0; + } } return hci_recv_frame(hdev, skb); @@ -974,7 +976,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, u8 pkt_type; u16 plen; u32 pcie_pkt_type; - struct sk_buff *new_skb; void *pdata; struct hci_dev *hdev = data->hdev; @@ -1051,24 +1052,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen); - new_skb = bt_skb_alloc(plen, GFP_ATOMIC); - if (!new_skb) { - bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u", - skb->len); - ret = -ENOMEM; - goto exit_error; - } - - hci_skb_pkt_type(new_skb) = pkt_type; - skb_put_data(new_skb, skb->data, plen); + hci_skb_pkt_type(skb) = pkt_type; hdev->stat.byte_rx += plen; + skb_trim(skb, plen); if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT) - ret = btintel_pcie_recv_event(hdev, new_skb); + ret = btintel_pcie_recv_event(hdev, skb); else - ret = hci_recv_frame(hdev, new_skb); + ret = hci_recv_frame(hdev, skb); + skb = NULL; /* skb is freed in the callee */ exit_error: + if (skb) + kfree_skb(skb); + if (ret) hdev->stat.err_rx++; @@ -1202,8 +1199,6 @@ static void btintel_pcie_rx_work(struct work_struct *work) struct btintel_pcie_data *data = container_of(work, struct btintel_pcie_data, rx_work); struct sk_buff *skb; - int err; - struct hci_dev *hdev = data->hdev; if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { /* Unlike usb products, controller will not send hardware @@ -1224,11 +1219,7 @@ static void btintel_pcie_rx_work(struct work_struct *work) /* Process the sk_buf in queue and send to the HCI layer */ while ((skb = skb_dequeue(&data->rx_skb_q))) { - err = btintel_pcie_recv_frame(data, skb); - if (err) - bt_dev_err(hdev, "Failed to send received frame: %d", - err); - kfree_skb(skb); + btintel_pcie_recv_frame(data, skb); } } @@ -1281,10 +1272,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data) bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia); /* Check CR_TIA and CR_HIA for change */ - if (cr_tia == cr_hia) { - bt_dev_warn(hdev, "RXQ: no new CD found"); + if (cr_tia == cr_hia) return; - } rxq = &data->rxq; @@ -1320,6 +1309,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data) return IRQ_WAKE_THREAD; } +static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data) +{ + return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; +} + +static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data) +{ + return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; +} + static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) { struct msix_entry *entry = dev_id; @@ -1351,12 +1350,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) btintel_pcie_msix_gp0_handler(data); /* For TX */ - if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) { btintel_pcie_msix_tx_handle(data); + if (!btintel_pcie_is_rxq_empty(data)) + btintel_pcie_msix_rx_handle(data); + } /* For RX */ - if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) { btintel_pcie_msix_rx_handle(data); + if (!btintel_pcie_is_txackq_empty(data)) + btintel_pcie_msix_tx_handle(data); + } /* * Before sending the interrupt the HW disables it to prevent a nested diff --git a/drivers/bluetooth/btmtksdio.c 
b/drivers/bluetooth/btmtksdio.c index edd5eead1e93..1d26207b2ba7 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */ + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + return 0; + sdio_claim_host(bdev->func); /* Disable interrupt */ @@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func) if (!bdev) return; + hdev = bdev->hdev; + + /* Make sure to call btmtksdio_close before removing sdio card */ + if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + btmtksdio_close(hdev); + /* Be consistent the state in btmtksdio_probe */ pm_runtime_get_noresume(bdev->dev); - hdev = bdev->hdev; - sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 5091dea762a0..604ab2bba231 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1286,7 +1286,9 @@ static void nxp_coredump(struct hci_dev *hdev) u8 pcmd = 2; skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd); - if (!IS_ERR(skb)) + if (IS_ERR(skb)) + bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb)); + else kfree_skb(skb); } @@ -1445,9 +1447,6 @@ static int nxp_shutdown(struct hci_dev *hdev) /* HCI_NXP_IND_RESET command may not returns any response */ if (!IS_ERR(skb)) kfree_skb(skb); - } else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { - nxpdev->new_baudrate = nxpdev->fw_init_baudrate; - nxp_set_baudrate_cmd(hdev, NULL); } return 0; @@ -1799,13 +1798,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev) clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); - } - - if (test_bit(HCI_RUNNING, &hdev->flags)) { - /* Ensure shutdown callback is executed before unregistering, so - * that baudrate is reset to initial value. + } else { + /* Restore FW baudrate to fw_init_baudrate if changed. + * This will ensure FW baudrate is in sync with + * driver baudrate in case this driver is re-inserted. 
*/ - nxp_shutdown(hdev); + if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { + nxpdev->new_baudrate = nxpdev->fw_init_baudrate; + nxp_set_baudrate_cmd(hdev, NULL); + } } ps_cleanup(nxpdev); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 3d6778b95e00..edefb9dc76aa 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -889,7 +889,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T) variant = "t"; else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S) - variant = "u"; + variant = "s"; snprintf(config.fwname, sizeof(config.fwname), "qca/cmnv%02x%s.bin", rom_ver, variant); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index d3eba0d4a57d..7838c89e529e 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -1215,6 +1215,8 @@ next: rtl_dev_err(hdev, "mandatory config file %s not found", btrtl_dev->ic_info->cfg_name); ret = btrtl_dev->cfg_len; + if (!ret) + ret = -EINVAL; goto err_free; } } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5012b5ff92c8..256b451bbe06 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3010,55 +3010,27 @@ static void btusb_coredump_qca(struct hci_dev *hdev) bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err); } -/* - * ==0: not a dump pkt. - * < 0: fails to handle a dump pkt - * > 0: otherwise. - */ +/* Return: 0 on success, negative errno on failure. */ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) { - int ret = 1; + int ret = 0; + unsigned int skip = 0; u8 pkt_type; - u8 *sk_ptr; - unsigned int sk_len; u16 seqno; u32 dump_size; - struct hci_event_hdr *event_hdr; - struct hci_acl_hdr *acl_hdr; struct qca_dump_hdr *dump_hdr; struct btusb_data *btdata = hci_get_drvdata(hdev); struct usb_device *udev = btdata->udev; pkt_type = hci_skb_pkt_type(skb); - sk_ptr = skb->data; - sk_len = skb->len; - - if (pkt_type == HCI_ACLDATA_PKT) { - acl_hdr = hci_acl_hdr(skb); - if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) - return 0; - sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - event_hdr = (struct hci_event_hdr *)sk_ptr; - } else { - event_hdr = hci_event_hdr(skb); - } - - if ((event_hdr->evt != HCI_VENDOR_PKT) - || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) - return 0; - - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + skip = sizeof(struct hci_event_hdr); + if (pkt_type == HCI_ACLDATA_PKT) + skip += sizeof(struct hci_acl_hdr); - dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) - || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) - || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return 0; + skb_pull(skb, skip); + dump_hdr = (struct qca_dump_hdr *)skb->data; - /*it is dump pkt now*/ seqno = le16_to_cpu(dump_hdr->seqno); if (seqno == 0) { set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); @@ -3078,16 +3050,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) btdata->qca_dump.ram_dump_size = dump_size; btdata->qca_dump.ram_dump_seqno = 0; - sk_ptr += offsetof(struct qca_dump_hdr, data0); - sk_len -= offsetof(struct qca_dump_hdr, data0); + + skb_pull(skb, offsetof(struct qca_dump_hdr, data0)); usb_disable_autosuspend(udev); bt_dev_info(hdev, "%s memdump size(%u)\n", (pkt_type == HCI_ACLDATA_PKT) ? 
"ACL" : "event", dump_size); } else { - sk_ptr += offsetof(struct qca_dump_hdr, data); - sk_len -= offsetof(struct qca_dump_hdr, data); + skb_pull(skb, offsetof(struct qca_dump_hdr, data)); } if (!btdata->qca_dump.ram_dump_size) { @@ -3107,7 +3078,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) return ret; } - skb_pull(skb, skb->len - sk_len); hci_devcd_append(hdev, skb); btdata->qca_dump.ram_dump_seqno++; if (seqno == QCA_LAST_SEQUENCE_NUM) { @@ -3132,17 +3102,74 @@ out: return ret; } +/* Return: true if the ACL packet is a dump packet, false otherwise. */ +static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_event_hdr *event_hdr; + struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false; + + if (!clone) + return false; + + acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr)); + if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)) + goto out; + + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out; + + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out; + + is_dump = true; +out: + consume_skb(clone); + return is_dump; +} + +/* Return: true if the event packet is a dump packet, false otherwise. */ +static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_event_hdr *event_hdr; + struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false; + + if (!clone) + return false; + + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out; + + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out; + + is_dump = true; +out: + consume_skb(clone); + return is_dump; +} + static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) { - if (handle_dump_pkt_qca(hdev, skb)) - return 0; + if (acl_pkt_is_dump_qca(hdev, skb)) + return handle_dump_pkt_qca(hdev, skb); return hci_recv_frame(hdev, skb); } static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) { - if (handle_dump_pkt_qca(hdev, skb)) - return 0; + if (evt_pkt_is_dump_qca(hdev, skb)) + return handle_dump_pkt_qca(hdev, skb); return hci_recv_frame(hdev, skb); } diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index a51935d37e5d..59f4d7bdffdc 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev) static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { - char buf[80]; + const char *buf; - snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n"); + buf = "Controller Name: vhci_ctrl\n"; skb_put_data(skb, buf, strlen(buf)); - snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n"); + buf = "Firmware Version: vhci_fw\n"; skb_put_data(skb, buf, strlen(buf)); - snprintf(buf, sizeof(buf), "Driver: vhci_drv\n"); + buf = "Driver: vhci_drv\n"; skb_put_data(skb, buf, strlen(buf)); - snprintf(buf, sizeof(buf), "Vendor: vhci\n"); + buf = "Vendor: vhci\n"; skb_put_data(skb, buf, strlen(buf)); } diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 
b163e043c687..21a10552da61 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3677,8 +3677,7 @@ static void cdrom_sysctl_register(void) static void cdrom_sysctl_unregister(void) { - if (cdrom_sysctl_header) - unregister_sysctl_table(cdrom_sysctl_header); + unregister_sysctl_table(cdrom_sysctl_header); } #else /* CONFIG_SYSCTL */ diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 8e41731d3642..bf490967241a 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -16,7 +16,7 @@ #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820/api.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/gart.h> #include "agp.h" diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index e424360fb4a1..4787391bb6b4 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -11,6 +11,7 @@ #include <linux/page-flags.h> #include <linux/mm.h> #include <linux/jiffies.h> +#include <asm/msr.h> #include "agp.h" /* NVIDIA registers */ diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index 143406bc6939..d2b00458761e 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -37,6 +37,7 @@ struct atmel_trng { struct clk *clk; void __iomem *base; struct hwrng rng; + struct device *dev; bool has_half_rate; }; @@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, u32 *data = buf; int ret; - ret = pm_runtime_get_sync((struct device *)trng->rng.priv); + ret = pm_runtime_get_sync(trng->dev); if (ret < 0) { - pm_runtime_put_sync((struct device *)trng->rng.priv); + pm_runtime_put_sync(trng->dev); return ret; } @@ -79,8 +80,8 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, ret = 4; out: - pm_runtime_mark_last_busy((struct device *)trng->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv); + pm_runtime_mark_last_busy(trng->dev); + pm_runtime_put_sync_autosuspend(trng->dev); return ret; } @@ -134,9 +135,9 @@ static int atmel_trng_probe(struct platform_device *pdev) return -ENODEV; trng->has_half_rate = data->has_half_rate; + trng->dev = &pdev->dev; trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; - trng->rng.priv = (unsigned long)&pdev->dev; platform_set_drvdata(pdev, trng); #ifndef CONFIG_PM diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index 1e3048f2bb38..b7fa1bc1122b 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -36,6 +36,7 @@ struct mtk_rng { void __iomem *base; struct clk *clk; struct hwrng rng; + struct device *dev; }; static int mtk_rng_init(struct hwrng *rng) @@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct mtk_rng *priv = to_mtk_rng(rng); int retval = 0; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max >= sizeof(u32)) { if (!mtk_rng_wait_ready(rng, wait)) @@ -97,8 +98,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max -= sizeof(u32); } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? 
retval : -EIO; } @@ -112,13 +113,13 @@ static int mtk_rng_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; priv->rng.name = pdev->name; #ifndef CONFIG_PM priv->rng.init = mtk_rng_init; priv->rng.cleanup = mtk_rng_cleanup; #endif priv->rng.read = mtk_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; priv->rng.quality = 900; priv->clk = devm_clk_get(&pdev->dev, "rng"); diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 9ff00f096f38..3e308c890bd2 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -32,6 +32,7 @@ struct npcm_rng { void __iomem *base; struct hwrng rng; + struct device *dev; u32 clkp; }; @@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) int retval = 0; int ready; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max) { if (wait) { @@ -79,8 +80,8 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max--; } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -109,7 +110,7 @@ static int npcm_rng_probe(struct platform_device *pdev) #endif priv->rng.name = pdev->name; priv->rng.read = npcm_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; + priv->dev = &pdev->dev; priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev); writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG); diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 161050591663..fb4a30b95507 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -93,6 +93,30 @@ #define TRNG_v1_VERSION_CODE 0x46bc /* end of TRNG_V1 register definitions */ +/* + * RKRNG register definitions + * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528 + * SoCs. It can either output true randomness (TRNG) or "deterministic" + * randomness derived from hashing the true entropy (DRNG). This driver + * implementation uses just the true entropy, and leaves stretching the entropy + * up to Linux. + */ +#define RKRNG_CFG 0x0000 +#define RKRNG_CTRL 0x0010 +#define RKRNG_CTRL_REQ_TRNG BIT(4) +#define RKRNG_STATE 0x0014 +#define RKRNG_STATE_TRNG_RDY BIT(4) +#define RKRNG_TRNG_DATA0 0x0050 +#define RKRNG_TRNG_DATA1 0x0054 +#define RKRNG_TRNG_DATA2 0x0058 +#define RKRNG_TRNG_DATA3 0x005C +#define RKRNG_TRNG_DATA4 0x0060 +#define RKRNG_TRNG_DATA5 0x0064 +#define RKRNG_TRNG_DATA6 0x0068 +#define RKRNG_TRNG_DATA7 0x006C +#define RKRNG_READ_LEN 32 + /* Before removing this assert, give rk3588_rng_read an upper bound of 32 */ static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0), "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats."); @@ -205,6 +229,46 @@ out: return (ret < 0) ? 
ret : to_read; } +static int rk3576_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + return rk_rng_enable_clks(rk_rng); +} + +static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RKRNG_READ_LEN); + int ret = 0; + u32 val; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16), + RKRNG_CTRL); + + if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val, + (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US)) { + dev_err(rk_rng->dev, "timed out waiting for data\n"); + ret = -ETIMEDOUT; + goto out; + } + + rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE); + + memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read); + +out: + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? ret : to_read; +} + static int rk3588_rng_init(struct hwrng *rng) { struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); @@ -305,6 +369,14 @@ static const struct rk_rng_soc_data rk3568_soc_data = { .reset_optional = false, }; +static const struct rk_rng_soc_data rk3576_soc_data = { + .rk_rng_init = rk3576_rng_init, + .rk_rng_read = rk3576_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + static const struct rk_rng_soc_data rk3588_soc_data = { .rk_rng_init = rk3588_rng_init, .rk_rng_read = rk3588_rng_read, @@ -397,6 +469,7 @@ static const struct dev_pm_ops rk_rng_pm_ops = { static const struct of_device_id rk_rng_dt_match[] = { { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data }, + { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data }, { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data }, { /* sentinel */ }, }; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 169eed162a7f..48839958b0b1 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn) { return devmem_is_allowed(pfn); } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - u64 from = ((u64)pfn) << PAGE_SHIFT; - u64 to = from + size; - u64 cursor = from; - - while (cursor < to) { - if (!devmem_is_allowed(pfn)) - return 0; - cursor += PAGE_SIZE; - pfn++; - } - return 1; -} #else static inline int page_is_allowed(unsigned long pfn) { return 1; } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - return 1; -} #endif static inline bool should_stop_iteration(void) diff --git a/drivers/char/misc.c b/drivers/char/misc.c index f7dd455dd0dd..dda466f9181a 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -315,7 +315,7 @@ static int __init misc_init(void) goto fail_remove; err = -EIO; - if (register_chrdev(MISC_MAJOR, "misc", &misc_fops)) + if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops)) goto fail_printk; return 0; diff --git a/drivers/char/random.c b/drivers/char/random.c index 38f2fab29c56..5f22a08101f6 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -309,11 +309,11 @@ static void crng_reseed(struct work_struct *work) * key value, at index 4, so the state should always be zeroed out * immediately after using in order to maintain forward secrecy. 
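/*
 * [Editor's note: illustrative sketch, not part of the patch above.] The
 * random.c hunks in this section convert the CRNG from a bare
 * u32[CHACHA_STATE_WORDS] array to struct chacha_state. Reassembled from the
 * diff for readability, the fast-key-erasure step now reads roughly as below;
 * the final two copies and the zeroization are the unchanged upstream tail of
 * the function, and the "_sketch" name is not real:
 */
static void crng_fast_key_erasure_sketch(u8 key[CHACHA_KEY_SIZE],
					 struct chacha_state *chacha_state,
					 u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);	/* key words live at x[4..11] */
	memset(&chacha_state->x[12], 0, sizeof(u32) * 4);	/* zero counter and nonce */
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);		/* overwrite the old key */
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}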
* If the state cannot be erased in a timely manner, then it is - * safer to set the random_data parameter to &chacha_state[4] so - * that this function overwrites it before returning. + * safer to set the random_data parameter to &chacha_state->x[4] + * so that this function overwrites it before returning. */ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], - u32 chacha_state[CHACHA_STATE_WORDS], + struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { u8 first_block[CHACHA_BLOCK_SIZE]; @@ -321,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], BUG_ON(random_data_len > 32); chacha_init_consts(chacha_state); - memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); - memset(&chacha_state[12], 0, sizeof(u32) * 4); + memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE); + memset(&chacha_state->x[12], 0, sizeof(u32) * 4); chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); @@ -335,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], * random data. It also returns up to 32 bytes on its own of random data * that may be used; random_data_len may not be greater than 32. */ -static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], +static void crng_make_state(struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { unsigned long flags; @@ -395,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], static void _get_random_bytes(void *buf, size_t len) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 tmp[CHACHA_BLOCK_SIZE]; size_t first_block_len; @@ -403,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len) return; first_block_len = min_t(size_t, 32, len); - crng_make_state(chacha_state, buf, first_block_len); + crng_make_state(&chacha_state, buf, first_block_len); len -= first_block_len; buf += first_block_len; while (len) { if (len < CHACHA_BLOCK_SIZE) { - chacha20_block(chacha_state, tmp); + chacha20_block(&chacha_state, tmp); memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } - chacha20_block(chacha_state, buf); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, buf); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); } /* @@ -441,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes); static ssize_t get_random_bytes_user(struct iov_iter *iter) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 block[CHACHA_BLOCK_SIZE]; size_t ret = 0, copied; @@ -453,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ - crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4], + CHACHA_KEY_SIZE); /* * However, if we're doing a read of len <= 32, we don't need to * use chacha_state after, so we can simply return those bytes to * the user directly. 
*/ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { - ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); + ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter); goto out_zero_chacha; } for (;;) { - chacha20_block(chacha_state, block); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, block); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; copied = copy_to_iter(block, sizeof(block), iter); ret += copied; @@ -484,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) memzero_explicit(block, sizeof(block)); out_zero_chacha: - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); return ret ? ret : -EFAULT; } diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 12ee42a31c71..e7913b2853d5 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v) (unsigned char *)(v + sizeof(struct tcpa_event)); eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); - if (!eventname) { - printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", - __func__); - return -EFAULT; - } + if (!eventname) + return -ENOMEM; /* 1st: PCR */ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c index e49a19fea3bd..dc882fc9fa9e 100644 --- a/drivers/char/tpm/tpm-buf.c +++ b/drivers/char/tpm/tpm-buf.c @@ -201,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void */ u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset) { - u8 value; + u8 value = 0; tpm_buf_read(buf, offset, sizeof(value), &value); @@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8); */ u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset) { - u16 value; + u16 value = 0; tpm_buf_read(buf, offset, sizeof(value), &value); @@ -235,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16); */ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset) { - u32 value; + u32 value = 0; tpm_buf_read(buf, offset, sizeof(value), &value); diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index 3f89635ba5e8..7b5049b3d476 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -40,11 +40,6 @@ * * These are the usage functions: * - * tpm2_start_auth_session() which allocates the opaque auth structure - * and gets a session from the TPM. This must be called before - * any of the following functions. The session is protected by a - * session_key which is derived from a random salt value - * encrypted to the NULL seed. * tpm2_end_auth_session() kills the session and frees the resources. * Under normal operation this function is done by * tpm_buf_check_hmac_response(), so this is only to be used on @@ -963,16 +958,13 @@ err: } /** - * tpm2_start_auth_session() - create a HMAC authentication session with the TPM - * @chip: the TPM chip structure to create the session with + * tpm2_start_auth_session() - Create an a HMAC authentication session + * @chip: A TPM chip * - * This function loads the NULL seed from its saved context and starts - * an authentication session on the null seed, fills in the - * @chip->auth structure to contain all the session details necessary - * for performing the HMAC, encrypt and decrypt operations and - * returns. The NULL seed is flushed before this function returns. 
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated + * session. The null seed is flushed before the return. * - * Return: zero on success or actual error encountered. + * Returns zero on success, or a POSIX error code. */ int tpm2_start_auth_session(struct tpm_chip *chip) { @@ -1024,7 +1016,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip) /* hash algorithm for session */ tpm_buf_append_u16(&buf, TPM_ALG_SHA256); - rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session"); + rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession")); tpm2_flush_context(chip, null_key); if (rc == TPM2_RC_SUCCESS) diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c index 3169a87a56b6..4ead61f01299 100644 --- a/drivers/char/tpm/tpm_crb_ffa.c +++ b/drivers/char/tpm/tpm_crb_ffa.c @@ -38,9 +38,11 @@ * messages. * * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP - * are using the AArch32 SMC calling convention with register usage as - * defined in FF-A specification: - * w0: Function ID (0x8400006F or 0x84000070) + * are using the AArch32 or AArch64 SMC calling convention with register usage + * as defined in FF-A specification: + * w0: Function ID + * -for 32-bit: 0x8400006F or 0x84000070 + * -for 64-bit: 0xC400006F or 0xC4000070 * w1: Source/Destination IDs * w2: Reserved (MBZ) * w3-w7: Implementation defined, free to be used below @@ -68,7 +70,8 @@ #define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001 /* - * Return information on a given feature of the TPM service + * Notifies the TPM service that a TPM command or TPM locality request is + * ready to be processed, and allows the TPM service to process it. * Call register usage: * w3: Not used (MBZ) * w4: TPM service function ID, CRB_FFA_START @@ -105,7 +108,10 @@ struct tpm_crb_ffa { u16 minor_version; /* lock to protect sending of FF-A messages: */ struct mutex msg_data_lock; - struct ffa_send_direct_data direct_msg_data; + union { + struct ffa_send_direct_data direct_msg_data; + struct ffa_send_direct_data2 direct_msg_data2; + }; }; static struct tpm_crb_ffa *tpm_crb_ffa; @@ -185,18 +191,34 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id, msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops; - memset(&tpm_crb_ffa->direct_msg_data, 0x00, - sizeof(struct ffa_send_direct_data)); - - tpm_crb_ffa->direct_msg_data.data1 = func_id; - tpm_crb_ffa->direct_msg_data.data2 = a0; - tpm_crb_ffa->direct_msg_data.data3 = a1; - tpm_crb_ffa->direct_msg_data.data4 = a2; + if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { + memset(&tpm_crb_ffa->direct_msg_data2, 0x00, + sizeof(struct ffa_send_direct_data2)); + + tpm_crb_ffa->direct_msg_data2.data[0] = func_id; + tpm_crb_ffa->direct_msg_data2.data[1] = a0; + tpm_crb_ffa->direct_msg_data2.data[2] = a1; + tpm_crb_ffa->direct_msg_data2.data[3] = a2; + + ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev, + &tpm_crb_ffa->direct_msg_data2); + if (!ret) + ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]); + } else { + memset(&tpm_crb_ffa->direct_msg_data, 0x00, + sizeof(struct ffa_send_direct_data)); + + tpm_crb_ffa->direct_msg_data.data1 = func_id; + tpm_crb_ffa->direct_msg_data.data2 = a0; + tpm_crb_ffa->direct_msg_data.data3 = a1; + tpm_crb_ffa->direct_msg_data.data4 = a2; + + ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev, + &tpm_crb_ffa->direct_msg_data); + if (!ret) + ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1); + } - ret = 
msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev, - &tpm_crb_ffa->direct_msg_data); - if (!ret) - ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1); return ret; } @@ -231,8 +253,13 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00); if (!rc) { - *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); - *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { + *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); + *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); + } else { + *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + } } return rc; @@ -277,8 +304,9 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev) tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure - if (!ffa_partition_supports_direct_recv(ffa_dev)) { - pr_err("TPM partition doesn't support direct message receive.\n"); + if (!ffa_partition_supports_direct_recv(ffa_dev) && + !ffa_partition_supports_direct_req2_recv(ffa_dev)) { + dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n"); return -EINVAL; } @@ -299,17 +327,17 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev) rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version, &tpm_crb_ffa->minor_version); if (rc) { - pr_err("failed to get crb interface version. rc:%d", rc); + dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc); goto out; } - pr_info("ABI version %u.%u", tpm_crb_ffa->major_version, + dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version, tpm_crb_ffa->minor_version); if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR || (tpm_crb_ffa->minor_version > 0 && tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) { - pr_err("Incompatible ABI version"); + dev_warn(&ffa_dev->dev, "Incompatible ABI version\n"); goto out; } diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 970d02c337c7..6c3aa480396b 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -54,7 +54,7 @@ enum tis_int_flags { enum tis_defaults { TIS_MEM_LEN = 0x5000, TIS_SHORT_TIMEOUT = 750, /* ms */ - TIS_LONG_TIMEOUT = 2000, /* 2 sec */ + TIS_LONG_TIMEOUT = 4000, /* 4 secs */ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */ }; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 5f04951d0dd4..088182e54deb 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1576,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev, break; case VIRTIO_CONSOLE_RESIZE: { struct { - __u16 rows; - __u16 cols; + __virtio16 cols; + __virtio16 rows; } size; if (!is_console_port(port)) @@ -1585,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev, memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), sizeof(size)); - set_console_size(port, size.rows, size.cols); + set_console_size(port, virtio16_to_cpu(vdev, size.rows), + virtio16_to_cpu(vdev, size.cols)); port->cons.hvc->irq_requested = 1; resize_console(port); diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 014db6386624..8ddf3a9a53df 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c 
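/*
 * [Editor's note: illustrative sketch, not part of the patch above.] The
 * virtio_console resize fix earlier in this hunk matters because the
 * VIRTIO_CONSOLE_RESIZE payload carries the size as two __virtio16 values in
 * device endianness, columns first. A compact reader for that payload, using
 * only helpers already present in the hunk (struct and function names here
 * are illustrative):
 */
struct console_resize_sketch {
	__virtio16 cols;	/* wire order: columns first */
	__virtio16 rows;
};

static void read_resize_sketch(struct virtio_device *vdev,
			       const struct console_resize_sketch *msg,
			       u16 *rows, u16 *cols)
{
	*cols = virtio16_to_cpu(vdev, msg->cols);	/* convert from device endianness */
	*rows = virtio16_to_cpu(vdev, msg->rows);
}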
@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev) if (!clk_data) return -ENOMEM; + clk_data->num = S2MPS11_CLKS_NUM; + switch (hwid) { case S2MPS11X: s2mps11_reg = S2MPS11_REG_RTC_CTRL; @@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev) clk_data->hws[i] = &s2mps11_clks[i].hw; } - clk_data->num = S2MPS11_CLKS_NUM; of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get, clk_data); diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c index 595e010341f7..be703f250197 100644 --- a/drivers/clk/rockchip/clk-rk3576.c +++ b/drivers/clk/rockchip/clk-rk3576.c @@ -541,6 +541,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = { RK3576_CLKGATE_CON(5), 14, GFLAGS), GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0, RK3576_CLKGATE_CON(5), 15, GFLAGS), + GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0, + RK3576_CLKGATE_CON(6), 0, GFLAGS), COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0, RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS, RK3576_CLKGATE_CON(6), 3, GFLAGS), diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index bb66c906ebbb..e83d4fd40240 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { { .hw = &pll_periph0_2x_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); - -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", + mmc0_mmc1_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", + mmc0_mmc1_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static const struct clk_parent_data mmc2_parents[] = { { .fw_name = "hosc" }, @@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = { { .hw = &pll_periph0_800M_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, + 0x838, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, 0x84c, BIT(0), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h index b35aeec70484..bb09c649bfa3 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.h +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -52,6 +52,28 @@ struct ccu_mp { } \ } +#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ + _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _gate, _postdiv, _flags)\ + struct ccu_mp _struct = { \ + .enable = _gate, \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ + .fixed_post_div = _postdiv, \ + .common = 
{ \ + .reg = _reg, \ + .features = CCU_FEATURE_FIXED_POSTDIV, \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ + _parents, \ + &ccu_mp_ops, \ + _flags), \ + } \ + } + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ _mshift, _mwidth, \ _pshift, _pwidth, \ @@ -109,8 +131,7 @@ struct ccu_mp { _mshift, _mwidth, \ _pshift, _pwidth, \ _muxshift, _muxwidth, \ - _gate, _features, \ - _flags) \ + _gate, _flags, _features) \ struct ccu_mp _struct = { \ .enable = _gate, \ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 39f7c2d736d1..b603c25f3dfa 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void) #ifdef CONFIG_CLKEVT_I8253 void clockevent_i8253_disable(void) { - raw_spin_lock(&i8253_lock); + guard(raw_spinlock_irqsave)(&i8253_lock); /* * Writing the MODE register should stop the counter, according to @@ -132,8 +132,6 @@ void clockevent_i8253_disable(void) outb_p(0, PIT_CH0); outb_p(0x30, PIT_MODE); - - raw_spin_unlock(&i8253_lock); } static int pit_shutdown(struct clock_event_device *evt) diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c index cdc842b32bab..75dce1ff2419 100644 --- a/drivers/comedi/drivers/jr3_pci.c +++ b/drivers/comedi/drivers/jr3_pci.c @@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev) struct jr3_pci_dev_private *devpriv = dev->private; if (devpriv) - timer_delete_sync(&devpriv->timer); + timer_shutdown_sync(&devpriv->timer); comedi_pci_detach(dev); } diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 4f9cb943d945..0d46402e3094 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -76,7 +76,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ config ARM_BRCMSTB_AVS_CPUFREQ tristate "Broadcom STB AVS CPUfreq driver" depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST - default y + default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ help Some Broadcom STB SoCs use a co-processor running proprietary firmware ("AVS") to handle voltage and frequency scaling. This driver provides @@ -88,7 +88,7 @@ config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" depends on ARCH_HIGHBANK || COMPILE_TEST depends on CPUFREQ_DT && REGULATOR && PL320_MBOX - default m + default m if ARCH_HIGHBANK help This adds the CPUFreq driver for Calxeda Highbank SoC based boards. @@ -133,7 +133,7 @@ config ARM_MEDIATEK_CPUFREQ config ARM_MEDIATEK_CPUFREQ_HW tristate "MediaTek CPUFreq HW driver" depends on ARCH_MEDIATEK || COMPILE_TEST - default m + default m if ARCH_MEDIATEK help Support for the CPUFreq HW driver. Some MediaTek chipsets have a HW engine to offload the steps @@ -181,7 +181,7 @@ config ARM_RASPBERRYPI_CPUFREQ config ARM_S3C64XX_CPUFREQ bool "Samsung S3C64XX" depends on CPU_S3C6410 || COMPILE_TEST - default y + default CPU_S3C6410 help This adds the CPUFreq driver for Samsung S3C6410 SoC. @@ -190,7 +190,7 @@ config ARM_S3C64XX_CPUFREQ config ARM_S5PV210_CPUFREQ bool "Samsung S5PV210 and S5PC110" depends on CPU_S5PV210 || COMPILE_TEST - default y + default CPU_S5PV210 help This adds the CPUFreq driver for Samsung S5PV210 and S5PC110 SoCs. @@ -214,7 +214,7 @@ config ARM_SCMI_CPUFREQ config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" depends on PLAT_SPEAR || COMPILE_TEST - default y + default PLAT_SPEAR help This adds the CPUFreq driver support for SPEAr SOCs. 
@@ -233,7 +233,7 @@ config ARM_TEGRA20_CPUFREQ tristate "Tegra20/30 CPUFreq support" depends on ARCH_TEGRA || COMPILE_TEST depends on CPUFREQ_DT - default y + default ARCH_TEGRA help This adds the CPUFreq driver support for Tegra20/30 SOCs. @@ -241,7 +241,7 @@ config ARM_TEGRA124_CPUFREQ bool "Tegra124 CPUFreq support" depends on ARCH_TEGRA || COMPILE_TEST depends on CPUFREQ_DT - default y + default ARCH_TEGRA help This adds the CPUFreq driver support for Tegra124 SOCs. @@ -256,14 +256,14 @@ config ARM_TEGRA194_CPUFREQ tristate "Tegra194 CPUFreq support" depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST) depends on TEGRA_BPMP - default y + default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC help This adds CPU frequency driver support for Tegra194 SOCs. config ARM_TI_CPUFREQ bool "Texas Instruments CPUFreq support" depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST - default y + default ARCH_OMAP2PLUS || ARCH_K3 help This driver enables valid OPPs on the running platform based on values contained within the SoC in use. Enable this in order to diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 924314cdeebc..ea4b8f220a05 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -79,11 +79,11 @@ static bool boost_state(unsigned int cpu) case X86_VENDOR_INTEL: case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: - rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); + rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); case X86_VENDOR_HYGON: case X86_VENDOR_AMD: - rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr); + rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr); return !(msr & MSR_K7_HWCR_CPB_DIS); } return false; @@ -110,14 +110,14 @@ static int boost_set_msr(bool enable) return -EINVAL; } - rdmsrl(msr_addr, val); + rdmsrq(msr_addr, val); if (enable) val &= ~msr_mask; else val |= msr_mask; - wrmsrl(msr_addr, val); + wrmsrq(msr_addr, val); return 0; } @@ -909,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) pr_warn(FW_WARN "P-state 0 is not max freq\n"); - if (acpi_cpufreq_driver.set_boost) - policy->boost_supported = true; + if (acpi_cpufreq_driver.set_boost) { + if (policy->boost_supported) { + /* + * The firmware may have altered boost state while the + * CPU was offline (for example during a suspend-resume + * cycle). 
+ */ + if (policy->boost_enabled != boost_state(cpu)) + set_boost(policy, policy->boost_enabled); + } else { + policy->boost_supported = true; + } + } return result; diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index e671bc7d1550..c8d031b297d2 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -31,6 +31,8 @@ #include <acpi/cppc_acpi.h> +#include <asm/msr.h> + #include "amd-pstate.h" @@ -90,9 +92,9 @@ static int amd_pstate_ut_check_enabled(u32 index) if (get_shared_mem()) return 0; - ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable); + ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable); if (ret) { - pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret); + pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret); return ret; } @@ -137,7 +139,7 @@ static int amd_pstate_ut_check_perf(u32 index) lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; lowest_perf = cppc_perf.lowest_perf; } else { - ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); + ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) { pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret); return ret; diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 6789eed1bb5b..66fdc74f13ef 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -197,7 +197,7 @@ static u8 msr_get_epp(struct amd_cpudata *cpudata) u64 value; int ret; - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); if (ret < 0) { pr_debug("Could not retrieve energy perf value (%d)\n", ret); return ret; @@ -258,10 +258,10 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf, return 0; if (fast_switch) { - wrmsrl(MSR_AMD_CPPC_REQ, value); + wrmsrq(MSR_AMD_CPPC_REQ, value); return 0; } else { - int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); if (ret) return ret; @@ -309,7 +309,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp) if (value == prev) return 0; - ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); if (ret) { pr_err("failed to set energy perf value (%d)\n", ret); return ret; @@ -371,7 +371,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp) static inline int msr_cppc_enable(struct cpufreq_policy *policy) { - return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1); + return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1); } static int shmem_cppc_enable(struct cpufreq_policy *policy) @@ -391,7 +391,7 @@ static int msr_init_perf(struct amd_cpudata *cpudata) union perf_cached perf = READ_ONCE(cpudata->perf); u64 cap1, numerator; - int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, + int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) return ret; @@ -518,8 +518,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) unsigned long flags; local_irq_save(flags); - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); tsc = rdtsc(); if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { @@ -607,13 +607,16 @@ static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) union perf_cached perf = READ_ONCE(cpudata->perf); perf.max_limit_perf = 
freq_to_perf(perf, cpudata->nominal_freq, policy->max); - perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min); + WRITE_ONCE(cpudata->max_limit_freq, policy->max); - if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) + if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) { perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf); + WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq)); + } else { + perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min); + WRITE_ONCE(cpudata->min_limit_freq, policy->min); + } - WRITE_ONCE(cpudata->max_limit_freq, policy->max); - WRITE_ONCE(cpudata->min_limit_freq, policy->min); WRITE_ONCE(cpudata->perf, perf); } @@ -769,7 +772,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata) goto exit_err; } - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); if (ret) { pr_err_once("failed to read initial CPU boost state!\n"); ret = -EIO; @@ -788,18 +791,8 @@ exit_err: static void amd_perf_ctl_reset(unsigned int cpu) { - wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); -} - -/* - * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks - * due to locking, so queue the work for later. - */ -static void amd_pstste_sched_prefcore_workfn(struct work_struct *work) -{ - sched_set_itmt_support(); + wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); } -static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn); #define CPPC_MAX_PERF U8_MAX @@ -811,14 +804,8 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) cpudata->hw_prefcore = true; - /* - * The priorities can be set regardless of whether or not - * sched_set_itmt_support(true) has been called and it is valid to - * update them at any time after it has been called. - */ + /* Priorities must be initialized before ITMT support can be toggled on. */ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu); - - schedule_work(&sched_prefcore_work); } static void amd_pstate_update_limits(unsigned int cpu) @@ -844,8 +831,10 @@ static void amd_pstate_update_limits(unsigned int cpu) if (highest_perf_changed) { WRITE_ONCE(cpudata->prefcore_ranking, cur_high); - if (cur_high < CPPC_MAX_PERF) + if (cur_high < CPPC_MAX_PERF) { sched_set_itmt_core_prio((int)cur_high, cpu); + sched_update_asym_prefer_cpu(cpu, prev_high, cur_high); + } } } @@ -1193,6 +1182,9 @@ static ssize_t show_energy_performance_preference( static void amd_pstate_driver_cleanup(void) { + if (amd_pstate_prefcore) + sched_clear_itmt_support(); + cppc_state = AMD_PSTATE_DISABLE; current_pstate_driver = NULL; } @@ -1235,6 +1227,10 @@ static int amd_pstate_register_driver(int mode) return ret; } + /* Enable ITMT support once all CPUs have initialized their asym priorities. 
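/*
 * [Editor's note: illustrative sketch, not part of the patch above.] The
 * amd-pstate hunks in this section drop the deferred schedule_work() call of
 * sched_set_itmt_support() and instead impose a fixed ordering. Condensed,
 * with made-up helper names:
 */
static void prefcore_cpu_init_sketch(int cpu, int prio)
{
	/* 1. per-CPU init: publish each core's asymmetric priority */
	sched_set_itmt_core_prio(prio, cpu);
}

static void prefcore_driver_register_sketch(bool prefcore)
{
	/* 2. once every CPU has a priority: enable ITMT support, once */
	if (prefcore)
		sched_set_itmt_support();
}

static void prefcore_driver_cleanup_sketch(bool prefcore)
{
	/* 3. driver teardown: disable it again */
	if (prefcore)
		sched_clear_itmt_support();
}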
*/ + if (amd_pstate_prefcore) + sched_set_itmt_support(); + return 0; } @@ -1491,7 +1487,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) } if (cpu_feature_enabled(X86_FEATURE_CPPC)) { - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); if (ret) return ret; WRITE_ONCE(cpudata->cppc_req_cached, value); diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index 59b19b9975e8..13fed4b9e02b 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void) pci_dev_put(pcidev); } - if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) + if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) return -ENODEV; if (!(val >> CLASS_CODE_SHIFT)) diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c index 4994c86feb57..b1d29b7af232 100644 --- a/drivers/cpufreq/apple-soc-cpufreq.c +++ b/drivers/cpufreq/apple-soc-cpufreq.c @@ -134,11 +134,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = { static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct apple_cpu_priv *priv = policy->driver_data; + struct cpufreq_policy *policy; + struct apple_cpu_priv *priv; struct cpufreq_frequency_table *p; unsigned int pstate; + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + if (priv->info->cur_pstate_mask) { u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index b3d74f9adcf0..cb93f00bafdb 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -747,7 +747,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) int ret; if (!policy) - return -ENODEV; + return 0; cpu_data = policy->driver_data; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 2aa00769cf09..a010da0f6337 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -175,6 +175,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,sm8350", }, { .compatible = "qcom,sm8450", }, { .compatible = "qcom,sm8550", }, + { .compatible = "qcom,sm8650", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 3841c9da6cac..f45ded62b0e0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -536,16 +536,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); static unsigned int __resolve_freq(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int target_freq, + unsigned int min, unsigned int max, + unsigned int relation) { unsigned int idx; - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (!policy->freq_table) return target_freq; - idx = cpufreq_frequency_table_target(policy, target_freq, relation); + idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); policy->cached_resolved_idx = idx; policy->cached_target_freq = target_freq; return policy->freq_table[idx].frequency; @@ -565,7 +567,21 @@ static 
unsigned int __resolve_freq(struct cpufreq_policy *policy, unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int target_freq) { - return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE); + unsigned int min = READ_ONCE(policy->min); + unsigned int max = READ_ONCE(policy->max); + + /* + * If this function runs in parallel with cpufreq_set_policy(), it may + * read policy->min before the update and policy->max after the update + * or the other way around, so there is no ordering guarantee. + * + * Resolve this by always honoring the max (in case it comes from + * thermal throttling or similar). + */ + if (unlikely(min > max)) + min = max; + + return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); @@ -2384,7 +2400,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, if (cpufreq_disabled()) return -ENODEV; - target_freq = __resolve_freq(policy, target_freq, relation); + target_freq = __resolve_freq(policy, target_freq, policy->min, + policy->max, relation); pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", policy->cpu, target_freq, relation, old_target_freq); @@ -2708,11 +2725,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, * Resolve policy min/max to available frequencies. It ensures * no frequency resolution will neither overshoot the requested maximum * nor undershoot the requested minimum. + * + * Avoid storing intermediate values in policy->max or policy->min and + * compiler optimizations around them because they may be accessed + * concurrently by cpufreq_driver_resolve_freq() during the update. */ - policy->min = new_data.min; - policy->max = new_data.max; - policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); - policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); + WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, + new_data.min, new_data.max, + CPUFREQ_RELATION_H)); + new_data.min = __resolve_freq(policy, new_data.min, new_data.min, + new_data.max, CPUFREQ_RELATION_L); + WRITE_ONCE(policy->min, new_data.min > policy->max ? 
policy->max : new_data.min); + trace_cpu_frequency_limits(policy); cpufreq_update_pressure(policy); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index a7c38b8b3e78..0e65d37c9231 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, return freq_next; } - index = cpufreq_frequency_table_target(policy, freq_next, relation); + index = cpufreq_frequency_table_target(policy, freq_next, policy->min, + policy->max, relation); freq_req = freq_table[index].frequency; freq_reduc = freq_req * od_tuners->powersave_bias / 1000; freq_avg = freq_req - freq_reduc; diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index d23a97ba6478..320a0af2266a 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) return -ENODEV; } /* Enable Enhanced PowerSaver */ - rdmsrl(MSR_IA32_MISC_ENABLE, val); + rdmsrq(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; - wrmsrl(MSR_IA32_MISC_ENABLE, val); + wrmsrq(MSR_IA32_MISC_ENABLE, val); /* Can be locked at 0 */ - rdmsrl(MSR_IA32_MISC_ENABLE, val); + rdmsrq(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { pr_info("Can't enable Enhanced PowerSaver\n"); return -ENODEV; diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 36494b855e41..fc5a58088b35 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -21,7 +21,6 @@ #include <linux/cpufreq.h> #include <asm/cpu_device_id.h> -#include <asm/msr.h> #include <linux/timex.h> #include <linux/io.h> diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index c03a91502f84..35de513af6c9 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -115,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, unsigned int min, + unsigned int max, unsigned int relation) { struct cpufreq_frequency_table optimal = { .driver_data = ~0, @@ -147,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, cpufreq_for_each_valid_entry_idx(pos, table, i) { freq = pos->frequency; - if ((freq < policy->min) || (freq > policy->max)) + if (freq < min || freq > max) continue; if (freq == target_freq) { optimal.driver_data = i; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4aad79d26c64..db8c99535e61 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -598,7 +598,10 @@ static bool turbo_is_disabled(void) { u64 misc_en; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + if (!cpu_feature_enabled(X86_FEATURE_IDA)) + return true; + + rdmsrq(MSR_IA32_MISC_ENABLE, misc_en); return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } @@ -620,7 +623,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data) if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENXIO; - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret) return (s16)ret; @@ -637,7 +640,7 @@ static s16 
intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) * MSR_HWP_REQUEST, so need to read and get EPP. */ if (!hwp_req_data) { - epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, + epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &hwp_req_data); if (epp) return epp; @@ -659,12 +662,12 @@ static int intel_pstate_set_epb(int cpu, s16 pref) if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENXIO; - ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret) return ret; epb = (epb & ~0x0f) | pref; - wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); + wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); return 0; } @@ -762,7 +765,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) * function, so it cannot run in parallel with the update below. */ WRITE_ONCE(cpu->hwp_req_cached, value); - ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); if (!ret) cpu->epp_cached = epp; @@ -916,7 +919,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) if (ratio <= 0) { u64 cap; - rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); + rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); ratio = HWP_GUARANTEED_PERF(cap); } @@ -1088,7 +1091,7 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) { u64 cap; - rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); + rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(cpu->hwp_cap_cached, cap); cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); @@ -1162,7 +1165,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) min = max; - rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); + rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value); value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); @@ -1209,7 +1212,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) } skip_epp: WRITE_ONCE(cpu_data->hwp_req_cached, value); - wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value); } static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); @@ -1256,7 +1259,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) if (boot_cpu_has(X86_FEATURE_HWP_EPP)) value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); mutex_lock(&hybrid_capacity_lock); @@ -1285,7 +1288,7 @@ static void set_power_ctl_ee_state(bool input) u64 power_ctl; mutex_lock(&intel_pstate_driver_lock); - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); if (input) { power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); power_ctl_ee_state = POWER_CTL_EE_ENABLE; @@ -1293,7 +1296,7 @@ static void set_power_ctl_ee_state(bool input) power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); power_ctl_ee_state = POWER_CTL_EE_DISABLE; } - wrmsrl(MSR_IA32_POWER_CTL, power_ctl); + wrmsrq(MSR_IA32_POWER_CTL, power_ctl); mutex_unlock(&intel_pstate_driver_lock); } @@ -1302,7 +1305,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata); static void intel_pstate_hwp_reenable(struct cpudata *cpu) { intel_pstate_hwp_enable(cpu); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); } static int intel_pstate_suspend(struct cpufreq_policy *policy) @@ -1703,7 +1706,7 @@ 
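/*
 * [Editor's note: illustrative sketch, not part of the patch above.] Much of
 * the cpufreq churn in this section is a mechanical rename of the x86 MSR
 * accessors from the "l" suffix to "q" (quadword); arguments and semantics
 * stay the same. For example (hypothetical wrappers, real accessor names):
 */
static u64 read_misc_enable_sketch(void)
{
	u64 val;

	rdmsrq(MSR_IA32_MISC_ENABLE, val);			/* was: rdmsrl(...) */
	return val;
}

static int write_hwp_request_sketch(int cpu, u64 value)
{
	return wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);	/* was: wrmsrl_on_cpu(...) */
}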
static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut u64 power_ctl; int enable; - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); return sprintf(buf, "%d\n", !enable); } @@ -1855,7 +1858,7 @@ static void intel_pstate_notify_work(struct work_struct *work) hybrid_update_capacity(cpudata); } - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } static DEFINE_RAW_SPINLOCK(hwp_notify_lock); @@ -1877,7 +1880,7 @@ void notify_hwp_interrupt(void) if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS; - rdmsrl_safe(MSR_HWP_STATUS, &value); + rdmsrq_safe(MSR_HWP_STATUS, &value); if (!(value & status_mask)) return; @@ -1894,7 +1897,7 @@ void notify_hwp_interrupt(void) return; ack_intr: - wrmsrl_safe(MSR_HWP_STATUS, 0); + wrmsrq_safe(MSR_HWP_STATUS, 0); raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); } @@ -1905,8 +1908,8 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; - /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); raw_spin_lock_irq(&hwp_notify_lock); cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); @@ -1933,9 +1936,9 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; - /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } } @@ -1974,9 +1977,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) { /* First disable HWP notification interrupt till we activate again */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); + wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); intel_pstate_enable_hwp_interrupt(cpudata); @@ -1990,7 +1993,7 @@ static int atom_get_min_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_RATIOS, value); return (value >> 8) & 0x7F; } @@ -1998,7 +2001,7 @@ static int atom_get_max_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_RATIOS, value); return (value >> 16) & 0x7F; } @@ -2006,7 +2009,7 @@ static int atom_get_turbo_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value); return value & 0x7F; } @@ -2041,7 +2044,7 @@ static int silvermont_get_scaling(void) static int silvermont_freq_table[] = { 83300, 100000, 133300, 116700, 80000}; - rdmsrl(MSR_FSB_FREQ, value); + rdmsrq(MSR_FSB_FREQ, value); i = value & 0x7; WARN_ON(i > 4); @@ -2057,7 +2060,7 @@ static int airmont_get_scaling(void) 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500}; - rdmsrl(MSR_FSB_FREQ, 
value); + rdmsrq(MSR_FSB_FREQ, value); i = value & 0xF; WARN_ON(i > 8); @@ -2068,7 +2071,7 @@ static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(MSR_ATOM_CORE_VIDS, value); + rdmsrq(MSR_ATOM_CORE_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -2076,7 +2079,7 @@ static void atom_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); + rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -2084,7 +2087,7 @@ static int core_get_min_pstate(int cpu) { u64 value; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 40) & 0xFF; } @@ -2092,7 +2095,7 @@ static int core_get_max_pstate_physical(int cpu) { u64 value; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 8) & 0xFF; } @@ -2106,13 +2109,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info) int err; /* Get the TDP level (0, 1, 2) to get ratios */ - err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); + err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); if (err) return err; /* TDP MSR are continuous starting at 0x648 */ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); - err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); + err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); if (err) return err; @@ -2137,7 +2140,7 @@ static int core_get_max_pstate(int cpu) int tdp_ratio; int err; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); max_pstate = (plat_info >> 8) & 0xFF; tdp_ratio = core_get_tdp_ratio(cpu, plat_info); @@ -2149,7 +2152,7 @@ static int core_get_max_pstate(int cpu) return tdp_ratio; } - err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); + err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); if (!err) { int tar_levels; @@ -2169,7 +2172,7 @@ static int core_get_turbo_pstate(int cpu) u64 value; int nont, ret; - rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); nont = core_get_max_pstate(cpu); ret = (value) & 255; if (ret <= nont) @@ -2198,7 +2201,7 @@ static int knl_get_turbo_pstate(int cpu) u64 value; int nont, ret; - rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); nont = core_get_max_pstate(cpu); ret = (((value) >> 8) & 0xFF); if (ret <= nont) @@ -2209,7 +2212,7 @@ static int knl_get_turbo_pstate(int cpu) static int hwp_get_cpu_scaling(int cpu) { if (hybrid_scaling_factor) { - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); + struct cpuinfo_x86 *c = &cpu_data(cpu); u8 cpu_type = c->topo.intel_type; /* @@ -2244,7 +2247,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) * the CPU being updated, so force the register update to run on the * right CPU. 
*/ - wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } @@ -2351,7 +2354,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) return; hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; - wrmsrl(MSR_HWP_REQUEST, hwp_req); + wrmsrq(MSR_HWP_REQUEST, hwp_req); cpu->last_update = cpu->sample.time; } @@ -2364,7 +2367,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) expired = time_after64(cpu->sample.time, cpu->last_update + hwp_boost_hold_time_ns); if (expired) { - wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); + wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached); cpu->hwp_boost_min = 0; } } @@ -2425,8 +2428,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) u64 tsc; local_irq_save(flags); - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); tsc = rdtsc(); if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { local_irq_restore(flags); @@ -2520,7 +2523,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) return; cpu->pstate.current_pstate = pstate; - wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } static void intel_pstate_adjust_pstate(struct cpudata *cpu) @@ -3100,19 +3103,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, WRITE_ONCE(cpu->hwp_req_cached, value); if (fast_switch) - wrmsrl(MSR_HWP_REQUEST, value); + wrmsrq(MSR_HWP_REQUEST, value); else - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); } static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, u32 target_pstate, bool fast_switch) { if (fast_switch) - wrmsrl(MSR_IA32_PERF_CTL, + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); else - wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); } @@ -3256,7 +3259,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) intel_pstate_get_hwp_cap(cpu); - rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); + rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); WRITE_ONCE(cpu->hwp_req_cached, value); cpu->epp_cached = intel_pstate_get_epp(cpu, value); @@ -3323,7 +3326,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy) * written by it may not be suitable. 
*/ value &= ~HWP_DESIRED_PERF(~0L); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); WRITE_ONCE(cpu->hwp_req_cached, value); } @@ -3573,7 +3576,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { - rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); + rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr); if (misc_pwr & BITMASK_OOB) { pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); @@ -3629,7 +3632,7 @@ static bool intel_pstate_hwp_is_enabled(void) { u64 value; - rdmsrl(MSR_PM_ENABLE, value); + rdmsrq(MSR_PM_ENABLE, value); return !!(value & 0x1); } diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 68ccd73c8129..ba0e08c8486a 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; - rdmsrl(MSR_VIA_BCR2, bcr2.val); + rdmsrq(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; bcr2.bits.CLOCKMUL = mults_index & 0xff; @@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index) /* Sync to timer tick */ safe_halt(); /* Change frequency on next halt or sleep */ - wrmsrl(MSR_VIA_BCR2, bcr2.val); + wrmsrq(MSR_VIA_BCR2, bcr2.val); /* Invoke transition */ ACPI_FLUSH_CPU_CACHE(); halt(); /* Disable software clock multiplier */ local_irq_disable(); - rdmsrl(MSR_VIA_BCR2, bcr2.val); + rdmsrq(MSR_VIA_BCR2, bcr2.val); bcr2.bits.ESOFTBF = 0; - wrmsrl(MSR_VIA_BCR2, bcr2.val); + wrmsrq(MSR_VIA_BCR2, bcr2.val); } /* For processor with Longhaul MSR */ @@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, union msr_longhaul longhaul; u32 t; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ if (!revid_errata) longhaul.bits.RevisionKey = longhaul.bits.RevisionID; @@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, /* Raise voltage if necessary */ if (can_scale_voltage && dir) { longhaul.bits.EnableSoftVID = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); @@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index, t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); } /* Change frequency on next halt or sleep */ longhaul.bits.EnableSoftBusRatio = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); @@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index, } /* Disable bus ratio bit */ longhaul.bits.EnableSoftBusRatio = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce voltage if necessary */ if (can_scale_voltage && !dir) { longhaul.bits.EnableSoftVID = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); @@ -231,7 +231,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + 
wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); } } @@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void) unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); if (!(longhaul.bits.RevisionID & 1)) { pr_info("Voltage scaling not supported by CPU\n"); return; diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index fb2197dc170f..31039330a3ba 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -219,13 +219,13 @@ static void change_FID(int fid) { union msr_fidvidctl fidvidctl; - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.FID != fid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.FID = fid; fidvidctl.bits.VIDC = 0; fidvidctl.bits.FIDC = 1; - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -234,13 +234,13 @@ static void change_VID(int vid) { union msr_fidvidctl fidvidctl; - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.VID != vid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.VID = vid; fidvidctl.bits.FIDC = 0; fidvidctl.bits.VIDC = 1; - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) fid = powernow_table[index].driver_data & 0xFF; vid = (powernow_table[index].driver_data & 0xFF00) >> 8; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; @@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu) if (cpu) return 0; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; return fsb * fid_codes[cfid] / 10; @@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) if (policy->cpu != 0) return -ENODEV; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); recalibrate_cpu_khz(); diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index 103d2519dff7..b360f03a116f 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -21,7 +21,6 @@ #include <linux/io.h> #include <asm/cpu_device_id.h> -#include <asm/msr.h> #define MMCR_BASE 0xfffef000 /* The default base address */ #define OFFS_CPUCTL 0x2 /* CPU Control Register */ diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index c310aeebc8f3..944e899eb1be 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -37,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver; static unsigned int scmi_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct scmi_data *priv = policy->driver_data; + struct cpufreq_policy *policy; + struct scmi_data *priv; unsigned long rate; int ret; + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false); if (ret) return 0; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 17cda84f00df..dcbb0ae7dd47 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -29,9 +29,16 @@ 
static struct scpi_ops *scpi_ops; static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct scpi_data *priv = policy->driver_data; - unsigned long rate = clk_get_rate(priv->clk); + struct cpufreq_policy *policy; + struct scpi_data *priv; + unsigned long rate; + + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + rate = clk_get_rate(priv->clk); return rate / 1000; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 47d6840b3489..744312a44279 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -194,7 +194,9 @@ static int sun50i_cpufreq_get_efuse(void) struct nvmem_cell *speedbin_nvmem; const struct of_device_id *match; struct device *cpu_dev; - u32 *speedbin; + void *speedbin_ptr; + u32 speedbin = 0; + size_t len; int ret; cpu_dev = get_cpu_device(0); @@ -217,14 +219,18 @@ static int sun50i_cpufreq_get_efuse(void) return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), "Could not get nvmem cell\n"); - speedbin = nvmem_cell_read(speedbin_nvmem, NULL); + speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len); nvmem_cell_put(speedbin_nvmem); - if (IS_ERR(speedbin)) - return PTR_ERR(speedbin); + if (IS_ERR(speedbin_ptr)) + return PTR_ERR(speedbin_ptr); - ret = opp_data->efuse_xlate(*speedbin); + if (len <= 4) + memcpy(&speedbin, speedbin_ptr, len); + speedbin = le32_to_cpu(speedbin); - kfree(speedbin); + ret = opp_data->efuse_xlate(speedbin); + + kfree(speedbin_ptr); return ret; }; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 47082782008a..5686369779be 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -530,13 +530,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig" source "drivers/crypto/marvell/Kconfig" source "drivers/crypto/intel/Kconfig" -config CRYPTO_DEV_CAVIUM_ZIP - tristate "Cavium ZIP driver" - depends on PCI && 64BIT && (ARM64 || COMPILE_TEST) - help - Select this option if you want to enable compression/decompression - acceleration on Cavium's ARM based SoCs - config CRYPTO_DEV_QCE tristate "Qualcomm crypto engine accelerator" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c97f0ebc55ec..22eadcc8f4a2 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o -obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ -obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ -obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o @@ -50,3 +47,4 @@ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ obj-y += starfive/ +obj-y += cavium/ diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 19b7fb4a93e8..f9cf00d690e2 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -33,22 +33,30 @@ static int sun8i_ce_cipher_need_fallback(struct 
skcipher_request *areq) if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG || sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) { - algt->stat_fb_maxsg++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_maxsg++; + return true; } if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) { - algt->stat_fb_leniv++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_leniv++; + return true; } if (areq->cryptlen == 0) { - algt->stat_fb_len0++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_len0++; + return true; } if (areq->cryptlen % 16) { - algt->stat_fb_mod16++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_mod16++; + return true; } @@ -56,12 +64,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) sg = areq->src; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - algt->stat_fb_srcali++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srcali++; + return true; } todo = min(len, sg->length); if (todo % 4) { - algt->stat_fb_srclen++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srclen++; + return true; } len -= todo; @@ -72,12 +84,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) sg = areq->dst; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - algt->stat_fb_dstali++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_dstali++; + return true; } todo = min(len, sg->length); if (todo % 4) { - algt->stat_fb_dstlen++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_dstlen++; + return true; } len -= todo; @@ -100,9 +116,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_fb++; -#endif } skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); @@ -146,9 +160,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), op->keylen); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_req++; -#endif + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_req++; flow = rctx->flow; @@ -275,13 +288,16 @@ theend_sgs: } else { if (nr_sgs > 0) dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); + + if (nr_sgd > 0) + dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); } theend_iv: if (areq->iv && ivsize > 0) { - if (rctx->addr_iv) + if (!dma_mapping_error(ce->dev, rctx->addr_iv)) dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); + offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { memcpy(areq->iv, chan->backup_iv, ivsize); @@ -434,17 +450,17 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm)); - memcpy(algt->fbname, - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), - CRYPTO_MAX_ALG_NAME); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + memcpy(algt->fbname, + crypto_skcipher_driver_name(op->fallback_tfm), + CRYPTO_MAX_ALG_NAME); - err = pm_runtime_get_sync(op->ce->dev); + err = pm_runtime_resume_and_get(op->ce->dev); if (err < 0) goto error_pm; return 0; error_pm: - pm_runtime_put_noidle(op->ce->dev); crypto_free_skcipher(op->fallback_tfm); return err; } diff --git 
a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index ec1ffda9ea32..658f520cee0c 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce) err = pm_runtime_set_suspended(ce->dev); if (err) return err; - pm_runtime_enable(ce->dev); - return err; -} -static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce) -{ - pm_runtime_disable(ce->dev); + err = devm_pm_runtime_enable(ce->dev); + if (err) + return err; + + return 0; } static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce) @@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev) "sun8i-ce-ns", ce); if (err) { dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err); - goto error_irq; + goto error_pm; } err = sun8i_ce_register_algs(ce); @@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev) return 0; error_alg: sun8i_ce_unregister_algs(ce); -error_irq: - sun8i_ce_pm_exit(ce); error_pm: sun8i_ce_free_chanlist(ce, MAXFLOW - 1); return err; @@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev) #endif sun8i_ce_free_chanlist(ce, MAXFLOW - 1); - - sun8i_ce_pm_exit(ce); } static const struct of_device_id sun8i_ce_crypto_of_match_table[] = { diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 6072dd9f390b..bef44f350167 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -23,6 +23,18 @@ #include <linux/string.h> #include "sun8i-ce.h" +static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm) +{ + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct sun8i_ce_alg_template *algt __maybe_unused; + struct ahash_alg *alg = crypto_ahash_alg(tfm); + + algt = container_of(alg, struct sun8i_ce_alg_template, + alg.hash.base); + algt->stat_fb++; + } +} + int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm) { struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); @@ -48,15 +60,16 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm) sizeof(struct sun8i_ce_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); - memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), - CRYPTO_MAX_ALG_NAME); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + memcpy(algt->fbname, + crypto_ahash_driver_name(op->fallback_tfm), + CRYPTO_MAX_ALG_NAME); - err = pm_runtime_get_sync(op->ce->dev); + err = pm_runtime_resume_and_get(op->ce->dev); if (err < 0) goto error_pm; return 0; error_pm: - pm_runtime_put_noidle(op->ce->dev); crypto_free_ahash(op->fallback_tfm); return err; } @@ -78,7 +91,9 @@ int sun8i_ce_hash_init(struct ahash_request *areq) memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx)); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -90,7 +105,9 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out) struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + 
ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } @@ -102,7 +119,9 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in) struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -113,21 +132,13 @@ int sun8i_ce_hash_final(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = areq->result; - - if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { - struct sun8i_ce_alg_template *algt __maybe_unused; - struct ahash_alg *alg = crypto_ahash_alg(tfm); + sun8i_ce_hash_stat_fb_inc(tfm); - algt = container_of(alg, struct sun8i_ce_alg_template, - alg.hash.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_fb++; -#endif - } + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -139,10 +150,10 @@ int sun8i_ce_hash_update(struct ahash_request *areq) struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -153,24 +164,14 @@ int sun8i_ce_hash_finup(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + sun8i_ce_hash_stat_fb_inc(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { - struct sun8i_ce_alg_template *algt __maybe_unused; - struct ahash_alg *alg = crypto_ahash_alg(tfm); - - algt = container_of(alg, struct sun8i_ce_alg_template, - alg.hash.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_fb++; -#endif - } + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -181,24 +182,14 @@ static int 
sun8i_ce_hash_digest_fb(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; - - if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { - struct sun8i_ce_alg_template *algt __maybe_unused; - struct ahash_alg *alg = crypto_ahash_alg(tfm); + sun8i_ce_hash_stat_fb_inc(tfm); - algt = container_of(alg, struct sun8i_ce_alg_template, - alg.hash.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_fb++; -#endif - } + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); return crypto_ahash_digest(&rctx->fallback_req); } @@ -213,22 +204,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq) algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); if (areq->nbytes == 0) { - algt->stat_fb_len0++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_len0++; + return true; } /* we need to reserve one SG for padding one */ if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) { - algt->stat_fb_maxsg++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_maxsg++; + return true; } sg = areq->src; while (sg) { if (sg->length % 4) { - algt->stat_fb_srclen++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srclen++; + return true; } if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - algt->stat_fb_srcali++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srcali++; + return true; } sg = sg_next(sg); @@ -244,21 +243,11 @@ int sun8i_ce_hash_digest(struct ahash_request *areq) struct sun8i_ce_alg_template *algt; struct sun8i_ce_dev *ce; struct crypto_engine *engine; - struct scatterlist *sg; - int nr_sgs, e, i; + int e; if (sun8i_ce_hash_need_fallback(areq)) return sun8i_ce_hash_digest_fb(areq); - nr_sgs = sg_nents_for_len(areq->src, areq->nbytes); - if (nr_sgs > MAX_SG - 1) - return sun8i_ce_hash_digest_fb(areq); - - for_each_sg(areq->src, sg, nr_sgs, i) { - if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) - return sun8i_ce_hash_digest_fb(areq); - } - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); ce = algt->ce; @@ -343,9 +332,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) u32 common; u64 byte_count; __le32 *bf; - void *buf = NULL; + void *buf, *result; int j, i, todo; - void *result = NULL; u64 bs; int digestsize; dma_addr_t addr_res, addr_pad; @@ -365,22 +353,22 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA); if (!buf) { err = -ENOMEM; - goto theend; + goto err_out; } bf = (__le32 *)buf; result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); if (!result) { err = -ENOMEM; - goto theend; + goto err_free_buf; } flow = rctx->flow; chan = &ce->chanlist[flow]; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_req++; -#endif + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_req++; + dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes); cet 
= chan->tl; @@ -398,7 +386,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; - goto theend; + goto err_free_result; } len = areq->nbytes; @@ -411,7 +399,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (len > 0) { dev_err(ce->dev, "remaining len %d\n", len); err = -EINVAL; - goto theend; + goto err_unmap_src; } addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE); cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res); @@ -419,7 +407,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ce->dev, addr_res)) { dev_err(ce->dev, "DMA map dest\n"); err = -EINVAL; - goto theend; + goto err_unmap_src; } byte_count = areq->nbytes; @@ -441,7 +429,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) } if (!j) { err = -EINVAL; - goto theend; + goto err_unmap_result; } addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); @@ -450,7 +438,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ce->dev, addr_pad)) { dev_err(ce->dev, "DMA error on padding SG\n"); err = -EINVAL; - goto theend; + goto err_unmap_result; } if (ce->variant->hash_t_dlen_in_bits) @@ -463,16 +451,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + +err_unmap_result: dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); + if (!err) + memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); +err_unmap_src: + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); -theend: - kfree(buf); +err_free_result: kfree(result); + +err_free_buf: + kfree(buf); + +err_out: local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); + return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 3b5c2af013d0..83df4d719053 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx { * @flow: the flow to use for this request */ struct sun8i_ce_hash_reqctx { - struct ahash_request fallback_req; int flow; + struct ahash_request fallback_req; // keep at the end }; /* diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 9b9605ce8ee6..8831bcb230c2 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) /* we need to copy all IVs from source in case DMA is bi-directionnal */ while (sg && len) { - if (sg_dma_len(sg) == 0) { + if (sg->length == 0) { sg = sg_next(sg); continue; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 753f67a36dc5..8bc08089f044 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -150,7 +150,9 @@ int sun8i_ss_hash_init(struct ahash_request *areq) memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx)); ahash_request_set_tfm(&rctx->fallback_req, 
tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -162,7 +164,9 @@ int sun8i_ss_hash_export(struct ahash_request *areq, void *out) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } @@ -174,7 +178,9 @@ int sun8i_ss_hash_import(struct ahash_request *areq, const void *in) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -186,9 +192,10 @@ int sun8i_ss_hash_final(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); @@ -212,10 +219,10 @@ int sun8i_ss_hash_update(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -227,12 +234,11 @@ int sun8i_ss_hash_finup(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); @@ -256,12 +262,11 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - 
CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index e0af611a95d8..38e8a61e9166 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -12,9 +12,6 @@ #include <linux/interrupt.h> #include <linux/spinlock_types.h> #include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <linux/hash.h> -#include <crypto/internal/hash.h> #include <linux/dma-mapping.h> #include <crypto/algapi.h> #include <crypto/aead.h> @@ -72,7 +69,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req, { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); - __le32 iv[AES_IV_SIZE]; + __le32 iv[AES_IV_SIZE / 4]; if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) return -EINVAL; @@ -429,7 +426,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); - __le32 iv[16]; + __le32 iv[4]; u32 tmp_sa[SA_AES128_CCM_LEN + 4]; struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa; unsigned int len = req->cryptlen; @@ -602,106 +599,3 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req) { return crypto4xx_crypt_aes_gcm(req, true); } - -/* - * HASH SHA1 Functions - */ -static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, - unsigned int sa_len, - unsigned char ha, - unsigned char hm) -{ - struct crypto_alg *alg = tfm->__crt_alg; - struct crypto4xx_alg *my_alg; - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); - struct dynamic_sa_hash160 *sa; - int rc; - - my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg, - alg.u.hash); - ctx->dev = my_alg->dev; - - /* Create SA */ - if (ctx->sa_in || ctx->sa_out) - crypto4xx_free_sa(ctx); - - rc = crypto4xx_alloc_sa(ctx, sa_len); - if (rc) - return rc; - - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct crypto4xx_ctx)); - sa = (struct dynamic_sa_hash160 *)ctx->sa_in; - set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV, - SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, - SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL, - SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, - SA_OPCODE_HASH, DIR_INBOUND); - set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH, - CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, - SA_SEQ_MASK_OFF, SA_MC_ENABLE, - SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, - SA_NOT_COPY_HDR); - /* Need to zero hash digest in SA */ - memset(sa->inner_digest, 0, sizeof(sa->inner_digest)); - memset(sa->outer_digest, 0, sizeof(sa->outer_digest)); - - return 0; -} - -int crypto4xx_hash_init(struct ahash_request *req) -{ - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - int ds; - struct dynamic_sa_ctl *sa; - - sa = ctx->sa_in; - ds = crypto_ahash_digestsize( - __crypto_ahash_cast(req->base.tfm)); - sa->sa_command_0.bf.digest_len = ds >> 2; - sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; - - 
return 0; -} - -int crypto4xx_hash_update(struct ahash_request *req) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct scatterlist dst; - unsigned int ds = crypto_ahash_digestsize(ahash); - - sg_init_one(&dst, req->result, ds); - - return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, - req->nbytes, NULL, 0, ctx->sa_in, - ctx->sa_len, 0, NULL); -} - -int crypto4xx_hash_final(struct ahash_request *req) -{ - return 0; -} - -int crypto4xx_hash_digest(struct ahash_request *req) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct scatterlist dst; - unsigned int ds = crypto_ahash_digestsize(ahash); - - sg_init_one(&dst, req->result, ds); - - return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, - req->nbytes, NULL, 0, ctx->sa_in, - ctx->sa_len, 0, NULL); -} - -/* - * SHA1 Algorithm - */ -int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) -{ - return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, - SA_HASH_MODE_HASH); -} diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index ec3ccfa60445..8cdc66d520c9 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -485,18 +485,6 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, } } -static void crypto4xx_copy_digest_to_dst(void *dst, - struct pd_uinfo *pd_uinfo, - struct crypto4xx_ctx *ctx) -{ - struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; - - if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { - memcpy(dst, pd_uinfo->sr_va->save_digest, - SA_HASH_ALG_SHA1_DIGEST_SIZE); - } -} - static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo) { @@ -549,23 +537,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, skcipher_request_complete(req, 0); } -static void crypto4xx_ahash_done(struct crypto4xx_device *dev, - struct pd_uinfo *pd_uinfo) -{ - struct crypto4xx_ctx *ctx; - struct ahash_request *ahash_req; - - ahash_req = ahash_request_cast(pd_uinfo->async_req); - ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req)); - - crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx); - crypto4xx_ret_sg_desc(dev, pd_uinfo); - - if (pd_uinfo->state & PD_ENTRY_BUSY) - ahash_request_complete(ahash_req, -EINPROGRESS); - ahash_request_complete(ahash_req, 0); -} - static void crypto4xx_aead_done(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo, struct ce_pd *pd) @@ -642,9 +613,6 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) case CRYPTO_ALG_TYPE_AEAD: crypto4xx_aead_done(dev, pd_uinfo, pd); break; - case CRYPTO_ALG_TYPE_AHASH: - crypto4xx_ahash_done(dev, pd_uinfo); - break; } } @@ -676,7 +644,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, struct scatterlist *src, struct scatterlist *dst, const unsigned int datalen, - const __le32 *iv, const u32 iv_len, + const void *iv, const u32 iv_len, const struct dynamic_sa_ctl *req_sa, const unsigned int sa_len, const unsigned int assoclen, @@ -912,8 +880,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, } pd->pd_ctl.w = PD_CTL_HOST_READY | - ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) || - (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? + ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? 
PD_CTL_HASH_FINAL : 0); pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen); pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0); @@ -1019,10 +986,6 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, rc = crypto_register_aead(&alg->alg.u.aead); break; - case CRYPTO_ALG_TYPE_AHASH: - rc = crypto_register_ahash(&alg->alg.u.hash); - break; - case CRYPTO_ALG_TYPE_RNG: rc = crypto_register_rng(&alg->alg.u.rng); break; @@ -1048,10 +1011,6 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { list_del(&alg->entry); switch (alg->alg.type) { - case CRYPTO_ALG_TYPE_AHASH: - crypto_unregister_ahash(&alg->alg.u.hash); - break; - case CRYPTO_ALG_TYPE_AEAD: crypto_unregister_aead(&alg->alg.u.aead); break; diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 3adcc5e65694..ee36630c670f 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -16,7 +16,6 @@ #include <linux/ratelimit.h> #include <linux/mutex.h> #include <linux/scatterlist.h> -#include <crypto/internal/hash.h> #include <crypto/internal/aead.h> #include <crypto/internal/rng.h> #include <crypto/internal/skcipher.h> @@ -135,7 +134,6 @@ struct crypto4xx_alg_common { u32 type; union { struct skcipher_alg cipher; - struct ahash_alg hash; struct aead_alg aead; struct rng_alg rng; } u; @@ -147,6 +145,12 @@ struct crypto4xx_alg { struct crypto4xx_device *dev; }; +#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000 +#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7))) +#else +#define BUILD_PD_ACCESS +#endif + int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); int crypto4xx_build_pd(struct crypto_async_request *req, @@ -154,11 +158,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req, struct scatterlist *src, struct scatterlist *dst, const unsigned int datalen, - const __le32 *iv, const u32 iv_len, + const void *iv, const u32 iv_len, const struct dynamic_sa_ctl *sa, const unsigned int sa_len, const unsigned int assoclen, - struct scatterlist *dst_tmp); + struct scatterlist *dst_tmp) BUILD_PD_ACCESS; int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher, @@ -177,11 +181,6 @@ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req); int crypto4xx_decrypt_noiv_block(struct skcipher_request *req); int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); -int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); -int crypto4xx_hash_digest(struct ahash_request *req); -int crypto4xx_hash_final(struct ahash_request *req); -int crypto4xx_hash_update(struct ahash_request *req); -int crypto4xx_hash_init(struct ahash_request *req); /* * Note: Only use this function to copy items that is word aligned. 
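The crypto4xx_core.h hunk above introduces a BUILD_PD_ACCESS macro so that, when built with GCC 12 or newer, the crypto4xx_build_pd() declaration tells the compiler that the iv pointer (argument 6) is only read and that its length comes from iv_len (argument 7). Below is a minimal, self-contained sketch of that compiler feature; it is not part of the patch, and the names READS_BUF, consume_iv and caller are illustrative only.

#if defined(__GNUC__) && __GNUC__ >= 12
#define READS_BUF(ptr_idx, len_idx) \
	__attribute__((access(read_only, ptr_idx, len_idx)))
#else
#define READS_BUF(ptr_idx, len_idx)
#endif

/* Argument 1 is read-only; its size in bytes is supplied by argument 2. */
void consume_iv(const void *iv, unsigned int iv_len) READS_BUF(1, 2);

void caller(void)
{
	unsigned char iv[4];

	consume_iv(iv, sizeof(iv));	/* length matches the object: no warning */
	consume_iv(iv, 16);		/*
					 * GCC 12+ with -Wall can flag this as a
					 * 16-byte read from a 4-byte object.
					 */
}

The kernel gates the attribute on CONFIG_CC_IS_GCC and CONFIG_GCC_VERSION; the sketch approximates that with a plain __GNUC__ check, which is an assumption for portability outside the kernel build system.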
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 14bf86957d31..27c5d000b4b2 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1743,7 +1743,8 @@ static struct skcipher_alg aes_xts_alg = { .base.cra_driver_name = "atmel-xts-aes", .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx), - .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, @@ -2220,7 +2221,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) static void atmel_aes_crypto_alg_init(struct crypto_alg *alg) { - alg->cra_flags |= CRYPTO_ALG_ASYNC; + alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->cra_alignmask = 0xf; alg->cra_priority = ATMEL_AES_PRIORITY; alg->cra_module = THIS_MODULE; diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 67a170608566..2cc36da163e8 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1254,7 +1254,8 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm) static void atmel_sha_alg_init(struct ahash_alg *alg) { alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; - alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx); alg->halg.base.cra_module = THIS_MODULE; alg->halg.base.cra_init = atmel_sha_cra_init; @@ -2041,7 +2042,8 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm) static void atmel_sha_hmac_alg_init(struct ahash_alg *alg) { alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; - alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx); alg->halg.base.cra_module = THIS_MODULE; alg->halg.base.cra_init = atmel_sha_hmac_cra_init; diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 75bebec2c757..0fcf4a39de27 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client) i2c_priv->hwrng.name = dev_name(&client->dev); i2c_priv->hwrng.read = atmel_sha204a_rng_read; + /* + * According to review by Bill Cox [1], this HWRNG has very low entropy. 
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html + */ + i2c_priv->hwrng.quality = 1; + ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng); if (ret) dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index de9717e221e4..098f5532f389 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -785,7 +785,7 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm) static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg) { alg->base.cra_priority = ATMEL_TDES_PRIORITY; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx); alg->base.cra_module = THIS_MODULE; diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index d4b39184dbdb..38ff931059b4 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -573,6 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, { .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data }, + { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data }, { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 7701d00bcb3a..b6e7c0b29d4e 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) qm_fd_addr_set64(&fd, addr); do { + refcount_inc(&req->drv_ctx->refcnt); ret = qman_enqueue(req->drv_ctx->req_fq, &fd); - if (likely(!ret)) { - refcount_inc(&req->drv_ctx->refcnt); + if (likely(!ret)) return 0; - } + refcount_dec(&req->drv_ctx->refcnt); if (ret != -EBUSY) break; num_retries++; diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile index 4679c06b611f..75227c587ed0 100644 --- a/drivers/crypto/cavium/Makefile +++ b/drivers/crypto/cavium/Makefile @@ -2,4 +2,5 @@ # # Makefile for Cavium crypto device drivers # -obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/ +obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/ +obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/ diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile deleted file mode 100644 index 020d189d793d..000000000000 --- a/drivers/crypto/cavium/zip/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for Cavium's ZIP Driver. -# - -obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o -thunderx_zip-y := zip_main.o \ - zip_device.o \ - zip_crypto.o \ - zip_mem.o \ - zip_deflate.o \ - zip_inflate.o diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h deleted file mode 100644 index 54f6fb054119..000000000000 --- a/drivers/crypto/cavium/zip/common.h +++ /dev/null @@ -1,222 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. 
- * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __COMMON_H__ -#define __COMMON_H__ - -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/seq_file.h> -#include <linux/string.h> -#include <linux/types.h> - -/* Device specific zlib function definitions */ -#include "zip_device.h" - -/* ZIP device definitions */ -#include "zip_main.h" - -/* ZIP memory allocation/deallocation related definitions */ -#include "zip_mem.h" - -/* Device specific structure definitions */ -#include "zip_regs.h" - -#define ZIP_ERROR -1 - -#define ZIP_FLUSH_FINISH 4 - -#define RAW_FORMAT 0 /* for rawpipe */ -#define ZLIB_FORMAT 1 /* for zpipe */ -#define GZIP_FORMAT 2 /* for gzpipe */ -#define LZS_FORMAT 3 /* for lzspipe */ - -/* Max number of ZIP devices supported */ -#define MAX_ZIP_DEVICES 2 - -/* Configures the number of zip queues to be used */ -#define ZIP_NUM_QUEUES 2 - -#define DYNAMIC_STOP_EXCESS 1024 - -/* Maximum buffer sizes in direct mode */ -#define MAX_INPUT_BUFFER_SIZE (64 * 1024) -#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024) - -/** - * struct zip_operation - common data structure for comp and decomp operations - * @input: Next input byte is read from here - * @output: Next output byte written here - * @ctx_addr: Inflate context buffer address - * @history: Pointer to the history buffer - * @input_len: Number of bytes available at next_in - * @input_total_len: Total number of input bytes read - * @output_len: Remaining free space at next_out - * @output_total_len: Total number of bytes output so far - * @csum: Checksum value of the uncompressed data - * @flush: Flush flag - * @format: Format (depends on stream's 
wrap) - * @speed: Speed depends on stream's level - * @ccode: Compression code ( stream's strategy) - * @lzs_flag: Flag for LZS support - * @begin_file: Beginning of file indication for inflate - * @history_len: Size of the history data - * @end_file: Ending of the file indication for inflate - * @compcode: Completion status of the ZIP invocation - * @bytes_read: Input bytes read in current instruction - * @bits_processed: Total bits processed for entire file - * @sizeofptr: To distinguish between ILP32 and LP64 - * @sizeofzops: Optional just for padding - * - * This structure is used to maintain the required meta data for the - * comp and decomp operations. - */ -struct zip_operation { - u8 *input; - u8 *output; - u64 ctx_addr; - u64 history; - - u32 input_len; - u32 input_total_len; - - u32 output_len; - u32 output_total_len; - - u32 csum; - u32 flush; - - u32 format; - u32 speed; - u32 ccode; - u32 lzs_flag; - - u32 begin_file; - u32 history_len; - - u32 end_file; - u32 compcode; - u32 bytes_read; - u32 bits_processed; - - u32 sizeofptr; - u32 sizeofzops; -}; - -static inline int zip_poll_result(union zip_zres_s *result) -{ - int retries = 1000; - - while (!result->s.compcode) { - if (!--retries) { - pr_err("ZIP ERR: request timed out"); - return -ETIMEDOUT; - } - udelay(10); - /* - * Force re-reading of compcode which is updated - * by the ZIP coprocessor. - */ - rmb(); - } - return 0; -} - -/* error messages */ -#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \ - fmt "\n", __func__, __LINE__, ## args) - -#ifdef MSG_ENABLE -/* Enable all messages */ -#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args) -#else -#define zip_msg(fmt, args...) -#endif - -#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE) - -#ifdef DEBUG_LEVEL - -#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \ - strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__) - -#if DEBUG_LEVEL >= 4 - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \ - fmt "\n", FILE_NAME, __func__, __LINE__, ## args) - -#elif DEBUG_LEVEL >= 3 - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \ - fmt "\n", FILE_NAME, __func__, __LINE__, ## args) - -#elif DEBUG_LEVEL >= 2 - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \ - fmt "\n", __func__, __LINE__, ## args) - -#else - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args) - -#endif /* DEBUG LEVEL >=4 */ - -#else - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args) - -#endif /* DEBUG_LEVEL */ -#else - -#define zip_dbg(fmt, args...) - -#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/ - -#endif diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c deleted file mode 100644 index 02e87f2d50db..000000000000 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ /dev/null @@ -1,261 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. 
- * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include "zip_crypto.h" - -static void zip_static_init_zip_ops(struct zip_operation *zip_ops, - int lzs_flag) -{ - zip_ops->flush = ZIP_FLUSH_FINISH; - - /* equivalent to level 6 of opensource zlib */ - zip_ops->speed = 1; - - if (!lzs_flag) { - zip_ops->ccode = 0; /* Auto Huffman */ - zip_ops->lzs_flag = 0; - zip_ops->format = ZLIB_FORMAT; - } else { - zip_ops->ccode = 3; /* LZS Encoding */ - zip_ops->lzs_flag = 1; - zip_ops->format = LZS_FORMAT; - } - zip_ops->begin_file = 1; - zip_ops->history_len = 0; - zip_ops->end_file = 1; - zip_ops->compcode = 0; - zip_ops->csum = 1; /* Adler checksum desired */ -} - -static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag) -{ - struct zip_operation *comp_ctx = &zip_ctx->zip_comp; - struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp; - - zip_static_init_zip_ops(comp_ctx, lzs_flag); - zip_static_init_zip_ops(decomp_ctx, lzs_flag); - - comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE); - if (!comp_ctx->input) - return -ENOMEM; - - comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE); - if (!comp_ctx->output) - goto err_comp_input; - - decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE); - if (!decomp_ctx->input) - goto err_comp_output; - - decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE); - if (!decomp_ctx->output) - goto err_decomp_input; - - return 0; - -err_decomp_input: - zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE); - -err_comp_output: - zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE); - -err_comp_input: - zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE); - - return -ENOMEM; -} - -static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx) -{ - struct zip_operation 
*comp_ctx = &zip_ctx->zip_comp; - struct zip_operation *dec_ctx = &zip_ctx->zip_decomp; - - zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE); - zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE); - - zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE); - zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE); -} - -static int zip_compress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, - struct zip_kernel_ctx *zip_ctx) -{ - struct zip_operation *zip_ops = NULL; - struct zip_state *zip_state; - struct zip_device *zip = NULL; - int ret; - - if (!zip_ctx || !src || !dst || !dlen) - return -ENOMEM; - - zip = zip_get_device(zip_get_node_id()); - if (!zip) - return -ENODEV; - - zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); - if (!zip_state) - return -ENOMEM; - - zip_ops = &zip_ctx->zip_comp; - - zip_ops->input_len = slen; - zip_ops->output_len = *dlen; - memcpy(zip_ops->input, src, slen); - - ret = zip_deflate(zip_ops, zip_state, zip); - - if (!ret) { - *dlen = zip_ops->output_len; - memcpy(dst, zip_ops->output, *dlen); - } - kfree(zip_state); - return ret; -} - -static int zip_decompress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, - struct zip_kernel_ctx *zip_ctx) -{ - struct zip_operation *zip_ops = NULL; - struct zip_state *zip_state; - struct zip_device *zip = NULL; - int ret; - - if (!zip_ctx || !src || !dst || !dlen) - return -ENOMEM; - - zip = zip_get_device(zip_get_node_id()); - if (!zip) - return -ENODEV; - - zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); - if (!zip_state) - return -ENOMEM; - - zip_ops = &zip_ctx->zip_decomp; - memcpy(zip_ops->input, src, slen); - - /* Work around for a bug in zlib which needs an extra bytes sometimes */ - if (zip_ops->ccode != 3) /* Not LZS Encoding */ - zip_ops->input[slen++] = 0; - - zip_ops->input_len = slen; - zip_ops->output_len = *dlen; - - ret = zip_inflate(zip_ops, zip_state, zip); - - if (!ret) { - *dlen = zip_ops->output_len; - memcpy(dst, zip_ops->output, *dlen); - } - kfree(zip_state); - return ret; -} - -/* SCOMP framework start */ -void *zip_alloc_scomp_ctx_deflate(void) -{ - int ret; - struct zip_kernel_ctx *zip_ctx; - - zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL); - if (!zip_ctx) - return ERR_PTR(-ENOMEM); - - ret = zip_ctx_init(zip_ctx, 0); - - if (ret) { - kfree_sensitive(zip_ctx); - return ERR_PTR(ret); - } - - return zip_ctx; -} - -void *zip_alloc_scomp_ctx_lzs(void) -{ - int ret; - struct zip_kernel_ctx *zip_ctx; - - zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL); - if (!zip_ctx) - return ERR_PTR(-ENOMEM); - - ret = zip_ctx_init(zip_ctx, 1); - - if (ret) { - kfree_sensitive(zip_ctx); - return ERR_PTR(ret); - } - - return zip_ctx; -} - -void zip_free_scomp_ctx(void *ctx) -{ - struct zip_kernel_ctx *zip_ctx = ctx; - - zip_ctx_exit(zip_ctx); - kfree_sensitive(zip_ctx); -} - -int zip_scomp_compress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) -{ - struct zip_kernel_ctx *zip_ctx = ctx; - - return zip_compress(src, slen, dst, dlen, zip_ctx); -} - -int zip_scomp_decompress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) -{ - struct zip_kernel_ctx *zip_ctx = ctx; - - return zip_decompress(src, slen, dst, dlen, zip_ctx); -} /* SCOMP framework end */ diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h deleted file mode 100644 index 10899ece2d1f..000000000000 --- a/drivers/crypto/cavium/zip/zip_crypto.h +++ 
/dev/null @@ -1,68 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_CRYPTO_H__ -#define __ZIP_CRYPTO_H__ - -#include <crypto/internal/scompress.h> -#include "common.h" -#include "zip_deflate.h" -#include "zip_inflate.h" - -struct zip_kernel_ctx { - struct zip_operation zip_comp; - struct zip_operation zip_decomp; -}; - -void *zip_alloc_scomp_ctx_deflate(void); -void *zip_alloc_scomp_ctx_lzs(void); -void zip_free_scomp_ctx(void *zip_ctx); -int zip_scomp_compress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx); -int zip_scomp_decompress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx); -#endif diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c deleted file mode 100644 index d7133f857d67..000000000000 --- a/drivers/crypto/cavium/zip/zip_deflate.c +++ /dev/null @@ -1,200 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. 
- * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. 
- ***********************license end**************************************/ - -#include <linux/delay.h> -#include <linux/sched.h> - -#include "common.h" -#include "zip_deflate.h" - -/* Prepares the deflate zip command */ -static int prepare_zip_command(struct zip_operation *zip_ops, - struct zip_state *s, union zip_inst_s *zip_cmd) -{ - union zip_zres_s *result_ptr = &s->result; - - memset(zip_cmd, 0, sizeof(s->zip_cmd)); - memset(result_ptr, 0, sizeof(s->result)); - - /* IWORD #0 */ - /* History gather */ - zip_cmd->s.hg = 0; - /* compression enable = 1 for deflate */ - zip_cmd->s.ce = 1; - /* sf (sync flush) */ - zip_cmd->s.sf = 1; - /* ef (end of file) */ - if (zip_ops->flush == ZIP_FLUSH_FINISH) { - zip_cmd->s.ef = 1; - zip_cmd->s.sf = 0; - } - - zip_cmd->s.cc = zip_ops->ccode; - /* ss (compression speed/storage) */ - zip_cmd->s.ss = zip_ops->speed; - - /* IWORD #1 */ - /* adler checksum */ - zip_cmd->s.adlercrc32 = zip_ops->csum; - zip_cmd->s.historylength = zip_ops->history_len; - zip_cmd->s.dg = 0; - - /* IWORD # 6 and 7 - compression input/history pointer */ - zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input); - zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len + - zip_ops->history_len); - zip_cmd->s.ds = 0; - - /* IWORD # 8 and 9 - Output pointer */ - zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output); - zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len; - /* maximum number of output-stream bytes that can be written */ - zip_cmd->s.totaloutputlength = zip_ops->output_len; - - /* IWORD # 10 and 11 - Result pointer */ - zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr); - /* Clearing completion code */ - result_ptr->s.compcode = 0; - - return 0; -} - -/** - * zip_deflate - API to offload deflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to zip device structure - * - * This function prepares the zip deflate command and submits it to the zip - * engine for processing. - * - * Return: 0 if successful or error code - */ -int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev) -{ - union zip_inst_s *zip_cmd = &s->zip_cmd; - union zip_zres_s *result_ptr = &s->result; - u32 queue; - - /* Prepares zip command based on the input parameters */ - prepare_zip_command(zip_ops, s, zip_cmd); - - atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes); - /* Loads zip command into command queues and rings door bell */ - queue = zip_load_instr(zip_cmd, zip_dev); - - /* Stats update for compression requests submitted */ - atomic64_inc(&zip_dev->stats.comp_req_submit); - - /* Wait for completion or error */ - zip_poll_result(result_ptr); - - /* Stats update for compression requests completed */ - atomic64_inc(&zip_dev->stats.comp_req_complete); - - zip_ops->compcode = result_ptr->s.compcode; - switch (zip_ops->compcode) { - case ZIP_CMD_NOTDONE: - zip_dbg("Zip instruction not yet completed"); - return ZIP_ERROR; - - case ZIP_CMD_SUCCESS: - zip_dbg("Zip instruction completed successfully"); - zip_update_cmd_bufs(zip_dev, queue); - break; - - case ZIP_CMD_DTRUNC: - zip_dbg("Output Truncate error"); - /* Returning ZIP_ERROR to avoid copy to user */ - return ZIP_ERROR; - - default: - zip_err("Zip instruction failed. 
Code:%d", zip_ops->compcode); - return ZIP_ERROR; - } - - /* Update the CRC depending on the format */ - switch (zip_ops->format) { - case RAW_FORMAT: - zip_dbg("RAW Format: %d ", zip_ops->format); - /* Get checksum from engine, need to feed it again */ - zip_ops->csum = result_ptr->s.adler32; - break; - - case ZLIB_FORMAT: - zip_dbg("ZLIB Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.adler32; - break; - - case GZIP_FORMAT: - zip_dbg("GZIP Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.crc32; - break; - - case LZS_FORMAT: - zip_dbg("LZS Format: %d ", zip_ops->format); - break; - - default: - zip_err("Unknown Format:%d\n", zip_ops->format); - } - - atomic64_add(result_ptr->s.totalbyteswritten, - &zip_dev->stats.comp_out_bytes); - - /* Update output_len */ - if (zip_ops->output_len < result_ptr->s.totalbyteswritten) { - /* Dynamic stop && strm->output_len < zipconstants[onfsize] */ - zip_err("output_len (%d) < total bytes written(%d)\n", - zip_ops->output_len, result_ptr->s.totalbyteswritten); - zip_ops->output_len = 0; - - } else { - zip_ops->output_len = result_ptr->s.totalbyteswritten; - } - - return 0; -} diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h deleted file mode 100644 index 1d32e76edc4d..000000000000 --- a/drivers/crypto/cavium/zip/zip_deflate.h +++ /dev/null @@ -1,62 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. 
THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_DEFLATE_H__ -#define __ZIP_DEFLATE_H__ - -/** - * zip_deflate - API to offload deflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to the structure representing zip device - * - * This function prepares the zip deflate command and submits it to the zip - * engine by ringing the doorbell. - * - * Return: 0 if successful or error code - */ -int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev); -#endif diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c deleted file mode 100644 index f174ec29ed69..000000000000 --- a/drivers/crypto/cavium/zip/zip_device.c +++ /dev/null @@ -1,202 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include "common.h" -#include "zip_deflate.h" - -/** - * zip_cmd_queue_consumed - Calculates the space consumed in the command queue. - * - * @zip_dev: Pointer to zip device structure - * @queue: Queue number - * - * Return: Bytes consumed in the command queue buffer. 
- */ -static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue) -{ - return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) * - sizeof(u64 *)); -} - -/** - * zip_load_instr - Submits the instruction into the ZIP command queue - * @instr: Pointer to the instruction to be submitted - * @zip_dev: Pointer to ZIP device structure to which the instruction is to - * be submitted - * - * This function copies the ZIP instruction to the command queue and rings the - * doorbell to notify the engine of the instruction submission. The command - * queue is maintained in a circular fashion. When there is space for exactly - * one instruction in the queue, next chunk pointer of the queue is made to - * point to the head of the queue, thus maintaining a circular queue. - * - * Return: Queue number to which the instruction was submitted - */ -u32 zip_load_instr(union zip_inst_s *instr, - struct zip_device *zip_dev) -{ - union zip_quex_doorbell dbell; - u32 queue = 0; - u32 consumed = 0; - u64 *ncb_ptr = NULL; - union zip_nptr_s ncp; - - /* - * Distribute the instructions between the enabled queues based on - * the CPU id. - */ - if (raw_smp_processor_id() % 2 == 0) - queue = 0; - else - queue = 1; - - zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue); - - /* Take cmd buffer lock */ - spin_lock(&zip_dev->iq[queue].lock); - - /* - * Command Queue implementation - * 1. If there is place for new instructions, push the cmd at sw_head. - * 2. If there is place for exactly one instruction, push the new cmd - * at the sw_head. Make sw_head point to the sw_tail to make it - * circular. Write sw_head's physical address to the "Next-Chunk - * Buffer Ptr" to make it cmd_hw_tail. - * 3. Ring the door bell. - */ - zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head); - zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail); - - consumed = zip_cmd_queue_consumed(zip_dev, queue); - /* Check if there is space to push just one cmd */ - if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) { - zip_dbg("Cmd queue space available for single command"); - /* Space for one cmd, pust it and make it circular queue */ - memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, - sizeof(union zip_inst_s)); - zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ - - /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */ - ncb_ptr = zip_dev->iq[queue].sw_head; - - zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx", - ncb_ptr, zip_dev->iq[queue].sw_head - 16); - - /* Using Circular command queue */ - zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail; - /* Mark this buffer for free */ - zip_dev->iq[queue].free_flag = 1; - - /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */ - ncp.u_reg64 = 0ull; - ncp.s.addr = __pa(zip_dev->iq[queue].sw_head); - *ncb_ptr = ncp.u_reg64; - zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx", - *ncb_ptr, __pa(zip_dev->iq[queue].sw_head)); - - zip_dev->iq[queue].pend_cnt++; - - } else { - zip_dbg("Enough space is available for commands"); - /* Push this cmd to cmd queue buffer */ - memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, - sizeof(union zip_inst_s)); - zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ - - zip_dev->iq[queue].pend_cnt++; - } - zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", - zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail, - zip_dev->iq[queue].hw_tail); - - zip_dbg(" Pushed the new cmd : pend_cnt : %d", - zip_dev->iq[queue].pend_cnt); - - /* Ring the doorbell */ - dbell.u_reg64 = 0ull; - 
dbell.s.dbell_cnt = 1; - zip_reg_write(dbell.u_reg64, - (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue))); - - /* Unlock cmd buffer lock */ - spin_unlock(&zip_dev->iq[queue].lock); - - return queue; -} - -/** - * zip_update_cmd_bufs - Updates the queue statistics after posting the - * instruction - * @zip_dev: Pointer to zip device structure - * @queue: Queue number - */ -void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue) -{ - /* Take cmd buffer lock */ - spin_lock(&zip_dev->iq[queue].lock); - - /* Check if the previous buffer can be freed */ - if (zip_dev->iq[queue].free_flag == 1) { - zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail"); - /* Reset the free flag */ - zip_dev->iq[queue].free_flag = 0; - - /* Point the hw_tail to start of the new chunk buffer */ - zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head; - } else { - zip_dbg("Free flag not set. increment hw tail"); - zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */ - } - - zip_dev->iq[queue].done_cnt++; - zip_dev->iq[queue].pend_cnt--; - - zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", - zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail, - zip_dev->iq[queue].hw_tail); - zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt); - - spin_unlock(&zip_dev->iq[queue].lock); -} diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h deleted file mode 100644 index 9e18b3b93d38..000000000000 --- a/drivers/crypto/cavium/zip/zip_device.h +++ /dev/null @@ -1,108 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_DEVICE_H__ -#define __ZIP_DEVICE_H__ - -#include <linux/types.h> -#include "zip_main.h" - -struct sg_info { - /* - * Pointer to the input data when scatter_gather == 0 and - * pointer to the input gather list buffer when scatter_gather == 1 - */ - union zip_zptr_s *gather; - - /* - * Pointer to the output data when scatter_gather == 0 and - * pointer to the output scatter list buffer when scatter_gather == 1 - */ - union zip_zptr_s *scatter; - - /* - * Holds size of the output buffer pointed by scatter list - * when scatter_gather == 1 - */ - u64 scatter_buf_size; - - /* for gather data */ - u64 gather_enable; - - /* for scatter data */ - u64 scatter_enable; - - /* Number of gather list pointers for gather data */ - u32 gbuf_cnt; - - /* Number of scatter list pointers for scatter data */ - u32 sbuf_cnt; - - /* Buffers allocation state */ - u8 alloc_state; -}; - -/** - * struct zip_state - Structure representing the required information related - * to a command - * @zip_cmd: Pointer to zip instruction structure - * @result: Pointer to zip result structure - * @ctx: Context pointer for inflate - * @history: Decompression history pointer - * @sginfo: Scatter-gather info structure - */ -struct zip_state { - union zip_inst_s zip_cmd; - union zip_zres_s result; - union zip_zptr_s *ctx; - union zip_zptr_s *history; - struct sg_info sginfo; -}; - -#define ZIP_CONTEXT_SIZE 2048 -#define ZIP_INFLATE_HISTORY_SIZE 32768 -#define ZIP_DEFLATE_HISTORY_SIZE 32768 - -#endif diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c deleted file mode 100644 index 7e0d73e2f89e..000000000000 --- a/drivers/crypto/cavium/zip/zip_inflate.c +++ /dev/null @@ -1,223 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. 
nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include <linux/delay.h> -#include <linux/sched.h> - -#include "common.h" -#include "zip_inflate.h" - -static int prepare_inflate_zcmd(struct zip_operation *zip_ops, - struct zip_state *s, union zip_inst_s *zip_cmd) -{ - union zip_zres_s *result_ptr = &s->result; - - memset(zip_cmd, 0, sizeof(s->zip_cmd)); - memset(result_ptr, 0, sizeof(s->result)); - - /* IWORD#0 */ - - /* Decompression History Gather list - no gather list */ - zip_cmd->s.hg = 0; - /* For decompression, CE must be 0x0. */ - zip_cmd->s.ce = 0; - /* For decompression, SS must be 0x0. */ - zip_cmd->s.ss = 0; - /* For decompression, SF should always be set. */ - zip_cmd->s.sf = 1; - - /* Begin File */ - if (zip_ops->begin_file == 0) - zip_cmd->s.bf = 0; - else - zip_cmd->s.bf = 1; - - zip_cmd->s.ef = 1; - /* 0: for Deflate decompression, 3: for LZS decompression */ - zip_cmd->s.cc = zip_ops->ccode; - - /* IWORD #1*/ - - /* adler checksum */ - zip_cmd->s.adlercrc32 = zip_ops->csum; - - /* - * HISTORYLENGTH must be 0x0 for any ZIP decompress operation. - * History data is added to a decompression operation via IWORD3. - */ - zip_cmd->s.historylength = 0; - zip_cmd->s.ds = 0; - - /* IWORD # 8 and 9 - Output pointer */ - zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output); - zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len; - - /* Maximum number of output-stream bytes that can be written */ - zip_cmd->s.totaloutputlength = zip_ops->output_len; - - zip_dbg("Data Direct Input case "); - - /* IWORD # 6 and 7 - input pointer */ - zip_cmd->s.dg = 0; - zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input); - zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len; - - /* IWORD # 10 and 11 - Result pointer */ - zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr); - - /* Clearing completion code */ - result_ptr->s.compcode = 0; - - /* Returning 0 for time being.*/ - return 0; -} - -/** - * zip_inflate - API to offload inflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to zip device structure - * - * This function prepares the zip inflate command and submits it to the zip - * engine for processing. 
- * - * Return: 0 if successful or error code - */ -int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev) -{ - union zip_inst_s *zip_cmd = &s->zip_cmd; - union zip_zres_s *result_ptr = &s->result; - u32 queue; - - /* Prepare inflate zip command */ - prepare_inflate_zcmd(zip_ops, s, zip_cmd); - - atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes); - - /* Load inflate command to zip queue and ring the doorbell */ - queue = zip_load_instr(zip_cmd, zip_dev); - - /* Decompression requests submitted stats update */ - atomic64_inc(&zip_dev->stats.decomp_req_submit); - - /* Wait for completion or error */ - zip_poll_result(result_ptr); - - /* Decompression requests completed stats update */ - atomic64_inc(&zip_dev->stats.decomp_req_complete); - - zip_ops->compcode = result_ptr->s.compcode; - switch (zip_ops->compcode) { - case ZIP_CMD_NOTDONE: - zip_dbg("Zip Instruction not yet completed\n"); - return ZIP_ERROR; - - case ZIP_CMD_SUCCESS: - zip_dbg("Zip Instruction completed successfully\n"); - break; - - case ZIP_CMD_DYNAMIC_STOP: - zip_dbg(" Dynamic stop Initiated\n"); - break; - - default: - zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode); - atomic64_inc(&zip_dev->stats.decomp_bad_reqs); - zip_update_cmd_bufs(zip_dev, queue); - return ZIP_ERROR; - } - - zip_update_cmd_bufs(zip_dev, queue); - - if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) && - (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP)) - result_ptr->s.ef = 1; - - zip_ops->csum = result_ptr->s.adler32; - - atomic64_add(result_ptr->s.totalbyteswritten, - &zip_dev->stats.decomp_out_bytes); - - if (zip_ops->output_len < result_ptr->s.totalbyteswritten) { - zip_err("output_len (%d) < total bytes written (%d)\n", - zip_ops->output_len, result_ptr->s.totalbyteswritten); - zip_ops->output_len = 0; - } else { - zip_ops->output_len = result_ptr->s.totalbyteswritten; - } - - zip_ops->bytes_read = result_ptr->s.totalbytesread; - zip_ops->bits_processed = result_ptr->s.totalbitsprocessed; - zip_ops->end_file = result_ptr->s.ef; - if (zip_ops->end_file) { - switch (zip_ops->format) { - case RAW_FORMAT: - zip_dbg("RAW Format: %d ", zip_ops->format); - /* Get checksum from engine */ - zip_ops->csum = result_ptr->s.adler32; - break; - - case ZLIB_FORMAT: - zip_dbg("ZLIB Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.adler32; - break; - - case GZIP_FORMAT: - zip_dbg("GZIP Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.crc32; - break; - - case LZS_FORMAT: - zip_dbg("LZS Format: %d ", zip_ops->format); - break; - - default: - zip_err("Format error:%d\n", zip_ops->format); - } - } - - return 0; -} diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h deleted file mode 100644 index 6b20f179978e..000000000000 --- a/drivers/crypto/cavium/zip/zip_inflate.h +++ /dev/null @@ -1,62 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. 
- * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_INFLATE_H__ -#define __ZIP_INFLATE_H__ - -/** - * zip_inflate - API to offload inflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to the structure representing zip device - * - * This function prepares the zip inflate command and submits it to the zip - * engine for processing. - * - * Return: 0 if successful or error code - */ -int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev); -#endif diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c deleted file mode 100644 index abd58de4343d..000000000000 --- a/drivers/crypto/cavium/zip/zip_main.c +++ /dev/null @@ -1,603 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include "common.h" -#include "zip_crypto.h" - -#define DRV_NAME "ThunderX-ZIP" - -static struct zip_device *zip_dev[MAX_ZIP_DEVICES]; - -static const struct pci_device_id zip_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) }, - { 0, } -}; - -static void zip_debugfs_init(void); -static void zip_debugfs_exit(void); -static int zip_register_compression_device(void); -static void zip_unregister_compression_device(void); - -void zip_reg_write(u64 val, u64 __iomem *addr) -{ - writeq(val, addr); -} - -u64 zip_reg_read(u64 __iomem *addr) -{ - return readq(addr); -} - -/* - * Allocates new ZIP device structure - * Returns zip_device pointer or NULL if cannot allocate memory for zip_device - */ -static struct zip_device *zip_alloc_device(struct pci_dev *pdev) -{ - struct zip_device *zip = NULL; - int idx; - - for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) { - if (!zip_dev[idx]) - break; - } - - /* To ensure that the index is within the limit */ - if (idx < MAX_ZIP_DEVICES) - zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL); - - if (!zip) - return NULL; - - zip_dev[idx] = zip; - zip->index = idx; - return zip; -} - -/** - * zip_get_device - Get ZIP device based on node id of cpu - * - * @node: Node id of the current cpu - * Return: Pointer to Zip device structure - */ -struct zip_device *zip_get_device(int node) -{ - if ((node < MAX_ZIP_DEVICES) && (node >= 0)) - return zip_dev[node]; - - zip_err("ZIP device not found for node id %d\n", node); - return NULL; -} - -/** - * zip_get_node_id - Get the node id of the current cpu - * - * Return: Node id of the current cpu - */ -int zip_get_node_id(void) -{ - return cpu_to_node(raw_smp_processor_id()); -} - -/* Initializes the ZIP h/w sub-system */ -static int zip_init_hw(struct zip_device *zip) -{ - union zip_cmd_ctl cmd_ctl; - union zip_constants constants; - union zip_que_ena que_ena; - union zip_quex_map que_map; - union zip_que_pri que_pri; - - union zip_quex_sbuf_addr que_sbuf_addr; - union zip_quex_sbuf_ctl que_sbuf_ctl; - - int q = 0; - 
- /* Enable the ZIP Engine(Core) Clock */ - cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL); - cmd_ctl.s.forceclk = 1; - zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL)); - - zip_msg("ZIP_CMD_CTL : 0x%016llx", - zip_reg_read(zip->reg_base + ZIP_CMD_CTL)); - - constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS); - zip->depth = constants.s.depth; - zip->onfsize = constants.s.onfsize; - zip->ctxsize = constants.s.ctxsize; - - zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx", - zip->depth, zip->onfsize, zip->ctxsize); - - /* - * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to - * have the correct buffer pointer and size configured for each - * instruction queue. - */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - que_sbuf_ctl.u_reg64 = 0ull; - que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64)); - que_sbuf_ctl.s.inst_be = 0; - que_sbuf_ctl.s.stream_id = 0; - zip_reg_write(que_sbuf_ctl.u_reg64, - (zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); - - zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q, - zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); - } - - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - memset(&zip->iq[q], 0x0, sizeof(struct zip_iq)); - - spin_lock_init(&zip->iq[q].lock); - - if (zip_cmd_qbuf_alloc(zip, q)) { - while (q != 0) { - q--; - zip_cmd_qbuf_free(zip, q); - } - return -ENOMEM; - } - - /* Initialize tail ptr to head */ - zip->iq[q].sw_tail = zip->iq[q].sw_head; - zip->iq[q].hw_tail = zip->iq[q].sw_head; - - /* Write the physical addr to register */ - que_sbuf_addr.u_reg64 = 0ull; - que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >> - ZIP_128B_ALIGN); - - zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q, - (u64)que_sbuf_addr.s.ptr); - - zip_reg_write(que_sbuf_addr.u_reg64, - (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); - - zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q, - zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); - - zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", - zip->iq[q].sw_head, zip->iq[q].sw_tail, - zip->iq[q].hw_tail); - zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr); - } - - /* - * Queue-to-ZIP core mapping - * If a queue is not mapped to a particular core, it is equivalent to - * the ZIP core being disabled. 
- */ - que_ena.u_reg64 = 0x0ull; - /* Enabling queues based on ZIP_NUM_QUEUES */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) - que_ena.s.ena |= (0x1 << q); - zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA)); - - zip_msg("QUE_ENA : 0x%016llx", - zip_reg_read(zip->reg_base + ZIP_QUE_ENA)); - - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - que_map.u_reg64 = 0ull; - /* Mapping each queue to two ZIP cores */ - que_map.s.zce = 0x3; - zip_reg_write(que_map.u_reg64, - (zip->reg_base + ZIP_QUEX_MAP(q))); - - zip_msg("QUE_MAP(%d) : 0x%016llx", q, - zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q))); - } - - que_pri.u_reg64 = 0ull; - for (q = 0; q < ZIP_NUM_QUEUES; q++) - que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */ - zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI)); - - zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI)); - - return 0; -} - -static void zip_reset(struct zip_device *zip) -{ - union zip_cmd_ctl cmd_ctl; - - cmd_ctl.u_reg64 = 0x0ull; - cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */ - zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL)); -} - -static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct device *dev = &pdev->dev; - struct zip_device *zip = NULL; - int err; - - zip = zip_alloc_device(pdev); - if (!zip) - return -ENOMEM; - - dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index, - pdev->vendor, pdev->device, dev_to_node(dev)); - - pci_set_drvdata(pdev, zip); - zip->pdev = pdev; - - err = pci_enable_device(pdev); - if (err) { - dev_err(dev, "Failed to enable PCI device"); - goto err_free_device; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(dev, "PCI request regions failed 0x%x", err); - goto err_disable_device; - } - - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); - if (err) { - dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); - goto err_release_regions; - } - - /* MAP configuration registers */ - zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0); - if (!zip->reg_base) { - dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting"); - err = -ENOMEM; - goto err_release_regions; - } - - /* Initialize ZIP Hardware */ - err = zip_init_hw(zip); - if (err) - goto err_release_regions; - - /* Register with the Kernel Crypto Interface */ - err = zip_register_compression_device(); - if (err < 0) { - zip_err("ZIP: Kernel Crypto Registration failed\n"); - goto err_register; - } - - /* comp-decomp statistics are handled with debugfs interface */ - zip_debugfs_init(); - - return 0; - -err_register: - zip_reset(zip); - -err_release_regions: - if (zip->reg_base) - iounmap(zip->reg_base); - pci_release_regions(pdev); - -err_disable_device: - pci_disable_device(pdev); - -err_free_device: - pci_set_drvdata(pdev, NULL); - - /* Remove zip_dev from zip_device list, free the zip_device memory */ - zip_dev[zip->index] = NULL; - devm_kfree(dev, zip); - - return err; -} - -static void zip_remove(struct pci_dev *pdev) -{ - struct zip_device *zip = pci_get_drvdata(pdev); - int q = 0; - - if (!zip) - return; - - zip_debugfs_exit(); - - zip_unregister_compression_device(); - - if (zip->reg_base) { - zip_reset(zip); - iounmap(zip->reg_base); - } - - pci_release_regions(pdev); - pci_disable_device(pdev); - - /* - * Free Command Queue buffers. This free should be called for all - * the enabled Queues. 
- */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) - zip_cmd_qbuf_free(zip, q); - - pci_set_drvdata(pdev, NULL); - /* remove zip device from zip device list */ - zip_dev[zip->index] = NULL; -} - -/* PCI Sub-System Interface */ -static struct pci_driver zip_driver = { - .name = DRV_NAME, - .id_table = zip_id_table, - .probe = zip_probe, - .remove = zip_remove, -}; - -/* Kernel Crypto Subsystem Interface */ - -static struct scomp_alg zip_scomp_deflate = { - .alloc_ctx = zip_alloc_scomp_ctx_deflate, - .free_ctx = zip_free_scomp_ctx, - .compress = zip_scomp_compress, - .decompress = zip_scomp_decompress, - .base = { - .cra_name = "deflate", - .cra_driver_name = "deflate-scomp-cavium", - .cra_module = THIS_MODULE, - .cra_priority = 300, - } -}; - -static struct scomp_alg zip_scomp_lzs = { - .alloc_ctx = zip_alloc_scomp_ctx_lzs, - .free_ctx = zip_free_scomp_ctx, - .compress = zip_scomp_compress, - .decompress = zip_scomp_decompress, - .base = { - .cra_name = "lzs", - .cra_driver_name = "lzs-scomp-cavium", - .cra_module = THIS_MODULE, - .cra_priority = 300, - } -}; - -static int zip_register_compression_device(void) -{ - int ret; - - ret = crypto_register_scomp(&zip_scomp_deflate); - if (ret < 0) { - zip_err("Deflate scomp algorithm registration failed\n"); - return ret; - } - - ret = crypto_register_scomp(&zip_scomp_lzs); - if (ret < 0) { - zip_err("LZS scomp algorithm registration failed\n"); - goto err_unregister_scomp_deflate; - } - - return ret; - -err_unregister_scomp_deflate: - crypto_unregister_scomp(&zip_scomp_deflate); - - return ret; -} - -static void zip_unregister_compression_device(void) -{ - crypto_unregister_scomp(&zip_scomp_deflate); - crypto_unregister_scomp(&zip_scomp_lzs); -} - -/* - * debugfs functions - */ -#ifdef CONFIG_DEBUG_FS -#include <linux/debugfs.h> - -/* Displays ZIP device statistics */ -static int zip_stats_show(struct seq_file *s, void *unused) -{ - u64 val = 0ull; - u64 avg_chunk = 0ull, avg_cr = 0ull; - u32 q = 0; - - int index = 0; - struct zip_device *zip; - struct zip_stats *st; - - for (index = 0; index < MAX_ZIP_DEVICES; index++) { - u64 pending = 0; - - if (zip_dev[index]) { - zip = zip_dev[index]; - st = &zip->stats; - - /* Get all the pending requests */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - val = zip_reg_read((zip->reg_base + - ZIP_DBG_QUEX_STA(q))); - pending += val >> 32 & 0xffffff; - } - - val = atomic64_read(&st->comp_req_complete); - avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0; - - val = atomic64_read(&st->comp_out_bytes); - avg_cr = (val) ? 
atomic64_read(&st->comp_in_bytes) / val : 0; - seq_printf(s, " ZIP Device %d Stats\n" - "-----------------------------------\n" - "Comp Req Submitted : \t%lld\n" - "Comp Req Completed : \t%lld\n" - "Compress In Bytes : \t%lld\n" - "Compressed Out Bytes : \t%lld\n" - "Average Chunk size : \t%llu\n" - "Average Compression ratio : \t%llu\n" - "Decomp Req Submitted : \t%lld\n" - "Decomp Req Completed : \t%lld\n" - "Decompress In Bytes : \t%lld\n" - "Decompressed Out Bytes : \t%lld\n" - "Decompress Bad requests : \t%lld\n" - "Pending Req : \t%lld\n" - "---------------------------------\n", - index, - (u64)atomic64_read(&st->comp_req_submit), - (u64)atomic64_read(&st->comp_req_complete), - (u64)atomic64_read(&st->comp_in_bytes), - (u64)atomic64_read(&st->comp_out_bytes), - avg_chunk, - avg_cr, - (u64)atomic64_read(&st->decomp_req_submit), - (u64)atomic64_read(&st->decomp_req_complete), - (u64)atomic64_read(&st->decomp_in_bytes), - (u64)atomic64_read(&st->decomp_out_bytes), - (u64)atomic64_read(&st->decomp_bad_reqs), - pending); - } - } - return 0; -} - -/* Clears stats data */ -static int zip_clear_show(struct seq_file *s, void *unused) -{ - int index = 0; - - for (index = 0; index < MAX_ZIP_DEVICES; index++) { - if (zip_dev[index]) { - memset(&zip_dev[index]->stats, 0, - sizeof(struct zip_stats)); - seq_printf(s, "Cleared stats for zip %d\n", index); - } - } - - return 0; -} - -static struct zip_registers zipregs[64] = { - {"ZIP_CMD_CTL ", 0x0000ull}, - {"ZIP_THROTTLE ", 0x0010ull}, - {"ZIP_CONSTANTS ", 0x00A0ull}, - {"ZIP_QUE0_MAP ", 0x1400ull}, - {"ZIP_QUE1_MAP ", 0x1408ull}, - {"ZIP_QUE_ENA ", 0x0500ull}, - {"ZIP_QUE_PRI ", 0x0508ull}, - {"ZIP_QUE0_DONE ", 0x2000ull}, - {"ZIP_QUE1_DONE ", 0x2008ull}, - {"ZIP_QUE0_DOORBELL ", 0x4000ull}, - {"ZIP_QUE1_DOORBELL ", 0x4008ull}, - {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull}, - {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull}, - {"ZIP_QUE0_SBUF_CTL ", 0x1200ull}, - {"ZIP_QUE1_SBUF_CTL ", 0x1208ull}, - { NULL, 0} -}; - -/* Prints registers' contents */ -static int zip_regs_show(struct seq_file *s, void *unused) -{ - u64 val = 0; - int i = 0, index = 0; - - for (index = 0; index < MAX_ZIP_DEVICES; index++) { - if (zip_dev[index]) { - seq_printf(s, "--------------------------------\n" - " ZIP Device %d Registers\n" - "--------------------------------\n", - index); - - i = 0; - - while (zipregs[i].reg_name) { - val = zip_reg_read((zip_dev[index]->reg_base + - zipregs[i].reg_offset)); - seq_printf(s, "%s: 0x%016llx\n", - zipregs[i].reg_name, val); - i++; - } - } - } - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(zip_stats); -DEFINE_SHOW_ATTRIBUTE(zip_clear); -DEFINE_SHOW_ATTRIBUTE(zip_regs); - -/* Root directory for thunderx_zip debugfs entry */ -static struct dentry *zip_debugfs_root; - -static void zip_debugfs_init(void) -{ - if (!debugfs_initialized()) - return; - - zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL); - - /* Creating files for entries inside thunderx_zip directory */ - debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL, - &zip_stats_fops); - - debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL, - &zip_clear_fops); - - debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL, - &zip_regs_fops); - -} - -static void zip_debugfs_exit(void) -{ - debugfs_remove_recursive(zip_debugfs_root); -} - -#else -static void __init zip_debugfs_init(void) { } -static void __exit zip_debugfs_exit(void) { } -#endif -/* debugfs - end */ - -module_pci_driver(zip_driver); - -MODULE_AUTHOR("Cavium Inc"); -MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP 
Driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(pci, zip_id_table); diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h deleted file mode 100644 index e1e4fa92ce80..000000000000 --- a/drivers/crypto/cavium/zip/zip_main.h +++ /dev/null @@ -1,120 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. 
- ***********************license end**************************************/ - -#ifndef __ZIP_MAIN_H__ -#define __ZIP_MAIN_H__ - -#include "zip_device.h" -#include "zip_regs.h" - -/* PCI device IDs */ -#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A - -/* ZIP device BARs */ -#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */ - -/* Maximum available zip queues */ -#define ZIP_MAX_NUM_QUEUES 8 - -#define ZIP_128B_ALIGN 7 - -/* Command queue buffer size */ -#define ZIP_CMD_QBUF_SIZE (8064 + 8) - -struct zip_registers { - char *reg_name; - u64 reg_offset; -}; - -/* ZIP Compression - Decompression stats */ -struct zip_stats { - atomic64_t comp_req_submit; - atomic64_t comp_req_complete; - atomic64_t decomp_req_submit; - atomic64_t decomp_req_complete; - atomic64_t comp_in_bytes; - atomic64_t comp_out_bytes; - atomic64_t decomp_in_bytes; - atomic64_t decomp_out_bytes; - atomic64_t decomp_bad_reqs; -}; - -/* ZIP Instruction Queue */ -struct zip_iq { - u64 *sw_head; - u64 *sw_tail; - u64 *hw_tail; - u64 done_cnt; - u64 pend_cnt; - u64 free_flag; - - /* ZIP IQ lock */ - spinlock_t lock; -}; - -/* ZIP Device */ -struct zip_device { - u32 index; - void __iomem *reg_base; - struct pci_dev *pdev; - - /* Different ZIP Constants */ - u64 depth; - u64 onfsize; - u64 ctxsize; - - struct zip_iq iq[ZIP_MAX_NUM_QUEUES]; - struct zip_stats stats; -}; - -/* Prototypes */ -struct zip_device *zip_get_device(int node_id); -int zip_get_node_id(void); -void zip_reg_write(u64 val, u64 __iomem *addr); -u64 zip_reg_read(u64 __iomem *addr); -void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue); -u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev); - -#endif /* ZIP_MAIN_H */ diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c deleted file mode 100644 index b3e0843a9169..000000000000 --- a/drivers/crypto/cavium/zip/zip_mem.c +++ /dev/null @@ -1,114 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include <linux/types.h> -#include <linux/vmalloc.h> - -#include "common.h" - -/** - * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue - * @zip: Pointer to zip device structure - * @q: Queue number to allocate bufffer to - * Return: 0 if successful, -ENOMEM otherwise - */ -int zip_cmd_qbuf_alloc(struct zip_device *zip, int q) -{ - zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA), - get_order(ZIP_CMD_QBUF_SIZE)); - - if (!zip->iq[q].sw_head) - return -ENOMEM; - - memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE); - - zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head); - return 0; -} - -/** - * zip_cmd_qbuf_free - Frees the cmd Queue buffer - * @zip: Pointer to zip device structure - * @q: Queue number to free buffer of - */ -void zip_cmd_qbuf_free(struct zip_device *zip, int q) -{ - zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail); - - free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE)); -} - -/** - * zip_data_buf_alloc - Allocates memory for a data bufffer - * @size: Size of the buffer to allocate - * Returns: Pointer to the buffer allocated - */ -u8 *zip_data_buf_alloc(u64 size) -{ - u8 *ptr; - - ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA), - get_order(size)); - - if (!ptr) - return NULL; - - memset(ptr, 0, size); - - zip_dbg("Data buffer allocation success\n"); - return ptr; -} - -/** - * zip_data_buf_free - Frees the memory of a data buffer - * @ptr: Pointer to the buffer - * @size: Buffer size - */ -void zip_data_buf_free(u8 *ptr, u64 size) -{ - zip_dbg("Freeing data buffer 0x%lx\n", ptr); - - free_pages((u64)ptr, get_order(size)); -} diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h deleted file mode 100644 index f8f2f08c4a5c..000000000000 --- a/drivers/crypto/cavium/zip/zip_mem.h +++ /dev/null @@ -1,78 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_MEM_H__ -#define __ZIP_MEM_H__ - -/** - * zip_cmd_qbuf_free - Frees the cmd Queue buffer - * @zip: Pointer to zip device structure - * @q: Queue nmber to free buffer of - */ -void zip_cmd_qbuf_free(struct zip_device *zip, int q); - -/** - * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue - * @zip: Pointer to zip device structure - * @q: Queue number to allocate bufffer to - * Return: 0 if successful, 1 otherwise - */ -int zip_cmd_qbuf_alloc(struct zip_device *zip, int q); - -/** - * zip_data_buf_alloc - Allocates memory for a data bufffer - * @size: Size of the buffer to allocate - * Returns: Pointer to the buffer allocated - */ -u8 *zip_data_buf_alloc(u64 size); - -/** - * zip_data_buf_free - Frees the memory of a data buffer - * @ptr: Pointer to the buffer - * @size: Buffer size - */ -void zip_data_buf_free(u8 *ptr, u64 size); - -#endif diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h deleted file mode 100644 index 874e0236c87e..000000000000 --- a/drivers/crypto/cavium/zip/zip_regs.h +++ /dev/null @@ -1,1347 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_REGS_H__ -#define __ZIP_REGS_H__ - -/* - * Configuration and status register (CSR) address and type definitions for - * Cavium ZIP. - */ - -#include <linux/kern_levels.h> - -/* ZIP invocation result completion status codes */ -#define ZIP_CMD_NOTDONE 0x0 - -/* Successful completion. */ -#define ZIP_CMD_SUCCESS 0x1 - -/* Output truncated */ -#define ZIP_CMD_DTRUNC 0x2 - -/* Dynamic Stop */ -#define ZIP_CMD_DYNAMIC_STOP 0x3 - -/* Uncompress ran out of input data when IWORD0[EF] was set */ -#define ZIP_CMD_ITRUNC 0x4 - -/* Uncompress found the reserved block type 3 */ -#define ZIP_CMD_RBLOCK 0x5 - -/* - * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input. - */ -#define ZIP_CMD_NLEN 0x6 - -/* Uncompress found a bad code in the main Huffman codes. */ -#define ZIP_CMD_BADCODE 0x7 - -/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */ -#define ZIP_CMD_BADCODE2 0x8 - -/* Compress found a zero-length input. */ -#define ZIP_CMD_ZERO_LEN 0x9 - -/* The compress or decompress encountered an internal parity error. */ -#define ZIP_CMD_PARITY 0xA - -/* - * Uncompress found a string identifier that precedes the uncompressed data and - * decompression history. - */ -#define ZIP_CMD_FATAL 0xB - -/** - * enum zip_int_vec_e - ZIP MSI-X Vector Enumeration, enumerates the MSI-X - * interrupt vectors. 
- */ -enum zip_int_vec_e { - ZIP_INT_VEC_E_ECCE = 0x10, - ZIP_INT_VEC_E_FIFE = 0x11, - ZIP_INT_VEC_E_QUE0_DONE = 0x0, - ZIP_INT_VEC_E_QUE0_ERR = 0x8, - ZIP_INT_VEC_E_QUE1_DONE = 0x1, - ZIP_INT_VEC_E_QUE1_ERR = 0x9, - ZIP_INT_VEC_E_QUE2_DONE = 0x2, - ZIP_INT_VEC_E_QUE2_ERR = 0xa, - ZIP_INT_VEC_E_QUE3_DONE = 0x3, - ZIP_INT_VEC_E_QUE3_ERR = 0xb, - ZIP_INT_VEC_E_QUE4_DONE = 0x4, - ZIP_INT_VEC_E_QUE4_ERR = 0xc, - ZIP_INT_VEC_E_QUE5_DONE = 0x5, - ZIP_INT_VEC_E_QUE5_ERR = 0xd, - ZIP_INT_VEC_E_QUE6_DONE = 0x6, - ZIP_INT_VEC_E_QUE6_ERR = 0xe, - ZIP_INT_VEC_E_QUE7_DONE = 0x7, - ZIP_INT_VEC_E_QUE7_ERR = 0xf, - ZIP_INT_VEC_E_ENUM_LAST = 0x12, -}; - -/** - * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR. - * - * It is the generic format of pointers in ZIP_INST_S. - */ -union zip_zptr_addr_s { - u64 u_reg64; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 addr : 49; - u64 reserved_49_63 : 15; -#endif - } s; - -}; - -/** - * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL. - * - * It is the generic format of pointers in ZIP_INST_S. - */ -union zip_zptr_ctl_s { - u64 u_reg64; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_112_127 : 16; - u64 length : 16; - u64 reserved_67_95 : 29; - u64 fw : 1; - u64 nc : 1; - u64 data_be : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 data_be : 1; - u64 nc : 1; - u64 fw : 1; - u64 reserved_67_95 : 29; - u64 length : 16; - u64 reserved_112_127 : 16; -#endif - } s; -}; - -/** - * union zip_inst_s - ZIP Instruction Structure. - * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within - * the structure). - */ -union zip_inst_s { - u64 u_reg64[16]; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 doneint : 1; - u64 reserved_56_62 : 7; - u64 totaloutputlength : 24; - u64 reserved_27_31 : 5; - u64 exn : 3; - u64 reserved_23_23 : 1; - u64 exbits : 7; - u64 reserved_12_15 : 4; - u64 sf : 1; - u64 ss : 2; - u64 cc : 2; - u64 ef : 1; - u64 bf : 1; - u64 ce : 1; - u64 reserved_3_3 : 1; - u64 ds : 1; - u64 dg : 1; - u64 hg : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 hg : 1; - u64 dg : 1; - u64 ds : 1; - u64 reserved_3_3 : 1; - u64 ce : 1; - u64 bf : 1; - u64 ef : 1; - u64 cc : 2; - u64 ss : 2; - u64 sf : 1; - u64 reserved_12_15 : 4; - u64 exbits : 7; - u64 reserved_23_23 : 1; - u64 exn : 3; - u64 reserved_27_31 : 5; - u64 totaloutputlength : 24; - u64 reserved_56_62 : 7; - u64 doneint : 1; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 historylength : 16; - u64 reserved_96_111 : 16; - u64 adlercrc32 : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 adlercrc32 : 32; - u64 reserved_96_111 : 16; - u64 historylength : 16; -#endif - union zip_zptr_addr_s ctx_ptr_addr; - union zip_zptr_ctl_s ctx_ptr_ctl; - union zip_zptr_addr_s his_ptr_addr; - union zip_zptr_ctl_s his_ptr_ctl; - union zip_zptr_addr_s inp_ptr_addr; - union zip_zptr_ctl_s inp_ptr_ctl; - union zip_zptr_addr_s out_ptr_addr; - union zip_zptr_ctl_s out_ptr_ctl; - union zip_zptr_addr_s res_ptr_addr; - union zip_zptr_ctl_s res_ptr_ctl; -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_817_831 : 15; - u64 wq_ptr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 wq_ptr : 49; - u64 reserved_817_831 : 15; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_882_895 : 14; - u64 tt : 2; - u64 reserved_874_879 : 6; - u64 grp : 10; - u64 tag : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 tag : 32; - u64 grp : 10; - u64 reserved_874_879 : 6; - u64 
tt : 2; - u64 reserved_882_895 : 14; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_896_959 : 64; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reserved_896_959 : 64; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_960_1023 : 64; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reserved_960_1023 : 64; -#endif - } s; -}; - -/** - * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) - * Structure - * - * ZIP_NPTR structure is used to chain all the zip instruction buffers - * together. ZIP instruction buffers are managed (allocated and released) by - * the software. - */ -union zip_nptr_s { - u64 u_reg64; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 addr : 49; - u64 reserved_49_63 : 15; -#endif - } s; -}; - -/** - * union zip_zptr_s - ZIP Generic Pointer Structure. - * - * It is the generic format of pointers in ZIP_INST_S. - */ -union zip_zptr_s { - u64 u_reg64[2]; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 addr : 49; - u64 reserved_49_63 : 15; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_112_127 : 16; - u64 length : 16; - u64 reserved_67_95 : 29; - u64 fw : 1; - u64 nc : 1; - u64 data_be : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 data_be : 1; - u64 nc : 1; - u64 fw : 1; - u64 reserved_67_95 : 29; - u64 length : 16; - u64 reserved_112_127 : 16; -#endif - } s; -}; - -/** - * union zip_zres_s - ZIP Result Structure - * - * The ZIP coprocessor writes the result structure after it completes the - * invocation. The result structure is exactly 24 bytes, and each invocation of - * the ZIP coprocessor produces exactly one result structure. - */ -union zip_zres_s { - u64 u_reg64[3]; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 crc32 : 32; - u64 adler32 : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 adler32 : 32; - u64 crc32 : 32; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 totalbyteswritten : 32; - u64 totalbytesread : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 totalbytesread : 32; - u64 totalbyteswritten : 32; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 totalbitsprocessed : 32; - u64 doneint : 1; - u64 reserved_155_158 : 4; - u64 exn : 3; - u64 reserved_151_151 : 1; - u64 exbits : 7; - u64 reserved_137_143 : 7; - u64 ef : 1; - - volatile u64 compcode : 8; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - - volatile u64 compcode : 8; - u64 ef : 1; - u64 reserved_137_143 : 7; - u64 exbits : 7; - u64 reserved_151_151 : 1; - u64 exn : 3; - u64 reserved_155_158 : 4; - u64 doneint : 1; - u64 totalbitsprocessed : 32; -#endif - } s; -}; - -/** - * union zip_cmd_ctl - Structure representing the register that controls - * clock and reset. - */ -union zip_cmd_ctl { - u64 u_reg64; - struct zip_cmd_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_2_63 : 62; - u64 forceclk : 1; - u64 reset : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reset : 1; - u64 forceclk : 1; - u64 reserved_2_63 : 62; -#endif - } s; -}; - -#define ZIP_CMD_CTL 0x0ull - -/** - * union zip_constants - Data structure representing the register that contains - * all of the current implementation-related parameters of the zip core in this - * chip. 
- */ -union zip_constants { - u64 u_reg64; - struct zip_constants_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 nexec : 8; - u64 reserved_49_55 : 7; - u64 syncflush_capable : 1; - u64 depth : 16; - u64 onfsize : 12; - u64 ctxsize : 12; - u64 reserved_1_7 : 7; - u64 disabled : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 disabled : 1; - u64 reserved_1_7 : 7; - u64 ctxsize : 12; - u64 onfsize : 12; - u64 depth : 16; - u64 syncflush_capable : 1; - u64 reserved_49_55 : 7; - u64 nexec : 8; -#endif - } s; -}; - -#define ZIP_CONSTANTS 0x00A0ull - -/** - * union zip_corex_bist_status - Represents registers which have the BIST - * status of memories in zip cores. - * - * Each bit is the BIST result of an individual memory - * (per bit, 0 = pass and 1 = fail). - */ -union zip_corex_bist_status { - u64 u_reg64; - struct zip_corex_bist_status_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_53_63 : 11; - u64 bstatus : 53; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 bstatus : 53; - u64 reserved_53_63 : 11; -#endif - } s; -}; - -static inline u64 ZIP_COREX_BIST_STATUS(u64 param1) -{ - if (param1 <= 1) - return 0x0520ull + (param1 & 1) * 0x8ull; - pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1); - return 0; -} - -/** - * union zip_ctl_bist_status - Represents register that has the BIST status of - * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data - * buffer, output data buffers). - * - * Each bit is the BIST result of an individual memory - * (per bit, 0 = pass and 1 = fail). - */ -union zip_ctl_bist_status { - u64 u_reg64; - struct zip_ctl_bist_status_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_9_63 : 55; - u64 bstatus : 9; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 bstatus : 9; - u64 reserved_9_63 : 55; -#endif - } s; -}; - -#define ZIP_CTL_BIST_STATUS 0x0510ull - -/** - * union zip_ctl_cfg - Represents the register that controls the behavior of - * the ZIP DMA engines. - * - * It is recommended to keep default values for normal operation. Changing the - * values of the fields may be useful for diagnostics. - */ -union zip_ctl_cfg { - u64 u_reg64; - struct zip_ctl_cfg_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_52_63 : 12; - u64 ildf : 4; - u64 reserved_36_47 : 12; - u64 drtf : 4; - u64 reserved_27_31 : 5; - u64 stcf : 3; - u64 reserved_19_23 : 5; - u64 ldf : 3; - u64 reserved_2_15 : 14; - u64 busy : 1; - u64 reserved_0_0 : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reserved_0_0 : 1; - u64 busy : 1; - u64 reserved_2_15 : 14; - u64 ldf : 3; - u64 reserved_19_23 : 5; - u64 stcf : 3; - u64 reserved_27_31 : 5; - u64 drtf : 4; - u64 reserved_36_47 : 12; - u64 ildf : 4; - u64 reserved_52_63 : 12; -#endif - } s; -}; - -#define ZIP_CTL_CFG 0x0560ull - -/** - * union zip_dbg_corex_inst - Represents the registers that reflect the status - * of the current instruction that the ZIP core is executing or has executed. - * - * These registers are only for debug use. 
- */ -union zip_dbg_corex_inst { - u64 u_reg64; - struct zip_dbg_corex_inst_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 busy : 1; - u64 reserved_35_62 : 28; - u64 qid : 3; - u64 iid : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 iid : 32; - u64 qid : 3; - u64 reserved_35_62 : 28; - u64 busy : 1; -#endif - } s; -}; - -static inline u64 ZIP_DBG_COREX_INST(u64 param1) -{ - if (param1 <= 1) - return 0x0640ull + (param1 & 1) * 0x8ull; - pr_err("ZIP_DBG_COREX_INST: %llu\n", param1); - return 0; -} - -/** - * union zip_dbg_corex_sta - Represents registers that reflect the status of - * the zip cores. - * - * They are for debug use only. - */ -union zip_dbg_corex_sta { - u64 u_reg64; - struct zip_dbg_corex_sta_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 busy : 1; - u64 reserved_37_62 : 26; - u64 ist : 5; - u64 nie : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 nie : 32; - u64 ist : 5; - u64 reserved_37_62 : 26; - u64 busy : 1; -#endif - } s; -}; - -static inline u64 ZIP_DBG_COREX_STA(u64 param1) -{ - if (param1 <= 1) - return 0x0680ull + (param1 & 1) * 0x8ull; - pr_err("ZIP_DBG_COREX_STA: %llu\n", param1); - return 0; -} - -/** - * union zip_dbg_quex_sta - Represets registers that reflect status of the zip - * instruction queues. - * - * They are for debug use only. - */ -union zip_dbg_quex_sta { - u64 u_reg64; - struct zip_dbg_quex_sta_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 busy : 1; - u64 reserved_56_62 : 7; - u64 rqwc : 24; - u64 nii : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 nii : 32; - u64 rqwc : 24; - u64 reserved_56_62 : 7; - u64 busy : 1; -#endif - } s; -}; - -static inline u64 ZIP_DBG_QUEX_STA(u64 param1) -{ - if (param1 <= 7) - return 0x1800ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1); - return 0; -} - -/** - * union zip_ecc_ctl - Represents the register that enables ECC for each - * individual internal memory that requires ECC. - * - * For debug purpose, it can also flip one or two bits in the ECC data. 
- */ -union zip_ecc_ctl { - u64 u_reg64; - struct zip_ecc_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_19_63 : 45; - u64 vmem_cdis : 1; - u64 vmem_fs : 2; - u64 reserved_15_15 : 1; - u64 idf1_cdis : 1; - u64 idf1_fs : 2; - u64 reserved_11_11 : 1; - u64 idf0_cdis : 1; - u64 idf0_fs : 2; - u64 reserved_7_7 : 1; - u64 gspf_cdis : 1; - u64 gspf_fs : 2; - u64 reserved_3_3 : 1; - u64 iqf_cdis : 1; - u64 iqf_fs : 2; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 iqf_fs : 2; - u64 iqf_cdis : 1; - u64 reserved_3_3 : 1; - u64 gspf_fs : 2; - u64 gspf_cdis : 1; - u64 reserved_7_7 : 1; - u64 idf0_fs : 2; - u64 idf0_cdis : 1; - u64 reserved_11_11 : 1; - u64 idf1_fs : 2; - u64 idf1_cdis : 1; - u64 reserved_15_15 : 1; - u64 vmem_fs : 2; - u64 vmem_cdis : 1; - u64 reserved_19_63 : 45; -#endif - } s; -}; - -#define ZIP_ECC_CTL 0x0568ull - -/* NCB - zip_ecce_ena_w1c */ -union zip_ecce_ena_w1c { - u64 u_reg64; - struct zip_ecce_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_ENA_W1C 0x0598ull - -/* NCB - zip_ecce_ena_w1s */ -union zip_ecce_ena_w1s { - u64 u_reg64; - struct zip_ecce_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_ENA_W1S 0x0590ull - -/** - * union zip_ecce_int - Represents the register that contains the status of the - * ECC interrupt sources. - */ -union zip_ecce_int { - u64 u_reg64; - struct zip_ecce_int_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_INT 0x0580ull - -/* NCB - zip_ecce_int_w1s */ -union zip_ecce_int_w1s { - u64 u_reg64; - struct zip_ecce_int_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_INT_W1S 0x0588ull - -/* NCB - zip_fife_ena_w1c */ -union zip_fife_ena_w1c { - u64 u_reg64; - struct zip_fife_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_ENA_W1C 0x0090ull - -/* NCB - zip_fife_ena_w1s */ -union zip_fife_ena_w1s { - u64 u_reg64; - struct zip_fife_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_ENA_W1S 0x0088ull - -/* NCB - zip_fife_int */ -union zip_fife_int { - u64 u_reg64; - struct zip_fife_int_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_INT 0x0078ull - -/* NCB - zip_fife_int_w1s */ -union 
zip_fife_int_w1s { - u64 u_reg64; - struct zip_fife_int_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_INT_W1S 0x0080ull - -/** - * union zip_msix_pbax - Represents the register that is the MSI-X PBA table - * - * The bit number is indexed by the ZIP_INT_VEC_E enumeration. - */ -union zip_msix_pbax { - u64 u_reg64; - struct zip_msix_pbax_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 pend : 64; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 pend : 64; -#endif - } s; -}; - -static inline u64 ZIP_MSIX_PBAX(u64 param1) -{ - if (param1 == 0) - return 0x0000838000FF0000ull; - pr_err("ZIP_MSIX_PBAX: %llu\n", param1); - return 0; -} - -/** - * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector - * table, indexed by the ZIP_INT_VEC_E enumeration. - */ -union zip_msix_vecx_addr { - u64 u_reg64; - struct zip_msix_vecx_addr_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 47; - u64 reserved_1_1 : 1; - u64 secvec : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 secvec : 1; - u64 reserved_1_1 : 1; - u64 addr : 47; - u64 reserved_49_63 : 15; -#endif - } s; -}; - -static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1) -{ - if (param1 <= 17) - return 0x0000838000F00000ull + (param1 & 31) * 0x10ull; - pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1); - return 0; -} - -/** - * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector - * table, indexed by the ZIP_INT_VEC_E enumeration. - */ -union zip_msix_vecx_ctl { - u64 u_reg64; - struct zip_msix_vecx_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_33_63 : 31; - u64 mask : 1; - u64 reserved_20_31 : 12; - u64 data : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 data : 20; - u64 reserved_20_31 : 12; - u64 mask : 1; - u64 reserved_33_63 : 31; -#endif - } s; -}; - -static inline u64 ZIP_MSIX_VECX_CTL(u64 param1) -{ - if (param1 <= 17) - return 0x0000838000F00008ull + (param1 & 31) * 0x10ull; - pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done - Represents the registers that contain the per-queue - * instruction done count. - */ -union zip_quex_done { - u64 u_reg64; - struct zip_quex_done_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63 : 44; - u64 done : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done : 20; - u64 reserved_20_63 : 44; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE(u64 param1) -{ - if (param1 <= 7) - return 0x2000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_ack - Represents the registers on write to which will - * decrement the per-queue instructiona done count. - */ -union zip_quex_done_ack { - u64 u_reg64; - struct zip_quex_done_ack_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63 : 44; - u64 done_ack : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done_ack : 20; - u64 reserved_20_63 : 44; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_ACK(u64 param1) -{ - if (param1 <= 7) - return 0x2200ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_ena_w1c - Represents the register which when written - * 1 to will disable the DONEINT interrupt for the queue. 
- */ -union zip_quex_done_ena_w1c { - u64 u_reg64; - struct zip_quex_done_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_1_63 : 63; - u64 done_ena : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done_ena : 1; - u64 reserved_1_63 : 63; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1) -{ - if (param1 <= 7) - return 0x2600ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_ena_w1s - Represents the register that when written 1 to - * will enable the DONEINT interrupt for the queue. - */ -union zip_quex_done_ena_w1s { - u64 u_reg64; - struct zip_quex_done_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_1_63 : 63; - u64 done_ena : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done_ena : 1; - u64 reserved_1_63 : 63; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1) -{ - if (param1 <= 7) - return 0x2400ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_wait - Represents the register that specifies the per - * queue interrupt coalescing settings. - */ -union zip_quex_done_wait { - u64 u_reg64; - struct zip_quex_done_wait_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_48_63 : 16; - u64 time_wait : 16; - u64 reserved_20_31 : 12; - u64 num_wait : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 num_wait : 20; - u64 reserved_20_31 : 12; - u64 time_wait : 16; - u64 reserved_48_63 : 16; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1) -{ - if (param1 <= 7) - return 0x2800ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_doorbell - Represents doorbell registers for the ZIP - * instruction queues. - */ -union zip_quex_doorbell { - u64 u_reg64; - struct zip_quex_doorbell_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63 : 44; - u64 dbell_cnt : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dbell_cnt : 20; - u64 reserved_20_63 : 44; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DOORBELL(u64 param1) -{ - if (param1 <= 7) - return 0x4000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1); - return 0; -} - -union zip_quex_err_ena_w1c { - u64 u_reg64; - struct zip_quex_err_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1) -{ - if (param1 <= 7) - return 0x3600ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1); - return 0; -} - -union zip_quex_err_ena_w1s { - u64 u_reg64; - struct zip_quex_err_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1) -{ - if (param1 <= 7) - return 0x3400ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_err_int - Represents registers that contain the per-queue - * error interrupts. 
- */ -union zip_quex_err_int { - u64 u_reg64; - struct zip_quex_err_int_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_INT(u64 param1) -{ - if (param1 <= 7) - return 0x3000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1); - return 0; -} - -/* NCB - zip_que#_err_int_w1s */ -union zip_quex_err_int_w1s { - u64 u_reg64; - struct zip_quex_err_int_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1) -{ - if (param1 <= 7) - return 0x3200ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_gcfg - Represents the registers that reflect status of the - * zip instruction queues,debug use only. - */ -union zip_quex_gcfg { - u64 u_reg64; - struct zip_quex_gcfg_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_4_63 : 60; - u64 iqb_ldwb : 1; - u64 cbw_sty : 1; - u64 l2ld_cmd : 2; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 l2ld_cmd : 2; - u64 cbw_sty : 1; - u64 iqb_ldwb : 1; - u64 reserved_4_63 : 60; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_GCFG(u64 param1) -{ - if (param1 <= 7) - return 0x1A00ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_GCFG: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_map - Represents the registers that control how each - * instruction queue maps to zip cores. - */ -union zip_quex_map { - u64 u_reg64; - struct zip_quex_map_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_2_63 : 62; - u64 zce : 2; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 zce : 2; - u64 reserved_2_63 : 62; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_MAP(u64 param1) -{ - if (param1 <= 7) - return 0x1400ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_MAP: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_sbuf_addr - Represents the registers that set the buffer - * parameters for the instruction queues. - * - * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite - * this register to effectively reset the command buffer state machine. - * These registers must be programmed after SW programs the corresponding - * ZIP_QUE(0..7)_SBUF_CTL. - */ -union zip_quex_sbuf_addr { - u64 u_reg64; - struct zip_quex_sbuf_addr_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 ptr : 42; - u64 off : 7; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 off : 7; - u64 ptr : 42; - u64 reserved_49_63 : 15; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1) -{ - if (param1 <= 7) - return 0x1000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_sbuf_ctl - Represents the registers that set the buffer - * parameters for the instruction queues. - * - * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite - * this register to effectively reset the command buffer state machine. 
- * These registers must be programmed before SW programs the corresponding - * ZIP_QUE(0..7)_SBUF_ADDR. - */ -union zip_quex_sbuf_ctl { - u64 u_reg64; - struct zip_quex_sbuf_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_45_63 : 19; - u64 size : 13; - u64 inst_be : 1; - u64 reserved_24_30 : 7; - u64 stream_id : 8; - u64 reserved_12_15 : 4; - u64 aura : 12; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 aura : 12; - u64 reserved_12_15 : 4; - u64 stream_id : 8; - u64 reserved_24_30 : 7; - u64 inst_be : 1; - u64 size : 13; - u64 reserved_45_63 : 19; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1) -{ - if (param1 <= 7) - return 0x1200ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1); - return 0; -} - -/** - * union zip_que_ena - Represents queue enable register - * - * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue. - */ -union zip_que_ena { - u64 u_reg64; - struct zip_que_ena_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_8_63 : 56; - u64 ena : 8; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 ena : 8; - u64 reserved_8_63 : 56; -#endif - } s; -}; - -#define ZIP_QUE_ENA 0x0500ull - -/** - * union zip_que_pri - Represents the register that defines the priority - * between instruction queues. - */ -union zip_que_pri { - u64 u_reg64; - struct zip_que_pri_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_8_63 : 56; - u64 pri : 8; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 pri : 8; - u64 reserved_8_63 : 56; -#endif - } s; -}; - -#define ZIP_QUE_PRI 0x0508ull - -/** - * union zip_throttle - Represents the register that controls the maximum - * number of in-flight X2I data fetch transactions. - * - * Writing 0 to this register causes the ZIP module to temporarily suspend NCB - * accesses; it is not recommended for normal operation, but may be useful for - * diagnostics. 
- */ -union zip_throttle { - u64 u_reg64; - struct zip_throttle_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_6_63 : 58; - u64 ld_infl : 6; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 ld_infl : 6; - u64 reserved_6_63 : 58; -#endif - } s; -}; - -#define ZIP_THROTTLE 0x0010ull - -#endif /* _CSRS_ZIP__ */ diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index d11daaf47f06..685d42ec7ade 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -7,15 +7,16 @@ * Author: Tom Lendacky <thomas.lendacky@amd.com> */ -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/ctr.h> -#include <crypto/scatterwalk.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/string.h> #include "ccp-crypto.h" diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index afae30adb703..91b1189c47de 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -7,14 +7,15 @@ * Author: Gary R Hook <ghook@amd.com> */ +#include <crypto/internal/des.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/list.h> #include <linux/module.h> -#include <linux/sched.h> -#include <linux/delay.h> #include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/des.h> +#include <linux/slab.h> +#include <linux/string.h> #include "ccp-crypto.h" diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index ecd58b38c46e..bc90aba5162a 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -7,14 +7,17 @@ * Author: Tom Lendacky <thomas.lendacky@amd.com> */ -#include <linux/module.h> -#include <linux/moduleparam.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <linux/ccp.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/list.h> -#include <linux/ccp.h> +#include <linux/module.h> #include <linux/scatterlist.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/akcipher.h> +#include <linux/slab.h> +#include <linux/spinlock.h> #include "ccp-crypto.h" diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index cb8e99936abb..109b5aef4034 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -8,13 +8,14 @@ * Author: Gary R Hook <gary.hook@amd.com> */ -#include <linux/dma-mapping.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <crypto/scatterwalk.h> #include <crypto/des.h> +#include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/ccp.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> #include "ccp-dev.h" diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 2e87ca0e292a..3451bada884e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -33,6 +33,7 @@ #include <asm/cacheflush.h> #include <asm/e820/types.h> #include <asm/sev.h> +#include <asm/msr.h> 
#include "psp-dev.h" #include "sev-dev.h" @@ -109,6 +110,15 @@ static void *sev_init_ex_buffer; */ static struct sev_data_range_list *snp_range_list; +static void __sev_firmware_shutdown(struct sev_device *sev, bool panic); + +static int snp_shutdown_on_panic(struct notifier_block *nb, + unsigned long reason, void *arg); + +static struct notifier_block snp_panic_notifier = { + .notifier_call = snp_shutdown_on_panic, +}; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -1060,7 +1070,7 @@ static inline int __sev_do_init_locked(int *psp_ret) static void snp_set_hsave_pa(void *arg) { - wrmsrl(MSR_VM_HSAVE_PA, 0); + wrmsrq(MSR_VM_HSAVE_PA, 0); } static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg) @@ -1112,7 +1122,7 @@ static int __sev_snp_init_locked(int *error) if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) { dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n", SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR); - return 0; + return -EOPNOTSUPP; } /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */ @@ -1176,21 +1186,34 @@ static int __sev_snp_init_locked(int *error) wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(cmd, arg, error); - if (rc) + if (rc) { + dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n", + cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT", + rc, *error); return rc; + } /* Prepare for first SNP guest launch after INIT. */ wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error); - if (rc) + if (rc) { + dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n", + rc, *error); return rc; + } sev->snp_initialized = true; dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); + dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); + + atomic_notifier_chain_register(&panic_notifier_list, + &snp_panic_notifier); + sev_es_tmr_size = SNP_TMR_SIZE; - return rc; + return 0; } static void __sev_platform_init_handle_tmr(struct sev_device *sev) @@ -1287,16 +1310,22 @@ static int __sev_platform_init_locked(int *error) if (error) *error = psp_ret; - if (rc) + if (rc) { + dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n", + sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc); return rc; + } sev->state = SEV_STATE_INIT; /* Prepare for first SEV guest launch after INIT */ wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); - if (rc) + if (rc) { + dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n", + *error, rc); return rc; + } dev_dbg(sev->dev, "SEV firmware initialized\n"); @@ -1319,19 +1348,9 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args) if (sev->state == SEV_STATE_INIT) return 0; - /* - * Legacy guests cannot be running while SNP_INIT(_EX) is executing, - * so perform SEV-SNP initialization at probe time. - */ rc = __sev_snp_init_locked(&args->error); - if (rc && rc != -ENODEV) { - /* - * Don't abort the probe if SNP INIT failed, - * continue to initialize the legacy SEV firmware. - */ - dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n", - rc, args->error); - } + if (rc && rc != -ENODEV) + return rc; /* Defer legacy SEV/SEV-ES support if allowed by caller/module. 
*/ if (args->probe && !psp_init_on_probe) @@ -1367,8 +1386,11 @@ static int __sev_platform_shutdown_locked(int *error) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); - if (ret) + if (ret) { + dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n", + *error, ret); return ret; + } sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); @@ -1389,6 +1411,37 @@ static int sev_get_platform_state(int *state, int *error) return rc; } +static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) +{ + struct sev_platform_init_args init_args = {0}; + int rc; + + rc = _sev_platform_init_locked(&init_args); + if (rc) { + argp->error = SEV_RET_INVALID_PLATFORM_STATE; + return rc; + } + + *shutdown_required = true; + + return 0; +} + +static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) +{ + int error, rc; + + rc = __sev_snp_init_locked(&error); + if (rc) { + argp->error = SEV_RET_INVALID_PLATFORM_STATE; + return rc; + } + + *shutdown_required = true; + + return 0; +} + static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) { int state, rc; @@ -1441,24 +1494,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; + bool shutdown_required = false; int rc; if (!writable) return -EPERM; if (sev->state == SEV_STATE_UNINIT) { - rc = __sev_platform_init_locked(&argp->error); + rc = sev_move_to_init_state(argp, &shutdown_required); if (rc) return rc; } - return __sev_do_cmd_locked(cmd, NULL, &argp->error); + rc = __sev_do_cmd_locked(cmd, NULL, &argp->error); + + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + + return rc; } static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_csr input; + bool shutdown_required = false; struct sev_data_pek_csr data; void __user *input_address; void *blob = NULL; @@ -1490,7 +1550,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) cmd: if (sev->state == SEV_STATE_UNINIT) { - ret = __sev_platform_init_locked(&argp->error); + ret = sev_move_to_init_state(argp, &shutdown_required); if (ret) goto e_free_blob; } @@ -1511,6 +1571,9 @@ cmd: } e_free_blob: + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + kfree(blob); return ret; } @@ -1682,9 +1745,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic) ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); /* SHUTDOWN may require DF_FLUSH */ if (*error == SEV_RET_DFFLUSH_REQUIRED) { - ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL); + int dfflush_error = SEV_RET_NO_FW_CALL; + + ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error); if (ret) { - dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n"); + dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n", + ret, dfflush_error); return ret; } /* reissue the shutdown command */ @@ -1692,7 +1758,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic) error); } if (ret) { - dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n"); + dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n", + ret, *error); return ret; } @@ -1718,6 +1785,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic) sev->snp_initialized = false; dev_dbg(sev->dev, "SEV-SNP firmware 
shutdown\n"); + atomic_notifier_chain_unregister(&panic_notifier_list, + &snp_panic_notifier); + + /* Reset TMR size back to default */ + sev_es_tmr_size = SEV_TMR_SIZE; + return ret; } @@ -1726,6 +1799,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_cert_import input; struct sev_data_pek_cert_import data; + bool shutdown_required = false; void *pek_blob, *oca_blob; int ret; @@ -1756,7 +1830,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) /* If platform is not in INIT state then transition it to INIT */ if (sev->state != SEV_STATE_INIT) { - ret = __sev_platform_init_locked(&argp->error); + ret = sev_move_to_init_state(argp, &shutdown_required); if (ret) goto e_free_oca; } @@ -1764,6 +1838,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); e_free_oca: + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + kfree(oca_blob); e_free_pek: kfree(pek_blob); @@ -1880,32 +1957,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) struct sev_data_pdh_cert_export data; void __user *input_cert_chain_address; void __user *input_pdh_cert_address; + bool shutdown_required = false; int ret; - /* If platform is not in INIT state then transition it to INIT. */ - if (sev->state != SEV_STATE_INIT) { - if (!writable) - return -EPERM; - - ret = __sev_platform_init_locked(&argp->error); - if (ret) - return ret; - } - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; memset(&data, 0, sizeof(data)); + input_pdh_cert_address = (void __user *)input.pdh_cert_address; + input_cert_chain_address = (void __user *)input.cert_chain_address; + /* Userspace wants to query the certificate length. */ if (!input.pdh_cert_address || !input.pdh_cert_len || !input.cert_chain_address) goto cmd; - input_pdh_cert_address = (void __user *)input.pdh_cert_address; - input_cert_chain_address = (void __user *)input.cert_chain_address; - /* Allocate a physically contiguous buffer to store the PDH blob. */ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; @@ -1931,6 +1999,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) data.cert_chain_len = input.cert_chain_len; cmd: + /* If platform is not in INIT state then transition it to INIT. */ + if (sev->state != SEV_STATE_INIT) { + if (!writable) { + ret = -EPERM; + goto e_free_cert; + } + ret = sev_move_to_init_state(argp, &shutdown_required); + if (ret) + goto e_free_cert; + } + ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); /* If we query the length, FW responded with expected data. 
*/ @@ -1957,6 +2036,9 @@ cmd: } e_free_cert: + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + kfree(cert_blob); e_free_pdh: kfree(pdh_blob); @@ -1966,12 +2048,13 @@ e_free_pdh: static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) { struct sev_device *sev = psp_master->sev_data; + bool shutdown_required = false; struct sev_data_snp_addr buf; struct page *status_page; + int ret, error; void *data; - int ret; - if (!sev->snp_initialized || !argp->data) + if (!argp->data) return -EINVAL; status_page = alloc_page(GFP_KERNEL_ACCOUNT); @@ -1980,6 +2063,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) data = page_address(status_page); + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + goto cleanup; + } + /* * Firmware expects status page to be in firmware-owned state, otherwise * it will report firmware error code INVALID_PAGE_STATE (0x1A). @@ -2008,6 +2097,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) ret = -EFAULT; cleanup: + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + __free_pages(status_page, 0); return ret; } @@ -2016,21 +2108,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp) { struct sev_device *sev = psp_master->sev_data; struct sev_data_snp_commit buf; + bool shutdown_required = false; + int ret, error; - if (!sev->snp_initialized) - return -EINVAL; + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + return ret; + } buf.len = sizeof(buf); - return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); + ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); + + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + + return ret; } static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_snp_config config; + bool shutdown_required = false; + int ret, error; - if (!sev->snp_initialized || !argp->data) + if (!argp->data) return -EINVAL; if (!writable) @@ -2039,17 +2143,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable if (copy_from_user(&config, (void __user *)argp->data, sizeof(config))) return -EFAULT; - return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + return ret; + } + + ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); + + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + + return ret; } static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_snp_vlek_load input; + bool shutdown_required = false; + int ret, error; void *blob; - int ret; - if (!sev->snp_initialized || !argp->data) + if (!argp->data) return -EINVAL; if (!writable) @@ -2068,8 +2184,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) input.vlek_wrapped_address = __psp_pa(blob); + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + goto cleanup; + } + ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error); + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + +cleanup: kfree(blob); return ret; @@ -2339,6 +2465,15 @@ static void sev_firmware_shutdown(struct 
sev_device *sev) mutex_unlock(&sev_cmd_mutex); } +void sev_platform_shutdown(void) +{ + if (!psp_master || !psp_master->sev_data) + return; + + sev_firmware_shutdown(psp_master->sev_data); +} +EXPORT_SYMBOL_GPL(sev_platform_shutdown); + void sev_dev_destroy(struct psp_device *psp) { struct sev_device *sev = psp->sev_data; @@ -2373,10 +2508,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb, return NOTIFY_DONE; } -static struct notifier_block snp_panic_notifier = { - .notifier_call = snp_shutdown_on_panic, -}; - int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { @@ -2390,9 +2521,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; - struct sev_platform_init_args args = {0}; u8 api_major, api_minor, build; - int rc; if (!sev) return; @@ -2415,18 +2544,6 @@ void sev_pci_init(void) api_major, api_minor, build, sev->api_major, sev->api_minor, sev->build); - /* Initialize the platform */ - args.probe = true; - rc = sev_platform_init(&args); - if (rc) - dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n", - args.error, rc); - - dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ? - "-SNP" : "", sev->api_major, sev->api_minor, sev->build); - - atomic_notifier_chain_register(&panic_notifier_list, - &snp_panic_notifier); return; err: @@ -2443,7 +2560,4 @@ void sev_pci_exit(void) return; sev_firmware_shutdown(sev); - - atomic_notifier_chain_unregister(&panic_notifier_list, - &snp_panic_notifier); } diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 2ebc878da160..e1be2072d680 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -375,6 +375,7 @@ static const struct tee_vdata teev1 = { static const struct tee_vdata teev2 = { .ring_wptr_reg = 0x10950, /* C2PMSG_20 */ .ring_rptr_reg = 0x10954, /* C2PMSG_21 */ + .info_reg = 0x109e8, /* C2PMSG_58 */ }; static const struct platform_access_vdata pa_v1 = { @@ -440,6 +441,7 @@ static const struct psp_vdata pspv5 = { .cmdresp_reg = 0x10944, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */ + .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10510, /* P2CMSG_INTEN */ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ @@ -535,6 +537,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d3f5d108b898..7c41f9593d03 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -862,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d return -EINVAL; } - algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); + algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL); if (!algs) return -ENOMEM; @@ -5224,7 +5224,7 @@ static int qm_pre_store_caps(struct hisi_qm *qm) size_t i, size; size = ARRAY_SIZE(qm_cap_query_info); - qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); + qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL); if (!qm_cap) return -ENOMEM; diff --git 
a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 1dc2378aa88b..e050f5ff5efb 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -491,8 +491,9 @@ static int img_hash_init(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -555,10 +556,10 @@ static int img_hash_update(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -570,9 +571,10 @@ static int img_hash_final(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -584,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); + return crypto_ahash_finup(&rctx->fallback_req); } @@ -600,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -613,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c index df1b05ac5a57..ac13d90a2b7c 100644 --- 
a/drivers/crypto/inside-secure/eip93/eip93-hash.c +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c @@ -97,12 +97,20 @@ void eip93_hash_handle_result(struct crypto_async_request *async, int err) static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest) { - u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; - u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, - SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 }; - u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; - u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 }; + static const u32 sha256_init[] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 + }; + static const u32 sha224_init[] = { + SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 + }; + static const u32 sha1_init[] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 + }; + static const u32 md5_init[] = { + MD5_H0, MD5_H1, MD5_H2, MD5_H3 + }; /* Init HASH constant */ switch (hash) { diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index f44c08f5f5ec..d2b632193beb 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -2043,7 +2043,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = 1, + .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), .cra_init = safexcel_ahash_cra_init, .cra_exit = safexcel_ahash_cra_exit, diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 09d9589f2d68..23f585219fb4 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -725,7 +725,7 @@ static int alloc_wq_table(int max_wqs) for (cpu = 0; cpu < nr_cpus; cpu++) { entry = per_cpu_ptr(wq_table, cpu); - entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL); + entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL); if (!entry->wqs) { free_wq_table(); return -ENOMEM; @@ -894,7 +894,7 @@ out: static void rebalance_wq_table(void) { const struct cpumask *node_cpus; - int node, cpu, iaa = -1; + int node_cpu, node, cpu, iaa = 0; if (nr_iaa == 0) return; @@ -905,36 +905,29 @@ static void rebalance_wq_table(void) clear_wq_table(); if (nr_iaa == 1) { - for (cpu = 0; cpu < nr_cpus; cpu++) { - if (WARN_ON(wq_table_add_wqs(0, cpu))) { - pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu); - return; - } + for_each_possible_cpu(cpu) { + if (WARN_ON(wq_table_add_wqs(0, cpu))) + goto err; } return; } for_each_node_with_cpus(node) { + cpu = 0; node_cpus = cpumask_of_node(node); - for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) { - int node_cpu = cpumask_nth(cpu, node_cpus); - - if (WARN_ON(node_cpu >= nr_cpu_ids)) { - pr_debug("node_cpu %d doesn't exist!\n", node_cpu); - return; - } - - if ((cpu % cpus_per_iaa) == 0) - iaa++; - - if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) { - pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); - return; - } + for_each_cpu(node_cpu, node_cpus) { + iaa = cpu / cpus_per_iaa; + if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) + goto err; + cpu++; } } + + return; +err: + pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); } static inline int check_completion(struct device *dev, @@ -999,12 +992,9 
@@ out: static int deflate_generic_decompress(struct acomp_req *req) { - ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); + ACOMP_FBREQ_ON_STACK(fbreq, req); int ret; - acomp_request_set_callback(fbreq, 0, NULL, NULL); - acomp_request_set_params(fbreq, req->src, req->dst, req->slen, - req->dlen); ret = crypto_acomp_decompress(fbreq); req->dlen = fbreq->dlen; @@ -1020,8 +1010,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - u32 compression_crc); + dma_addr_t dst_addr, unsigned int *dlen); static void iaa_desc_complete(struct idxd_desc *idxd_desc, enum idxd_complete_type comp_type, @@ -1087,10 +1076,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, } if (ctx->compress && compression_ctx->verify_compress) { + u32 *compression_crc = acomp_request_ctx(ctx->req); dma_addr_t src_addr, dst_addr; - u32 compression_crc; - compression_crc = idxd_desc->iax_completion->crc; + *compression_crc = idxd_desc->iax_completion->crc; ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr); if (ret) { @@ -1100,8 +1089,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, } ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr, - ctx->req->slen, dst_addr, &ctx->req->dlen, - compression_crc); + ctx->req->slen, dst_addr, &ctx->req->dlen); if (ret) { dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret); err = -EIO; @@ -1130,11 +1118,11 @@ out: static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc) + dma_addr_t dst_addr, unsigned int *dlen) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *compression_crc = acomp_request_ctx(req); struct iaa_device *iaa_device; struct idxd_desc *idxd_desc; struct iax_hw_desc *desc; @@ -1187,8 +1175,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: compression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1282,11 +1269,11 @@ out: static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - u32 compression_crc) + dma_addr_t dst_addr, unsigned int *dlen) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *compression_crc = acomp_request_ctx(req); struct iaa_device *iaa_device; struct idxd_desc *idxd_desc; struct iax_hw_desc *desc; @@ -1346,10 +1333,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, goto err; } - if (compression_crc != idxd_desc->iax_completion->crc) { + if (*compression_crc != idxd_desc->iax_completion->crc) { ret = -EINVAL; dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:" - " comp=0x%x, decomp=0x%x\n", compression_crc, + " comp=0x%x, decomp=0x%x\n", *compression_crc, idxd_desc->iax_completion->crc); print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, idxd_desc->iax_completion, 64, 0); @@ -1369,8 +1356,7 @@ err: 
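[Editor's aside, illustrative only and not part of the patch] The hunks above stop threading compression_crc through function arguments and instead keep it in driver-private space inside the acomp request, sized by the new .cra_reqsize field. A minimal sketch of that pattern, assuming acomp_request_ctx() is declared in crypto/internal/acompress.h as used by this series:

#include <crypto/internal/acompress.h>	/* assumed home of acomp_request_ctx() */

/* Per-request driver state; its size is advertised via .cra_reqsize. */
struct example_req_ctx {
	u32 compression_crc;
};

static int example_compress(struct acomp_req *req)
{
	struct example_req_ctx *rctx = acomp_request_ctx(req);

	/* Filled in later, e.g. from the hardware completion record. */
	rctx->compression_crc = 0;
	return 0;
}

/* In the acomp_alg definition: .base.cra_reqsize = sizeof(struct example_req_ctx) */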
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - bool disable_async) + dma_addr_t dst_addr, unsigned int *dlen) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1412,7 +1398,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src1_size = slen; desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1425,8 +1411,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: decompression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1446,7 +1431,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_decomp_calls(); update_wq_decomp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1474,7 +1459,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, *dlen = req->dlen; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); /* Update stats */ @@ -1496,7 +1481,6 @@ static int iaa_comp_acompress(struct acomp_req *req) dma_addr_t src_addr, dst_addr; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; - u32 compression_crc; struct idxd_wq *wq; struct device *dev; @@ -1557,7 +1541,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc); + &req->dlen); if (ret == -EINPROGRESS) return ret; @@ -1569,7 +1553,7 @@ static int iaa_comp_acompress(struct acomp_req *req) } ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, compression_crc); + dst_addr, &req->dlen); if (ret) dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret); @@ -1655,7 +1639,7 @@ static int iaa_comp_adecompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, false); + dst_addr, &req->dlen); if (ret == -EINPROGRESS) return ret; @@ -1699,6 +1683,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = { .cra_driver_name = "deflate-iaa", .cra_flags = CRYPTO_ALG_ASYNC, .cra_ctxsize = sizeof(struct iaa_compression_ctx), + .cra_reqsize = sizeof(u32), .cra_module = THIS_MODULE, .cra_priority = IAA_ALG_PRIORITY, } diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 02fb8abe4e6e..359c61f0c8a1 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -70,6 +70,18 @@ config CRYPTO_DEV_QAT_420XX To compile this as a module, choose M here: the module will be called qat_420xx. +config CRYPTO_DEV_QAT_6XXX + tristate "Support for Intel(R) QuickAssist Technology QAT_6XXX" + depends on (X86 || COMPILE_TEST) + depends on PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) QuickAssist Technology QAT_6xxx + for accelerating crypto and compression workloads. 
+ + To compile this as a module, choose M here: the module + will be called qat_6xxx. + config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile index 235b69f4f3f7..abef14207afa 100644 --- a/drivers/crypto/intel/qat/Makefile +++ b/drivers/crypto/intel/qat/Makefile @@ -1,10 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 +subdir-ccflags-y := -I$(src)/qat_common obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile index 72b24b1804cf..f6df54d2993e 100644 --- a/drivers/crypto/intel/qat/qat_420xx/Makefile +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o qat_420xx-y := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 4feeef83f7a3..7c3c0f561c95 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -9,15 +9,14 @@ #include <adf_common_drv.h> #include <adf_fw_config.h> #include <adf_gen4_config.h> -#include <adf_gen4_dc.h> #include <adf_gen4_hw_csr_data.h> #include <adf_gen4_hw_data.h> #include <adf_gen4_pfvf.h> #include <adf_gen4_pm.h> #include <adf_gen4_ras.h> -#include <adf_gen4_timer.h> #include <adf_gen4_tl.h> #include <adf_gen4_vf_mig.h> +#include <adf_timer.h> #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -93,7 +92,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = { static struct adf_hw_device_class adf_420xx_class = { .name = ADF_420XX_DEVICE_NAME, .type = DEV_420XX, - .instances = 0, }; static u32 get_ae_mask(struct adf_hw_device_data *self) @@ -469,8 +467,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; - hw_data->start_timer = adf_gen4_timer_start; - hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->start_timer = adf_timer_start; + hw_data->stop_timer = adf_timer_stop; hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_420XX_AE_FREQ; diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 8084aa0f7f41..cfa00daeb4fb 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -14,7 +14,7 @@ #include "adf_420xx_hw_data.h" static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) }, { } }; MODULE_DEVICE_TABLE(pci, adf_pci_tbl); @@ -186,11 +186,19 @@ static void adf_remove(struct 
pci_dev *pdev) adf_cleanup_accel(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = ADF_420XX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile index e8480bb80dee..188b611445e6 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 4eb6ef99efdd..bd0b1b1015c0 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -9,15 +9,14 @@ #include <adf_common_drv.h> #include <adf_fw_config.h> #include <adf_gen4_config.h> -#include <adf_gen4_dc.h> #include <adf_gen4_hw_csr_data.h> #include <adf_gen4_hw_data.h> #include <adf_gen4_pfvf.h> #include <adf_gen4_pm.h> #include "adf_gen4_ras.h" -#include <adf_gen4_timer.h> #include <adf_gen4_tl.h> #include <adf_gen4_vf_mig.h> +#include <adf_timer.h> #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -96,7 +95,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, - .instances = 0, }; static u32 get_ae_mask(struct adf_hw_device_data *self) @@ -422,13 +420,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; hw_data->num_rps = ADF_GEN4_MAX_RPS; switch (dev_id) { - case ADF_402XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_402XX: hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; hw_data->get_ena_thd_mask = get_ena_thd_mask; break; - case ADF_401XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_401XX: hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->uof_get_name = uof_get_name_4xxx; @@ -455,8 +453,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; - hw_data->start_timer = adf_gen4_timer_start; - hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->start_timer = adf_timer_start; + hw_data->stop_timer = adf_timer_stop; hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 5537a9991e4e..c9be5dcddb27 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -14,9 +14,9 @@ #include "adf_4xxx_hw_data.h" static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, - { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), }, - { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), }, + { 
PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) }, { } }; MODULE_DEVICE_TABLE(pci, adf_pci_tbl); @@ -188,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev) adf_cleanup_accel(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = ADF_4XXX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_6xxx/Makefile b/drivers/crypto/intel/qat/qat_6xxx/Makefile new file mode 100644 index 000000000000..4b4de67cb0c2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx.o +qat_6xxx-y := adf_drv.o adf_6xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c new file mode 100644 index 000000000000..359a6447ccb8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c @@ -0,0 +1,845 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include <adf_accel_devices.h> +#include <adf_admin.h> +#include <adf_cfg.h> +#include <adf_cfg_services.h> +#include <adf_clock.h> +#include <adf_common_drv.h> +#include <adf_fw_config.h> +#include <adf_gen6_pm.h> +#include <adf_gen6_ras.h> +#include <adf_gen6_shared.h> +#include <adf_timer.h> +#include "adf_6xxx_hw_data.h" +#include "icp_qat_fw_comp.h" +#include "icp_qat_hw_51_comp.h" + +#define RP_GROUP_0_MASK (BIT(0) | BIT(2)) +#define RP_GROUP_1_MASK (BIT(1) | BIT(3)) +#define RP_GROUP_ALL_MASK (RP_GROUP_0_MASK | RP_GROUP_1_MASK) + +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 BIT(8) + +struct adf_ring_config { + u32 ring_mask; + enum adf_cfg_service_type ring_type; + const unsigned long *thrd_mask; +}; + +static u32 rmask_two_services[] = { + RP_GROUP_0_MASK, + RP_GROUP_1_MASK, +}; + +enum adf_gen6_rps { + RP0 = 0, + RP1 = 1, + RP2 = 2, + RP3 = 3, + RP_MAX = RP3 +}; + +/* + * thrd_mask_[sym|asym|cpr|dcc]: these static arrays define the thread + * configuration for handling requests of specific services across the + * accelerator engines. Each element in an array corresponds to an + * accelerator engine, with the value being a bitmask that specifies which + * threads within that engine are capable of processing the particular service. + * + * For example, a value of 0x0C means that threads 2 and 3 are enabled for the + * service in the respective accelerator engine. 
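 * (Editorial continuation, derived from the array values below: thrd_mask_asym[0] = 0x70
 * enables threads 4-6 of engine 0 for asymmetric crypto, thrd_mask_dcc[4] = 0x07
 * enables threads 0-2 of engine 4 for compression chaining, and the last entry of
 * every array is 0x00, so engine 8 (the admin engine) serves no data-path requests.)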
+ */ +static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = { + 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00 +}; + +static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = { + 0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00 +}; + +static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = { + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 +}; + +static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = { + 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00 +}; + +static const char *const adf_6xxx_fw_objs[] = { + [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ, + [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ, + [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ, +}; + +static const struct adf_fw_config adf_default_fw_config[] = { + { ADF_AE_GROUP_1, ADF_FW_DC_OBJ }, + { ADF_AE_GROUP_0, ADF_FW_CY_OBJ }, + { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ }, +}; + +static struct adf_hw_device_class adf_6xxx_class = { + .name = ADF_6XXX_DEVICE_NAME, + .type = DEV_6XXX, +}; + +static bool services_supported(unsigned long mask) +{ + int num_svc; + + if (mask >= BIT(SVC_BASE_COUNT)) + return false; + + num_svc = hweight_long(mask); + switch (num_svc) { + case ADF_ONE_SERVICE: + return true; + case ADF_TWO_SERVICES: + case ADF_THREE_SERVICES: + return !test_bit(SVC_DCC, &mask); + default: + return false; + } +} + +static int get_service(unsigned long *mask) +{ + if (test_and_clear_bit(SVC_ASYM, mask)) + return SVC_ASYM; + + if (test_and_clear_bit(SVC_SYM, mask)) + return SVC_SYM; + + if (test_and_clear_bit(SVC_DC, mask)) + return SVC_DC; + + if (test_and_clear_bit(SVC_DCC, mask)) + return SVC_DCC; + + return -EINVAL; +} + +static enum adf_cfg_service_type get_ring_type(enum adf_services service) +{ + switch (service) { + case SVC_SYM: + return SYM; + case SVC_ASYM: + return ASYM; + case SVC_DC: + case SVC_DCC: + return COMP; + default: + return UNUSED; + } +} + +static const unsigned long *get_thrd_mask(enum adf_services service) +{ + switch (service) { + case SVC_SYM: + return thrd_mask_sym; + case SVC_ASYM: + return thrd_mask_asym; + case SVC_DC: + return thrd_mask_cpr; + case SVC_DCC: + return thrd_mask_dcc; + default: + return NULL; + } +} + +static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config, + unsigned int *num_services) +{ + unsigned int i, nservices; + unsigned long mask; + int ret, service; + + ret = adf_get_service_mask(accel_dev, &mask); + if (ret) + return ret; + + nservices = hweight_long(mask); + if (nservices > MAX_NUM_CONCURR_SVC) + return -EINVAL; + + for (i = 0; i < nservices; i++) { + service = get_service(&mask); + if (service < 0) + return service; + + rp_config[i].ring_type = get_ring_type(service); + rp_config[i].thrd_mask = get_thrd_mask(service); + + /* + * If there is only one service enabled, use all ring pairs for + * that service. + * If there are two services enabled, use ring pairs 0 and 2 for + * one service and ring pairs 1 and 3 for the other service. 
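 * (Editorial example based on the get_service() selection order above: if only
 * the sym and asym services are enabled, asym is assigned first and takes ring
 * pairs 0 and 2, while sym takes ring pairs 1 and 3.)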
+ */ + switch (nservices) { + case ADF_ONE_SERVICE: + rp_config[i].ring_mask = RP_GROUP_ALL_MASK; + break; + case ADF_TWO_SERVICES: + rp_config[i].ring_mask = rmask_two_services[i]; + break; + case ADF_THREE_SERVICES: + rp_config[i].ring_mask = BIT(i); + + /* If ASYM is enabled, use additional ring pair */ + if (service == SVC_ASYM) + rp_config[i].ring_mask |= BIT(RP3); + + break; + default: + return -EINVAL; + } + } + + *num_services = nservices; + + return 0; +} + +static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae) +{ + struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC]; + unsigned int num_services, i, thrd; + u32 ring_mask, thd2arb_mask = 0; + const unsigned long *p_mask; + + if (get_rp_config(accel_dev, rp_config, &num_services)) + return 0; + + /* + * The thd2arb_mask maps ring pairs to threads within an accelerator engine. + * It ensures that jobs submitted to ring pairs are scheduled on threads capable + * of handling the specified service type. + * + * Each group of 4 bits in the mask corresponds to a thread, with each bit + * indicating whether a job from a ring pair can be scheduled on that thread. + * The use of 4 bits is due to the organization of ring pairs into groups of + * four, where each group shares the same configuration. + */ + for (i = 0; i < num_services; i++) { + p_mask = &rp_config[i].thrd_mask[ae]; + ring_mask = rp_config[i].ring_mask; + + for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE) + thd2arb_mask |= ring_mask << (thrd * 4); + } + + return thd2arb_mask; +} + +static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { }; + struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC]; + unsigned int num_services, rp_num, i; + unsigned long cfg_mask; + u16 ring_to_svc_map; + + if (get_rp_config(accel_dev, rp_config, &num_services)) + return 0; + + /* + * Loop through the configured services and populate the `rps` array that + * contains what service that particular ring pair can handle (i.e. symmetric + * crypto, asymmetric crypto, data compression or compression chaining). + */ + for (i = 0; i < num_services; i++) { + cfg_mask = rp_config[i].ring_mask; + for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF) + rps[rp_num] = rp_config[i].ring_type; + } + + /* + * The ring_mask is structured into segments of 3 bits, with each + * segment representing the service configuration for a specific ring pair. + * Since ring pairs are organized into groups of 4, the ring_mask contains 4 + * such 3-bit segments, each corresponding to one ring pair. + * + * The device has 64 ring pairs, which are organized in groups of 4, namely + * 16 groups. Each group has the same configuration, represented here by + * `ring_to_svc_map`. + */ + ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} + +static u32 get_accel_mask(struct adf_hw_device_data *self) +{ + return ADF_GEN6_ACCELERATORS_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_GEN6_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return self ? 
hweight32(self->ae_mask) : 0; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN6_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN6_ETR_BAR; +} + +static u32 get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN6_SRAM_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_1; +} + +static void get_arb_info(struct arb_info *arb_info) +{ + arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG; + arb_info->arb_offset = ADF_GEN6_ARB_OFFSET; + arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET; +} + +static void get_admin_info(struct admin_info *admin_csrs_info) +{ + admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET; +} + +static u32 get_heartbeat_clock(struct adf_hw_device_data *self) +{ + return ADF_GEN6_COUNTER_FREQ; +} + +static void enable_error_correction(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + /* + * Enable all error notification bits in errsou3 except VFLR + * notification on host. + */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY); +} + +static void enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0); +} + +static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; + u64 val = ADF_SSM_WDT_DEFAULT_VALUE; + + /* Enable watchdog timer for sym and dc */ + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val); + + /* Enable watchdog timer for pke */ + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke); +} + +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. + * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. + */ +static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + unsigned int i; + + for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i); +} + +static int reset_ring_pair(void __iomem *csr, u32 bank_number) +{ + u32 status; + int ret; + + /* + * Write rpresetctl register BIT(0) as 1. + * Since rpresetctl registers have no RW fields, no need to preserve + * values for other bits. Just write directly. 
+ */ + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number), + ADF_WQM_CSR_RPRESETCTL_RESET); + + /* Read rpresetsts register and wait for rp reset to complete */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_WQM_CSR_RPRESETSTS_STATUS, + ADF_RPRESET_POLL_DELAY_US, + ADF_RPRESET_POLL_TIMEOUT_US, true, + csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); + if (ret) + return ret; + + /* When ring pair reset is done, clear rpresetsts */ + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS); + + return 0; +} + +static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + void __iomem *csr = adf_get_etr_base(accel_dev); + int ret; + + if (bank_number >= hw_data->num_banks) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number); + + ret = reset_ring_pair(csr, bank_number); + if (ret) + dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n"); + else + dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n"); + + return ret; +} + +static int build_comp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { }; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + u32 lower_val; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; + break; + default: + return -EINVAL; + } + + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED; + hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1; + lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr); + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + + return 0; +} + +static int build_decomp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + return -EINVAL; + } + + cd_pars->u.sl.comp_slice_cfg_word[0] = 0; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + + return 0; +} + +static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_comp_block = build_comp_block; + dc_ops->build_decomp_block = build_decomp_block; +} + +static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u32 *thd2arb_map = hw_data->thd_to_arb_map; + unsigned int i; + + for (i = 0; i < hw_data->num_engines; i++) { + thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i); + dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]); + } + + return 0; +} + +static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number) +{ + u32 value; + + /* + * After each PF FLR, for each of the 64 ring pairs in the PF, the + * driver must program the ringmodectl CSRs. 
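 * The writes below set the ring's traffic class to
 * ADF_GEN6_RINGMODECTL_TC_DEFAULT and enable its use via
 * ADF_GEN6_RINGMODECTL_TC_EN_OP1.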
+ */ + value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number)); + value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT); + value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1); + ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value); +} + +static int set_vc_config(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + u32 value; + int err; + + /* + * After each PF FLR, the driver must program the Port Virtual Channel (VC) + * Control Registers. + * Read PVC0CTL then write the masked values. + */ + pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value); + value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT); + err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value); + if (err) { + dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n"); + return pcibios_err_to_errno(err); + } + + /* Read PVC1CTL then write masked values */ + pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value); + value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT); + value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON); + err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value); + if (err) + dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n"); + + return pcibios_err_to_errno(err); +} + +static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *csr = adf_get_etr_base(accel_dev); + u32 i; + + for (i = 0; i < hw_data->num_banks; i++) { + dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i); + set_vc_csr_for_bank(csr, i); + } + + return set_vc_config(accel_dev); +} + +static u32 get_ae_mask(struct adf_hw_device_data *self) +{ + unsigned long fuses = self->fuses[ADF_FUSECTL4]; + u32 mask = ADF_6XXX_ACCELENGINES_MASK; + + /* + * If bit 0 is set in the fuses, the first 4 engines are disabled. + * If bit 4 is set, the second group of 4 engines are disabled. + * If bit 8 is set, the admin engine (bit 8) is disabled. 
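 * (Editorial worked example: a fuse value with bits 0 and 4 set, e.g. 0x11,
 * removes both 4-engine groups and leaves only the admin engine, i.e. an
 * ae_mask of 0x100.)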
+ */ + if (test_bit(0, &fuses)) + mask &= ~ADF_AE_GROUP_0; + + if (test_bit(4, &fuses)) + mask &= ~ADF_AE_GROUP_1; + + if (test_bit(8, &fuses)) + mask &= ~ADF_AE_GROUP_2; + + return mask; +} + +static u32 get_accel_cap(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym, capabilities_asym; + u32 capabilities_dc; + unsigned long mask; + u32 caps = 0; + u32 fusectl1; + + fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1]; + + /* Read accelerator capabilities mask */ + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_SHA3 | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_AES_V2; + + /* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */ + if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + capabilities_asym = 0; + + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + + if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) { + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + + if (adf_get_service_mask(accel_dev, &mask)) + return 0; + + if (test_bit(SVC_ASYM, &mask)) + caps |= capabilities_asym; + if (test_bit(SVC_SYM, &mask)) + caps |= capabilities_sym; + if (test_bit(SVC_DC, &mask)) + caps |= capabilities_dc; + if (test_bit(SVC_DCC, &mask)) { + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + caps = capabilities_dc | capabilities_sym; + caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + } + + return caps; +} + +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) +{ + return ARRAY_SIZE(adf_default_fw_config); +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs); + int id; + + id = adf_default_fw_config[obj_num].obj; + if (id >= num_fw_objs) + return NULL; + + return adf_6xxx_fw_objs[id]; +} + +static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + return uof_get_name(accel_dev, obj_num); +} + +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + return adf_default_fw_config[obj_num].obj; +} + +static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + return adf_default_fw_config[obj_num].ae_mask; +} + +static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) +{ + if (adf_gen6_init_thd2arb_map(accel_dev)) + 
dev_warn(&GET_DEV(accel_dev), + "Failed to generate thread to arbiter mapping"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; +} + +static int adf_init_device(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + u32 status; + u32 csr; + int ret; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2); + csr |= ADF_GEN6_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_GEN6_PM_INIT_STATE, + ADF_GEN6_PM_POLL_DELAY_US, + ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN6_PM_STATUS); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + return ret; + } + + dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n", + accel_dev->accel_id); + + ret = adf_gen6_set_vc(accel_dev); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n"); + + return ret; +} + +static int enable_pm(struct adf_accel_dev *accel_dev) +{ + return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER); +} + +static int dev_config(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + return ret; + + ret = adf_cfg_section_add(accel_dev, "Accelerator0"); + if (ret) + return ret; + + switch (adf_get_service_enabled(accel_dev)) { + case SVC_DC: + case SVC_DCC: + ret = adf_gen6_comp_dev_config(accel_dev); + break; + default: + ret = adf_gen6_no_dev_config(accel_dev); + break; + } + if (ret) + return ret; + + __set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; +} + +void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &adf_6xxx_class; + hw_data->instance_id = adf_6xxx_class.instances++; + hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS; + hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK; + hw_data->ring_to_svc_map = 0; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_arb_info = get_arb_info; + hw_data->get_admin_info = get_admin_info; + hw_data->get_accel_cap = get_accel_cap; + hw_data->get_sku = get_sku; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = enable_ints; + hw_data->reset_device = adf_reset_flr; + hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK; + hw_data->fw_name = ADF_6XXX_FW; + hw_data->fw_mmp_name = ADF_6XXX_MMP; + hw_data->uof_get_name = 
uof_get_name_6xxx; + hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; + hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_ssm_wdtimer = set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->disable_iov = adf_disable_sriov; + hw_data->ring_pair_reset = ring_pair_reset; + hw_data->dev_config = dev_config; + hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->start_timer = adf_timer_start; + hw_data->stop_timer = adf_timer_stop; + hw_data->init_device = adf_init_device; + hw_data->enable_pm = enable_pm; + hw_data->services_supported = services_supported; + + adf_gen6_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops); + adf_gen6_init_dc_ops(&hw_data->dc_ops); + adf_gen6_init_ras_ops(&hw_data->ras_ops); +} + +void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data) +{ + if (hw_data->dev_class->instances) + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h new file mode 100644 index 000000000000..78e2e2c5816e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_6XXX_HW_DATA_H_ +#define ADF_6XXX_HW_DATA_H_ + +#include <linux/bits.h> +#include <linux/time.h> +#include <linux/units.h> + +#include "adf_accel_devices.h" +#include "adf_cfg_common.h" +#include "adf_dc.h" + +/* PCIe configuration space */ +#define ADF_GEN6_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) +#define ADF_GEN6_SRAM_BAR 0 +#define ADF_GEN6_PMISC_BAR 1 +#define ADF_GEN6_ETR_BAR 2 +#define ADF_6XXX_MAX_ACCELENGINES 9 + +/* Clocks frequency */ +#define ADF_GEN6_COUNTER_FREQ (100 * HZ_PER_MHZ) + +/* Physical function fuses */ +#define ADF_GEN6_FUSECTL0_OFFSET 0x2C8 +#define ADF_GEN6_FUSECTL1_OFFSET 0x2CC +#define ADF_GEN6_FUSECTL4_OFFSET 0x2D8 + +/* Accelerators */ +#define ADF_GEN6_ACCELERATORS_MASK 0x1 +#define ADF_GEN6_MAX_ACCELERATORS 1 + +/* MSI-X interrupt */ +#define ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 +#define ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET 0x41A044 +#define ADF_GEN6_SMIAPF_MASK_OFFSET 0x41A084 +#define ADF_GEN6_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 4)) + +/* Bank and ring configuration */ +#define ADF_GEN6_NUM_RINGS_PER_BANK 2 +#define ADF_GEN6_NUM_BANKS_PER_VF 4 +#define ADF_GEN6_ETR_MAX_BANKS 64 +#define ADF_GEN6_RX_RINGS_OFFSET 1 +#define ADF_GEN6_TX_RINGS_MASK 0x1 + +/* Arbiter configuration */ +#define ADF_GEN6_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_GEN6_ARB_OFFSET 0x000 +#define ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET 0x400 + +/* Admin interface configuration */ +#define ADF_GEN6_ADMINMSGUR_OFFSET 0x500574 +#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578 +#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970 + +/* + * Watchdog timers + * Timeout is in cycles. Clock speed may vary across products but this + * value should be a few milli-seconds. 
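 * (Editorial note: 0x7000000 is roughly 117 million cycles and 0x8000000
 * roughly 134 million cycles.)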
+ */ +#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL +#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000ULL +#define ADF_SSMWDTATHL_OFFSET 0x5208 +#define ADF_SSMWDTATHH_OFFSET 0x520C +#define ADF_SSMWDTCNVL_OFFSET 0x5408 +#define ADF_SSMWDTCNVH_OFFSET 0x540C +#define ADF_SSMWDTUCSL_OFFSET 0x5808 +#define ADF_SSMWDTUCSH_OFFSET 0x580C +#define ADF_SSMWDTDCPRL_OFFSET 0x5A08 +#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C +#define ADF_SSMWDTPKEL_OFFSET 0x5E08 +#define ADF_SSMWDTPKEH_OFFSET 0x5E0C + +/* Ring reset */ +#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) +#define ADF_RPRESET_POLL_DELAY_US 20 +#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) +#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + (bank) * 8) +#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) +#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) + +/* Controls and sets up the corresponding ring mode of operation */ +#define ADF_GEN6_CSR_RINGMODECTL(bank) (0x9000 + (bank) * 4) + +/* Specifies the traffic class to use for the transactions to/from the ring */ +#define ADF_GEN6_RINGMODECTL_TC_MASK GENMASK(18, 16) +#define ADF_GEN6_RINGMODECTL_TC_DEFAULT 0x7 + +/* Specifies usage of tc for the transactions to/from this ring */ +#define ADF_GEN6_RINGMODECTL_TC_EN_MASK GENMASK(20, 19) + +/* + * Use the value programmed in the tc field for request descriptor + * and metadata read transactions + */ +#define ADF_GEN6_RINGMODECTL_TC_EN_OP1 0x1 + +/* VC0 Resource Control Register */ +#define ADF_GEN6_PVC0CTL_OFFSET 0x204 +#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1 +#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1) +#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F + +/* VC1 Resource Control Register */ +#define ADF_GEN6_PVC1CTL_OFFSET 0x210 +#define ADF_GEN6_PVC1CTL_TCVCMAP_OFFSET 1 +#define ADF_GEN6_PVC1CTL_TCVCMAP_MASK GENMASK(7, 1) +#define ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT 0x40 +#define ADF_GEN6_PVC1CTL_VCEN_OFFSET 31 +#define ADF_GEN6_PVC1CTL_VCEN_MASK BIT(31) +/* RW bit: 0x1 - enables a Virtual Channel, 0x0 - disables */ +#define ADF_GEN6_PVC1CTL_VCEN_ON 0x1 + +/* Error source mask registers */ +#define ADF_GEN6_ERRMSK0 0x41A210 +#define ADF_GEN6_ERRMSK1 0x41A214 +#define ADF_GEN6_ERRMSK2 0x41A218 +#define ADF_GEN6_ERRMSK3 0x41A21C + +#define ADF_GEN6_VFLNOTIFY BIT(7) + +/* Number of heartbeat counter pairs */ +#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE + +/* Physical function fuses */ +#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0) +#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8) + +/* Firmware binaries */ +#define ADF_6XXX_FW "qat_6xxx.bin" +#define ADF_6XXX_MMP "qat_6xxx_mmp.bin" +#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin" +#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin" +#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin" + +enum icp_qat_gen6_slice_mask { + ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0), + ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3), + ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4), + ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6), +}; + +void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data); + +#endif /* ADF_6XXX_HW_DATA_H_ */ diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c new file mode 100644 index 000000000000..c1dc9c56fdf5 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/array_size.h> 
+#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include <adf_accel_devices.h> +#include <adf_cfg.h> +#include <adf_common_drv.h> +#include <adf_dbgfs.h> + +#include "adf_gen6_shared.h" +#include "adf_6xxx_hw_data.h" + +static int bar_map[] = { + 0, /* SRAM */ + 2, /* PMISC */ + 4, /* ETR */ +}; + +static void adf_device_down(void *accel_dev) +{ + adf_dev_down(accel_dev); +} + +static void adf_dbgfs_cleanup(void *accel_dev) +{ + adf_dbgfs_exit(accel_dev); +} + +static void adf_cfg_device_remove(void *accel_dev) +{ + adf_cfg_dev_remove(accel_dev); +} + +static void adf_cleanup_hw_data(void *accel_dev) +{ + struct adf_accel_dev *accel_device = accel_dev; + + if (accel_device->hw_device) { + adf_clean_hw_data_6xxx(accel_device->hw_device); + accel_device->hw_device = NULL; + } +} + +static void adf_devmgr_remove(void *accel_dev) +{ + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + struct device *dev = &pdev->dev; + struct adf_accel_dev *accel_dev; + struct adf_bar *bar; + unsigned int i; + int ret; + + if (num_possible_nodes() > 1 && dev_to_node(dev) < 0) { + /* + * If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. + */ + return dev_err_probe(dev, -EINVAL, "Invalid NUMA configuration.\n"); + } + + accel_dev = devm_kzalloc(dev, sizeof(*accel_dev), GFP_KERNEL); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + INIT_LIST_HEAD(&accel_dev->list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + accel_dev->owner = THIS_MODULE; + + hw_data = devm_kzalloc(dev, sizeof(*hw_data), GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_GEN6_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); + pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]); + pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]); + + if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE)) + return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n"); + + /* Enable PCI device */ + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "Cannot enable PCI device.\n"); + + ret = adf_devmgr_add_dev(accel_dev, NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to add new accelerator device.\n"); + + ret = devm_add_action_or_reset(dev, adf_devmgr_remove, accel_dev); + if (ret) + return ret; + + accel_dev->hw_device = hw_data; + adf_init_hw_data_6xxx(accel_dev->hw_device); + + ret = devm_add_action_or_reset(dev, adf_cleanup_hw_data, accel_dev); + if (ret) + return ret; + + /* Get Accelerators and Accelerator Engine masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* If the device has no acceleration engines then ignore it */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + (~hw_data->ae_mask & ADF_GEN6_ACCELERATORS_MASK)) { + ret = -EFAULT; + return dev_err_probe(dev, ret, "No acceleration units were found.\n"); + } + + /* Create device 
configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, adf_cfg_device_remove, accel_dev); + if (ret) + return ret; + + /* Set DMA identifier */ + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) + return dev_err_probe(dev, ret, "No usable DMA configuration.\n"); + + ret = adf_gen6_cfg_dev_init(accel_dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize configuration.\n"); + + /* Get accelerator capability mask */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) { + ret = -EINVAL; + return dev_err_probe(dev, ret, "Failed to get capabilities mask.\n"); + } + + for (i = 0; i < ARRAY_SIZE(bar_map); i++) { + bar = &accel_pci_dev->pci_bars[i]; + + /* Map 64-bit PCIe BAR */ + bar->virt_addr = pcim_iomap_region(pdev, bar_map[i], pci_name(pdev)); + if (IS_ERR(bar->virt_addr)) { + ret = PTR_ERR(bar->virt_addr); + return dev_err_probe(dev, ret, "Failed to ioremap PCI region.\n"); + } + } + + pci_set_master(pdev); + + /* + * The PCI config space is saved at this point and will be restored + * after a Function Level Reset (FLR) as the FLR does not completely + * restore it. + */ + ret = pci_save_state(pdev); + if (ret) + return dev_err_probe(dev, ret, "Failed to save pci state.\n"); + + accel_dev->ras_errors.enabled = true; + + adf_dbgfs_init(accel_dev); + + ret = devm_add_action_or_reset(dev, adf_dbgfs_cleanup, accel_dev); + if (ret) + return ret; + + ret = adf_dev_up(accel_dev, true); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev); + if (ret) + return ret; + + ret = adf_sysfs_init(accel_dev); + + return ret; +} + +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_6XXX) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_6XXX_DEVICE_NAME, + .probe = adf_probe, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; +module_pci_driver(adf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_6XXX_FW); +MODULE_FIRMWARE(ADF_6XXX_MMP); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology for GEN6 Devices"); +MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS("CRYPTO_QAT"); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile index d9e568572da8..43604c025f0c 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index e78f7bfd30b8..07f2c42a68f5 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -5,7 +5,6 @@ #include <adf_clock.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -22,7 +21,6 @@ static const u32 
thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = { static struct adf_hw_device_class c3xxx_class = { .name = ADF_C3XXX_DEVICE_NAME, .type = DEV_C3XXX, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index b825b35ab4bf..bceb5dd8b148 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -19,24 +19,6 @@ #include <adf_dbgfs.h> #include "adf_c3xxx_hw_data.h" -static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), }, - { } -}; -MODULE_DEVICE_TABLE(pci, adf_pci_tbl); - -static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); -static void adf_remove(struct pci_dev *dev); - -static struct pci_driver adf_driver = { - .id_table = adf_pci_tbl, - .name = ADF_C3XXX_DEVICE_NAME, - .probe = adf_probe, - .remove = adf_remove, - .sriov_configure = adf_sriov_configure, - .err_handler = &adf_err_handler, -}; - static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) { pci_release_regions(accel_dev->accel_pci_dev.pci_dev); @@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev) kfree(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C3XXX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + static int __init adfdrv_init(void) { request_module("intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile index 31a908a211ac..03f6745b4aa2 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index a512ca4efd3f..db3c33fa1881 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -3,7 +3,6 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -13,7 +12,6 @@ static struct adf_hw_device_class c3xxxiov_class = { .name = ADF_C3XXXVF_DEVICE_NAME, .type = DEV_C3XXXVF, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile index cbdaaa135e84..f3d722bef088 100644 --- a/drivers/crypto/intel/qat/qat_c62x/Makefile +++ b/drivers/crypto/intel/qat/qat_c62x/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o qat_c62x-y := adf_drv.o adf_c62x_hw_data.o diff --git 
a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 32ebe09477a8..0b410b41474d 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -5,7 +5,6 @@ #include <adf_clock.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = { static struct adf_hw_device_class c62x_class = { .name = ADF_C62X_DEVICE_NAME, .type = DEV_C62X, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index 8a7bdec358d6..23ccb72b6ea2 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -19,24 +19,6 @@ #include <adf_dbgfs.h> #include "adf_c62x_hw_data.h" -static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), }, - { } -}; -MODULE_DEVICE_TABLE(pci, adf_pci_tbl); - -static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); -static void adf_remove(struct pci_dev *dev); - -static struct pci_driver adf_driver = { - .id_table = adf_pci_tbl, - .name = ADF_C62X_DEVICE_NAME, - .probe = adf_probe, - .remove = adf_remove, - .sriov_configure = adf_sriov_configure, - .err_handler = &adf_err_handler, -}; - static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) { pci_release_regions(accel_dev->accel_pci_dev.pci_dev); @@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev) kfree(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C62X_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + static int __init adfdrv_init(void) { request_module("intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile index 60e499b041ec..ed7f3f722d99 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 4aaaaf921734..7f00035d3661 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -3,7 +3,6 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -13,7 +12,6 @@ static struct adf_hw_device_class c62xiov_class = { .name = ADF_C62XVF_DEVICE_NAME, .type = DEV_C62XVF, - .instances = 0 }; static u32 get_accel_mask(struct 
adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index af5df29fd2e3..66bb295ace28 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -8,19 +8,19 @@ intel_qat-y := adf_accel_engine.o \ adf_cfg_services.o \ adf_clock.o \ adf_ctl_drv.o \ + adf_dc.o \ adf_dev_mgr.o \ adf_gen2_config.o \ - adf_gen2_dc.o \ adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ adf_gen4_config.o \ - adf_gen4_dc.o \ adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ adf_gen4_ras.o \ - adf_gen4_timer.o \ adf_gen4_vf_mig.o \ + adf_gen6_ras.o \ + adf_gen6_shared.o \ adf_hw_arbiter.o \ adf_init.o \ adf_isr.o \ @@ -30,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \ adf_sysfs.o \ adf_sysfs_ras_counters.o \ adf_sysfs_rl.o \ + adf_timer.o \ adf_transport.o \ qat_algs.o \ qat_algs_send.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index dc21551153cb..2ee526063213 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -12,6 +12,7 @@ #include <linux/qat/qat_mig_dev.h> #include <linux/wordpart.h> #include "adf_cfg_common.h" +#include "adf_dc.h" #include "adf_rl.h" #include "adf_telemetry.h" #include "adf_pfvf_msg.h" @@ -25,14 +26,18 @@ #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" #define ADF_420XX_DEVICE_NAME "420xx" -#define ADF_4XXX_PCI_DEVICE_ID 0x4940 -#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 -#define ADF_401XX_PCI_DEVICE_ID 0x4942 -#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 -#define ADF_402XX_PCI_DEVICE_ID 0x4944 -#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 -#define ADF_420XX_PCI_DEVICE_ID 0x4946 -#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 +#define ADF_6XXX_DEVICE_NAME "6xxx" +#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940 +#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941 +#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942 +#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943 +#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944 +#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945 +#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946 +#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947 +#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948 +#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949 + #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 @@ -267,7 +272,8 @@ struct adf_pfvf_ops { }; struct adf_dc_ops { - void (*build_deflate_ctx)(void *ctx); + int (*build_comp_block)(void *ctx, enum adf_dc_algo algo); + int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo); }; struct qat_migdev_ops { diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index acad526eb741..573388c37100 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -449,6 +449,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +EXPORT_SYMBOL_GPL(adf_init_admin_pm); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size) diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h index 89df3888d7ea..15fdf9854b81 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h +++ 
b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h @@ -48,6 +48,7 @@ enum adf_device_type { DEV_C3XXXVF, DEV_4XXX, DEV_420XX, + DEV_6XXX, }; struct adf_dev_status_info { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c index 30abcd9e1283..c39871291da7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -116,7 +116,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, return adf_service_mask_to_string(mask, out, out_len); } -static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) +int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; size_t len; @@ -138,6 +138,7 @@ static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long * return ret; } +EXPORT_SYMBOL_GPL(adf_get_service_mask); int adf_get_service_enabled(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h index f6bafc15cbc6..3742c450878f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -32,5 +32,6 @@ enum { int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, size_t in_len, char *out, size_t out_len); int adf_get_service_enabled(struct adf_accel_dev *accel_dev); +int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_dc.c index 47261b1c1da6..3e8fb4e3ed97 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dc.c @@ -1,22 +1,21 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation */ #include "adf_accel_devices.h" -#include "adf_gen2_dc.h" +#include "adf_dc.h" #include "icp_qat_fw_comp.h" -static void qat_comp_build_deflate_ctx(void *ctx) +int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo) { - struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; + struct icp_qat_fw_comp_req *req_tmpl = ctx; struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl; + struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + int ret; memset(req_tmpl, 0, sizeof(*req_tmpl)); header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_SGL); @@ -26,12 +25,14 @@ static void qat_comp_build_deflate_ctx(void *ctx) ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); - cd_pars->u.sl.comp_slice_cfg_word[0] = - ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS, - ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, - 
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, - ICP_QAT_HW_COMPRESSION_DEPTH_1, - ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + /* Build HW config block for compression */ + ret = GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to build compression block\n"); + return ret; + } + req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER; req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC; req_pars->req_par_flags = @@ -45,26 +46,19 @@ static void qat_comp_build_deflate_ctx(void *ctx) ICP_QAT_FW_COMP_NO_XXHASH_ACC, ICP_QAT_FW_COMP_CNV_ERROR_NONE, ICP_QAT_FW_COMP_NO_APPEND_CRC, - ICP_QAT_FW_COMP_NO_DROP_DATA); + ICP_QAT_FW_COMP_NO_DROP_DATA, + ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS); ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP); /* Fill second half of the template for decompression */ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl)); req_tmpl++; - header = &req_tmpl->comn_hdr; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; - cd_pars = &req_tmpl->cd_pars; - cd_pars->u.sl.comp_slice_cfg_word[0] = - ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS, - ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, - ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, - ICP_QAT_HW_COMPRESSION_DEPTH_1, - ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); -} -void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops) -{ - dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx; + /* Build HW config block for decompression */ + ret = GET_DC_OPS(accel_dev)->build_decomp_block(req_tmpl, algo); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to build decompression block\n"); + + return ret; } -EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.h b/drivers/crypto/intel/qat/qat_common/adf_dc.h new file mode 100644 index 000000000000..6cb5e09054a6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_dc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_DC_H +#define ADF_DC_H + +struct adf_accel_dev; + +enum adf_dc_algo { + QAT_DEFLATE, + QAT_LZ4, + QAT_LZ4S, + QAT_ZSTD, +}; + +int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo); + +#endif /* ADF_DC_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h index 4f86696800c9..78957fa900b7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h @@ -8,6 +8,7 @@ enum adf_fw_objs { ADF_FW_ASYM_OBJ, ADF_FW_DC_OBJ, ADF_FW_ADMIN_OBJ, + ADF_FW_CY_OBJ, }; struct adf_fw_config { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h deleted file mode 100644 index 6eae023354d7..000000000000 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright(c) 2022 Intel Corporation */ -#ifndef ADF_GEN2_DC_H -#define ADF_GEN2_DC_H - -#include "adf_accel_devices.h" - -void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops); - -#endif /* ADF_GEN2_DC_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index 2b263442c856..6a505e9a5cf9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ #include "adf_common_drv.h" +#include "adf_dc.h" #include "adf_gen2_hw_data.h" +#include "icp_qat_fw_comp.h" #include "icp_qat_hw.h" #include <linux/pci.h> @@ -169,3 +171,58 @@ void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) } } EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer); + +static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; + break; + default: + return -EINVAL; + } + + cd_pars->u.sl.comp_slice_cfg_word[0] = + ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, + ICP_QAT_HW_COMPRESSION_DEPTH_1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + return 0; +} + +static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + return -EINVAL; + } + + cd_pars->u.sl.comp_slice_cfg_word[0] = + ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, + ICP_QAT_HW_COMPRESSION_DEPTH_1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + return 0; +} + +void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_comp_block = adf_gen2_build_comp_block; + dc_ops->build_decomp_block = adf_gen2_build_decomp_block; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index 708e9186127b..59bad368a921 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -88,5 +88,6 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info); void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev); void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h index a716545a764c..34a63cf40db2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h @@ -5,6 +5,7 @@ #include <linux/types.h> #include "adf_accel_devices.h" +#include "adf_common_drv.h" #define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C) #define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c index f97e7a880f3a..afcdfdd0a37a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -11,7 +11,7 @@ #include "qat_compression.h" #include "qat_crypto.h" -static int adf_crypto_dev_config(struct adf_accel_dev 
*accel_dev) +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; int banks = GET_MAX_BANKS(accel_dev); @@ -117,7 +117,7 @@ err: return ret; } -static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) +int adf_comp_dev_config(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; int banks = GET_MAX_BANKS(accel_dev); @@ -187,7 +187,7 @@ err: return ret; } -static int adf_no_dev_config(struct adf_accel_dev *accel_dev) +int adf_no_dev_config(struct adf_accel_dev *accel_dev) { unsigned long val; int ret; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h index bb87655f69a8..38a674c27e40 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h @@ -7,5 +7,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev); +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev); +int adf_comp_dev_config(struct adf_accel_dev *accel_dev); +int adf_no_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c deleted file mode 100644 index 5859238e37de..000000000000 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright(c) 2022 Intel Corporation */ -#include "adf_accel_devices.h" -#include "icp_qat_fw_comp.h" -#include "icp_qat_hw_20_comp.h" -#include "adf_gen4_dc.h" - -static void qat_comp_build_deflate(void *ctx) -{ - struct icp_qat_fw_comp_req *req_tmpl = - (struct icp_qat_fw_comp_req *)ctx; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; - struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0}; - struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0}; - struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0}; - u32 upper_val; - u32 lower_val; - - memset(req_tmpl, 0, sizeof(*req_tmpl)); - header->hdr_flags = - ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); - header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; - header->comn_req_flags = - ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, - QAT_COMN_PTR_TYPE_SGL); - header->serv_specif_flags = - ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION, - ICP_QAT_FW_COMP_AUTO_SELECT_BEST, - ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, - ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, - ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); - hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; - hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; - hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; - hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; - hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; - hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED; - hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL; - hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL; - - upper_val = 
ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr); - lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr); - - cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; - cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val; - - req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER; - req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC; - req_pars->req_par_flags = - ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP, - ICP_QAT_FW_COMP_EOP, - ICP_QAT_FW_COMP_BFINAL, - ICP_QAT_FW_COMP_CNV, - ICP_QAT_FW_COMP_CNV_RECOVERY, - ICP_QAT_FW_COMP_NO_CNV_DFX, - ICP_QAT_FW_COMP_CRC_MODE_LEGACY, - ICP_QAT_FW_COMP_NO_XXHASH_ACC, - ICP_QAT_FW_COMP_CNV_ERROR_NONE, - ICP_QAT_FW_COMP_NO_APPEND_CRC, - ICP_QAT_FW_COMP_NO_DROP_DATA); - - /* Fill second half of the template for decompression */ - memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl)); - req_tmpl++; - header = &req_tmpl->comn_hdr; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; - cd_pars = &req_tmpl->cd_pars; - - hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; - lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr); - - cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; - cd_pars->u.sl.comp_slice_cfg_word[1] = 0; -} - -void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops) -{ - dc_ops->build_deflate_ctx = qat_comp_build_deflate; -} -EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h deleted file mode 100644 index 0b1a6774412e..000000000000 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright(c) 2022 Intel Corporation */ -#ifndef ADF_GEN4_DC_H -#define ADF_GEN4_DC_H - -#include "adf_accel_devices.h" - -void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops); - -#endif /* ADF_GEN4_DC_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 099949a2421c..0406cb09c5bb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -9,6 +9,8 @@ #include "adf_fw_config.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" +#include "icp_qat_fw_comp.h" +#include "icp_qat_hw_20_comp.h" u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) { @@ -663,3 +665,71 @@ int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number return ret; } EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore); + +static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { }; + struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { }; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + u32 upper_val; + u32 lower_val; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; + break; + default: + return -EINVAL; + } + + hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; + hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; + hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; + hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; + 
hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED; + hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL; + hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL; + + upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr); + lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr); + + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val; + + return 0; +} + +static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { }; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + u32 lower_val; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + return -EINVAL; + } + + hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; + lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr); + + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + + return 0; +} + +void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_comp_block = adf_gen4_build_comp_block; + dc_ops->build_decomp_block = adf_gen4_build_decomp_block; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 51fc2eaa263e..e4f4d5fa616d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -7,6 +7,7 @@ #include "adf_accel_devices.h" #include "adf_cfg_common.h" +#include "adf_dc.h" /* PCIe configuration space */ #define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) @@ -180,5 +181,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); bool adf_gen4_services_supported(unsigned long service_mask); +void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h index 17d1b774d4a8..2c8708117f70 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h @@ -4,6 +4,7 @@ #define ADF_GEN4_PFVF_H #include "adf_accel_devices.h" +#include "adf_common_drv.h" #ifdef CONFIG_PCI_IOV void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h new file mode 100644 index 000000000000..9a5b995f7ada --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_GEN6_PM_H +#define ADF_GEN6_PM_H + +#include <linux/bits.h> +#include <linux/time.h> + +struct adf_accel_dev; + +/* Power management */ +#define ADF_GEN6_PM_POLL_DELAY_US 20 +#define ADF_GEN6_PM_POLL_TIMEOUT_US USEC_PER_SEC +#define ADF_GEN6_PM_STATUS 0x50A00C +#define ADF_GEN6_PM_INTERRUPT 0x50A028 + +/* Power management source in ERRSOU2 and ERRMSK2 */ +#define ADF_GEN6_PM_SOU BIT(18) + +/* cpm_pm_interrupt bitfields */ +#define 
ADF_GEN6_PM_DRV_ACTIVE BIT(20) + +#define ADF_GEN6_PM_DEFAULT_IDLE_FILTER 0x6 + +/* cpm_pm_status bitfields */ +#define ADF_GEN6_PM_INIT_STATE BIT(21) + +#endif /* ADF_GEN6_PM_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c new file mode 100644 index 000000000000..967253082a98 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c @@ -0,0 +1,818 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/bitfield.h> +#include <linux/types.h> + +#include "adf_common_drv.h" +#include "adf_gen6_ras.h" +#include "adf_sysfs_ras_counters.h" + +static void enable_errsou_reporting(void __iomem *csr) +{ + /* Enable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0); + + /* Enable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0); + + /* + * Enable uncorrectable error reporting in ERRSOU2 + * but disable PM interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT); + + /* Enable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0); +} + +static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + + /* Enable acceleration engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask); + + /* Enable acceleration engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask); +} + +static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + /* Enable HI CPP agents command parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, + ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK); + + ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK); +} + +static void enable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg, mask; + + /* Enable RI memory error reporting */ + mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK; + ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask); + + /* Enable IOSF primary command parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT); + + /* Enable TI internal memory parity error reporting */ + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg); + + /* Enable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK); + + /* + * Enable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL); + reg &= 
ADF_GEN6_TIMSCCTL_RELAY_MASK; + reg |= ADF_GEN6_TIMISCCTL_BIT; + ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg); +} + +static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + /* Enable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0); +} + +static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + enable_errsou_reporting(csr); + enable_ae_error_reporting(accel_dev, csr); + enable_cpp_error_reporting(accel_dev, csr); + enable_ti_ri_error_reporting(csr); + enable_ssm_error_reporting(accel_dev, csr); +} + +static void disable_errsou_reporting(void __iomem *csr) +{ + u32 val; + + /* Disable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK); + + /* Disable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK); + + /* Disable uncorrectable error reporting in ERRSOU2 */ + val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2); + val |= ADF_GEN6_ERRSOU2_DIS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val); + + /* Disable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK); +} + +static void disable_ae_error_reporting(void __iomem *csr) +{ + /* Disable acceleration engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0); + + /* Disable acceleration engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0); +} + +static void disable_cpp_error_reporting(void __iomem *csr) +{ + /* Disable HI CPP agents command parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0); + + ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK); +} + +static void disable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg; + + /* Disable RI memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0); + + /* Disable IOSF primary command parity error reporting */ + reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL); + reg &= ~ADF_GEN6_RIMISCSTS_BIT; + ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg); + + /* Disable TI internal memory parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK); + + /* Disable error handling in RI, TI CPP interface control registers */ + reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL); + reg &= ~ADF_GEN6_RICPPINTCTL_MASK; + ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL); + reg &= ~ADF_GEN6_TICPPINTCTL_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg); + + /* + * Disable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL); + reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg); +} + +static void disable_ssm_error_reporting(void __iomem *csr) +{ + /* Disable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK); +} + +static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + disable_errsou_reporting(csr); 
+ disable_ae_error_reporting(csr); + disable_cpp_error_reporting(csr); + disable_ti_ri_error_reporting(csr); + disable_ssm_error_reporting(csr); +} + +static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ae, errsou; + + ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0); + ae &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + /* Clear interrupt from ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0); + if (errsou & ADF_GEN6_ERRSOU0_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou); +} + +static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 ae; + + if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT)) + return; + + ae = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0); + ae &= GET_HW_DATA(accel_dev)->ae_mask; + if (ae) { + dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", ae); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, ae); + } +} + +static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 cmd_par_err; + + if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT)) + return; + + cmd_par_err = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG); + cmd_par_err &= ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK; + if (cmd_par_err) { + dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n", + cmd_par_err); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, cmd_par_err); + } +} + +static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 rimem_parerr_sts; + + if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT)) + return; + + rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS); + rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK | + ADF_GEN6_RIMEM_PARERR_FATAL_MASK; + if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) { + dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + + if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) { + dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts); +} + +static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_ci_par_sts; + + ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS); + ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK; + if (ti_ci_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts); + ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } +} + +static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_pullfub_par_sts; + + ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS); + ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK; + if (ti_pullfub_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts); + ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts); 
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } +} + +static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_pushfub_par_sts; + + ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS); + ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK; + if (ti_pushfub_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts); + } +} + +static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_cd_par_sts; + + ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS); + ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK; + if (ti_cd_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts); + } +} + +static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_trnsb_par_sts; + + ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS); + ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK; + if (ti_trnsb_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); + } +} + +static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 rimiscsts; + + rimiscsts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS); + rimiscsts &= ADF_GEN6_RIMISCSTS_BIT; + if (rimiscsts) { + dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n", + rimiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, rimiscsts); + } +} + +static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return; + + adf_handle_ti_ci_par_sts(accel_dev, csr); + adf_handle_ti_pullfub_par_sts(accel_dev, csr); + adf_handle_ti_pushfub_par_sts(accel_dev, csr); + adf_handle_ti_cd_par_sts(accel_dev, csr); + adf_handle_ti_trnsb_par_sts(accel_dev, csr); + adf_handle_iosfp_cmd_parerr(accel_dev, csr); +} + +static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), + "Command parity error detected on streaming fabric interface\n"); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + adf_handle_cpp_ae_unc(accel_dev, csr, errsou); + adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou); + adf_handle_ri_mem_par_err(accel_dev, csr, errsou); + adf_handle_ti_err(accel_dev, csr, errsou); + adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1); + if (errsou & ADF_GEN6_ERRSOU1_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou); +} + +static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH); + reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT; + if (reg) { + dev_warn(&GET_DEV(accel_dev), + "Correctable error on ssm shared memory: %#x\n", reg); + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg); + } +} + +static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH); + reg &= ADF_GEN6_UERRSSMSH_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "Fatal error on ssm shared memory: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg); + } +} + +static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR); + reg &= ADF_GEN6_PPERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "Fatal push or pull data error: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg); + } +} + +static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_SCM_PAR_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_CPP_PAR_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_RF_PAR_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), "Fatal error on RF Parity: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "Fatal error for AXI unexpected tag/length: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM); + + iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK; + if (!iastatssm) + return; + + adf_handle_uerrssmsh(accel_dev, csr, iastatssm); + adf_handle_pperr_err(accel_dev, csr, iastatssm); + adf_handle_scmpar_err(accel_dev, csr, iastatssm); + adf_handle_cpppar_err(accel_dev, csr, iastatssm); + adf_handle_rfpar_err(accel_dev, csr, iastatssm); + adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm); + + ADF_CSR_WR(csr, 
ADF_GEN6_IAINTSTATSSM, iastatssm); +} + +static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT)) + return; + + adf_handle_cerrssmsh(accel_dev, csr); + adf_handle_iaintstatssm(accel_dev, csr); +} + +static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 reg; + + if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS); + if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { + dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { + dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) { + dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: errors: %#x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR, + ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK); +} + +static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + adf_handle_ssm(accel_dev, csr, errsou); + adf_handle_cpp_cfc_err(accel_dev, csr, errsou); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2); + if (errsou & ADF_GEN6_ERRSOU2_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou); +} + +static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 timiscsts; + + if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT)) + return; + + timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS); + if (timiscsts) { + dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n", + timiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } +} + +static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 ricppintsts; + + if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK)) + return; + + ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS); + ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK; + if (ricppintsts) { + dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts); + } +} + +static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 ticppintsts; + + if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK)) + return; + + ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS); + ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK; + if (ticppintsts) { + dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts); + } +} + +static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks; + u32 atufaultstatus; + u32 i; + + if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT)) + return; + + for (i = 0; i < max_rp_num; i++) { + atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i)); + + atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT; + if (atufaultstatus) { + dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i, + atufaultstatus); + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus); + } + } +} + +static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 rlterror; + + if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT)) + return; + + rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG); + rlterror &= ADF_GEN6_RLT_ERRLOG_MASK; + if (rlterror) { + dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror); + } +} + +static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n"); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); +} + +static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n"); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), + "DEVHALT due to an error in an incoming transaction\n"); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), + "Error due to a failed response to a page request\n"); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), "Error status for an address translation request\n"); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n"); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + adf_handle_timiscsts(accel_dev, csr, errsou); + adf_handle_ricppintsts(accel_dev, csr, errsou); + adf_handle_ticppintsts(accel_dev, csr, errsou); + adf_handle_atufaultstatus(accel_dev, csr, errsou); + adf_handle_rlterror(accel_dev, csr, errsou); + adf_handle_vflr(accel_dev, csr, errsou); + adf_handle_tc_vc_map_error(accel_dev, csr, errsou); + adf_handle_pcie_devhalt(accel_dev, csr, errsou); + adf_handle_pg_req_devhalt(accel_dev, csr, errsou); + adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou); + adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3); + if (errsou & ADF_GEN6_ERRSOU3_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou); +} + +static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr, + bool *reset_required) +{ + u8 reset, dev_state; + u32 gensts; + + gensts = 
ADF_CSR_RD(csr, ADF_GEN6_GENSTS); + dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts); + reset = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts); + if (dev_state == ADF_GEN6_GENSTS_DEVHALT && reset == ADF_GEN6_GENSTS_PFLR) { + *reset_required = true; + return; + } + + if (reset == ADF_GEN6_GENSTS_COLD_RESET) + dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n"); + + *reset_required = false; +} + +static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + bool handled = false; + u32 errsou; + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0); + if (errsou & ADF_GEN6_ERRSOU0_MASK) { + adf_gen6_process_errsou0(accel_dev, csr); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1); + if (errsou & ADF_GEN6_ERRSOU1_MASK) { + adf_gen6_process_errsou1(accel_dev, csr, errsou); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2); + if (errsou & ADF_GEN6_ERRSOU2_MASK) { + adf_gen6_process_errsou2(accel_dev, csr, errsou); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3); + if (errsou & ADF_GEN6_ERRSOU3_MASK) { + adf_gen6_process_errsou3(accel_dev, csr, errsou); + handled = true; + } + + adf_gen6_is_reset_required(accel_dev, csr, reset_required); + + return handled; +} + +void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops) +{ + ras_ops->enable_ras_errors = adf_gen6_enable_ras; + ras_ops->disable_ras_errors = adf_gen6_disable_ras; + ras_ops->handle_interrupt = adf_gen6_handle_interrupt; +} +EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h new file mode 100644 index 000000000000..66ced271d173 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_GEN6_RAS_H_ +#define ADF_GEN6_RAS_H_ + +#include <linux/bits.h> + +struct adf_ras_ops; + +/* Error source registers */ +#define ADF_GEN6_ERRSOU0 0x41A200 +#define ADF_GEN6_ERRSOU1 0x41A204 +#define ADF_GEN6_ERRSOU2 0x41A208 +#define ADF_GEN6_ERRSOU3 0x41A20C + +/* Error source mask registers */ +#define ADF_GEN6_ERRMSK0 0x41A210 +#define ADF_GEN6_ERRMSK1 0x41A214 +#define ADF_GEN6_ERRMSK2 0x41A218 +#define ADF_GEN6_ERRMSK3 0x41A21C + +/* ERRSOU0 Correctable error mask */ +#define ADF_GEN6_ERRSOU0_MASK BIT(0) + +#define ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT BIT(0) +#define ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT BIT(1) +#define ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT BIT(4) + +#define ADF_GEN6_ERRSOU1_MASK ( \ + (ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT) | \ + (ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT) | \ + (ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT)) + +#define ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT BIT(0) +#define ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT BIT(1) +#define ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT BIT(4) + +#define ADF_GEN6_ERRMSK1_MASK ( \ + (ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT) | \ + (ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT) | \ + (ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT)) + +/* HI AE Uncorrectable error log 
*/ +#define ADF_GEN6_HIAEUNCERRLOG_CPP0 0x41A300 + +/* HI AE Uncorrectable error log enable */ +#define ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0 0x41A320 + +/* HI AE Correctable error log */ +#define ADF_GEN6_HIAECORERRLOG_CPP0 0x41A308 + +/* HI AE Correctable error log enable */ +#define ADF_GEN6_HIAECORERRLOGENABLE_CPP0 0x41A318 + +/* HI CPP Agent Command parity error log */ +#define ADF_GEN6_HICPPAGENTCMDPARERRLOG 0x41A310 + +/* HI CPP Agent command parity error logging enable */ +#define ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE 0x41A314 + +#define ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1B + +/* RI Memory parity error status register */ +#define ADF_GEN6_RIMEM_PARERR_STS 0x41B128 + +/* RI Memory parity error reporting enable */ +#define ADF_GEN6_RI_MEM_PAR_ERR_EN0 0x41B12C + +/* + * RI Memory parity error mask + * BIT(4) - ri_tlq_phdr parity error + * BIT(5) - ri_tlq_pdata parity error + * BIT(6) - ri_tlq_nphdr parity error + * BIT(7) - ri_tlq_npdata parity error + * BIT(8) - ri_tlq_cplhdr parity error + * BIT(10) - BIT(13) - ri_tlq_cpldata[0:3] parity error + * BIT(19) - ri_cds_cmd_fifo parity error + * BIT(20) - ri_obc_ricpl_fifo parity error + * BIT(21) - ri_obc_tiricpl_fifo parity error + * BIT(22) - ri_obc_cppcpl_fifo parity error + * BIT(23) - ri_obc_pendcpl_fifo parity error + * BIT(24) - ri_cpp_cmd_fifo parity error + * BIT(25) - ri_cds_ticmd_fifo parity error + * BIT(26) - riti_cmd_fifo parity error + * BIT(27) - ri_int_msixtbl parity error + * BIT(28) - ri_int_imstbl parity error + * BIT(30) - ri_kpt_fuses parity error + */ +#define ADF_GEN6_RIMEM_PARERR_FATAL_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \ + BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27) | \ + BIT(28) | BIT(30)) + +#define ADF_GEN6_RIMEM_PARERR_CERR_MASK \ + (BIT(10) | BIT(11) | BIT(12) | BIT(13)) + +/* TI CI parity status */ +#define ADF_GEN6_TI_CI_PAR_STS 0x50060C + +/* TI CI parity reporting mask */ +#define ADF_GEN6_TI_CI_PAR_ERR_MASK 0x500608 + +/* + * TI CI parity status mask + * BIT(0) - CdCmdQ_sts parity error status + * BIT(1) - CdDataQ_sts parity error status + * BIT(3) - CPP_SkidQ_sts parity error status + */ +#define ADF_GEN6_TI_CI_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(3)) + +/* TI PULLFUB parity status */ +#define ADF_GEN6_TI_PULL0FUB_PAR_STS 0x500618 + +/* TI PULLFUB parity error reporting mask */ +#define ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK 0x500614 + +/* + * TI PULLFUB parity status mask + * BIT(0) - TrnPullReqQ_sts parity status + * BIT(1) - TrnSharedDataQ_sts parity status + * BIT(2) - TrnPullReqDataQ_sts parity status + * BIT(4) - CPP_CiPullReqQ_sts parity status + * BIT(5) - CPP_TrnPullReqQ_sts parity status + * BIT(6) - CPP_PullidQ_sts parity status + * BIT(7) - CPP_WaitDataQ_sts parity status + * BIT(8) - CPP_CdDataQ_sts parity status + * BIT(9) - CPP_TrnDataQP0_sts parity status + * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status + * BIT(12) - CPP_TrnDataQP1_sts parity status + * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status + */ +#define ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14)) + +/* TI PUSHFUB parity status */ +#define ADF_GEN6_TI_PUSHFUB_PAR_STS 0x500630 + +/* TI PUSHFUB parity error reporting mask */ +#define ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK 0x50062C + +/* + * TI PUSHFUB parity status mask + * BIT(0) - SbPushReqQ_sts parity status + * BIT(1) - BIT(2) 
- SbPushDataQ[0:1]_sts parity status + * BIT(4) - CPP_CdPushReqQ_sts parity status + * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status + * BIT(7) - CPP_SbPushReqQ_sts parity status + * BIT(8) - CPP_SbPushDataQP_sts parity status + * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status + */ +#define ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \ + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10)) + +/* TI CD parity status */ +#define ADF_GEN6_TI_CD_PAR_STS 0x50063C + +/* TI CD parity error mask */ +#define ADF_GEN6_TI_CD_PAR_ERR_MASK 0x500638 + +/* + * TI CD parity status mask + * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status + * BIT(16) - Leaf2ClusterRam_sts parity status + * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status + * BIT(19) - VirtualQ_sts parity status + * BIT(20) - DtRdQ_sts parity status + * BIT(21) - DtWrQ_sts parity status + * BIT(22) - RiCmdQ_sts parity status + * BIT(23) - BypassQ_sts parity status + * BIT(24) - DtRdQ_sc_sts parity status + * BIT(25) - DtWrQ_sc_sts parity status + */ +#define ADF_GEN6_TI_CD_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25)) + +/* TI TRNSB parity status */ +#define ADF_GEN6_TI_TRNSB_PAR_STS 0x500648 + +/* TI TRNSB parity error reporting mask */ +#define ADF_GEN6_TI_TRNSB_PAR_ERR_MASK 0x500644 + +/* + * TI TRNSB parity status mask + * BIT(0) - TrnPHdrQP_sts parity status + * BIT(1) - TrnPHdrQRF_sts parity status + * BIT(2) - TrnPDataQP_sts parity status + * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status + * BIT(7) - TrnNpHdrQP_sts parity status + * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status + * BIT(10) - TrnCplHdrQ_sts parity status + * BIT(11) - TrnPutObsReqQ_sts parity status + * BIT(12) - TrnPushReqQ_sts parity status + * BIT(13) - SbSplitIdRam_sts parity status + * BIT(14) - SbReqCountQ_sts parity status + * BIT(15) - SbCplTrkRam_sts parity status + * BIT(16) - SbGetObsReqQ_sts parity status + * BIT(17) - SbEpochIdQ_sts parity status + * BIT(18) - SbAtCplHdrQ_sts parity status + * BIT(19) - SbAtCplDataQ_sts parity status + * BIT(20) - SbReqCountRam_sts parity status + * BIT(21) - SbAtCplHdrQ_sc_sts parity status + */ +#define ADF_GEN6_TI_TRNSB_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \ + BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21)) + +/* Status register to log misc error on RI */ +#define ADF_GEN6_RIMISCSTS 0x41B1B8 + +/* Status control register to log misc RI error */ +#define ADF_GEN6_RIMISCCTL 0x41B1BC + +/* + * ERRSOU2 bit mask + * BIT(0) - SSM Interrupt Mask + * BIT(1) - CFC on CPP. 
ORed of CFC Push error and Pull error + * BIT(2) - BIT(4) - CPP attention interrupts + * BIT(18) - PM interrupt + */ +#define ADF_GEN6_ERRSOU2_SSM_ERR_BIT BIT(0) +#define ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1) +#define ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK \ + (BIT(2) | BIT(3) | BIT(4)) + +#define ADF_GEN6_ERRSOU2_PM_INT_BIT BIT(18) + +#define ADF_GEN6_ERRSOU2_MASK \ + (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT) + +#define ADF_GEN6_ERRSOU2_DIS_MASK \ + (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \ + ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK) + +#define ADF_GEN6_IAINTSTATSSM 0x28 + +/* IAINTSTATSSM error bit mask definitions */ +#define ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT BIT(0) +#define ADF_GEN6_IAINTSTATSSM_PPERR_BIT BIT(2) +#define ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT BIT(4) +#define ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT BIT(5) +#define ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT BIT(6) +#define ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT BIT(7) + +#define ADF_GEN6_IAINTSTATSSM_MASK \ + (ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_PPERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT) + +#define ADF_GEN6_UERRSSMSH 0x18 + +/* + * UERRSSMSH error bit mask definitions + * + * BIT(0) - Indicates one uncorrectable error + * BIT(15) - Indicates multiple uncorrectable errors + * in device shared memory + */ +#define ADF_GEN6_UERRSSMSH_MASK (BIT(0) | BIT(15)) + +/* + * CERRSSMSH error bit + * BIT(0) - Indicates one correctable error + */ +#define ADF_GEN6_CERRSSMSH_ERROR_BIT (BIT(0) | BIT(15) | BIT(24)) +#define ADF_GEN6_CERRSSMSH 0x10 + +#define ADF_GEN6_INTMASKSSM 0x0 + +/* + * Error reporting mask in INTMASKSSM + * BIT(0) - Shared memory uncorrectable interrupt mask + * BIT(2) - PPERR interrupt mask + * BIT(4) - SCM parity error interrupt mask + * BIT(5) - CPP parity error interrupt mask + * BIT(6) - SHRAM RF parity error interrupt mask + * BIT(7) - AXI unexpected completion error mask + */ +#define ADF_GEN6_INTMASKSSM_MASK \ + (BIT(0) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7)) + +/* CPP push or pull error */ +#define ADF_GEN6_PPERR 0x8 + +#define ADF_GEN6_PPERR_MASK (BIT(0) | BIT(1)) + +/* + * SSM_FERR_STATUS error bit mask definitions + */ +#define ADF_GEN6_SCM_PAR_ERR_MASK BIT(5) +#define ADF_GEN6_CPP_PAR_ERR_MASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN6_UNEXP_CPL_ERR_MASK (BIT(3) | BIT(4) | BIT(10) | BIT(11)) +#define ADF_GEN6_RF_PAR_ERR_MASK BIT(16) + +#define ADF_GEN6_SSM_FERR_STATUS 0x9C + +#define ADF_GEN6_CPP_CFC_ERR_STATUS 0x640C04 + +/* + * BIT(0) - Indicates one or more CPP CFC errors + * BIT(1) - Indicates multiple CPP CFC errors + * BIT(7) - Indicates CPP CFC command parity error type + * BIT(8) - Indicates CPP CFC data parity error type + */ +#define ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT BIT(0) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8) +#define ADF_GEN6_CPP_CFC_FATAL_ERR_BIT \ + (ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT | \ + ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT) + +/* + * BIT(0) - Enables CFC to detect and log a push/pull data error + * BIT(1) - Enables CFC to generate interrupt to PCIEP for a CPP error + * BIT(4) - When 1 parity detection is disabled + * BIT(5) - When 1 parity detection is disabled on CPP command bus + * BIT(6) - When 1 parity detection is 
disabled on CPP push/pull bus + * BIT(9) - When 1 RF parity error detection is disabled + */ +#define ADF_GEN6_CPP_CFC_ERR_CTRL_MASK (BIT(0) | BIT(1)) + +#define ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK \ + (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10)) + +#define ADF_GEN6_CPP_CFC_ERR_CTRL 0x640C00 + +/* + * BIT(0) - Clears bit(0) of ADF_GEN6_CPP_CFC_ERR_STATUS + * when an error is reported on CPP + * BIT(1) - Clears bit(1) of ADF_GEN6_CPP_CFC_ERR_STATUS + * when multiple errors are reported on CPP + * BIT(2) - Clears bit(2) of ADF_GEN6_CPP_CFC_ERR_STATUS + * when attention interrupt is reported + */ +#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR 0x640C08 + +/* + * ERRSOU3 bit masks + * BIT(0) - indicates error response order overflow and/or BME error + * BIT(1) - indicates RI push/pull error + * BIT(2) - indicates TI push/pull error + * BIT(5) - indicates TI pull parity error + * BIT(6) - indicates RI push parity error + * BIT(7) - indicates VFLR interrupt + * BIT(8) - indicates ring pair interrupts for ATU detected fault + * BIT(9) - indicates rate limiting error + */ +#define ADF_GEN6_ERRSOU3_TIMISCSTS_BIT BIT(0) +#define ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK (BIT(1) | BIT(6)) +#define ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK (BIT(2) | BIT(5)) +#define ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT BIT(7) +#define ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8) +#define ADF_GEN6_ERRSOU3_RLTERROR_BIT BIT(9) +#define ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT BIT(16) +#define ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT BIT(17) +#define ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT BIT(18) +#define ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT BIT(19) +#define ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT BIT(20) + +#define ADF_GEN6_ERRSOU3_MASK ( \ + (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \ + (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) | \ + (ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) | \ + (ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) | \ + (ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) | \ + (ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT)) + +#define ADF_GEN6_ERRSOU3_DIS_MASK ( \ + (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \ + (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT)) + +/* Rate limiting error log register */ +#define ADF_GEN6_RLT_ERRLOG 0x508814 + +#define ADF_GEN6_RLT_ERRLOG_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* TI misc status register */ +#define ADF_GEN6_TIMISCSTS 0x50054C + +/* TI misc error reporting mask */ +#define ADF_GEN6_TIMISCCTL 0x500548 + +/* + * TI Misc error reporting control mask + * BIT(0) - Enables error detection and logging in TIMISCSTS register + * BIT(1) - It has effect only when SRIOV enabled, this bit is 0 by default + * BIT(2) - Enables the D-F-x counter within the dispatch arbiter + * to start based on the command triggered from + * BIT(30) - Disables VFLR functionality + * bits 1, 2 and 30 value should be preserved and not meant to be changed + * within RAS. 
+ */ +#define ADF_GEN6_TIMISCCTL_BIT BIT(0) +#define ADF_GEN6_TIMSCCTL_RELAY_MASK (BIT(1) | BIT(2) | BIT(30)) + +/* RI CPP interface status register */ +#define ADF_GEN6_RICPPINTSTS 0x41A330 + +/* + * Uncorrectable error mask in RICPPINTSTS register + * BIT(0) - RI asserted the CPP error signal during a push + * BIT(1) - RI detected the CPP error signal asserted during a pull + * BIT(2) - RI detected a push data parity error + * BIT(3) - RI detected a push valid parity error + */ +#define ADF_GEN6_RICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* RI CPP interface register control */ +#define ADF_GEN6_RICPPINTCTL 0x41A32C + +/* + * Control bit mask for RICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting + * on the RI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting + * on the RI CPP Pull interface + * BIT(2) - value of 1 enables error detection and reporting + * on the RI Parity + * BIT(3) - value of 1 enable checking parity on CPP + */ +#define ADF_GEN6_RICPPINTCTL_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* TI CPP interface status register */ +#define ADF_GEN6_TICPPINTSTS 0x50053C + +/* + * Uncorrectable error mask in TICPPINTSTS register + * BIT(0) - value of 1 indicates that the TI asserted + * the CPP error signal during a push + * BIT(1) - value of 1 indicates that the TI detected + * the CPP error signal asserted during a pull + * BIT(2) - value of 1 indicates that the TI detected + * a pull data parity error + */ +#define ADF_GEN6_TICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2)) + +/* TI CPP interface status register control */ +#define ADF_GEN6_TICPPINTCTL 0x500538 + +/* + * Control bit mask for TICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(2) - value of 1 enables parity error detection and logging on + * the TI CPP Pull interface + * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking + */ +#define ADF_GEN6_TICPPINTCTL_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* ATU fault status register */ +#define ADF_GEN6_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4)) + +#define ADF_GEN6_ATUFAULTSTATUS_BIT BIT(0) + +/* Command parity error detected on IOSFP command to QAT */ +#define ADF_GEN6_RIMISCSTS_BIT BIT(0) + +#define ADF_GEN6_GENSTS 0x41A220 +#define ADF_GEN6_GENSTS_DEVICE_STATE_MASK GENMASK(1, 0) +#define ADF_GEN6_GENSTS_RESET_TYPE_MASK GENMASK(3, 2) +#define ADF_GEN6_GENSTS_PFLR 0x1 +#define ADF_GEN6_GENSTS_COLD_RESET 0x3 +#define ADF_GEN6_GENSTS_DEVHALT 0x1 + +void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops); + +#endif /* ADF_GEN6_RAS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c new file mode 100644 index 000000000000..58a072e2f936 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/export.h> + +#include "adf_gen4_config.h" +#include "adf_gen4_hw_csr_data.h" +#include "adf_gen4_pfvf.h" +#include "adf_gen6_shared.h" + +struct adf_accel_dev; +struct adf_pfvf_ops; +struct adf_hw_csr_ops; + +/* + * QAT GEN4 and GEN6 devices often differ in terms of supported features, + * options and internal logic. 
However, some of the mechanisms and register + * layout are shared between those two GENs. This file serves as an abstraction + * layer that allows the existing GEN4 implementation, which is also + * applicable to GEN6, to be reused without additional overhead and complexity. + */ +void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops) +{ + adf_gen4_init_pf_pfvf_ops(pfvf_ops); +} +EXPORT_SYMBOL_GPL(adf_gen6_init_pf_pfvf_ops); + +void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + return adf_gen4_init_hw_csr_ops(csr_ops); +} +EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops); + +int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + return adf_gen4_cfg_dev_init(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init); + +int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + return adf_comp_dev_config(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_gen6_comp_dev_config); + +int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev) +{ + return adf_no_dev_config(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h new file mode 100644 index 000000000000..bc8e71e984fc --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_GEN6_SHARED_H_ +#define ADF_GEN6_SHARED_H_ + +struct adf_hw_csr_ops; +struct adf_accel_dev; +struct adf_pfvf_ops; + +void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops); +void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); +int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev); +int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev); +int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev); +#endif/* ADF_GEN6_SHARED_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_timer.c index 35ccb91d6ec1..8962a49f145a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_timer.c @@ -12,9 +12,9 @@ #include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" -#include "adf_gen4_timer.h" +#include "adf_timer.h" -#define ADF_GEN4_TIMER_PERIOD_MS 200 +#define ADF_DEFAULT_TIMER_PERIOD_MS 200 /* This periodic update is used to trigger HB, RL & TL fw events */ static void work_handler(struct work_struct *work) @@ -27,16 +27,16 @@ static void work_handler(struct work_struct *work) accel_dev = timer_ctx->accel_dev; adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx, - msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS)); + msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS)); time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime), - ADF_GEN4_TIMER_PERIOD_MS); + ADF_DEFAULT_TIMER_PERIOD_MS); if (adf_send_admin_tim_sync(accel_dev, time_periods)) dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n"); } -int adf_gen4_timer_start(struct adf_accel_dev *accel_dev) +int adf_timer_start(struct adf_accel_dev *accel_dev) { struct adf_timer *timer_ctx; @@ -50,13 +50,13 @@ int adf_gen4_timer_start(struct adf_accel_dev *accel_dev) INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler); adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx, - msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS)); + msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS)); return 0; } -EXPORT_SYMBOL_GPL(adf_gen4_timer_start); +EXPORT_SYMBOL_GPL(adf_timer_start); -void 
adf_gen4_timer_stop(struct adf_accel_dev *accel_dev) +void adf_timer_stop(struct adf_accel_dev *accel_dev) { struct adf_timer *timer_ctx = accel_dev->timer; @@ -68,4 +68,4 @@ void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev) kfree(timer_ctx); accel_dev->timer = NULL; } -EXPORT_SYMBOL_GPL(adf_gen4_timer_stop); +EXPORT_SYMBOL_GPL(adf_timer_stop); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_timer.h index 66a709e7b358..68e5136d6ba1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h +++ b/drivers/crypto/intel/qat/qat_common/adf_timer.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright(c) 2023 Intel Corporation */ -#ifndef ADF_GEN4_TIMER_H_ -#define ADF_GEN4_TIMER_H_ +#ifndef ADF_TIMER_H_ +#define ADF_TIMER_H_ #include <linux/ktime.h> #include <linux/workqueue.h> @@ -15,7 +15,7 @@ struct adf_timer { ktime_t initial_ktime; }; -int adf_gen4_timer_start(struct adf_accel_dev *accel_dev); -void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev); +int adf_timer_start(struct adf_accel_dev *accel_dev); +void adf_timer_stop(struct adf_accel_dev *accel_dev); -#endif /* ADF_GEN4_TIMER_H_ */ +#endif /* ADF_TIMER_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h index 04f645957e28..81969c515a17 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h @@ -44,6 +44,7 @@ enum icp_qat_fw_comp_20_cmd_id { #define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1 #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7 #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1 +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE 0xFFFFFFFF #define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \ ret_uncomp, secure_ram) \ @@ -117,7 +118,7 @@ struct icp_qat_fw_comp_req_params { #define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \ cnvdfx, crc, xxhash_acc, \ cnv_error_type, append_crc, \ - drop_data) \ + drop_data, partial_decomp) \ ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \ ICP_QAT_FW_COMP_SOP_BITPOS) | \ (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \ @@ -139,7 +140,9 @@ struct icp_qat_fw_comp_req_params { (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \ << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \ (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \ - << ICP_QAT_FW_COMP_DROP_DATA_BITPOS)) + << ICP_QAT_FW_COMP_DROP_DATA_BITPOS) | \ + (((partial_decomp) & ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK) \ + << ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS)) #define ICP_QAT_FW_COMP_NOT_SOP 0 #define ICP_QAT_FW_COMP_SOP 1 @@ -161,6 +164,8 @@ struct icp_qat_fw_comp_req_params { #define ICP_QAT_FW_COMP_NO_APPEND_CRC 0 #define ICP_QAT_FW_COMP_DROP_DATA 1 #define ICP_QAT_FW_COMP_NO_DROP_DATA 0 +#define ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS 1 +#define ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS 0 #define ICP_QAT_FW_COMP_SOP_BITPOS 0 #define ICP_QAT_FW_COMP_SOP_MASK 0x1 #define ICP_QAT_FW_COMP_EOP_BITPOS 1 @@ -189,6 +194,8 @@ struct icp_qat_fw_comp_req_params { #define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1 #define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25 #define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1 +#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS 27 +#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK 0x1 #define ICP_QAT_FW_COMP_SOP_GET(flags) \ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \ @@ -281,8 +288,18 @@ struct icp_qat_fw_comp_req { union { struct 
icp_qat_fw_xlt_req_params xlt_pars; __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; + struct { + __u32 partial_decompress_length; + __u32 partial_decompress_offset; + } partial_decompress; } u1; - __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + union { + __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + struct { + __u32 asb_value; + __u32 reserved; + } asb_threshold; + } u3; struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl; union { struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl; diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h index 7eb5daef4f88..6887930c7995 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h @@ -35,6 +35,7 @@ struct icp_qat_fw_loader_chip_info { u32 wakeup_event_val; bool fw_auth; bool css_3k; + bool dual_sign; bool tgroup_share_ustore; u32 fcu_ctl_csr; u32 fcu_sts_csr; diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h new file mode 100644 index 000000000000..dce639152345 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ICP_QAT_HW_51_COMP_H_ +#define ICP_QAT_HW_51_COMP_H_ + +#include <linux/types.h> + +#include "icp_qat_fw.h" +#include "icp_qat_hw_51_comp_defs.h" + +struct icp_qat_hw_comp_51_config_csr_lower { + enum icp_qat_hw_comp_51_abd abd; + enum icp_qat_hw_comp_51_lllbd_ctrl lllbd; + enum icp_qat_hw_comp_51_search_depth sd; + enum icp_qat_hw_comp_51_min_match_control mmctrl; + enum icp_qat_hw_comp_51_lz4_block_checksum lbc; +}; + +static inline u32 +ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_51_config_csr_lower csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.abd, + ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK); + QAT_FIELD_SET(val32, csr.lllbd, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK); + QAT_FIELD_SET(val32, csr.sd, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK); + QAT_FIELD_SET(val32, csr.mmctrl, + ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.lbc, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK); + + return val32; +} + +struct icp_qat_hw_comp_51_config_csr_upper { + enum icp_qat_hw_comp_51_dmm_algorithm edmm; + enum icp_qat_hw_comp_51_bms bms; + enum icp_qat_hw_comp_51_scb_mode_reset_mask scb_mode_reset; +}; + +static inline u32 +ICP_QAT_FW_COMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_51_config_csr_upper csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.edmm, + ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK); + QAT_FIELD_SET(val32, csr.bms, + ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK); + QAT_FIELD_SET(val32, csr.scb_mode_reset, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK); + + return val32; +} + +struct icp_qat_hw_decomp_51_config_csr_lower { + enum icp_qat_hw_decomp_51_lz4_block_checksum lbc; +}; + +static inline u32 +ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_LOWER(struct 
icp_qat_hw_decomp_51_config_csr_lower csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.lbc, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK); + + return val32; +} + +struct icp_qat_hw_decomp_51_config_csr_upper { + enum icp_qat_hw_decomp_51_bms bms; +}; + +static inline u32 +ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_51_config_csr_upper csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.bms, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK); + + return val32; +} + +#endif /* ICP_QAT_HW_51_COMP_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h new file mode 100644 index 000000000000..e745688c5da4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ICP_QAT_HW_51_COMP_DEFS_H_ +#define ICP_QAT_HW_51_COMP_DEFS_H_ + +#include <linux/bits.h> + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_BITPOS 28 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_MASK GENMASK(1, 0) +enum icp_qat_hw_comp_51_som_control { + ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE = 0x0, + ICP_QAT_HW_COMP_51_SOM_CONTROL_DICTIONARY_MODE = 0x1, + ICP_QAT_HW_COMP_51_SOM_CONTROL_INPUT_CRC = 0x2, + ICP_QAT_HW_COMP_51_SOM_CONTROL_RESERVED_MODE = 0x3, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_rd_control { + ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_BITPOS 25 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_bypass_compression { + ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED = 0x0, + ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_ENABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS 22 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_dmm_algorithm { + ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_DMM_ALGORITHM_ZSTD_DMM_LITE = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_BITPOS 21 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_token_fusion_internal_only { + ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS 19 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0) +enum icp_qat_hw_comp_51_bms { + 
ICP_QAT_HW_COMP_51_BMS_BMS_64KB = 0x0, + ICP_QAT_HW_COMP_51_BMS_BMS_256KB = 0x1, + ICP_QAT_HW_COMP_51_BMS_BMS_1MB = 0x2, + ICP_QAT_HW_COMP_51_BMS_BMS_4MB = 0x3, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_BMS_BMS_64KB +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_scb_mode_reset_mask { + ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT = 0x0, + ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_RESET_HB_HT = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_zstd_frame_gen_dec_en { + ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0, + ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_BITPOS 1 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_cnv_disable { + ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_BITPOS 0 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_asb_disable { + ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_spec_decoder_internal_only { + ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0, + ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_mini_xcam_internal_only { + ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0, + ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_BITPOS 19 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_rep_off_enc_internal_only { + ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_BITPOS 18 +#define 
ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_prog_block_drop_internal_only { + ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE = 0x0, + ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_ENABLE = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_BITPOS 17 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_override_internal_only { + ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_OVERRIDE_HASH_PARAMS = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_BITPOS 14 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0) +enum icp_qat_hw_comp_51_hbs { + ICP_QAT_HW_COMP_51_HBS_32KB = 0x0, + ICP_QAT_HW_COMP_51_HBS_64KB = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_HBS_32KB +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS 13 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_abd { + ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_ABD_ABD_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS 12 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_lllbd_ctrl { + ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK GENMASK(3, 0) +enum icp_qat_hw_comp_51_search_depth { + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 = 0x1, + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6 = 0x3, + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_9 = 0x4, + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_10 = 0x4, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_BITPOS 5 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0) +enum icp_qat_hw_comp_51_format { + ICP_QAT_HW_COMP_51_FORMAT_ILZ77 = 0x1, + ICP_QAT_HW_COMP_51_FORMAT_LZ4 = 0x2, + ICP_QAT_HW_COMP_51_FORMAT_LZ4s = 0x3, + ICP_QAT_HW_COMP_51_FORMAT_ZSTD = 0x4, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_FORMAT_ILZ77 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_min_match_control { + ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B = 0x0, + ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_collision { + 
ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_update { + ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_BITPOS 1 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_byte_skip { + ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN = 0x0, + ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_LITERAL = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_lz4_block_checksum { + ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0, + ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_BITPOS 26 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_discard_data { + ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED = 0x0, + ICP_QAT_HW_DECOMP_51_DISCARD_DATA_ENABLED = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS 19 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0) +enum icp_qat_hw_decomp_51_bms { + ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB = 0x0, + ICP_QAT_HW_DECOMP_51_BMS_BMS_256KB = 0x1, + ICP_QAT_HW_DECOMP_51_BMS_BMS_1MB = 0x2, + ICP_QAT_HW_DECOMP_51_BMS_BMS_4MB = 0x3, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_zstd_frame_gen_dec_en { + ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0, + ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_spec_decoder_internal_only { + ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0, + ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_mini_xcam_internal_only { + ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0, + 
ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_BITPOS 14 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0) +enum icp_qat_hw_decomp_51_hbs { + ICP_QAT_HW_DECOMP_51_HBS_32KB = 0x0, + ICP_QAT_HW_DECOMP_51_HBS_64KB = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_HBS_32KB +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_BITPOS 5 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0) +enum icp_qat_hw_decomp_51_format { + ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 = 0x1, + ICP_QAT_HW_DECOMP_51_FORMAT_LZ4 = 0x2, + ICP_QAT_HW_DECOMP_51_FORMAT_RESERVED = 0x3, + ICP_QAT_HW_DECOMP_51_FORMAT_ZSTD = 0x4, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_lz4_block_checksum { + ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0, + ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT + +#endif /* ICP_QAT_HW_51_COMP_DEFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 1c7bcd8e4055..6313c35eff0c 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -7,6 +7,7 @@ #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 +#define ICP_QAT_AC_6XXX_DEV_TYPE 0x80000000 #define ICP_QAT_UCLO_MAX_AE 17 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) @@ -81,6 +82,21 @@ #define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000 #define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000 +/* All lengths below are in bytes */ +#define ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN 12 +#define ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN 16 +#define ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN 3540 +#define ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN 64 +#define ICP_QAT_DUALSIGN_XMSS_SIG_LEN 2692 +#define ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN 2696 +#define ICP_QAT_DUALSIGN_MISC_INFO_LEN 16 +#define ICP_QAT_DUALSIGN_FW_TYPE_LEN 7 +#define ICP_QAT_DUALSIGN_MODULE_TYPE 0x14 +#define ICP_QAT_DUALSIGN_HDR_LEN 0x375 +#define ICP_QAT_DUALSIGN_HDR_VER 0x40001 +#define ICP_QAT_DUALSIGN_HDR_LEN_OFFSET 4 +#define ICP_QAT_DUALSIGN_HDR_VER_OFFSET 8 + #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) #define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) @@ -440,6 +456,13 @@ struct icp_qat_fw_auth_desc { unsigned int img_ae_init_data_low; unsigned int img_ae_insts_high; unsigned int img_ae_insts_low; + unsigned int cpp_mask; + unsigned int reserved; + unsigned int xmss_pubkey_high; + unsigned int xmss_pubkey_low; + unsigned int xmss_sig_high; + unsigned int xmss_sig_low; + unsigned int reserved2[2]; }; struct icp_qat_auth_chunk { diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index a6e02405d402..8b123472b71c 100644 --- 
a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -8,6 +8,7 @@ #include <linux/workqueue.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_dc.h" #include "qat_bl.h" #include "qat_comp_req.h" #include "qat_compression.h" @@ -145,9 +146,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) return -EINVAL; ctx->inst = inst; - ctx->inst->build_deflate_ctx(ctx->comp_ctx); - - return 0; + return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE); } static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) @@ -241,13 +240,13 @@ static struct acomp_alg qat_acomp[] = { { .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct qat_compression_ctx), + .cra_reqsize = sizeof(struct qat_compression_req), .cra_module = THIS_MODULE, }, .init = qat_comp_alg_init_tfm, .exit = qat_comp_alg_exit_tfm, .compress = qat_comp_alg_compress, .decompress = qat_comp_alg_decompress, - .reqsize = sizeof(struct qat_compression_req), }}; int qat_comp_algs_register(void) diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c index 7842a9f22178..c285b45b8679 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_compression.c +++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c @@ -144,7 +144,6 @@ static int qat_compression_create_instances(struct adf_accel_dev *accel_dev) inst->id = i; atomic_set(&inst->refctr, 0); inst->accel_dev = accel_dev; - inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx; snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); ret = adf_cfg_get_param_value(accel_dev, SEC, key, val); diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h index aebac2302dcf..5ced3ed0e5ea 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_compression.h +++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h @@ -20,7 +20,6 @@ struct qat_compression_instance { atomic_t refctr; struct qat_instance_backlog backlog; struct adf_dc_data *dc_data; - void (*build_deflate_ctx)(void *ctx); }; static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev) diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index ef8a9cf74f0c..da4eca6e1633 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -694,16 +694,17 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, handle->pci_dev = pci_info->pci_dev; switch (handle->pci_dev->device) { - case ADF_4XXX_PCI_DEVICE_ID: - case ADF_401XX_PCI_DEVICE_ID: - case ADF_402XX_PCI_DEVICE_ID: - case ADF_420XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_4XXX: + case PCI_DEVICE_ID_INTEL_QAT_401XX: + case PCI_DEVICE_ID_INTEL_QAT_402XX: + case PCI_DEVICE_ID_INTEL_QAT_420XX: + case PCI_DEVICE_ID_INTEL_QAT_6XXX: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; - if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) + if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX) handle->chip_info->icp_rst_mask = 0x100155; else handle->chip_info->icp_rst_mask = 0x100015; @@ -712,6 +713,8 @@ static int qat_hal_chip_init(struct 
icp_qat_fw_loader_handle *handle, handle->chip_info->wakeup_event_val = 0x80000000; handle->chip_info->fw_auth = true; handle->chip_info->css_3k = true; + if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) + handle->chip_info->dual_sign = true; handle->chip_info->tgroup_share_ustore = true; handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX; handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 7678a93c6853..21d652a1c8ef 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1,11 +1,16 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ + +#define pr_fmt(fmt) "QAT: " fmt + #include <linux/align.h> +#include <linux/bitops.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/pci_ids.h> +#include <linux/wordpart.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" @@ -59,7 +64,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) unsigned int i; if (!ae_data) { - pr_err("QAT: bad argument, ae_data is NULL\n"); + pr_err("bad argument, ae_data is NULL\n"); return -EINVAL; } @@ -86,12 +91,11 @@ static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) int min = hdr->min_ver & 0xff; if (hdr->file_id != ICP_QAT_UOF_FID) { - pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); + pr_err("Invalid header 0x%x\n", hdr->file_id); return -EINVAL; } if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { - pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", - maj, min); + pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } return 0; @@ -103,20 +107,19 @@ static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) int min = suof_hdr->min_ver & 0xff; if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { - pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); + pr_err("invalid header 0x%x\n", suof_hdr->file_id); return -EINVAL; } if (suof_hdr->fw_type != 0) { - pr_err("QAT: unsupported firmware type\n"); + pr_err("unsupported firmware type\n"); return -EINVAL; } if (suof_hdr->num_chunks <= 0x1) { - pr_err("QAT: SUOF chunk amount is incorrect\n"); + pr_err("SUOF chunk amount is incorrect\n"); return -EINVAL; } if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { - pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", - maj, min); + pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } return 0; @@ -223,24 +226,24 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, char *str; if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { - pr_err("QAT: initmem is out of range"); + pr_err("initmem is out of range"); return -EINVAL; } if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { - pr_err("QAT: Memory scope for init_mem error\n"); + pr_err("Memory scope for init_mem error\n"); return -EINVAL; } str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); if (!str) { - pr_err("QAT: AE name assigned in UOF init table is NULL\n"); + pr_err("AE name assigned in UOF init table is NULL\n"); return -EINVAL; } if (qat_uclo_parse_num(str, ae)) { - pr_err("QAT: Parse num for AE number failed\n"); + pr_err("Parse num for AE number failed\n"); return -EINVAL; } if (*ae >= ICP_QAT_UCLO_MAX_AE) { - pr_err("QAT: ae %d 
out of range\n", *ae); + pr_err("ae %d out of range\n", *ae); return -EINVAL; } return 0; @@ -356,8 +359,7 @@ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, return -EINVAL; break; default: - pr_err("QAT: initmem region error. region type=0x%x\n", - init_mem->region); + pr_err("initmem region error. region type=0x%x\n", init_mem->region); return -EINVAL; } return 0; @@ -431,7 +433,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { if (qat_hal_batch_wr_lm(handle, ae, obj_handle->lm_init_tab[ae])) { - pr_err("QAT: fail to batch init lmem for AE %d\n", ae); + pr_err("fail to batch init lmem for AE %d\n", ae); return -EINVAL; } qat_uclo_cleanup_batch_init_list(handle, @@ -539,26 +541,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, code_page->imp_expr_tab_offset); if (uc_var_tab->entry_num || imp_var_tab->entry_num || imp_expr_tab->entry_num) { - pr_err("QAT: UOF can't contain imported variable to be parsed\n"); + pr_err("UOF can't contain imported variable to be parsed\n"); return -EINVAL; } neigh_reg_tab = (struct icp_qat_uof_objtable *) (encap_uof_obj->beg_uof + code_page->neigh_reg_tab_offset); if (neigh_reg_tab->entry_num) { - pr_err("QAT: UOF can't contain neighbor register table\n"); + pr_err("UOF can't contain neighbor register table\n"); return -EINVAL; } if (image->numpages > 1) { - pr_err("QAT: UOF can't contain multiple pages\n"); + pr_err("UOF can't contain multiple pages\n"); return -EINVAL; } if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { - pr_err("QAT: UOF can't use shared control store feature\n"); + pr_err("UOF can't use shared control store feature\n"); return -EFAULT; } if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { - pr_err("QAT: UOF can't use reloadable feature\n"); + pr_err("UOF can't use reloadable feature\n"); return -EFAULT; } return 0; @@ -677,7 +679,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) } } if (!mflag) { - pr_err("QAT: uimage uses AE not set\n"); + pr_err("uimage uses AE not set\n"); return -EINVAL; } return 0; @@ -731,14 +733,15 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) return ICP_QAT_AC_C62X_DEV_TYPE; case PCI_DEVICE_ID_INTEL_QAT_C3XXX: return ICP_QAT_AC_C3XXX_DEV_TYPE; - case ADF_4XXX_PCI_DEVICE_ID: - case ADF_401XX_PCI_DEVICE_ID: - case ADF_402XX_PCI_DEVICE_ID: - case ADF_420XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_4XXX: + case PCI_DEVICE_ID_INTEL_QAT_401XX: + case PCI_DEVICE_ID_INTEL_QAT_402XX: + case PCI_DEVICE_ID_INTEL_QAT_420XX: return ICP_QAT_AC_4XXX_A_DEV_TYPE; + case PCI_DEVICE_ID_INTEL_QAT_6XXX: + return ICP_QAT_AC_6XXX_DEV_TYPE; default: - pr_err("QAT: unsupported device 0x%x\n", - handle->pci_dev->device); + pr_err("unsupported device 0x%x\n", handle->pci_dev->device); return 0; } } @@ -748,7 +751,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) unsigned int maj_ver, prod_type = obj_handle->prod_type; if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { - pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", + pr_err("UOF type 0x%x doesn't match with platform 0x%x\n", obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, prod_type); return -EINVAL; @@ -756,7 +759,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) maj_ver = obj_handle->prod_rev & 0xff; if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || 
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { - pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); + pr_err("UOF majVer 0x%x out of range\n", maj_ver); return -EINVAL; } return 0; @@ -799,7 +802,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_NEIGH_REL: return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); default: - pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type); + pr_err("UOF uses not supported reg type 0x%x\n", reg_type); return -EFAULT; } return 0; @@ -835,8 +838,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, case ICP_QAT_UOF_INIT_REG_CTX: /* check if ctx is appropriate for the ctxMode */ if (!((1 << init_regsym->ctx) & ctx_mask)) { - pr_err("QAT: invalid ctx num = 0x%x\n", - init_regsym->ctx); + pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx); return -EINVAL; } qat_uclo_init_reg(handle, ae, @@ -848,10 +850,10 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, exp_res); break; case ICP_QAT_UOF_INIT_EXPR: - pr_err("QAT: INIT_EXPR feature not supported\n"); + pr_err("INIT_EXPR feature not supported\n"); return -EINVAL; case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: - pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n"); + pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n"); return -EINVAL; default: break; @@ -871,7 +873,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) return 0; if (obj_handle->init_mem_tab.entry_num) { if (qat_uclo_init_memory(handle)) { - pr_err("QAT: initialize memory failed\n"); + pr_err("initialize memory failed\n"); return -EINVAL; } } @@ -900,40 +902,40 @@ static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, mode = ICP_QAT_CTX_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_ctx_mode(handle, ae, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); + pr_err("qat_hal_set_ae_ctx_mode error\n"); return ret; } if (handle->chip_info->nn) { mode = ICP_QAT_NN_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_nn_mode(handle, ae, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); + pr_err("qat_hal_set_ae_nn_mode error\n"); return ret; } } mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n"); return ret; } mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n"); return ret; } if (handle->chip_info->lm2lm3) { mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n"); return ret; } mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n"); return ret; } mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); @@ -997,7 +999,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { - pr_err("QAT: UOF incompatible\n"); + pr_err("UOF incompatible\n"); 
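/*
 * A minimal illustrative sketch (not taken from the patch) of the pr_fmt()
 * mechanism the "QAT: " prefix removals above rely on: once pr_fmt() is
 * defined before the printk headers are pulled in, every pr_*() call in the
 * translation unit gets the prefix prepended automatically, so individual
 * messages no longer need to repeat it. example_log_uof_version() is a
 * made-up helper.
 */
#define pr_fmt(fmt) "QAT: " fmt

#include <linux/printk.h>

static void example_log_uof_version(int maj, int min)
{
	/* Expands to printk(KERN_ERR "QAT: " "bad UOF version ...\n", ...) */
	pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
}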
return -EINVAL; } obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64), @@ -1008,7 +1010,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) if (!obj_handle->obj_hdr->file_buff || !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, &obj_handle->str_table)) { - pr_err("QAT: UOF doesn't have effective images\n"); + pr_err("UOF doesn't have effective images\n"); goto out_err; } obj_handle->uimage_num = @@ -1017,7 +1019,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) if (!obj_handle->uimage_num) goto out_err; if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { - pr_err("QAT: Bad object\n"); + pr_err("Bad object\n"); goto out_check_uof_aemask_err; } qat_uclo_init_uword_num(handle); @@ -1034,6 +1036,36 @@ out_err: return -EFAULT; } +static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle) +{ + if (handle->chip_info->dual_sign) + return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN; + + return ICP_QAT_AE_IMG_OFFSET(handle); +} + +static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle) +{ + if (handle->chip_info->dual_sign) + return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN; + + return ICP_QAT_AE_IMG_OFFSET(handle); +} + +static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr) +{ + struct icp_qat_css_hdr *hdr = img_ptr; + char *fw_hdr = img_ptr; + unsigned int offset; + + if (handle->chip_info->dual_sign) { + offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN; + return *(fw_hdr + offset); + } + + return hdr->fw_type; +} + static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_filehdr *suof_ptr, int suof_size) @@ -1050,7 +1082,7 @@ static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, min_ver_offset); if (check_sum != suof_ptr->check_sum) { - pr_err("QAT: incorrect SUOF checksum\n"); + pr_err("incorrect SUOF checksum\n"); return -EINVAL; } suof_handle->check_sum = suof_ptr->check_sum; @@ -1065,9 +1097,9 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; - unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle); - struct icp_qat_simg_ae_mode *ae_mode; + unsigned int offset = qat_uclo_simg_hdr2cont_len(handle); struct icp_qat_suof_objhdr *suof_objhdr; + struct icp_qat_simg_ae_mode *ae_mode; suof_img_hdr->simg_buf = (suof_handle->suof_buf + suof_chunk_hdr->offset + @@ -1112,14 +1144,13 @@ static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (img_ae_mode->dev_type != prod_type) { - pr_err("QAT: incompatible product type %x\n", - img_ae_mode->dev_type); + pr_err("incompatible product type %x\n", img_ae_mode->dev_type); return -EINVAL; } maj_ver = prod_rev & 0xff; if (maj_ver > img_ae_mode->devmax_ver || maj_ver < img_ae_mode->devmin_ver) { - pr_err("QAT: incompatible device majver 0x%x\n", maj_ver); + pr_err("incompatible device majver 0x%x\n", maj_ver); return -EINVAL; } return 0; @@ -1162,7 +1193,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_img_hdr img_header; if (!suof_ptr || suof_size == 0) { - pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); + 
pr_err("input parameter SUOF pointer/size is NULL\n"); return -EINVAL; } if (qat_uclo_check_suof_format(suof_ptr)) @@ -1205,7 +1236,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, } #define ADD_ADDR(high, low) ((((u64)high) << 32) + low) -#define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) @@ -1223,7 +1253,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi; fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo; - SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD)); + SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32)); SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr); SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); @@ -1237,7 +1267,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, return 0; } while (retry++ < FW_AUTH_MAX_RETRY); auth_fail: - pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", + pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n", fcu_sts & FCU_AUTH_STS_MASK, retry); return -EINVAL; } @@ -1273,14 +1303,13 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, fcu_sts_csr = handle->chip_info->fcu_sts_csr; fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr; } else { - pr_err("Chip 0x%x doesn't support broadcast load\n", - handle->pci_dev->device); + pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device); return -EINVAL; } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { if (qat_hal_check_ae_active(handle, (unsigned char)ae)) { - pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n"); + pr_err("Broadcast load failed. AE is not enabled or active.\n"); return -EINVAL; } @@ -1312,7 +1341,7 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { - pr_err("QAT: broadcast load failed timeout %d\n", retry); + pr_err("broadcast load failed timeout %d\n", retry); return -EINVAL; } } @@ -1366,24 +1395,38 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, } static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, - char *image, unsigned int size, + void *image, unsigned int size, unsigned int fw_type) { char *fw_type_name = fw_type ? 
"MMP" : "AE"; unsigned int css_dword_size = sizeof(u32); + unsigned int header_len, simg_type; + struct icp_qat_css_hdr *css_hdr; if (handle->chip_info->fw_auth) { - struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; - unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle); + header_len = qat_uclo_simg_hdr2sign_len(handle); + simg_type = qat_uclo_simg_fw_type(handle, image); + css_hdr = image; + + if (handle->chip_info->dual_sign) { + if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE) + goto err; + if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN) + goto err; + if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER) + goto err; + } else { + if (css_hdr->header_len * css_dword_size != header_len) + goto err; + if (css_hdr->size * css_dword_size != size) + goto err; + if (size <= header_len) + goto err; + } - if ((css_hdr->header_len * css_dword_size) != header_len) - goto err; - if ((css_hdr->size * css_dword_size) != size) - goto err; - if (fw_type != css_hdr->fw_type) - goto err; - if (size <= header_len) + if (fw_type != simg_type) goto err; + size -= header_len; } @@ -1397,123 +1440,95 @@ static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN) goto err; } else { - pr_err("QAT: Unsupported firmware type\n"); + pr_err("Unsupported firmware type\n"); return -EINVAL; } return 0; err: - pr_err("QAT: Invalid %s firmware image\n", fw_type_name); + pr_err("Invalid %s firmware image\n", fw_type_name); return -EINVAL; } -static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, - char *image, unsigned int size, - struct icp_qat_fw_auth_desc **desc) +static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_firml_dram_desc *dram_desc, + unsigned int fw_type, struct icp_qat_fw_auth_desc **desc) { struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; - struct icp_qat_fw_auth_desc *auth_desc; - struct icp_qat_auth_chunk *auth_chunk; - u64 virt_addr, bus_addr, virt_base; - unsigned int simg_offset = sizeof(*auth_chunk); struct icp_qat_simg_ae_mode *simg_ae_mode; - struct icp_firml_dram_desc img_desc; - int ret; - - ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); - if (ret) { - pr_err("QAT: error, allocate continuous dram fail\n"); - return ret; - } - - if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) { - pr_debug("QAT: invalid address\n"); - qat_uclo_simg_free(handle, &img_desc); - return -EINVAL; - } + struct icp_qat_fw_auth_desc *auth_desc; + char *virt_addr, *virt_base; + u64 bus_addr; - auth_chunk = img_desc.dram_base_addr_v; - auth_chunk->chunk_size = img_desc.dram_size; - auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; - virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset; - bus_addr = img_desc.dram_bus_addr + simg_offset; - auth_desc = img_desc.dram_base_addr_v; - auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->css_hdr_low = (unsigned int)bus_addr; + virt_base = dram_desc->dram_base_addr_v; + virt_base += sizeof(struct icp_qat_auth_chunk); + bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk); + auth_desc = dram_desc->dram_base_addr_v; + auth_desc->css_hdr_high = upper_32_bits(bus_addr); + auth_desc->css_hdr_low = lower_32_bits(bus_addr); virt_addr = virt_base; - memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); + memcpy(virt_addr, image, sizeof(*css_hdr)); /* pub key */ bus_addr = 
ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + sizeof(*css_hdr); virt_addr = virt_addr + sizeof(*css_hdr); - auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->fwsk_pub_low = (unsigned int)bus_addr; + auth_desc->fwsk_pub_high = upper_32_bits(bus_addr); + auth_desc->fwsk_pub_low = lower_32_bits(bus_addr); - memcpy((void *)(uintptr_t)virt_addr, - (void *)(image + sizeof(*css_hdr)), - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); + memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); /* padding */ memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)), 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle)); /* exponent */ - memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_PAD_LEN(handle)), - (void *)(image + sizeof(*css_hdr) + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)), - sizeof(unsigned int)); + memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + + ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) + + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int)); /* signature */ bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) + ICP_QAT_CSS_FWSK_PUB_LEN(handle); virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle); - auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->signature_low = (unsigned int)bus_addr; + auth_desc->signature_high = upper_32_bits(bus_addr); + auth_desc->signature_low = lower_32_bits(bus_addr); - memcpy((void *)(uintptr_t)virt_addr, - (void *)(image + sizeof(*css_hdr) + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)), - ICP_QAT_CSS_SIGNATURE_LEN(handle)); + memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + + ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle)); bus_addr = ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) + ICP_QAT_CSS_SIGNATURE_LEN(handle); virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle); - auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->img_low = (unsigned int)bus_addr; - auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle); - if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr + - ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { - pr_err("QAT: insufficient memory size for authentication data\n"); - qat_uclo_simg_free(handle, &img_desc); + auth_desc->img_high = upper_32_bits(bus_addr); + auth_desc->img_low = lower_32_bits(bus_addr); + auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle); + if (bus_addr + auth_desc->img_len > + dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { + pr_err("insufficient memory size for authentication data\n"); + qat_uclo_simg_free(handle, dram_desc); return -ENOMEM; } - memcpy((void *)(uintptr_t)virt_addr, - (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)), - auth_desc->img_len); + memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len); virt_addr = virt_base; /* AE firmware */ - if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == - CSS_AE_FIRMWARE) { + if (fw_type == CSS_AE_FIRMWARE) { auth_desc->img_ae_mode_data_high = auth_desc->img_high; auth_desc->img_ae_mode_data_low = auth_desc->img_low; bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, auth_desc->img_ae_mode_data_low) + sizeof(struct icp_qat_simg_ae_mode); - auth_desc->img_ae_init_data_high = (unsigned int) - (bus_addr >> BITS_IN_DWORD); - auth_desc->img_ae_init_data_low = (unsigned 
int)bus_addr; + auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr); + auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr); bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; - auth_desc->img_ae_insts_high = (unsigned int) - (bus_addr >> BITS_IN_DWORD); - auth_desc->img_ae_insts_low = (unsigned int)bus_addr; + auth_desc->img_ae_insts_high = upper_32_bits(bus_addr); + auth_desc->img_ae_insts_low = lower_32_bits(bus_addr); virt_addr += sizeof(struct icp_qat_css_hdr); virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle); virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle); @@ -1527,6 +1542,141 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, return 0; } +static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_firml_dram_desc *dram_desc, + unsigned int fw_type, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_qat_simg_ae_mode *simg_ae_mode; + struct icp_qat_fw_auth_desc *auth_desc; + unsigned int chunk_offset, img_offset; + u64 bus_addr, addr; + char *virt_addr; + + virt_addr = dram_desc->dram_base_addr_v; + virt_addr += sizeof(struct icp_qat_auth_chunk); + bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk); + + auth_desc = dram_desc->dram_base_addr_v; + auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle); + auth_desc->css_hdr_high = upper_32_bits(bus_addr); + auth_desc->css_hdr_low = lower_32_bits(bus_addr); + memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN); + + img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN; + chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN; + + /* RSA pub key */ + addr = bus_addr + chunk_offset; + auth_desc->fwsk_pub_high = upper_32_bits(addr); + auth_desc->fwsk_pub_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); + + img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle); + chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle); + /* RSA padding */ + memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle)); + + chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle); + /* RSA exponent */ + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)); + + img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); + chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); + /* RSA signature */ + addr = bus_addr + chunk_offset; + auth_desc->signature_high = upper_32_bits(addr); + auth_desc->signature_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle)); + + img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle); + chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle); + /* XMSS pubkey */ + addr = bus_addr + chunk_offset; + auth_desc->xmss_pubkey_high = upper_32_bits(addr); + auth_desc->xmss_pubkey_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN); + + img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN; + chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN; + /* XMSS signature */ + addr = bus_addr + chunk_offset; + auth_desc->xmss_sig_high = upper_32_bits(addr); + auth_desc->xmss_sig_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN); + + img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN; + chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN; + + if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) { + pr_err("auth chunk memory size is not enough to store data\n"); 
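/*
 * A minimal illustrative sketch of the address-splitting pattern used by
 * qat_uclo_build_auth_desc_RSA() and qat_uclo_build_auth_desc_dualsign()
 * above: a 64-bit DMA bus address is stored as a pair of 32-bit descriptor
 * words via upper_32_bits()/lower_32_bits() and later recombined with the
 * driver's ADD_ADDR() macro. struct example_desc and the helper names are
 * hypothetical.
 */
#include <linux/types.h>
#include <linux/wordpart.h>	/* upper_32_bits()/lower_32_bits() */

struct example_desc {
	u32 addr_high;
	u32 addr_low;
};

static void example_desc_set_addr(struct example_desc *d, dma_addr_t bus_addr)
{
	d->addr_high = upper_32_bits(bus_addr);
	d->addr_low = lower_32_bits(bus_addr);
}

static u64 example_desc_get_addr(const struct example_desc *d)
{
	/* Equivalent to the driver's ADD_ADDR(d->addr_high, d->addr_low). */
	return ((u64)d->addr_high << 32) + d->addr_low;
}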
+ return -ENOMEM; + } + + /* Signed data */ + addr = bus_addr + chunk_offset; + auth_desc->img_high = upper_32_bits(addr); + auth_desc->img_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len); + + chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN; + /* AE firmware */ + if (fw_type == CSS_AE_FIRMWARE) { + /* AE mode data */ + addr = bus_addr + chunk_offset; + auth_desc->img_ae_mode_data_high = upper_32_bits(addr); + auth_desc->img_ae_mode_data_low = lower_32_bits(addr); + simg_ae_mode = + (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset); + auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask; + + chunk_offset += sizeof(struct icp_qat_simg_ae_mode); + /* AE init seq */ + addr = bus_addr + chunk_offset; + auth_desc->img_ae_init_data_high = upper_32_bits(addr); + auth_desc->img_ae_init_data_low = lower_32_bits(addr); + + chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; + /* AE instructions */ + addr = bus_addr + chunk_offset; + auth_desc->img_ae_insts_high = upper_32_bits(addr); + auth_desc->img_ae_insts_low = lower_32_bits(addr); + } else { + addr = bus_addr + chunk_offset; + auth_desc->img_ae_insts_high = upper_32_bits(addr); + auth_desc->img_ae_insts_low = lower_32_bits(addr); + } + *desc = auth_desc; + return 0; +} + +static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_qat_auth_chunk *auth_chunk; + struct icp_firml_dram_desc img_desc; + unsigned int simg_fw_type; + int ret; + + ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); + if (ret) + return ret; + + simg_fw_type = qat_uclo_simg_fw_type(handle, image); + auth_chunk = img_desc.dram_base_addr_v; + auth_chunk->chunk_size = img_desc.dram_size; + auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; + + if (handle->chip_info->dual_sign) + return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc, + simg_fw_type, desc); + + return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc, + simg_fw_type, desc); +} + static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { @@ -1546,7 +1696,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, if (!((desc->ae_mask >> i) & 0x1)) continue; if (qat_hal_check_ae_active(handle, i)) { - pr_err("QAT: AE %d is active\n", i); + pr_err("AE %d is active\n", i); return -EINVAL; } SET_CAP_CSR(handle, fcu_ctl_csr, @@ -1566,7 +1716,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, } } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { - pr_err("QAT: firmware load failed timeout %x\n", retry); + pr_err("firmware load failed timeout %x\n", retry); return -EINVAL; } } @@ -1584,7 +1734,7 @@ static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, handle->sobj_handle = suof_handle; if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { qat_uclo_del_suof(handle); - pr_err("QAT: map SUOF failed\n"); + pr_err("map SUOF failed\n"); return -EINVAL; } return 0; @@ -1608,7 +1758,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, qat_uclo_ummap_auth_fw(handle, &desc); } else { if (handle->chip_info->mmp_sram_size < mem_size) { - pr_err("QAT: MMP size is too large: 0x%x\n", mem_size); + pr_err("MMP size is too large: 0x%x\n", mem_size); return -EFBIG; } qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size); @@ -1634,7 +1784,7 @@ static int 
qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, ICP_QAT_UOF_OBJS); if (!objhdl->obj_hdr) { - pr_err("QAT: object file chunk is null\n"); + pr_err("object file chunk is null\n"); goto out_objhdr_err; } handle->obj_handle = objhdl; @@ -1669,7 +1819,7 @@ static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver, min_ver_offset); if (checksum != mof_ptr->checksum) { - pr_err("QAT: incorrect MOF checksum\n"); + pr_err("incorrect MOF checksum\n"); return -EINVAL; } @@ -1705,7 +1855,7 @@ static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, } } - pr_err("QAT: object %s is not found inside MOF\n", obj_name); + pr_err("object %s is not found inside MOF\n", obj_name); return -EINVAL; } @@ -1722,7 +1872,7 @@ static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, ICP_QAT_MOF_OBJ_CHUNKID_LEN)) { obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset; } else { - pr_err("QAT: unsupported chunk id\n"); + pr_err("unsupported chunk id\n"); return -EINVAL; } mobj_hdr->obj_buf = obj; @@ -1783,7 +1933,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) } if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) { - pr_err("QAT: inconsistent UOF/SUOF chunk amount\n"); + pr_err("inconsistent UOF/SUOF chunk amount\n"); return -EINVAL; } return 0; @@ -1824,17 +1974,16 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr) int min = mof_hdr->min_ver & 0xff; if (mof_hdr->file_id != ICP_QAT_MOF_FID) { - pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id); + pr_err("invalid header 0x%x\n", mof_hdr->file_id); return -EINVAL; } if (mof_hdr->num_chunks <= 0x1) { - pr_err("QAT: MOF chunk amount is incorrect\n"); + pr_err("MOF chunk amount is incorrect\n"); return -EINVAL; } if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { - pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n", - maj, min); + pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } return 0; diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile index 5bf5c890c362..1427fe76f171 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index e48bcf1818cd..5b4bd0ba1ccb 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -4,7 +4,6 @@ #include <adf_admin.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -24,7 +23,6 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = { static struct adf_hw_device_class dh895xcc_class = { .name = ADF_DH895XCC_DEVICE_NAME, .type = DEV_DH895XCC, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 07e9d7e52861..b59e0cc49e52 
100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -19,24 +19,6 @@ #include <adf_dbgfs.h> #include "adf_dh895xcc_hw_data.h" -static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), }, - { } -}; -MODULE_DEVICE_TABLE(pci, adf_pci_tbl); - -static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); -static void adf_remove(struct pci_dev *dev); - -static struct pci_driver adf_driver = { - .id_table = adf_pci_tbl, - .name = ADF_DH895XCC_DEVICE_NAME, - .probe = adf_probe, - .remove = adf_remove, - .sriov_configure = adf_sriov_configure, - .err_handler = &adf_err_handler, -}; - static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) { pci_release_regions(accel_dev->accel_pci_dev.pci_dev); @@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev) kfree(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_DH895XCC_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + static int __init adfdrv_init(void) { request_module("intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile index 93f9c81edf09..c2fdb6e0f68f 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index f4ee4c2e00da..828456c43b76 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -3,7 +3,6 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -13,7 +12,6 @@ static struct adf_hw_device_class dh895xcciov_class = { .name = ADF_DH895XCCVF_DEVICE_NAME, .type = DEV_DH895XCCVF, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index fa08f10e6f3f..9c21f5d835d2 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status) { - if (engine->chain.first && engine->chain.last) + if (engine->chain_hw.first && engine->chain_hw.last) return mv_cesa_tdma_process(engine, status); return mv_cesa_std_process(engine, status); diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index d215a6bed6bc..50ca1039fdaa 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ 
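/*
 * A minimal sketch, with hypothetical names, of the .shutdown wiring added
 * to the dh895xcc driver above: the PCI core invokes the callback on
 * reboot/kexec so the driver can quiesce DMA and interrupts (the real
 * driver does this by calling adf_dev_down()).
 */
#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return 0;		/* bring the device up */
}

static void example_remove(struct pci_dev *pdev)
{
	/* full teardown on driver unbind */
}

static void example_shutdown(struct pci_dev *pdev)
{
	/* quiesce the device before reboot or kexec */
}

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static struct pci_driver example_driver = {
	.name		= "example_pci_driver",
	.id_table	= example_pci_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
	.shutdown	= example_shutdown,
};
module_pci_driver(example_driver);
MODULE_LICENSE("GPL");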
b/drivers/crypto/marvell/cesa/cesa.h @@ -440,8 +440,10 @@ struct mv_cesa_dev { * SRAM * @queue: fifo of the pending crypto requests * @load: engine load counter, useful for load balancing - * @chain: list of the current tdma descriptors being processed - * by this engine. + * @chain_hw: list of the current tdma descriptors being processed + * by the hardware. + * @chain_sw: list of the current tdma descriptors that will be + * submitted to the hardware. * @complete_queue: fifo of the processed requests by the engine * * Structure storing CESA engine information. @@ -463,7 +465,8 @@ struct mv_cesa_engine { struct gen_pool *pool; struct crypto_queue queue; atomic_t load; - struct mv_cesa_tdma_chain chain; + struct mv_cesa_tdma_chain chain_hw; + struct mv_cesa_tdma_chain chain_sw; struct list_head complete_queue; int irq; }; diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index cf62db50f958..48c5c8ea8c43 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req, struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); struct mv_cesa_engine *engine; + if (!req->cryptlen) + return 0; + ret = mv_cesa_skcipher_req_init(req, tmpl); if (ret) return ret; diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index f150861ceaf6..6815eddc9068 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) if (ret) goto err_free_tdma; - if (iter.src.sg) { + if (iter.base.len > iter.src.op_offset) { /* * Add all the new data, inserting an operation block and * launch command between each full SRAM block-worth of diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c index 388a06e180d6..243305354420 100644 --- a/drivers/crypto/marvell/cesa/tdma.c +++ b/drivers/crypto/marvell/cesa/tdma.c @@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq) { struct mv_cesa_engine *engine = dreq->engine; + spin_lock_bh(&engine->lock); + if (engine->chain_sw.first == dreq->chain.first) { + engine->chain_sw.first = NULL; + engine->chain_sw.last = NULL; + } + engine->chain_hw.first = dreq->chain.first; + engine->chain_hw.last = dreq->chain.last; + spin_unlock_bh(&engine->lock); + writel_relaxed(0, engine->regs + CESA_SA_CFG); mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); @@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq, void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, struct mv_cesa_req *dreq) { - if (engine->chain.first == NULL && engine->chain.last == NULL) { - engine->chain.first = dreq->chain.first; - engine->chain.last = dreq->chain.last; - } else { - struct mv_cesa_tdma_desc *last; + struct mv_cesa_tdma_desc *last = engine->chain_sw.last; - last = engine->chain.last; + /* + * Break the DMA chain if the request being queued needs the IV + * regs to be set before lauching the request. + */ + if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE) + engine->chain_sw.first = dreq->chain.first; + else { last->next = dreq->chain.first; - engine->chain.last = dreq->chain.last; - - /* - * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on - * the last element of the current chain, or if the request - * being queued needs the IV regs to be set before lauching - * the request. 
- */ - if (!(last->flags & CESA_TDMA_BREAK_CHAIN) && - !(dreq->chain.first->flags & CESA_TDMA_SET_STATE)) - last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma); + last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma); + } + last = dreq->chain.last; + engine->chain_sw.last = last; + /* + * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on + * the last element of the current chain. + */ + if (last->flags & CESA_TDMA_BREAK_CHAIN) { + engine->chain_sw.first = NULL; + engine->chain_sw.last = NULL; } } @@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) tdma_cur = readl(engine->regs + CESA_TDMA_CUR); - for (tdma = engine->chain.first; tdma; tdma = next) { + for (tdma = engine->chain_hw.first; tdma; tdma = next) { spin_lock_bh(&engine->lock); next = tdma->next; spin_unlock_bh(&engine->lock); @@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) &backlog); /* Re-chaining to the next request */ - engine->chain.first = tdma->next; + engine->chain_hw.first = tdma->next; tdma->next = NULL; /* If this is the last request, clear the chain */ - if (engine->chain.first == NULL) - engine->chain.last = NULL; + if (engine->chain_hw.first == NULL) + engine->chain_hw.last = NULL; spin_unlock_bh(&engine->lock); ctx = crypto_tfm_ctx(req->tfm); diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c index 5cae8fafa151..d4aab9e20f2a 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c @@ -6,6 +6,7 @@ #include "otx2_cptvf.h" #include "otx2_cptlf.h" #include "cn10k_cpt.h" +#include "otx2_cpt_common.h" static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, struct otx2_cptlf_info *lf); @@ -27,7 +28,7 @@ static struct cpt_hw_ops cn10k_hw_ops = { static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, struct otx2_cptlf_info *lf) { - void __iomem *lmtline = lf->lmtline; + void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE); u64 val = (lf->slot & 0x7FF); u64 tar_addr = 0; @@ -41,15 +42,49 @@ static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, dma_wmb(); /* Copy CPT command to LMTLINE */ - memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE); + memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE); cn10k_lmt_flush(val, tar_addr); } +void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs) +{ + struct otx2_lmt_info *lmt_info = &lfs->lmt_info; + + if (!lmt_info->base) + return; + + dma_free_attrs(&pdev->dev, lmt_info->size, + lmt_info->base - lmt_info->align, + lmt_info->iova - lmt_info->align, + DMA_ATTR_FORCE_CONTIGUOUS); +} +EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT"); + +static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev, + struct otx2_cptlfs_info *lfs, u32 size) +{ + struct otx2_lmt_info *lmt_info = &lfs->lmt_info; + dma_addr_t align_iova; + dma_addr_t iova; + + lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &iova, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); + if (!lmt_info->base) + return -ENOMEM; + + align_iova = ALIGN((u64)iova, LMTLINE_ALIGN); + lmt_info->iova = align_iova; + lmt_info->align = align_iova - iova; + lmt_info->size = size; + lmt_info->base += lmt_info->align; + return 0; +} + int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf) { struct pci_dev *pdev = cptpf->pdev; - resource_size_t size; - u64 lmt_base; + u32 size; + int ret; if (!test_bit(CN10K_LMTST, 
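/*
 * A minimal illustrative sketch (hypothetical names) of the
 * over-allocate-and-align pattern used by cn10k_cpt_lmtst_alloc() above:
 * the DMA buffer is allocated with extra slack, the IOVA is rounded up to
 * the required LMTLINE alignment, and the skipped offset is remembered so
 * the original addresses can be handed back to dma_free_attrs().
 */
#include <linux/align.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

struct example_lmt_info {
	void *base;		/* aligned CPU address used by the driver */
	dma_addr_t iova;	/* aligned device address */
	u32 size;		/* total size passed to the DMA API */
	u8 align;		/* bytes skipped to reach alignment */
};

static int example_lmt_alloc(struct device *dev, struct example_lmt_info *lmt,
			     u32 size, u32 alignment)
{
	dma_addr_t iova;

	lmt->base = dma_alloc_attrs(dev, size, &iova, GFP_KERNEL,
				    DMA_ATTR_FORCE_CONTIGUOUS);
	if (!lmt->base)
		return -ENOMEM;

	lmt->iova = ALIGN(iova, alignment);
	lmt->align = lmt->iova - iova;
	lmt->base += lmt->align;
	lmt->size = size;
	return 0;
}

static void example_lmt_free(struct device *dev, struct example_lmt_info *lmt)
{
	dma_free_attrs(dev, lmt->size, lmt->base - lmt->align,
		       lmt->iova - lmt->align, DMA_ATTR_FORCE_CONTIGUOUS);
}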
&cptpf->cap_flag)) { cptpf->lfs.ops = &otx2_hw_ops; @@ -57,18 +92,19 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf) } cptpf->lfs.ops = &cn10k_hw_ops; - lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR); - if (!lmt_base) { - dev_err(&pdev->dev, "PF LMTLINE address not configured\n"); - return -ENOMEM; + size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN; + ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size); + if (ret) { + dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n", + cptpf->pf_id); + return ret; } - size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM); - size -= ((1 + cptpf->max_vfs) * MBOX_SIZE); - cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size); - if (!cptpf->lfs.lmt_base) { - dev_err(&pdev->dev, - "Mapping of PF LMTLINE address failed\n"); - return -ENOMEM; + + ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs); + if (ret) { + dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n", + cptpf->pf_id); + cn10k_cpt_lmtst_free(pdev, &cptpf->lfs); } return 0; @@ -78,18 +114,25 @@ EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT"); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf) { struct pci_dev *pdev = cptvf->pdev; - resource_size_t offset, size; + u32 size; + int ret; if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) return 0; - offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM); - size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM); - /* Map VF LMILINE region */ - cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size); - if (!cptvf->lfs.lmt_base) { - dev_err(&pdev->dev, "Unable to map BAR4\n"); - return -ENOMEM; + size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN; + ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size); + if (ret) { + dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n", + cptvf->vf_id); + return ret; + } + + ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs); + if (ret) { + dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n", + cptvf->vf_id); + cn10k_cpt_lmtst_free(pdev, &cptvf->lfs); } return 0; diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h index 92be3ecf570f..ea5990048c21 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h @@ -50,6 +50,7 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result) int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf); +void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs); void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval); int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev, struct cn10k_cpt_errata_ctx *er_ctx); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index c5b7c57574ef..d529bcb03775 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -145,11 +145,8 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot, static inline bool is_dev_otx2(struct pci_dev *pdev) { - if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID || - pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID) - return true; - - return false; + return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID || + pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID; } static inline bool is_dev_cn10ka(struct pci_dev *pdev) @@ -159,12 +156,10 @@ static inline bool is_dev_cn10ka(struct pci_dev *pdev) static inline bool 
is_dev_cn10ka_ax(struct pci_dev *pdev) { - if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && - ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 || - (pdev->revision & 0xff) == 0x51)) - return true; - - return false; + return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && + ((pdev->revision & 0xFF) == 4 || + (pdev->revision & 0xFF) == 0x50 || + (pdev->revision & 0xFF) == 0x51); } static inline bool is_dev_cn10kb(struct pci_dev *pdev) @@ -174,11 +169,8 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev) static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) { - if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && - (pdev->revision & 0xFF) == 0x54) - return true; - - return false; + return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0xFF) == 0x54; } static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev, @@ -192,18 +184,12 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev, static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev) { - if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev)) - return true; - - return false; + return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev); } static inline bool cpt_feature_sgv2(struct pci_dev *pdev) { - if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev)) - return true; - - return false; + return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev); } int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); @@ -223,5 +209,6 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox); int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot); +int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs); #endif /* __OTX2_CPT_COMMON_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index b8b7c8a3c0ca..95f3de3a34eb 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -255,3 +255,28 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot) return ret; } EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT"); + +int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs) +{ + struct otx2_mbox *mbox = lfs->mbox; + struct pci_dev *pdev = lfs->pdev; + struct lmtst_tbl_setup_req *req; + + req = (struct lmtst_tbl_setup_req *) + otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), + sizeof(struct msg_rsp)); + if (!req) { + dev_err(&pdev->dev, "RVU MBOX failed to alloc message.\n"); + return -EFAULT; + } + + req->hdr.id = MBOX_MSG_LMTST_TBL_SETUP; + req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.pcifunc = 0; + + req->use_local_lmt_region = true; + req->lmt_iova = lfs->lmt_info.iova; + + return otx2_cpt_send_mbox_msg(mbox, pdev); +} +EXPORT_SYMBOL_NS_GPL(otx2_cpt_lmtst_tbl_setup_msg, "CRYPTO_DEV_OCTEONTX2_CPT"); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c index b5d66afcc030..dc7c7a2650a5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c @@ -433,10 +433,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri, for (slot = 0; slot < lfs->lfs_num; slot++) { lfs->lf[slot].lfs = lfs; lfs->lf[slot].slot = slot; - if (lfs->lmt_base) - lfs->lf[slot].lmtline = lfs->lmt_base + - (slot * LMTLINE_SIZE); - else + 
if (!lfs->lmt_info.base) lfs->lf[slot].lmtline = lfs->reg_base + OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot, OTX2_CPT_LMT_LF_LMTLINEX(0)); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h index bd8604be2952..6e004a5568d8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h @@ -105,11 +105,19 @@ struct cpt_hw_ops { gfp_t gfp); }; +#define LMTLINE_SIZE 128 +#define LMTLINE_ALIGN 128 +struct otx2_lmt_info { + void *base; + dma_addr_t iova; + u32 size; + u8 align; +}; + struct otx2_cptlfs_info { /* Registers start address of VF/PF LFs are attached to */ void __iomem *reg_base; -#define LMTLINE_SIZE 128 - void __iomem *lmt_base; + struct otx2_lmt_info lmt_info; struct pci_dev *pdev; /* Device LFs are attached to */ struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM]; struct otx2_mbox *mbox; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 12971300296d..1c5c262af48d 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -639,6 +639,12 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) /* Disable all cores */ ret = otx2_cpt_disable_all_cores(cptpf); + otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base, + &cptpf->afpf_mbox, BLKADDR_CPT0); + if (cptpf->has_cpt1) + otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev, + cptpf->reg_base, &cptpf->afpf_mbox, + BLKADDR_CPT1); return ret; } @@ -786,19 +792,19 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, cptpf->max_vfs = pci_sriov_get_totalvfs(pdev); cptpf->kvf_limits = 1; - err = cn10k_cptpf_lmtst_init(cptpf); + /* Initialize CPT PF device */ + err = cptpf_device_init(cptpf); if (err) goto unregister_intr; - /* Initialize CPT PF device */ - err = cptpf_device_init(cptpf); + err = cn10k_cptpf_lmtst_init(cptpf); if (err) goto unregister_intr; /* Initialize engine groups */ err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps); if (err) - goto unregister_intr; + goto free_lmtst; err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group); if (err) @@ -814,6 +820,8 @@ sysfs_grp_del: sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group); cleanup_eng_grps: otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps); +free_lmtst: + cn10k_cpt_lmtst_free(pdev, &cptpf->lfs); unregister_intr: cptpf_disable_afpf_mbox_intr(cptpf); destroy_afpf_mbox: @@ -848,6 +856,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev) cptpf_disable_afpf_mbox_intr(cptpf); /* Destroy AF-PF mbox */ cptpf_afpf_mbox_destroy(cptpf); + /* Free LMTST memory */ + cn10k_cpt_lmtst_free(pdev, &cptpf->lfs); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c index ec1ac7e836a3..12c0e966fa65 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c @@ -264,8 +264,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, return -ENOENT; } - otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base, - &cptpf->afpf_mbox, BLKADDR_CPT0); cptpf->lfs.global_slot = 0; cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid; cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen; @@ -278,9 +276,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, if (cptpf->has_cpt1) { cptpf->rsrc_req_blkaddr = BLKADDR_CPT1; - 
otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev, - cptpf->reg_base, &cptpf->afpf_mbox, - BLKADDR_CPT1); cptpf->cpt1_lfs.global_slot = num_lfs; cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid; cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen; @@ -507,6 +502,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf, case MBOX_MSG_CPT_INLINE_IPSEC_CFG: case MBOX_MSG_NIX_INLINE_IPSEC_CFG: case MBOX_MSG_CPT_LF_RESET: + case MBOX_MSG_LMTST_TBL_SETUP: break; default: diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 42c5484ce66a..78367849c3d5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1513,8 +1513,6 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) if (ret) goto delete_grps; - otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base, - &cptpf->afpf_mbox, BLKADDR_CPT0); ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK, OTX2_CPT_QUEUE_HI_PRIO, 1); if (ret) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index d84eebdf2fa8..56904bdfd6e8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -283,8 +283,6 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf) lfs_num = cptvf->lfs.kvf_limits; - otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base, - &cptvf->pfvf_mbox, cptvf->blkaddr); ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO, lfs_num); if (ret) @@ -378,10 +376,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag); - ret = cn10k_cptvf_lmtst_init(cptvf); - if (ret) - goto clear_drvdata; - /* Initialize PF<=>VF mailbox */ ret = cptvf_pfvf_mbox_init(cptvf); if (ret) @@ -396,6 +390,9 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, cptvf_hw_ops_get(cptvf); + otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base, + &cptvf->pfvf_mbox, cptvf->blkaddr); + ret = otx2_cptvf_send_caps_msg(cptvf); if (ret) { dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n"); @@ -404,13 +401,19 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35)) cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create; + ret = cn10k_cptvf_lmtst_init(cptvf); + if (ret) + goto unregister_interrupts; + /* Initialize CPT LFs */ ret = cptvf_lf_init(cptvf); if (ret) - goto unregister_interrupts; + goto free_lmtst; return 0; +free_lmtst: + cn10k_cpt_lmtst_free(pdev, &cptvf->lfs); unregister_interrupts: cptvf_disable_pfvf_mbox_intrs(cptvf); destroy_pfvf_mbox: @@ -434,6 +437,8 @@ static void otx2_cptvf_remove(struct pci_dev *pdev) cptvf_disable_pfvf_mbox_intrs(cptvf); /* Destroy PF-VF mbox */ cptvf_pfvf_mbox_destroy(cptvf); + /* Free LMTST memory */ + cn10k_cpt_lmtst_free(pdev, &cptvf->lfs); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c index d9fa5f6e204d..931b72580fd9 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c @@ -134,6 +134,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf, sizeof(cptvf->eng_caps)); break; case MBOX_MSG_CPT_LF_RESET: + case MBOX_MSG_LMTST_TBL_SETUP: break; default: dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n", diff --git 
a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 0e440f704a8f..35fa5bad1d9f 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -8,10 +8,12 @@ */ #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include <asm/vio.h> #include "nx_csbcpb.h" diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index dfa3ad1a12f2..709b3ee74657 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -9,10 +9,12 @@ #include <crypto/aes.h> #include <crypto/ctr.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include <asm/vio.h> #include "nx_csbcpb.h" diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 502a565074e9..4039cf3b22d4 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -8,10 +8,12 @@ */ #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include <asm/vio.h> #include "nx_csbcpb.h" diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index eb5c8f689360..bf465d824e2c 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -7,13 +7,14 @@ * Author: Kent Yoder <yoder1@us.ibm.com> */ -#include <crypto/internal/hash.h> #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/internal/hash.h> +#include <linux/atomic.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> -#include <asm/vio.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include "nx_csbcpb.h" #include "nx.h" @@ -21,8 +22,6 @@ struct xcbc_state { u8 state[AES_BLOCK_SIZE]; - unsigned int count; - u8 buffer[AES_BLOCK_SIZE]; }; static int nx_xcbc_set_key(struct crypto_shash *desc, @@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, */ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u8 keys[2][AES_BLOCK_SIZE]; @@ -135,9 +134,9 @@ out: return rc; } -static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm) +static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; int err; @@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct xcbc_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; struct nx_sg *out_sg; - u32 to_process = 0, leftover, total; 
unsigned int max_sg_len; unsigned long irq_flags; + u32 to_process, total; int rc = 0; int data_len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - total = sctx->count + len; - - /* 2 cases for total data len: - * 1: <= AES_BLOCK_SIZE: copy into state, return 0 - * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover - */ - if (total <= AES_BLOCK_SIZE) { - memcpy(sctx->buffer + sctx->count, data, len); - sctx->count += len; - goto out; - } + total = len; in_sg = nx_ctx->in_sg; max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), @@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc, data_len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, nx_ctx->ap->sglen); + &data_len, nx_ctx->ap->sglen); if (data_len != AES_BLOCK_SIZE) { rc = -EINVAL; @@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc, nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); do { - to_process = total - to_process; - to_process = to_process & ~(AES_BLOCK_SIZE - 1); - - leftover = total - to_process; - - /* the hardware will not accept a 0 byte operation for this - * algorithm and the operation MUST be finalized to be correct. - * So if we happen to get an update that falls on a block sized - * boundary, we must save off the last block to finalize with - * later. */ - if (!leftover) { - to_process -= AES_BLOCK_SIZE; - leftover = AES_BLOCK_SIZE; - } - - if (sctx->count) { - data_len = sctx->count; - in_sg = nx_build_sg_list(nx_ctx->in_sg, - (u8 *) sctx->buffer, - &data_len, - max_sg_len); - if (data_len != sctx->count) { - rc = -EINVAL; - goto out; - } - } + to_process = total & ~(AES_BLOCK_SIZE - 1); - data_len = to_process - sctx->count; in_sg = nx_build_sg_list(in_sg, (u8 *) data, - &data_len, + &to_process, max_sg_len); - if (data_len != to_process - sctx->count) { - rc = -EINVAL; - goto out; - } - nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); /* we've hit the nx chip previously and we're updating again, * so copy over the partial digest */ - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - memcpy(csbcpb->cpb.aes_xcbc.cv, - csbcpb->cpb.aes_xcbc.out_cv_mac, - AES_BLOCK_SIZE); - } + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); - NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; @@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc, atomic_inc(&(nx_ctx->stats->aes_ops)); - /* everything after the first update is continuation */ - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - total -= to_process; - data += to_process - sctx->count; - sctx->count = 0; + data += to_process; in_sg = nx_ctx->in_sg; - } while (leftover > AES_BLOCK_SIZE); + } while (total >= AES_BLOCK_SIZE); - /* copy the leftover back into the state struct */ - memcpy(sctx->buffer, data, leftover); - sctx->count = leftover; + rc = total; + memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } -static int nx_xcbc_final(struct shash_desc *desc, u8 *out) +static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src, + unsigned int nbytes, u8 *out) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct xcbc_state *sctx = shash_desc_ctx(desc); - struct 
nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; @@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - /* we've hit the nx chip previously, now we're finalizing, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.aes_xcbc.cv, - csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); - } else if (sctx->count == 0) { + if (nbytes) { + /* non-zero final, so copy over the partial digest */ + memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE); + } else { /* * we've never seen an update, so this is a 0 byte op. The * hardware cannot handle a 0 byte op, so just ECB to @@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) * this is not an intermediate operation */ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - len = sctx->count; - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, - &len, nx_ctx->ap->sglen); + len = nbytes; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, + nx_ctx->ap->sglen); - if (len != sctx->count) { + if (len != nbytes) { rc = -EINVAL; goto out; } @@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = { .digestsize = AES_BLOCK_SIZE, .init = nx_xcbc_init, .update = nx_xcbc_update, - .final = nx_xcbc_final, + .finup = nx_xcbc_finup, .setkey = nx_xcbc_set_key, .descsize = sizeof(struct xcbc_state), - .statesize = sizeof(struct xcbc_state), + .init_tfm = nx_crypto_ctx_aes_xcbc_init2, + .exit_tfm = nx_crypto_ctx_shash_exit, .base = { .cra_name = "xcbc(aes)", .cra_driver_name = "xcbc-aes-nx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO, .cra_blocksize = AES_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_aes_xcbc_init2, - .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index c3bebf0feabe..5b29dd026df2 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -9,9 +9,12 @@ #include <crypto/internal/hash.h> #include <crypto/sha2.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/vio.h> -#include <asm/byteorder.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/unaligned.h> #include "nx_csbcpb.h" #include "nx.h" @@ -19,12 +22,11 @@ struct sha256_state_be { __be32 state[SHA256_DIGEST_SIZE / 4]; u64 count; - u8 buf[SHA256_BLOCK_SIZE]; }; -static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) +static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); @@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) return 0; } -static int nx_sha256_init(struct shash_desc *desc) { +static int nx_sha256_init(struct shash_desc *desc) +{ struct sha256_state_be *sctx = shash_desc_ctx(desc); - memset(sctx, 0, sizeof *sctx); - sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); sctx->state[2] = __cpu_to_be32(SHA256_H2); @@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) { static int nx_sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + struct nx_crypto_ctx *nx_ctx = 
crypto_shash_ctx(desc->tfm); struct sha256_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 to_process, leftover, total = len; struct nx_sg *out_sg; - u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; - u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); - /* 2 cases for total data len: - * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 - * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover - */ - total = (sctx->count % SHA256_BLOCK_SIZE) + len; - if (total < SHA256_BLOCK_SIZE) { - memcpy(sctx->buf + buf_len, data, len); - sctx->count += len; - goto out; - } - memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; @@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, } do { - int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; - if (buf_len) { - data_len = buf_len; - in_sg = nx_build_sg_list(in_sg, - (u8 *) sctx->buf, - &data_len, - max_sg_len); - - if (data_len != buf_len) { - rc = -EINVAL; - goto out; - } - used_sgs = in_sg - nx_ctx->in_sg; - } + to_process = total & ~(SHA256_BLOCK_SIZE - 1); - /* to_process: SHA256_BLOCK_SIZE aligned chunk to be - * processed in this iteration. This value is restricted - * by sg list limits and number of sgs we already used - * for leftover data. (see above) - * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, - * but because data may not be aligned, we need to account - * for that too. */ - to_process = min_t(u64, total, - (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); - to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); - - data_len = to_process - buf_len; + data_len = to_process; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - to_process = data_len + buf_len; + to_process = data_len; leftover = total - to_process; /* @@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha256_ops)); total -= to_process; - data += to_process - buf_len; - buf_len = 0; - + data += to_process; + sctx->count += to_process; } while (leftover >= SHA256_BLOCK_SIZE); - /* copy the leftover back into the state struct */ - if (leftover) - memcpy(sctx->buf, data, leftover); - - sctx->count += len; + rc = leftover; memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } -static int nx_sha256_final(struct shash_desc *desc, u8 *out) +static int nx_sha256_finup(struct shash_desc *desc, const u8 *src, + unsigned int nbytes, u8 *out) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct sha256_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; @@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that - * this is not an intermediate operation */ - if (sctx->count >= SHA256_BLOCK_SIZE) { - /* we've hit the nx chip previously, now we're finalizing, 
- * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - } else { - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; - } + * this is not an intermediate operation + * copy over the partial digest */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + sctx->count += nbytes; csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); - len = sctx->count & (SHA256_BLOCK_SIZE - 1); - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, - &len, max_sg_len); + len = nbytes; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len); - if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { + if (len != nbytes) { rc = -EINVAL; goto out; } @@ -251,18 +206,34 @@ out: static int nx_sha256_export(struct shash_desc *desc, void *out) { struct sha256_state_be *sctx = shash_desc_ctx(desc); + union { + u8 *u8; + u32 *u32; + u64 *u64; + } p = { .u8 = out }; + int i; - memcpy(out, sctx, sizeof(*sctx)); + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++) + put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++); + put_unaligned(sctx->count, p.u64++); return 0; } static int nx_sha256_import(struct shash_desc *desc, const void *in) { struct sha256_state_be *sctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u32 *u32; + const u64 *u64; + } p = { .u8 = in }; + int i; - memcpy(sctx, in, sizeof(*sctx)); + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++) + sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++)); + sctx->count = get_unaligned(p.u64++); return 0; } @@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, .init = nx_sha256_init, .update = nx_sha256_update, - .final = nx_sha256_final, + .finup = nx_sha256_finup, .export = nx_sha256_export, .import = nx_sha256_import, + .init_tfm = nx_crypto_ctx_sha256_init, + .exit_tfm = nx_crypto_ctx_shash_exit, .descsize = sizeof(struct sha256_state_be), .statesize = sizeof(struct sha256_state_be), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-nx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha256_init, - .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 1ffb40d2c324..f74776b7d7d7 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -9,8 +9,12 @@ #include <crypto/internal/hash.h> #include <crypto/sha2.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/vio.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/unaligned.h> #include "nx_csbcpb.h" #include "nx.h" @@ -18,12 +22,11 @@ struct sha512_state_be { __be64 state[SHA512_DIGEST_SIZE / 8]; u64 count[2]; - u8 buf[SHA512_BLOCK_SIZE]; }; -static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm) +static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); @@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc) { struct 
sha512_state_be *sctx = shash_desc_ctx(desc); - memset(sctx, 0, sizeof *sctx); - sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); sctx->state[2] = __cpu_to_be64(SHA512_H2); @@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc) sctx->state[6] = __cpu_to_be64(SHA512_H6); sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; + sctx->count[1] = 0; return 0; } @@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc) static int nx_sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct sha512_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 to_process, leftover, total = len; struct nx_sg *out_sg; - u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; - u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); - /* 2 cases for total data len: - * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 - * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover - */ - total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len; - if (total < SHA512_BLOCK_SIZE) { - memcpy(sctx->buf + buf_len, data, len); - sctx->count[0] += len; - goto out; - } - memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; @@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, } do { - int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; - if (buf_len) { - data_len = buf_len; - in_sg = nx_build_sg_list(in_sg, - (u8 *) sctx->buf, - &data_len, max_sg_len); - - if (data_len != buf_len) { - rc = -EINVAL; - goto out; - } - used_sgs = in_sg - nx_ctx->in_sg; - } + to_process = total & ~(SHA512_BLOCK_SIZE - 1); - /* to_process: SHA512_BLOCK_SIZE aligned chunk to be - * processed in this iteration. This value is restricted - * by sg list limits and number of sgs we already used - * for leftover data. (see above) - * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, - * but because data may not be aligned, we need to account - * for that too. 
*/ - to_process = min_t(u64, total, - (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); - to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); - - data_len = to_process - buf_len; + data_len = to_process; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - if (data_len != (to_process - buf_len)) { - rc = -EINVAL; - goto out; - } - - to_process = data_len + buf_len; + to_process = data_len; leftover = total - to_process; /* @@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha512_ops)); total -= to_process; - data += to_process - buf_len; - buf_len = 0; - + data += to_process; + sctx->count[0] += to_process; + if (sctx->count[0] < to_process) + sctx->count[1]++; } while (leftover >= SHA512_BLOCK_SIZE); - /* copy the leftover back into the state struct */ - if (leftover) - memcpy(sctx->buf, data, leftover); - sctx->count[0] += len; + rc = leftover; memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } -static int nx_sha512_final(struct shash_desc *desc, u8 *out) +static int nx_sha512_finup(struct shash_desc *desc, const u8 *src, + unsigned int nbytes, u8 *out) { struct sha512_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u32 max_sg_len; - u64 count0; unsigned long irq_flags; + u64 count0, count1; int rc = 0; int len; @@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that - * this is not an intermediate operation */ - if (sctx->count[0] >= SHA512_BLOCK_SIZE) { - /* we've hit the nx chip previously, now we're finalizing, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, - SHA512_DIGEST_SIZE); - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - } else { - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; - } - + * this is not an intermediate operation + * copy over the partial digest */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - count0 = sctx->count[0] * 8; + count0 = sctx->count[0] + nbytes; + count1 = sctx->count[1]; - csbcpb->cpb.sha512.message_bit_length_lo = count0; + csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3; + csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) | + (count0 >> 61); - len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); - in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, - max_sg_len); + len = nbytes; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len); - if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { + if (len != nbytes) { rc = -EINVAL; goto out; } @@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); - atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes)); + atomic64_add(count0, &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: @@ -257,18 
+211,34 @@ out: static int nx_sha512_export(struct shash_desc *desc, void *out) { struct sha512_state_be *sctx = shash_desc_ctx(desc); + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int i; - memcpy(out, sctx, sizeof(*sctx)); + for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++) + put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++); + put_unaligned(sctx->count[0], p.u64++); + put_unaligned(sctx->count[1], p.u64++); return 0; } static int nx_sha512_import(struct shash_desc *desc, const void *in) { struct sha512_state_be *sctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int i; - memcpy(sctx, in, sizeof(*sctx)); + for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++) + sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++)); + sctx->count[0] = get_unaligned(p.u64++); + sctx->count[1] = get_unaligned(p.u64++); return 0; } @@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = { .digestsize = SHA512_DIGEST_SIZE, .init = nx_sha512_init, .update = nx_sha512_update, - .final = nx_sha512_final, + .finup = nx_sha512_finup, .export = nx_sha512_export, .import = nx_sha512_import, + .init_tfm = nx_crypto_ctx_sha512_init, + .exit_tfm = nx_crypto_ctx_shash_exit, .descsize = sizeof(struct sha512_state_be), .statesize = sizeof(struct sha512_state_be), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-nx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha512_init, - .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index a3b979193d9b..78135fb13f5c 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -7,11 +7,11 @@ * Author: Kent Yoder <yoder1@us.ibm.com> */ +#include <crypto/aes.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> -#include <crypto/aes.h> +#include <crypto/internal/skcipher.h> #include <crypto/sha2.h> -#include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, } if ((sg - sg_head) == sgmax) { - pr_err("nx: scatter/gather list overflow, pid: %d\n", - current->pid); sg++; break; } @@ -702,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm) NX_MODE_AES_ECB); } -int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_sha_init(struct crypto_shash *tfm) { - return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); + return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); } -int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm) { - return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES, NX_MODE_AES_XCBC_MAC); } @@ -744,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) kfree_sensitive(nx_ctx->kmem); } +void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm) +{ + nx_crypto_ctx_exit(crypto_shash_ctx(tfm)); +} + static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) { dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index e1b4b6927bec..36974f08490a 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -3,7 +3,11 @@ #ifndef __NX_H__ #define __NX_H__ 
+#include <asm/vio.h> #include <crypto/ctr.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> #define NX_NAME "nx-crypto" #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" @@ -139,19 +143,20 @@ struct nx_crypto_ctx { } priv; }; -struct crypto_aead; +struct scatterlist; /* prototypes */ int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm); int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); -int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm); int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm); int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm); int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm); -int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_sha_init(struct crypto_shash *tfm); void nx_crypto_ctx_exit(struct crypto_tfm *tfm); void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm); void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm); +void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, u32 may_sleep); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 551dd32a8db0..1ecf5f6ac04e 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1086,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = { &dev_attr_fallback.attr, NULL, }; - -static const struct attribute_group omap_aes_attr_group = { - .attrs = omap_aes_attrs, -}; +ATTRIBUTE_GROUPS(omap_aes); static int omap_aes_probe(struct platform_device *pdev) { @@ -1215,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev) } } - err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group); - if (err) { - dev_err(dev, "could not create sysfs device attrs\n"); - goto err_aead_algs; - } - return 0; err_aead_algs: for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { @@ -1277,8 +1268,6 @@ static void omap_aes_remove(struct platform_device *pdev) tasklet_kill(&dd->done_task); omap_aes_dma_cleanup(dd); pm_runtime_disable(dd->dev); - - sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group); } #ifdef CONFIG_PM_SLEEP @@ -1304,6 +1293,7 @@ static struct platform_driver omap_aes_driver = { .name = "omap-aes", .pm = &omap_aes_pm_ops, .of_match_table = omap_aes_of_match, + .dev_groups = omap_aes_groups, }, }; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 7021481bf027..56f192cb976d 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = { &dev_attr_fallback.attr, NULL, }; - -static const struct attribute_group omap_sham_attr_group = { - .attrs = omap_sham_attrs, -}; +ATTRIBUTE_GROUPS(omap_sham); static int omap_sham_probe(struct platform_device *pdev) { @@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev) } } - err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group); - if (err) { - dev_err(dev, "could not create sysfs device attrs\n"); - goto err_algs; - } - return 0; err_algs: @@ -2210,8 +2201,6 @@ static void omap_sham_remove(struct platform_device *pdev) if (!dd->polling_mode) dma_release_channel(dd->dma_lch); - - sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group); } static struct platform_driver omap_sham_driver = { @@ -2220,6 +2209,7 @@ static struct platform_driver omap_sham_driver = { 
.driver = { .name = "omap-sham", .of_match_table = omap_sham_of_match, + .dev_groups = omap_sham_groups, }, }; diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index db9e84c0c9fb..329f60ad422e 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -7,59 +7,89 @@ * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> */ +#include <asm/cpu_device_id.h> #include <crypto/internal/hash.h> #include <crypto/padlock.h> #include <crypto/sha1.h> #include <crypto/sha2.h> +#include <linux/cpufeature.h> #include <linux/err.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/scatterlist.h> -#include <asm/cpu_device_id.h> -#include <asm/fpu/api.h> +#include <linux/module.h> -struct padlock_sha_desc { - struct shash_desc fallback; -}; +#define PADLOCK_SHA_DESCSIZE (128 + ((PADLOCK_ALIGNMENT - 1) & \ + ~(CRYPTO_MINALIGN - 1))) struct padlock_sha_ctx { - struct crypto_shash *fallback; + struct crypto_ahash *fallback; }; -static int padlock_sha_init(struct shash_desc *desc) +static inline void *padlock_shash_desc_ctx(struct shash_desc *desc) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + return PTR_ALIGN(shash_desc_ctx(desc), PADLOCK_ALIGNMENT); +} + +static int padlock_sha1_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = padlock_shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int padlock_sha256_init(struct shash_desc *desc) +{ + struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc); - dctx->fallback.tfm = ctx->fallback; - return crypto_shash_init(&dctx->fallback); + sha256_block_init(sctx); + return 0; } static int padlock_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + u8 *state = padlock_shash_desc_ctx(desc); + struct crypto_shash *tfm = desc->tfm; + int err, remain; + + remain = length - round_down(length, crypto_shash_blocksize(tfm)); + { + struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm); + HASH_REQUEST_ON_STACK(req, ctx->fallback); + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, data, NULL, length - remain); + err = crypto_ahash_import_core(req, state) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export_core(req, state); + HASH_REQUEST_ZERO(req); + } - return crypto_shash_update(&dctx->fallback, data, length); + return err ?: remain; } static int padlock_sha_export(struct shash_desc *desc, void *out) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - - return crypto_shash_export(&dctx->fallback, out); + memcpy(out, padlock_shash_desc_ctx(desc), + crypto_shash_coresize(desc->tfm)); + return 0; } static int padlock_sha_import(struct shash_desc *desc, const void *in) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + unsigned int bs = crypto_shash_blocksize(desc->tfm); + unsigned int ss = crypto_shash_coresize(desc->tfm); + u64 *state = padlock_shash_desc_ctx(desc); + + memcpy(state, in, ss); + + /* Stop evil imports from generating a fault. 
*/ + state[ss / 8 - 1] &= ~(bs - 1); - dctx->fallback.tfm = ctx->fallback; - return crypto_shash_import(&dctx->fallback, in); + return 0; } static inline void padlock_output_block(uint32_t *src, @@ -69,65 +99,38 @@ static inline void padlock_output_block(uint32_t *src, *dst++ = swab32(*src++); } +static int padlock_sha_finup(struct shash_desc *desc, const u8 *in, + unsigned int count, u8 *out) +{ + struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + HASH_REQUEST_ON_STACK(req, ctx->fallback); + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, in, out, count); + return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?: + crypto_ahash_finup(req); +} + static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, unsigned int count, u8 *out) { /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. */ - char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct sha1_state state; - unsigned int space; - unsigned int leftover; - int err; - - err = crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; + struct sha1_state *state = padlock_shash_desc_ctx(desc); + u64 start = state->count; - if (state.count + count > ULONG_MAX) - return crypto_shash_finup(&dctx->fallback, in, count, out); - - leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1; - space = SHA1_BLOCK_SIZE - leftover; - if (space) { - if (count > space) { - err = crypto_shash_update(&dctx->fallback, in, space) ?: - crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; - count -= space; - in += space; - } else { - memcpy(state.buffer + leftover, in, count); - in = state.buffer; - count += leftover; - state.count &= ~(SHA1_BLOCK_SIZE - 1); - } - } - - memcpy(result, &state.state, SHA1_DIGEST_SIZE); + if (start + count > ULONG_MAX) + return padlock_sha_finup(desc, in, count, out); asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ : \ - : "c"((unsigned long)state.count + count), \ - "a"((unsigned long)state.count), \ - "S"(in), "D"(result)); - - padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); + : "c"((unsigned long)start + count), \ + "a"((unsigned long)start), \ + "S"(in), "D"(state)); -out: - return err; -} - -static int padlock_sha1_final(struct shash_desc *desc, u8 *out) -{ - const u8 *buf = (void *)desc; - - return padlock_sha1_finup(desc, buf, 0, out); + padlock_output_block(state->state, (uint32_t *)out, 5); + return 0; } static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, @@ -136,78 +139,46 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. 
*/ - char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct sha256_state state; - unsigned int space; - unsigned int leftover; - int err; - - err = crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; + struct sha256_state *state = padlock_shash_desc_ctx(desc); + u64 start = state->count; - if (state.count + count > ULONG_MAX) - return crypto_shash_finup(&dctx->fallback, in, count, out); - - leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1; - space = SHA256_BLOCK_SIZE - leftover; - if (space) { - if (count > space) { - err = crypto_shash_update(&dctx->fallback, in, space) ?: - crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; - count -= space; - in += space; - } else { - memcpy(state.buf + leftover, in, count); - in = state.buf; - count += leftover; - state.count &= ~(SHA1_BLOCK_SIZE - 1); - } - } - - memcpy(result, &state.state, SHA256_DIGEST_SIZE); + if (start + count > ULONG_MAX) + return padlock_sha_finup(desc, in, count, out); asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ : \ - : "c"((unsigned long)state.count + count), \ - "a"((unsigned long)state.count), \ - "S"(in), "D"(result)); + : "c"((unsigned long)start + count), \ + "a"((unsigned long)start), \ + "S"(in), "D"(state)); - padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); - -out: - return err; -} - -static int padlock_sha256_final(struct shash_desc *desc, u8 *out) -{ - const u8 *buf = (void *)desc; - - return padlock_sha256_finup(desc, buf, 0, out); + padlock_output_block(state->state, (uint32_t *)out, 8); + return 0; } static int padlock_init_tfm(struct crypto_shash *hash) { const char *fallback_driver_name = crypto_shash_alg_name(hash); struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash); - struct crypto_shash *fallback_tfm; + struct crypto_ahash *fallback_tfm; /* Allocate a fallback and abort if it failed. 
*/ - fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, - CRYPTO_ALG_NEED_FALLBACK); + fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); if (IS_ERR(fallback_tfm)) { printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", fallback_driver_name); return PTR_ERR(fallback_tfm); } + if (crypto_shash_statesize(hash) != + crypto_ahash_statesize(fallback_tfm)) { + crypto_free_ahash(fallback_tfm); + return -EINVAL; + } + ctx->fallback = fallback_tfm; - hash->descsize += crypto_shash_descsize(fallback_tfm); + return 0; } @@ -215,26 +186,27 @@ static void padlock_exit_tfm(struct crypto_shash *hash) { struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash); - crypto_free_shash(ctx->fallback); + crypto_free_ahash(ctx->fallback); } static struct shash_alg sha1_alg = { .digestsize = SHA1_DIGEST_SIZE, - .init = padlock_sha_init, + .init = padlock_sha1_init, .update = padlock_sha_update, .finup = padlock_sha1_finup, - .final = padlock_sha1_final, .export = padlock_sha_export, .import = padlock_sha_import, .init_tfm = padlock_init_tfm, .exit_tfm = padlock_exit_tfm, - .descsize = sizeof(struct padlock_sha_desc), - .statesize = sizeof(struct sha1_state), + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-padlock", .cra_priority = PADLOCK_CRA_PRIORITY, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, @@ -243,21 +215,22 @@ static struct shash_alg sha1_alg = { static struct shash_alg sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, - .init = padlock_sha_init, + .init = padlock_sha256_init, .update = padlock_sha_update, .finup = padlock_sha256_finup, - .final = padlock_sha256_final, + .init_tfm = padlock_init_tfm, .export = padlock_sha_export, .import = padlock_sha_import, - .init_tfm = padlock_init_tfm, .exit_tfm = padlock_exit_tfm, - .descsize = sizeof(struct padlock_sha_desc), - .statesize = sizeof(struct sha256_state), + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = sizeof(struct crypto_sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-padlock", .cra_priority = PADLOCK_CRA_PRIORITY, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, @@ -266,207 +239,58 @@ static struct shash_alg sha256_alg = { /* Add two shash_alg instance for hardware-implemented * * multiple-parts hash supported by VIA Nano Processor.*/ -static int padlock_sha1_init_nano(struct shash_desc *desc) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - *sctx = (struct sha1_state){ - .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, - }; - - return 0; -} static int padlock_sha1_update_nano(struct shash_desc *desc, - const u8 *data, unsigned int len) + const u8 *src, unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial, done; - const u8 *src; /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ - u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src 
= data; - memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); - - if ((partial + len) >= SHA1_BLOCK_SIZE) { - - /* Append the bytes in state's buffer to a block to handle */ - if (partial) { - done = -partial; - memcpy(sctx->buffer + partial, data, - done + SHA1_BLOCK_SIZE); - src = sctx->buffer; - asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" - : "+S"(src), "+D"(dst) \ - : "a"((long)-1), "c"((unsigned long)1)); - done += SHA1_BLOCK_SIZE; - src = data + done; - } - - /* Process the left bytes from the input data */ - if (len - done >= SHA1_BLOCK_SIZE) { - asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" - : "+S"(src), "+D"(dst) - : "a"((long)-1), - "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); - done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); - src = data + done; - } - partial = 0; - } - memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); - memcpy(sctx->buffer + partial, src, len - done); - - return 0; -} - -static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out) -{ - struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); - unsigned int partial, padlen; - __be64 bits; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_be64(state->count << 3); - - /* Pad out to 56 mod 64 */ - partial = state->count & 0x3f; - padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); - padlock_sha1_update_nano(desc, padding, padlen); - - /* Append length field bytes */ - padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits)); - - /* Swap to output */ - padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); - - return 0; -} - -static int padlock_sha256_init_nano(struct shash_desc *desc) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - *sctx = (struct sha256_state){ - .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \ - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}, - }; - - return 0; + struct sha1_state *state = padlock_shash_desc_ctx(desc); + int blocks = len / SHA1_BLOCK_SIZE; + + len -= blocks * SHA1_BLOCK_SIZE; + state->count += blocks * SHA1_BLOCK_SIZE; + + /* Process the left bytes from the input data */ + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(state) + : "a"((long)-1), + "c"((unsigned long)blocks)); + return len; } -static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data, +static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *src, unsigned int len) { - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int partial, done; - const u8 *src; /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ - u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src = data; - memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); - - if ((partial + len) >= SHA256_BLOCK_SIZE) { - - /* Append the bytes in state's buffer to a block to handle */ - if (partial) { - done = -partial; - memcpy(sctx->buf + partial, data, - done + SHA256_BLOCK_SIZE); - src = sctx->buf; - asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" - : "+S"(src), "+D"(dst) - : "a"((long)-1), "c"((unsigned long)1)); - done += SHA256_BLOCK_SIZE; - src = data + done; - } - - /* Process the left bytes from input data*/ - if (len - done >= SHA256_BLOCK_SIZE) { - asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" - : "+S"(src), "+D"(dst) - : "a"((long)-1), - "c"((unsigned long)((len - done) / 64))); - done += ((len - done) - (len - done) % 64); - src = 
data + done; - } - partial = 0; - } - memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); - memcpy(sctx->buf + partial, src, len - done); - - return 0; -} - -static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *state = - (struct sha256_state *)shash_desc_ctx(desc); - unsigned int partial, padlen; - __be64 bits; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_be64(state->count << 3); - - /* Pad out to 56 mod 64 */ - partial = state->count & 0x3f; - padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); - padlock_sha256_update_nano(desc, padding, padlen); - - /* Append length field bytes */ - padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits)); - - /* Swap to output */ - padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); - - return 0; -} - -static int padlock_sha_export_nano(struct shash_desc *desc, - void *out) -{ - int statesize = crypto_shash_statesize(desc->tfm); - void *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, statesize); - return 0; -} - -static int padlock_sha_import_nano(struct shash_desc *desc, - const void *in) -{ - int statesize = crypto_shash_statesize(desc->tfm); - void *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, statesize); - return 0; + struct crypto_sha256_state *state = padlock_shash_desc_ctx(desc); + int blocks = len / SHA256_BLOCK_SIZE; + + len -= blocks * SHA256_BLOCK_SIZE; + state->count += blocks * SHA256_BLOCK_SIZE; + + /* Process the left bytes from input data*/ + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(state) + : "a"((long)-1), + "c"((unsigned long)blocks)); + return len; } static struct shash_alg sha1_alg_nano = { .digestsize = SHA1_DIGEST_SIZE, - .init = padlock_sha1_init_nano, + .init = padlock_sha1_init, .update = padlock_sha1_update_nano, - .final = padlock_sha1_final_nano, - .export = padlock_sha_export_nano, - .import = padlock_sha_import_nano, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = padlock_sha1_finup, + .export = padlock_sha_export, + .import = padlock_sha_import, + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-padlock-nano", .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -474,17 +298,19 @@ static struct shash_alg sha1_alg_nano = { static struct shash_alg sha256_alg_nano = { .digestsize = SHA256_DIGEST_SIZE, - .init = padlock_sha256_init_nano, + .init = padlock_sha256_init, .update = padlock_sha256_update_nano, - .final = padlock_sha256_final_nano, - .export = padlock_sha_export_nano, - .import = padlock_sha_import_nano, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), + .finup = padlock_sha256_finup, + .export = padlock_sha_export, + .import = padlock_sha_import, + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = sizeof(struct crypto_sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-padlock-nano", .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 69d6019d8abc..d6928ebe9526 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ 
b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -52,12 +52,11 @@ static int rk_ahash_digest_fb(struct ahash_request *areq) algt->stat_fb++; ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); return crypto_ahash_digest(&rctx->fallback_req); } @@ -124,8 +123,9 @@ static int rk_ahash_init(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -137,10 +137,10 @@ static int rk_ahash_update(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -152,9 +152,10 @@ static int rk_ahash_final(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -166,12 +167,11 @@ static int rk_ahash_finup(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -183,8 +183,9 @@ static int rk_ahash_import(struct ahash_request *req, const void *in) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -196,8 +197,9 @@ static int rk_ahash_export(struct ahash_request *req, void *out) struct 
rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index b4c3c14dafd5..b829c84f60f2 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -9,11 +9,17 @@ // // Hash part based on omap-sham.c driver. +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/md5.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> #include <linux/clk.h> -#include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -22,17 +28,9 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> - -#include <crypto/ctr.h> -#include <crypto/aes.h> -#include <crypto/algapi.h> -#include <crypto/scatterwalk.h> - -#include <crypto/hash.h> -#include <crypto/md5.h> -#include <crypto/sha1.h> -#include <crypto/sha2.h> -#include <crypto/internal/hash.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> #define _SBF(s, v) ((v) << (s)) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 091612b066f1..fdc0b2486069 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req) (auth_len >= SA_UNSAFE_DATA_SZ_MIN && auth_len <= SA_UNSAFE_DATA_SZ_MAX)) { struct ahash_request *subreq = &rctx->fallback_req; - int ret = 0; + int ret; ahash_request_set_tfm(subreq, ctx->fallback.ahash); - subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - - crypto_ahash_init(subreq); - - subreq->nbytes = auth_len; - subreq->src = req->src; - subreq->result = req->result; - - ret |= crypto_ahash_update(subreq); - - subreq->nbytes = 0; + ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(subreq, req->src, req->result, auth_len); - ret |= crypto_ahash_final(subreq); + ret = crypto_ahash_digest(subreq); return ret; } @@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) return ret; if (alg_base) { - ctx->shash = crypto_alloc_shash(alg_base, 0, - CRYPTO_ALG_NEED_FALLBACK); + ctx->shash = crypto_alloc_shash(alg_base, 0, 0); if (IS_ERR(ctx->shash)) { dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", alg_base); @@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } /* for fallback */ ctx->fallback.ahash = - crypto_alloc_ahash(alg_base, 0, - CRYPTO_ALG_NEED_FALLBACK); + crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(ctx->fallback.ahash)) { dev_err(ctx->dev_data->dev, "Could not load fallback driver\n"); @@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req) crypto_ahash_digestsize(tfm), rctx); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + 
ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0); return crypto_ahash_init(&rctx->fallback_req); } static int sa_sha_update(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } static int sa_sha_final(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } static int sa_sha_finup(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in) struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in) static int sa_sha_export(struct ahash_request *req, void *out) { struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *subreq = &rctx->fallback_req; - ahash_request_set_tfm(subreq, ctx->fallback.ahash); - subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); return crypto_ahash_export(subreq, out); } diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index ca9d0cca1f74..0e07d0523291 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -269,7 +269,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) unsigned int cmdlen, key1_id, key2_id; 
int ret; - rctx->iv = (u32 *)req->iv; + rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv; rctx->len = req->cryptlen; key1_id = ctx->key1_id; key2_id = ctx->key2_id; @@ -498,9 +498,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) if (!req->cryptlen) return 0; - if (ctx->alg == SE_ALG_ECB) - req->iv = NULL; - rctx->encrypt = encrypt; return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 42d007b7af45..d09b4aaeecef 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -117,8 +117,9 @@ static int tegra_sha_fallback_init(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -130,10 +131,10 @@ static int tegra_sha_fallback_update(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -145,9 +146,10 @@ static int tegra_sha_fallback_final(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -159,12 +161,11 @@ static int tegra_sha_fallback_finup(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -176,12 +177,11 @@ static int tegra_sha_fallback_digest(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + 
ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); return crypto_ahash_digest(&rctx->fallback_req); } @@ -193,8 +193,9 @@ static int tegra_sha_fallback_import(struct ahash_request *req, const void *in) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -206,8 +207,9 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c index 580649f9bff8..5813017b6b79 100644 --- a/drivers/crypto/xilinx/zynqmp-sha.c +++ b/drivers/crypto/xilinx/zynqmp-sha.c @@ -3,18 +3,18 @@ * Xilinx ZynqMP SHA Driver. * Copyright (c) 2022 Xilinx Inc. */ -#include <linux/cacheflush.h> -#include <crypto/hash.h> #include <crypto/internal/hash.h> #include <crypto/sha3.h> -#include <linux/crypto.h> +#include <linux/cacheflush.h> +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/firmware/xlnx-zynqmp.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/spinlock.h> #include <linux/platform_device.h> #define ZYNQMP_DMA_BIT_MASK 32U @@ -36,13 +36,11 @@ struct zynqmp_sha_tfm_ctx { struct crypto_shash *fbk_tfm; }; -struct zynqmp_sha_desc_ctx { - struct shash_desc fbk_req; -}; - static dma_addr_t update_dma_addr, final_dma_addr; static char *ubuf, *fbuf; +static DEFINE_SPINLOCK(zynqmp_sha_lock); + static int zynqmp_sha_init_tfm(struct crypto_shash *hash) { const char *fallback_driver_name = crypto_shash_alg_name(hash); @@ -60,8 +58,13 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash) if (IS_ERR(fallback_tfm)) return PTR_ERR(fallback_tfm); + if (crypto_shash_descsize(hash) < + crypto_shash_statesize(tfm_ctx->fbk_tfm)) { + crypto_free_shash(fallback_tfm); + return -EINVAL; + } + tfm_ctx->fbk_tfm = fallback_tfm; - hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm); return 0; } @@ -70,61 +73,55 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash) { struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash); - if (tfm_ctx->fbk_tfm) { - crypto_free_shash(tfm_ctx->fbk_tfm); - tfm_ctx->fbk_tfm = NULL; - } + crypto_free_shash(tfm_ctx->fbk_tfm); +} - memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx)); +static int zynqmp_sha_continue(struct shash_desc *desc, + struct shash_desc *fbdesc, int err) +{ + err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc)); + shash_desc_zero(fbdesc); + return err; } static int zynqmp_sha_init(struct shash_desc *desc) { - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct crypto_shash *fbtfm = tctx->fbk_tfm; + SHASH_DESC_ON_STACK(fbdesc, fbtfm); + int err; - dctx->fbk_req.tfm = tctx->fbk_tfm; - return 
crypto_shash_init(&dctx->fbk_req); + fbdesc->tfm = fbtfm; + err = crypto_shash_init(fbdesc); + return zynqmp_sha_continue(desc, fbdesc, err); } static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length) { - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); - - return crypto_shash_update(&dctx->fbk_req, data, length); -} - -static int zynqmp_sha_final(struct shash_desc *desc, u8 *out) -{ - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct crypto_shash *fbtfm = tctx->fbk_tfm; + SHASH_DESC_ON_STACK(fbdesc, fbtfm); + int err; - return crypto_shash_final(&dctx->fbk_req, out); + fbdesc->tfm = fbtfm; + err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?: + crypto_shash_update(fbdesc, data, length); + return zynqmp_sha_continue(desc, fbdesc, err); } static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); - - return crypto_shash_finup(&dctx->fbk_req, data, length, out); -} - -static int zynqmp_sha_import(struct shash_desc *desc, const void *in) -{ - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct crypto_shash *fbtfm = tctx->fbk_tfm; + SHASH_DESC_ON_STACK(fbdesc, fbtfm); - dctx->fbk_req.tfm = tctx->fbk_tfm; - return crypto_shash_import(&dctx->fbk_req, in); + fbdesc->tfm = fbtfm; + return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?: + crypto_shash_finup(fbdesc, data, length, out); } -static int zynqmp_sha_export(struct shash_desc *desc, void *out) -{ - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); - - return crypto_shash_export(&dctx->fbk_req, out); -} - -static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { unsigned int remaining_len = len; int update_size; @@ -159,26 +156,27 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i return ret; } +static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +{ + scoped_guard(spinlock_bh, &zynqmp_sha_lock) + return __zynqmp_sha_digest(desc, data, len, out); +} + static struct zynqmp_sha_drv_ctx sha3_drv_ctx = { .sha3_384 = { .init = zynqmp_sha_init, .update = zynqmp_sha_update, - .final = zynqmp_sha_final, .finup = zynqmp_sha_finup, .digest = zynqmp_sha_digest, - .export = zynqmp_sha_export, - .import = zynqmp_sha_import, .init_tfm = zynqmp_sha_init_tfm, .exit_tfm = zynqmp_sha_exit_tfm, - .descsize = sizeof(struct zynqmp_sha_desc_ctx), - .statesize = sizeof(struct sha3_state), + .descsize = SHA3_384_EXPORT_SIZE, .digestsize = SHA3_384_DIGEST_SIZE, .base = { .cra_name = "sha3-384", .cra_driver_name = "zynqmp-sha3-384", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA3_384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx), diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 15699299dc11..17b692eb3257 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -119,7 +119,7 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, int cxl_ras_init(void); void cxl_ras_exit(void); -int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port); +int cxl_gpf_port_setup(struct 
cxl_dport *dport); int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res, int nid, resource_size_t *size); diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c index f4daefe3180e..1498e2369c37 100644 --- a/drivers/cxl/core/features.c +++ b/drivers/cxl/core/features.c @@ -528,13 +528,13 @@ static void *cxlctl_set_feature(struct cxl_features_state *cxlfs, rc = cxl_set_feature(cxl_mbox, &feat_in->uuid, feat_in->version, feat_in->feat_data, data_size, flags, offset, &return_code); + *out_len = sizeof(*rpc_out); if (rc) { rpc_out->retval = return_code; return no_free_ptr(rpc_out); } rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS; - *out_len = sizeof(*rpc_out); return no_free_ptr(rpc_out); } @@ -677,7 +677,7 @@ static void free_memdev_fwctl(void *_fwctl_dev) fwctl_put(fwctl_dev); } -int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd) +int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd) { struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_features_state *cxlfs; @@ -700,7 +700,7 @@ int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd) if (rc) return rc; - return devm_add_action_or_reset(&cxlmd->dev, free_memdev_fwctl, + return devm_add_action_or_reset(host, free_memdev_fwctl, no_free_ptr(fwctl_dev)); } EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL"); diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 96fecb799cbc..3b80e9a76ba8 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -1072,14 +1072,20 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c) #define GPF_TIMEOUT_BASE_MAX 2 #define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */ -u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port) +u16 cxl_gpf_get_dvsec(struct device *dev) { + struct pci_dev *pdev; + bool is_port = true; u16 dvsec; if (!dev_is_pci(dev)) return 0; - dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL, + pdev = to_pci_dev(dev); + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) + is_port = false; + + dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL, is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF); if (!dvsec) dev_warn(dev, "%s GPF DVSEC not present\n", @@ -1128,26 +1134,24 @@ static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase) return rc; } -int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port) +int cxl_gpf_port_setup(struct cxl_dport *dport) { - struct pci_dev *pdev; - - if (!port) + if (!dport) return -EINVAL; - if (!port->gpf_dvsec) { + if (!dport->gpf_dvsec) { + struct pci_dev *pdev; int dvsec; - dvsec = cxl_gpf_get_dvsec(dport_dev, true); + dvsec = cxl_gpf_get_dvsec(dport->dport_dev); if (!dvsec) return -EINVAL; - port->gpf_dvsec = dvsec; + dport->gpf_dvsec = dvsec; + pdev = to_pci_dev(dport->dport_dev); + update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1); + update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2); } - pdev = to_pci_dev(dport_dev); - update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1); - update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2); - return 0; } diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 0fd6646c1a2e..726bd4a7de27 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1678,7 +1678,7 @@ retry: if (rc && rc != -EBUSY) return rc; - cxl_gpf_port_setup(dport_dev, port); + cxl_gpf_port_setup(dport); /* Any more ports to add between this one and the root? 
*/ if (!dev_is_cxl_root_child(&port->dev)) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 117c2e94c761..5ca7b0eed568 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -581,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri resource_size_t rcrb = ri->base; void __iomem *addr; u32 bar0, bar1; - u16 cmd; u32 id; if (which == CXL_RCRB_UPSTREAM) @@ -603,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri } id = readl(addr + PCI_VENDOR_ID); - cmd = readw(addr + PCI_COMMAND); bar0 = readl(addr + PCI_BASE_ADDRESS_0); bar1 = readl(addr + PCI_BASE_ADDRESS_1); iounmap(addr); @@ -618,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri dev_err(dev, "Failed to access Downstream Port RCRB\n"); return CXL_RESOURCE_NONE; } - if (!(cmd & PCI_COMMAND_MEMORY)) - return CXL_RESOURCE_NONE; /* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */ if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO)) return CXL_RESOURCE_NONE; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index be8a7dc77719..a9ab46eb0610 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -592,7 +592,6 @@ struct cxl_dax_region { * @cdat: Cached CDAT data * @cdat_available: Should a CDAT attribute be available in sysfs * @pci_latency: Upstream latency in picoseconds - * @gpf_dvsec: Cached GPF port DVSEC */ struct cxl_port { struct device dev; @@ -616,7 +615,6 @@ struct cxl_port { } cdat; bool cdat_available; long pci_latency; - int gpf_dvsec; }; /** @@ -664,6 +662,7 @@ struct cxl_rcrb_info { * @regs: Dport parsed register blocks * @coord: access coordinates (bandwidth and latency performance attributes) * @link_latency: calculated PCIe downstream latency + * @gpf_dvsec: Cached GPF port DVSEC */ struct cxl_dport { struct device *dport_dev; @@ -675,6 +674,7 @@ struct cxl_dport { struct cxl_regs regs; struct access_coordinate coord[ACCESS_COORDINATE_MAX]; long link_latency; + int gpf_dvsec; }; /** @@ -910,6 +910,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port); #define __mock static #endif -u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port); +u16 cxl_gpf_get_dvsec(struct device *dev); #endif /* __CXL_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 7b14a154463c..785aa2af5eaa 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1018,7 +1018,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - rc = devm_cxl_setup_fwctl(cxlmd); + rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd); if (rc) dev_dbg(&pdev->dev, "No CXL FWCTL setup\n"); diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index d061fe3d2b86..e197883690ef 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -108,7 +108,7 @@ static void cxl_nvdimm_arm_dirty_shutdown_tracking(struct cxl_nvdimm *cxl_nvd) return; } - if (!cxl_gpf_get_dvsec(cxlds->dev, false)) + if (!cxl_gpf_get_dvsec(cxlds->dev)) return; if (cxl_get_dirty_count(mds, &count)) { diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 5f8d010516f0..b1ef4546346d 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, count++; dma_resv_list_set(fobj, i, fence, usage); - /* pointer update must be visible before we extend the num_fences */ - smp_store_mb(fobj->num_fences, count); + /* fence update must be 
visible before we extend the num_fences */ + smp_wmb(); + fobj->num_fences = count; } EXPORT_SYMBOL(dma_resv_add_fence); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index f5905d67dedb..22a808995f10 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -438,15 +438,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a return -EINVAL; pt = dma_fence_to_sync_pt(fence); - if (!pt) - return -EINVAL; + if (!pt) { + ret = -EINVAL; + goto put_fence; + } spin_lock_irqsave(fence->lock, flags); - if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { - data.deadline_ns = ktime_to_ns(pt->deadline); - } else { + if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { ret = -ENOENT; + goto unlock; } + data.deadline_ns = ktime_to_ns(pt->deadline); spin_unlock_irqrestore(fence->lock, flags); dma_fence_put(fence); @@ -458,6 +460,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a return -EFAULT; return 0; + +unlock: + spin_unlock_irqrestore(fence->lock, flags); +put_fence: + dma_fence_put(fence); + + return ret; } static long sw_sync_ioctl(struct file *file, unsigned int cmd, diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index cc7398cc17d6..e74e36a8ecda 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -393,7 +393,7 @@ static long udmabuf_create(struct miscdevice *device, if (!ubuf) return -ENOMEM; - pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; + pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; for (i = 0; i < head->count; i++) { pgoff_t subpgcnt; diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c index 715ac3ae067b..81339664036f 100644 --- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c @@ -342,6 +342,9 @@ static void pt_cmd_callback_work(void *data, int err) struct pt_dma_chan *chan; unsigned long flags; + if (!desc) + return; + dma_chan = desc->vd.tx.chan; chan = to_pt_chan(dma_chan); @@ -355,16 +358,14 @@ static void pt_cmd_callback_work(void *data, int err) desc->status = DMA_ERROR; spin_lock_irqsave(&chan->vc.lock, flags); - if (desc) { - if (desc->status != DMA_COMPLETE) { - if (desc->status != DMA_ERROR) - desc->status = DMA_COMPLETE; + if (desc->status != DMA_COMPLETE) { + if (desc->status != DMA_ERROR) + desc->status = DMA_COMPLETE; - dma_cookie_complete(tx_desc); - dma_descriptor_unmap(tx_desc); - } else { - tx_desc = NULL; - } + dma_cookie_complete(tx_desc); + dma_descriptor_unmap(tx_desc); + } else { + tx_desc = NULL; } spin_unlock_irqrestore(&chan->vc.lock, flags); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d891dfca358e..91b2fbc0b864 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -841,9 +841,9 @@ static int dmatest_func(void *data) } else { dma_async_issue_pending(chan); - wait_event_timeout(thread->done_wait, - done->done, - msecs_to_jiffies(params->timeout)); + wait_event_freezable_timeout(thread->done_wait, + done->done, + msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 756d67325db5..66bfa28d984e 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -57,7 +57,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id) intr = edma_readl_chreg(fsl_chan, ch_int); if (!intr) - return IRQ_HANDLED; + return IRQ_NONE; 
edma_writel_chreg(fsl_chan, 1, ch_int); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index ff94ee892339..6d12033649f8 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -222,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) struct idxd_wq *wq; struct device *dev, *fdev; int rc = 0; - struct iommu_sva *sva; + struct iommu_sva *sva = NULL; unsigned int pasid; struct idxd_cdev *idxd_cdev; @@ -317,7 +317,7 @@ failed_set_pasid: if (device_user_pasid_enabled(idxd)) idxd_xa_pasid_remove(ctx); failed_get_pasid: - if (device_user_pasid_enabled(idxd)) + if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva)) iommu_sva_unbind_device(sva); failed: mutex_unlock(&wq->wq_lock); @@ -407,6 +407,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) return -EPERM; + if (current->mm != ctx->mm) + return -EPERM; + rc = check_vma(wq, vma, __func__); if (rc < 0) return rc; @@ -473,6 +476,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t ssize_t written = 0; int i; + if (current->mm != ctx->mm) + return -EPERM; + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { int rc = idxd_submit_user_descriptor(ctx, udesc + i); @@ -493,6 +499,9 @@ static __poll_t idxd_cdev_poll(struct file *filp, struct idxd_device *idxd = wq->idxd; __poll_t out = 0; + if (current->mm != ctx->mm) + return POLLNVAL; + poll_wait(filp, &wq->err_queue, wait); spin_lock(&idxd->dev_lock); if (idxd->sw_err.valid) diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index fca1d2924999..760b7d81fcd8 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -155,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd) pci_free_irq_vectors(pdev); } +static void idxd_clean_wqs(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); + conf_dev = wq_confdev(wq); + put_device(conf_dev); + kfree(wq); + } + bitmap_free(idxd->wq_enable_map); + kfree(idxd->wqs); +} + static int idxd_setup_wqs(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -169,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); if (!idxd->wq_enable_map) { - kfree(idxd->wqs); - return -ENOMEM; + rc = -ENOMEM; + goto err_bitmap; } for (i = 0; i < idxd->max_wqs; i++) { @@ -189,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_wq_device_type; rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); - if (rc < 0) { - put_device(conf_dev); + if (rc < 0) goto err; - } mutex_init(&wq->wq_lock); init_waitqueue_head(&wq->err_queue); @@ -203,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd) wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { - put_device(conf_dev); rc = -ENOMEM; goto err; } @@ -211,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) if (idxd->hw.wq_cap.op_config) { wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); if (!wq->opcap_bmap) { - put_device(conf_dev); rc = -ENOMEM; - goto err; + goto err_opcap_bmap; } bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, 
IDXD_MAX_OPCAP_BITS); } @@ -224,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd) return 0; - err: +err_opcap_bmap: + kfree(wq->wqcfg); + +err: + put_device(conf_dev); + kfree(wq); + while (--i >= 0) { wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); conf_dev = wq_confdev(wq); put_device(conf_dev); + kfree(wq); + } + bitmap_free(idxd->wq_enable_map); + +err_bitmap: + kfree(idxd->wqs); + return rc; } +static void idxd_clean_engines(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_engines; i++) { + engine = idxd->engines[i]; + conf_dev = engine_confdev(engine); + put_device(conf_dev); + kfree(engine); + } + kfree(idxd->engines); +} + static int idxd_setup_engines(struct idxd_device *idxd) { struct idxd_engine *engine; @@ -263,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); if (rc < 0) { put_device(conf_dev); + kfree(engine); goto err; } @@ -276,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd) engine = idxd->engines[i]; conf_dev = engine_confdev(engine); put_device(conf_dev); + kfree(engine); } + kfree(idxd->engines); + return rc; } +static void idxd_clean_groups(struct idxd_device *idxd) +{ + struct idxd_group *group; + int i; + + for (i = 0; i < idxd->max_groups; i++) { + group = idxd->groups[i]; + put_device(group_confdev(group)); + kfree(group); + } + kfree(idxd->groups); +} + static int idxd_setup_groups(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -310,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); if (rc < 0) { put_device(conf_dev); + kfree(group); goto err; } @@ -334,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd) while (--i >= 0) { group = idxd->groups[i]; put_device(group_confdev(group)); + kfree(group); } + kfree(idxd->groups); + return rc; } static void idxd_cleanup_internals(struct idxd_device *idxd) { - int i; - - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_groups(idxd); + idxd_clean_engines(idxd); + idxd_clean_wqs(idxd); destroy_workqueue(idxd->wq); } @@ -390,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd) static int idxd_setup_internals(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; - int rc, i; + int rc; init_waitqueue_head(&idxd->cmd_waitq); @@ -421,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd) err_evl: destroy_workqueue(idxd->wq); err_wkq_create: - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); + idxd_clean_groups(idxd); err_group: - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); + idxd_clean_engines(idxd); err_engine: - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_wqs(idxd); err_wqs: return rc; } @@ -528,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd) idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); } +static void idxd_free(struct idxd_device *idxd) +{ + if (!idxd) + return; + + put_device(idxd_confdev(idxd)); + bitmap_free(idxd->opcap_bmap); + 
ida_free(&idxd_ida, idxd->id); + kfree(idxd); +} + static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) { struct device *dev = &pdev->dev; @@ -545,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); if (idxd->id < 0) - return NULL; + goto err_ida; idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); - if (!idxd->opcap_bmap) { - ida_free(&idxd_ida, idxd->id); - return NULL; - } + if (!idxd->opcap_bmap) + goto err_opcap; device_initialize(conf_dev); conf_dev->parent = dev; conf_dev->bus = &dsa_bus_type; conf_dev->type = idxd->data->dev_type; rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); - if (rc < 0) { - put_device(conf_dev); - return NULL; - } + if (rc < 0) + goto err_name; spin_lock_init(&idxd->dev_lock); spin_lock_init(&idxd->cmd_lock); return idxd; + +err_name: + put_device(conf_dev); + bitmap_free(idxd->opcap_bmap); +err_opcap: + ida_free(&idxd_ida, idxd->id); +err_ida: + kfree(idxd); + + return NULL; } static int idxd_enable_system_pasid(struct idxd_device *idxd) @@ -1190,7 +1266,7 @@ int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev, err: pci_iounmap(pdev, idxd->reg_base); err_iomap: - put_device(idxd_confdev(idxd)); + idxd_free(idxd); err_idxd_alloc: pci_disable_device(pdev); return rc; @@ -1232,7 +1308,6 @@ static void idxd_shutdown(struct pci_dev *pdev) static void idxd_remove(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); - struct idxd_irq_entry *irq_entry; idxd_unregister_devices(idxd); /* @@ -1245,20 +1320,12 @@ static void idxd_remove(struct pci_dev *pdev) get_device(idxd_confdev(idxd)); device_unregister(idxd_confdev(idxd)); idxd_shutdown(pdev); - if (device_pasid_enabled(idxd)) - idxd_disable_system_pasid(idxd); idxd_device_remove_debugfs(idxd); - - irq_entry = idxd_get_ie(idxd, 0); - free_irq(irq_entry->vector, irq_entry); - pci_free_irq_vectors(pdev); + idxd_cleanup(idxd); pci_iounmap(pdev, idxd->reg_base); - if (device_user_pasid_enabled(idxd)) - idxd_disable_sva(pdev); - pci_disable_device(pdev); - destroy_workqueue(idxd->wq); - perfmon_pmu_remove(idxd); put_device(idxd_confdev(idxd)); + idxd_free(idxd); + pci_disable_device(pdev); } static struct pci_driver idxd_pci_driver = { diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index c9aba2304de7..5d3c0ae6b342 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -10,7 +10,7 @@ #include <linux/interrupt.h> #include <linux/dca.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> /* either a kernel change is needed, or we need something like this in kernel */ #ifndef CONFIG_SMP diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index d5ddb4e30e71..47c8adfdc155 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); struct virt_dma_desc *vd; - unsigned long flags; - spin_lock_irqsave(&cvc->pc->lock, flags); list_for_each_entry(vd, &cvc->pc->queue, node) if (vd->tx.cookie == cookie) { - spin_unlock_irqrestore(&cvc->pc->lock, flags); return vd; } - spin_unlock_irqrestore(&cvc->pc->lock, flags); list_for_each_entry(vd, &cvc->vc.desc_issued, node) if (vd->tx.cookie == cookie) @@ -452,9 +448,11 @@ static enum 
dma_status mtk_cqdma_tx_status(struct dma_chan *c, if (ret == DMA_COMPLETE || !txstate) return ret; + spin_lock_irqsave(&cvc->pc->lock, flags); spin_lock_irqsave(&cvc->vc.lock, flags); vd = mtk_cqdma_find_active_desc(c, cookie); spin_unlock_irqrestore(&cvc->vc.lock, flags); + spin_unlock_irqrestore(&cvc->pc->lock, flags); if (vd) { cvd = to_cqdma_vdesc(vd); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index b223a7aacb0c..b6255c0601bb 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work) u32 residue_diff; ktime_t time_diff; unsigned long delay; + unsigned long flags; while (1) { + spin_lock_irqsave(&uc->vc.lock, flags); + if (uc->desc) { /* Get previous residue and time stamp */ residue_diff = uc->tx_drain.residue; @@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work) break; } + spin_unlock_irqrestore(&uc->vc.lock, flags); + usleep_range(ktime_to_us(delay), ktime_to_us(delay) + 10); continue; @@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work) break; } + + spin_unlock_irqrestore(&uc->vc.lock, flags); } static irqreturn_t udma_ring_irq_handler(int irq, void *data) @@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct udma_dev *ud = ofdma->of_dma_data; - dma_cap_mask_t mask = ud->ddev.cap_mask; struct udma_filter_param filter_param; struct dma_chan *chan; @@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, } } - chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, + chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param, ofdma->of_node); if (!chan) { dev_err(ud->dev, "get channel fail in %s.\n", __func__); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 47cea645fc91..20333608b983 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) if (status & priv->ecc_stat_ce_mask) { regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset, &err_addr); - if (priv->ecc_uecnt_offset) + if (priv->ecc_cecnt_offset) regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset, &err_count); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, @@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, } } - /* Interrupt mode set to every SBERR */ - regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, - ALTR_A10_ECC_INTMODE); /* Enable ECC */ ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + ALTR_A10_ECC_CTRL_OFST)); @@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev) return PTR_ERR(edac->ecc_mgr_map); } + /* Set irq mask for DDR SBE to avoid any pending irq before registration */ + regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, + (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0)); + edac->irq_chip.name = pdev->dev.of_node->name; edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h index 3727e72c8c2e..7248d24c4908 100644 --- a/drivers/edac/altera_edac.h +++ b/drivers/edac/altera_edac.h @@ -249,6 +249,8 @@ struct altr_sdram_mc_data { #define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94 #define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98 #define 
A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1) +#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16) +#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17) #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 90f0eb7cc5b9..58b1482a0fbb 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2,8 +2,8 @@ #include <linux/ras.h> #include <linux/string_choices.h> #include "amd64_edac.h" -#include <asm/amd_nb.h> -#include <asm/amd_node.h> +#include <asm/amd/nb.h> +#include <asm/amd/node.h> static struct edac_pci_ctl_info *pci_ctl; @@ -2942,13 +2942,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt) * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since * those are Read-As-Zero. */ - rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); + rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem); edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); /* Check first whether TOP_MEM2 is enabled: */ - rdmsrl(MSR_AMD64_SYSCFG, msr_val); + rdmsrq(MSR_AMD64_SYSCFG, msr_val); if (msr_val & BIT(21)) { - rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); + rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2); edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); } else { edac_dbg(0, " TOP_MEM2 disabled\n"); diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 204834149579..5ddd83dc94ba 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -52,6 +52,7 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <asm/mce.h> +#include <asm/msr.h> #include "edac_module.h" #define EDAC_MOD_STR "ie31200_edac" diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 50d74d3bf0f5..af3c12284a1e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -3,6 +3,7 @@ #include <linux/slab.h> #include <asm/cpu.h> +#include <asm/msr.h> #include "mce_amd.h" diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 19295282de24..fe55613a8ea9 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -299,7 +299,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid); } - ffa_rx_release(); + if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY)) + ffa_rx_release(); mutex_unlock(&drv_info->rx_lock); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 7af01664ce7e..3a5474015f7d 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -255,6 +255,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent, if (!dev) return NULL; + /* Drop the refcnt bumped implicitly by device_find_child */ + put_device(dev); + return to_scmi_dev(dev); } diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 1c75a4c9c371..0390d5ff195e 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1248,7 +1248,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph, } static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer, ktime_t stop) + struct scmi_xfer *xfer, ktime_t stop, + bool *ooo) { struct scmi_info *info = handle_to_scmi_info(cinfo->handle); @@ -1257,7 +1258,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, * in case of out-of-order receptions of delayed responses */ return info->desc->ops->poll_done(cinfo, xfer) || - try_wait_for_completion(&xfer->done) || + (*ooo = 
try_wait_for_completion(&xfer->done)) || ktime_after(ktime_get(), stop); } @@ -1274,15 +1275,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, * itself to support synchronous commands replies. */ if (!desc->sync_cmds_completed_on_ret) { + bool ooo = false; + /* * Poll on xfer using transport provided .poll_done(); * assumes no completion interrupt was available. */ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, - xfer, stop)); - if (ktime_after(ktime_get(), stop)) { + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, + stop, &ooo)); + if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { dev_err(dev, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig index 0a883091259a..e3c2e38b746d 100644 --- a/drivers/firmware/cirrus/Kconfig +++ b/drivers/firmware/cirrus/Kconfig @@ -6,14 +6,11 @@ config FW_CS_DSP config FW_CS_DSP_KUNIT_TEST_UTILS tristate - depends on KUNIT && REGMAP - select FW_CS_DSP config FW_CS_DSP_KUNIT_TEST tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS - depends on KUNIT && REGMAP + depends on KUNIT && REGMAP && FW_CS_DSP default KUNIT_ALL_TESTS - select FW_CS_DSP select FW_CS_DSP_KUNIT_TEST_UTILS help This builds KUnit tests for cs_dsp. diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c index 161272e47bda..73412bcef50c 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c +++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c @@ -462,36 +462,6 @@ unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *pri EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS"); /** - * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version. - * - * @priv: Pointer to struct cs_dsp_test. - * - * Return: Firmware version word value. - */ -unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv) -{ - unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM); - union { - struct wmfw_id_hdr adsp2; - struct wmfw_v3_id_hdr halo; - } hdr; - - switch (priv->dsp->type) { - case WMFW_ADSP2: - regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2)); - return be32_to_cpu(hdr.adsp2.ver); - case WMFW_HALO: - regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo)); - return be32_to_cpu(hdr.halo.ver); - default: - KUNIT_FAIL(priv->test, NULL); - return 0; - } -} -EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap, - "FW_CS_DSP_KUNIT_TEST_UTILS"); - -/** * cs_dsp_mock_xm_header_get_fw_version() - Firmware version. * * @header: Pointer to struct cs_dsp_mock_xm_header. 
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c index 1e161bbc5b4a..163b7faecff4 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c @@ -2198,7 +2198,7 @@ static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp) priv->local->bin_builder = cs_dsp_mock_bin_init(priv, 1, - cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv)); + cs_dsp_mock_xm_header_get_fw_version(xm_hdr)); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder); /* We must provide a dummy wmfw to load */ diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c index 8748874f0552..a7ec956d2724 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c @@ -451,7 +451,7 @@ static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *ds local->bin_builder = cs_dsp_mock_bin_init(priv, 1, - cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv)); + cs_dsp_mock_xm_header_get_fw_version(local->xm_header)); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder); /* Init cs_dsp */ diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index d23a1b9fed75..2f173391b63d 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -85,7 +85,6 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o lib-$(CONFIG_X86) += x86-stub.o smbios.o -lib-$(CONFIG_EFI_MIXED) += x86-mixed.o lib-$(CONFIG_X86_64) += x86-5lvl.o lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c index 77359e802181..f1c5fb45d5f7 100644 --- a/drivers/firmware/efi/libstub/x86-5lvl.c +++ b/drivers/firmware/efi/libstub/x86-5lvl.c @@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void) void efi_5level_switch(void) { - bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl; + bool want_la57 = !efi_no5lvl; bool have_la57 = native_read_cr4() & X86_CR4_LA57; bool need_toggle = want_la57 ^ have_la57; u64 *pgt = (void *)la57_toggle + PAGE_SIZE; diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/drivers/firmware/efi/libstub/x86-mixed.S deleted file mode 100644 index e04ed99bc449..000000000000 --- a/drivers/firmware/efi/libstub/x86-mixed.S +++ /dev/null @@ -1,253 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming - * - * Early support for invoking 32-bit EFI services from a 64-bit kernel. - * - * Because this thunking occurs before ExitBootServices() we have to - * restore the firmware's 32-bit GDT and IDT before we make EFI service - * calls. - * - * On the plus side, we don't have to worry about mangling 64-bit - * addresses into 32-bits because we're executing with an identity - * mapped pagetable and haven't transitioned to 64-bit virtual addresses - * yet. 
- */ - -#include <linux/linkage.h> -#include <asm/desc_defs.h> -#include <asm/msr.h> -#include <asm/page_types.h> -#include <asm/pgtable_types.h> -#include <asm/processor-flags.h> -#include <asm/segment.h> - - .text - .code32 -#ifdef CONFIG_EFI_HANDOVER_PROTOCOL -SYM_FUNC_START(efi32_stub_entry) - call 1f -1: popl %ecx - - /* Clear BSS */ - xorl %eax, %eax - leal (_bss - 1b)(%ecx), %edi - leal (_ebss - 1b)(%ecx), %ecx - subl %edi, %ecx - shrl $2, %ecx - cld - rep stosl - - add $0x4, %esp /* Discard return address */ - movl 8(%esp), %ebx /* struct boot_params pointer */ - jmp efi32_startup -SYM_FUNC_END(efi32_stub_entry) -#endif - -/* - * Called using a far call from __efi64_thunk() below, using the x86_64 SysV - * ABI (except for R8/R9 which are inaccessible to 32-bit code - EAX/EBX are - * used instead). EBP+16 points to the arguments passed via the stack. - * - * The first argument (EDI) is a pointer to the boot service or protocol, to - * which the remaining arguments are passed, each truncated to 32 bits. - */ -SYM_FUNC_START_LOCAL(efi_enter32) - /* - * Convert x86-64 SysV ABI params to i386 ABI - */ - pushl 32(%ebp) /* Up to 3 args passed via the stack */ - pushl 24(%ebp) - pushl 16(%ebp) - pushl %ebx /* R9 */ - pushl %eax /* R8 */ - pushl %ecx - pushl %edx - pushl %esi - - /* Disable paging */ - movl %cr0, %eax - btrl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - - /* Disable long mode via EFER */ - movl $MSR_EFER, %ecx - rdmsr - btrl $_EFER_LME, %eax - wrmsr - - call *%edi - - /* We must preserve return value */ - movl %eax, %edi - - call efi32_enable_long_mode - - addl $32, %esp - movl %edi, %eax - lret -SYM_FUNC_END(efi_enter32) - - .code64 -SYM_FUNC_START(__efi64_thunk) - push %rbp - movl %esp, %ebp - push %rbx - - /* Move args #5 and #6 into 32-bit accessible registers */ - movl %r8d, %eax - movl %r9d, %ebx - - lcalll *efi32_call(%rip) - - pop %rbx - pop %rbp - RET -SYM_FUNC_END(__efi64_thunk) - - .code32 -SYM_FUNC_START_LOCAL(efi32_enable_long_mode) - movl %cr4, %eax - btsl $(X86_CR4_PAE_BIT), %eax - movl %eax, %cr4 - - movl $MSR_EFER, %ecx - rdmsr - btsl $_EFER_LME, %eax - wrmsr - - /* Disable interrupts - the firmware's IDT does not work in long mode */ - cli - - /* Enable paging */ - movl %cr0, %eax - btsl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - ret -SYM_FUNC_END(efi32_enable_long_mode) - -/* - * This is the common EFI stub entry point for mixed mode. It sets up the GDT - * and page tables needed for 64-bit execution, after which it calls the - * common 64-bit EFI entrypoint efi_stub_entry(). - * - * Arguments: 0(%esp) image handle - * 4(%esp) EFI system table pointer - * %ebx struct boot_params pointer (or NULL) - * - * Since this is the point of no return for ordinary execution, no registers - * are considered live except for the function parameters. [Note that the EFI - * stub may still exit and return to the firmware using the Exit() EFI boot - * service.] 
- */ -SYM_FUNC_START_LOCAL(efi32_startup) - movl %esp, %ebp - - subl $8, %esp - sgdtl (%esp) /* Save GDT descriptor to the stack */ - movl 2(%esp), %esi /* Existing GDT pointer */ - movzwl (%esp), %ecx /* Existing GDT limit */ - inc %ecx /* Existing GDT size */ - andl $~7, %ecx /* Ensure size is multiple of 8 */ - - subl %ecx, %esp /* Allocate new GDT */ - andl $~15, %esp /* Realign the stack */ - movl %esp, %edi /* New GDT address */ - leal 7(%ecx), %eax /* New GDT limit */ - pushw %cx /* Push 64-bit CS (for LJMP below) */ - pushl %edi /* Push new GDT address */ - pushw %ax /* Push new GDT limit */ - - /* Copy GDT to the stack and add a 64-bit code segment at the end */ - movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) & 0xffffffff, (%edi,%ecx) - movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) >> 32, 4(%edi,%ecx) - shrl $2, %ecx - cld - rep movsl /* Copy the firmware GDT */ - lgdtl (%esp) /* Switch to the new GDT */ - - call 1f -1: pop %edi - - /* Record mixed mode entry */ - movb $0x0, (efi_is64 - 1b)(%edi) - - /* Set up indirect far call to re-enter 32-bit mode */ - leal (efi32_call - 1b)(%edi), %eax - addl %eax, (%eax) - movw %cs, 4(%eax) - - /* Disable paging */ - movl %cr0, %eax - btrl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - - /* Set up 1:1 mapping */ - leal (pte - 1b)(%edi), %eax - movl $_PAGE_PRESENT | _PAGE_RW | _PAGE_PSE, %ecx - leal (_PAGE_PRESENT | _PAGE_RW)(%eax), %edx -2: movl %ecx, (%eax) - addl $8, %eax - addl $PMD_SIZE, %ecx - jnc 2b - - movl $PAGE_SIZE, %ecx - .irpc l, 0123 - movl %edx, \l * 8(%eax) - addl %ecx, %edx - .endr - addl %ecx, %eax - movl %edx, (%eax) - movl %eax, %cr3 - - call efi32_enable_long_mode - - /* Set up far jump to 64-bit mode (CS is already on the stack) */ - leal (efi_stub_entry - 1b)(%edi), %eax - movl %eax, 2(%esp) - - movl 0(%ebp), %edi - movl 4(%ebp), %esi - movl %ebx, %edx - ljmpl *2(%esp) -SYM_FUNC_END(efi32_startup) - -/* - * efi_status_t efi32_pe_entry(efi_handle_t image_handle, - * efi_system_table_32_t *sys_table) - */ -SYM_FUNC_START(efi32_pe_entry) - pushl %ebx // save callee-save registers - - /* Check whether the CPU supports long mode */ - movl $0x80000001, %eax // assume extended info support - cpuid - btl $29, %edx // check long mode bit - jnc 1f - leal 8(%esp), %esp // preserve stack alignment - xor %ebx, %ebx // no struct boot_params pointer - jmp efi32_startup // only ESP and EBX remain live -1: movl $0x80000003, %eax // EFI_UNSUPPORTED - popl %ebx - RET -SYM_FUNC_END(efi32_pe_entry) - -#ifdef CONFIG_EFI_HANDOVER_PROTOCOL - .org efi32_stub_entry + 0x200 - .code64 -SYM_FUNC_START_NOALIGN(efi64_stub_entry) - jmp efi_handover_entry -SYM_FUNC_END(efi64_stub_entry) -#endif - - .data - .balign 8 -SYM_DATA_START_LOCAL(efi32_call) - .long efi_enter32 - . - .word 0x0 -SYM_DATA_END(efi32_call) -SYM_DATA(efi_is64, .byte 1) - - .bss - .balign PAGE_SIZE -SYM_DATA_LOCAL(pte, .fill 6 * PAGE_SIZE, 1, 0) diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c index a85b2dbdd9f0..15e991b99f5a 100644 --- a/drivers/firmware/samsung/exynos-acpm.c +++ b/drivers/firmware/samsung/exynos-acpm.c @@ -185,6 +185,29 @@ struct acpm_match_data { #define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle) /** + * acpm_get_saved_rx() - get the response if it was already saved. + * @achan: ACPM channel info. + * @xfer: reference to the transfer to get response for. + * @tx_seqnum: xfer TX sequence number. 
+ */ +static void acpm_get_saved_rx(struct acpm_chan *achan, + const struct acpm_xfer *xfer, u32 tx_seqnum) +{ + const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1]; + u32 rx_seqnum; + + if (!rx_data->response) + return; + + rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]); + + if (rx_seqnum == tx_seqnum) { + memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen); + clear_bit(rx_seqnum - 1, achan->bitmap_seqnum); + } +} + +/** * acpm_get_rx() - get response from RX queue. * @achan: ACPM channel info. * @xfer: reference to the transfer to get response for. @@ -204,15 +227,16 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer) rx_front = readl(achan->rx.front); i = readl(achan->rx.rear); - /* Bail out if RX is empty. */ - if (i == rx_front) + tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); + + if (i == rx_front) { + acpm_get_saved_rx(achan, xfer, tx_seqnum); return 0; + } base = achan->rx.base; mlen = achan->mlen; - tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); - /* Drain RX queue. */ do { /* Read RX seqnum. */ @@ -259,16 +283,8 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer) * If the response was not in this iteration of the queue, check if the * RX data was previously saved. */ - rx_data = &achan->rx_data[tx_seqnum - 1]; - if (!rx_set && rx_data->response) { - rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, - rx_data->cmd[0]); - - if (rx_seqnum == tx_seqnum) { - memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen); - clear_bit(rx_seqnum - 1, achan->bitmap_seqnum); - } - } + if (!rx_set) + acpm_get_saved_rx(achan, xfer, tx_seqnum); return 0; } diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c index 5767aed25cdc..a123c05cbc9e 100644 --- a/drivers/firmware/smccc/kvm_guest.c +++ b/drivers/firmware/smccc/kvm_guest.c @@ -95,7 +95,7 @@ void __init kvm_arm_target_impl_cpu_init(void) for (i = 0; i < max_cpus; i++) { arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID, - i, &res); + i, 0, 0, &res); if (res.a0 != SMCCC_RET_SUCCESS) { pr_warn("Discovering target implementation CPUs failed\n"); goto mem_free; @@ -103,7 +103,7 @@ void __init kvm_arm_target_impl_cpu_init(void) target[i].midr = res.a1; target[i].revidr = res.a2; target[i].aidr = res.a3; - }; + } if (!cpu_errata_set_target_impl(max_cpus, target)) { pr_warn("Failed to set target implementation CPUs\n"); diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 3c52cb73237a..e3f990d888d7 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -1224,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) if (!svc->intel_svc_fcs) { dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); ret = -ENOMEM; - goto err_unregister_dev; + goto err_unregister_rsu_dev; } ret = platform_device_add(svc->intel_svc_fcs); if (ret) { platform_device_put(svc->intel_svc_fcs); - goto err_unregister_dev; + goto err_unregister_rsu_dev; } + ret = of_platform_default_populate(dev_of_node(dev), NULL, dev); + if (ret) + goto err_unregister_fcs_dev; + dev_set_drvdata(dev, svc); pr_info("Intel Service Layer Driver Initialized\n"); return 0; -err_unregister_dev: +err_unregister_fcs_dev: + platform_device_unregister(svc->intel_svc_fcs); +err_unregister_rsu_dev: platform_device_unregister(svc->stratix10_svc_rsu); err_free_kfifo: kfifo_free(&controller->svc_fifo); @@ -1253,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct 
platform_device *pdev) struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev); struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); + of_platform_depopulate(ctrl->dev); + platform_device_unregister(svc->intel_svc_fcs); platform_device_unregister(svc->stratix10_svc_rsu); diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c index b9ab29809e96..124ba40e32b1 100644 --- a/drivers/fpga/tests/fpga-bridge-test.c +++ b/drivers/fpga/tests/fpga-bridge-test.c @@ -170,4 +170,5 @@ static struct kunit_suite fpga_bridge_suite = { kunit_test_suite(fpga_bridge_suite); +MODULE_DESCRIPTION("KUnit test for the FPGA Bridge"); MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c index 9cb37aefbac4..8748babb0504 100644 --- a/drivers/fpga/tests/fpga-mgr-test.c +++ b/drivers/fpga/tests/fpga-mgr-test.c @@ -330,4 +330,5 @@ static struct kunit_suite fpga_mgr_suite = { kunit_test_suite(fpga_mgr_suite); +MODULE_DESCRIPTION("KUnit test for the FPGA Manager"); MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c index 6a108cafded8..020ceac48509 100644 --- a/drivers/fpga/tests/fpga-region-test.c +++ b/drivers/fpga/tests/fpga-region-test.c @@ -214,4 +214,5 @@ static struct kunit_suite fpga_region_suite = { kunit_test_suite(fpga_region_suite); +MODULE_DESCRIPTION("KUnit test for the FPGA Region"); MODULE_LICENSE("GPL"); diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c index cb1ac9c40239..bc6378506296 100644 --- a/drivers/fwctl/main.c +++ b/drivers/fwctl/main.c @@ -105,7 +105,7 @@ static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd) if (!test_and_set_bit(0, &fwctl_tainted)) { dev_warn( &fwctl->dev, - "%s(%d): has requested full access to the physical device device", + "%s(%d): has requested full access to the physical device", current->comm, task_pid_nr(current)); add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK); } diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c index 284c4165fdd4..9b9d1f6b5556 100644 --- a/drivers/fwctl/pds/main.c +++ b/drivers/fwctl/pds/main.c @@ -105,12 +105,14 @@ static int pdsfc_identify(struct pdsfc_dev *pdsfc) static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc) { struct device *dev = &pdsfc->fwctl.dev; + u32 num_endpoints; int i; if (!pdsfc->endpoints) return; - for (i = 0; pdsfc->endpoint_info && i < pdsfc->endpoints->num_entries; i++) + num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries); + for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++) mutex_destroy(&pdsfc->endpoint_info[i].lock); vfree(pdsfc->endpoint_info); pdsfc->endpoint_info = NULL; @@ -199,7 +201,7 @@ static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc) ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries; for (i = 0; i < num_endpoints; i++) { mutex_init(&pdsfc->endpoint_info[i].lock); - pdsfc->endpoint_info[i].endpoint = ep_entry[i].id; + pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id); } return 0; @@ -214,6 +216,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc struct pds_fwctl_query_data *data; union pds_core_adminq_cmd cmd; dma_addr_t data_pa; + u32 num_entries; int err; int i; @@ -246,8 +249,9 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc *pa = data_pa; entries = (struct pds_fwctl_query_data_operation *)data->entries; - dev_dbg(dev, "num_entries %d\n", data->num_entries); - for (i = 0; i < data->num_entries; i++) { + 
num_entries = le32_to_cpu(data->num_entries); + dev_dbg(dev, "num_entries %d\n", num_entries); + for (i = 0; i < num_entries; i++) { /* Translate FW command attribute to fwctl scope */ switch (entries[i].scope) { @@ -267,7 +271,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc break; } dev_dbg(dev, "endpoint %d operation: id %x scope %d\n", - ep, entries[i].id, entries[i].scope); + ep, le32_to_cpu(entries[i].id), entries[i].scope); } return data; @@ -280,24 +284,26 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc, struct pds_fwctl_query_data_operation *op_entry; struct pdsfc_rpc_endpoint_info *ep_info = NULL; struct device *dev = &pdsfc->fwctl.dev; + u32 num_entries; int i; /* validate rpc in_len & out_len based * on ident.max_req_sz & max_resp_sz */ - if (rpc->in.len > pdsfc->ident.max_req_sz) { + if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) { dev_dbg(dev, "Invalid request size %u, max %u\n", - rpc->in.len, pdsfc->ident.max_req_sz); + rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz)); return -EINVAL; } - if (rpc->out.len > pdsfc->ident.max_resp_sz) { + if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) { dev_dbg(dev, "Invalid response size %u, max %u\n", - rpc->out.len, pdsfc->ident.max_resp_sz); + rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz)); return -EINVAL; } - for (i = 0; i < pdsfc->endpoints->num_entries; i++) { + num_entries = le32_to_cpu(pdsfc->endpoints->num_entries); + for (i = 0; i < num_entries; i++) { if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) { ep_info = &pdsfc->endpoint_info[i]; break; @@ -326,8 +332,9 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc, /* reject unsupported and/or out of scope commands */ op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries; - for (i = 0; i < ep_info->operations->num_entries; i++) { - if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) { + num_entries = le32_to_cpu(ep_info->operations->num_entries); + for (i = 0; i < num_entries; i++) { + if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) { if (scope < op_entry[i].scope) return -EPERM; return 0; @@ -402,7 +409,7 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope, cmd = (union pds_core_adminq_cmd) { .fwctl_rpc = { .opcode = PDS_FWCTL_CMD_RPC, - .flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP, + .flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP), .ep = cpu_to_le32(rpc->in.ep), .op = cpu_to_le32(rpc->in.op), .req_pa = cpu_to_le64(in_payload_dma_addr), diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO index b5f0a7a2e1bf..4b70cbaa1caa 100644 --- a/drivers/gpio/TODO +++ b/drivers/gpio/TODO @@ -186,3 +186,37 @@ their hardware offsets within the chip. Encourage users to switch to using them and eventually remove the existing global export/unexport attribues. + +------------------------------------------------------------------------------- + +Remove GPIOD_FLAGS_BIT_NONEXCLUSIVE + +GPIOs in the linux kernel are meant to be an exclusive resource. This means +that the GPIO descriptors (the software representation of the hardware concept) +are not reference counted and - in general - only one user at a time can +request a GPIO line and control its settings. The consumer API is designed +around full control of the line's state as evidenced by the fact that, for +instance, gpiod_set_value() does indeed drive the line as requested, instead +of bumping an enable counter of some sort. 
+ +A problematic use-case for GPIOs is when two consumers want to use the same +descriptor independently. An example of such a user is the regulator subsystem +which may instantiate several struct regulator_dev instances containing +a struct device but using the same enable GPIO line. + +A workaround was introduced in the form of the GPIOD_FLAGS_BIT_NONEXCLUSIVE +flag but its implementation is problematic: it does not provide any +synchronization of usage nor did it introduce any enable count meaning the +non-exclusive users of the same descriptor will in fact "fight" for the +control over it. This flag should be removed and replaced with a better +solution, possibly based on the new power sequencing subsystem. + +------------------------------------------------------------------------------- + +Remove devm_gpiod_unhinge() + +devm_gpiod_unhinge() is provided as a way to transfer the ownership of managed +enable GPIOs to the regulator core. Rather than doing that however, we should +make it possible for the regulator subsystem to deal with GPIO resources the +lifetime of which it doesn't control as logically, a GPIO obtained by a caller +should also be freed by it. diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 0cd4c36ae8aa..541517536489 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -410,7 +410,9 @@ static int mpc8xxx_probe(struct platform_device *pdev) goto err; } - device_init_wakeup(dev, true); + ret = devm_device_init_wakeup(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to init wakeup\n"); return 0; err: diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 4055596faef7..57633a7b4270 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -408,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d) struct mvebu_gpio_chip *mvchip = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mvebu_gpio_write_edge_cause(mvchip, ~mask); - irq_gc_unlock(gc); } static void mvebu_gpio_edge_irq_mask(struct irq_data *d) @@ -420,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); ct->mask_cache_priv &= ~mask; mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) @@ -433,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mvebu_gpio_write_edge_cause(mvchip, ~mask); ct->mask_cache_priv |= mask; mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } static void mvebu_gpio_level_irq_mask(struct irq_data *d) @@ -447,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); ct->mask_cache_priv &= ~mask; mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } static void mvebu_gpio_level_irq_unmask(struct irq_data *d) @@ -460,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); ct->mask_cache_priv |= mask; mvebu_gpio_write_level_mask(mvchip, 
ct->mask_cache_priv); - irq_gc_unlock(gc); } /***************************************************************************** diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 442435ded020..13cc120cf11f 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -1204,6 +1204,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip) guard(mutex)(&chip->i2c_lock); + if (chip->client->irq > 0) + enable_irq(chip->client->irq); regcache_cache_only(chip->regmap, false); regcache_mark_dirty(chip->regmap); ret = pca953x_regcache_sync(chip); @@ -1216,6 +1218,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip) static void pca953x_save_context(struct pca953x_chip *chip) { guard(mutex)(&chip->i2c_lock); + + /* Disable IRQ to prevent early triggering while regmap "cache only" is on */ + if (chip->client->irq > 0) + disable_irq(chip->client->irq); regcache_cache_only(chip->regmap, true); } diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 6895b65c86af..d27bfac6c9f5 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -823,6 +823,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev) struct gpio_irq_chip *irq; struct tegra_gpio *gpio; struct device_node *np; + struct resource *res; char **names; int err; @@ -842,19 +843,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev) gpio->num_banks++; /* get register apertures */ - gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security"); - if (IS_ERR(gpio->secure)) { - gpio->secure = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(gpio->secure)) - return PTR_ERR(gpio->secure); - } - - gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio"); - if (IS_ERR(gpio->base)) { - gpio->base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(gpio->base)) - return PTR_ERR(gpio->base); - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->secure = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->secure)) + return PTR_ERR(gpio->secure); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + gpio->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->base)) + return PTR_ERR(gpio->base); err = platform_irq_count(pdev); if (err < 0) diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index 13407fd4f0eb..eab6726953b4 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -401,10 +401,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file, char buf[32], *trimmed; int ret, dir, val = 0; - ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (count >= sizeof(buf)) + return -EINVAL; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret < 0) return ret; + buf[ret] = '\0'; + trimmed = strim(buf); if (strcmp(trimmed, "input") == 0) { @@ -623,12 +628,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file, char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2]; int ret; + if (count >= sizeof(buf)) + return -EINVAL; + ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos, user_buf, count); if (ret < 0) return ret; - buf[strlen(buf) - 1] = '\0'; + buf[ret] = '\0'; ret = gpiod_set_consumer_name(data->ad.desc, buf); if (ret) diff --git a/drivers/gpio/gpio-zynq.c 
b/drivers/gpio/gpio-zynq.c index be81fa2b17ab..3dae63f3ea21 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -1011,6 +1011,7 @@ static void zynq_gpio_remove(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); + device_init_wakeup(&pdev->dev, 0); gpiochip_remove(&gpio->chip); device_set_wakeup_capable(&pdev->dev, 0); pm_runtime_disable(&pdev->dev); diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c index 08205f355ceb..120d1ec5af3b 100644 --- a/drivers/gpio/gpiolib-devres.c +++ b/drivers/gpio/gpiolib-devres.c @@ -317,11 +317,15 @@ EXPORT_SYMBOL_GPL(devm_gpiod_put); * @dev: GPIO consumer * @desc: GPIO descriptor to remove resource management from * + * *DEPRECATED* + * This function should not be used. It's been provided as a workaround for + * resource ownership issues in the regulator framework and should be replaced + * with a better solution. + * * Remove resource management from a GPIO descriptor. This is needed when * you want to hand over lifecycle management of a descriptor to another * mechanism. */ - void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc) { int ret; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index eb667f8f1ead..65f6a7177b78 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -193,6 +193,8 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np, */ { "himax,hx8357", "gpios-reset", false }, { "himax,hx8369", "gpios-reset", false }, +#endif +#if IS_ENABLED(CONFIG_MTD_NAND_JZ4780) /* * The rb-gpios semantics was undocumented and qi,lb60 (along with * the ingenic driver) got it wrong. The active state encodes the @@ -266,6 +268,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np, { "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" }, #endif +#if IS_ENABLED(CONFIG_MMC_ATMELMCI) + { "atmel,hsmci", "cd-gpios", "cd-inverted" }, +#endif #if IS_ENABLED(CONFIG_PCI_IMX6) { "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" }, { "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" }, @@ -292,9 +297,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np, { "regulator-gpio", "enable-gpio", "enable-active-high" }, { "regulator-gpio", "enable-gpios", "enable-active-high" }, #endif -#if IS_ENABLED(CONFIG_MMC_ATMELMCI) - { "atmel,hsmci", "cd-gpios", "cd-inverted" }, -#endif }; unsigned int i; bool active_high; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index b8197502a5ac..113c5d90f2df 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -742,6 +742,12 @@ EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask); bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset) { + /* + * hog pins are requested before registering GPIO chip + */ + if (!gc->gpiodev) + return true; + /* No mask means all valid */ if (likely(!gc->gpiodev->valid_mask)) return true; @@ -2879,7 +2885,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) * output-only, but if there is then not even a .set() operation it * is pretty tricky to drive the output line. 
*/ - if (!guard.gc->set && !guard.gc->direction_output) { + if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) { gpiod_warn(desc, "%s: missing set() and direction_output() operations\n", __func__); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2cba2b6ebe1c..f01925ed8176 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -188,7 +188,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS bool "Enable refcount backtrace history in the DP MST helpers" depends on STACKTRACE_SUPPORT select STACKDEPOT - depends on DRM_KMS_HELPER + select DRM_KMS_HELPER depends on DEBUG_KERNEL depends on EXPERT help diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c index c98c647f981d..54cde090c3f4 100644 --- a/drivers/gpu/drm/adp/adp_drv.c +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -121,7 +121,6 @@ struct adp_drv_private { dma_addr_t mask_iova; int be_irq; int fe_irq; - spinlock_t irq_lock; struct drm_pending_vblank_event *event; }; @@ -288,6 +287,7 @@ static void adp_crtc_atomic_enable(struct drm_crtc *crtc, writel(BIT(0), adp->be + ADBE_BLEND_EN3); writel(BIT(0), adp->be + ADBE_BLEND_BYPASS); writel(BIT(0), adp->be + ADBE_BLEND_EN4); + drm_crtc_vblank_on(crtc); } static void adp_crtc_atomic_disable(struct drm_crtc *crtc, @@ -310,6 +310,7 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { u32 frame_num = 1; + unsigned long flags; struct adp_drv_private *adp = crtc_to_adp(crtc); struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); u64 new_size = ALIGN(new_state->mode.hdisplay * @@ -330,13 +331,19 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc, } writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO); //FIXME: use adbe flush interrupt - spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { - drm_crtc_vblank_get(crtc); - adp->event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + + crtc->state->event = NULL; + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, event); + else + adp->event = event; + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } - crtc->state->event = NULL; - spin_unlock_irq(&crtc->dev->event_lock); } static const struct drm_crtc_funcs adp_crtc_funcs = { @@ -482,8 +489,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg) u32 int_status; u32 int_ctl; - spin_lock(&adp->irq_lock); - int_status = readl(adp->fe + ADP_INT_STATUS); if (int_status & ADP_INT_STATUS_VBLANK) { drm_crtc_handle_vblank(&adp->crtc); @@ -501,7 +506,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg) writel(int_status, adp->fe + ADP_INT_STATUS); - spin_unlock(&adp->irq_lock); return IRQ_HANDLED; } @@ -512,8 +516,7 @@ static int adp_drm_bind(struct device *dev) struct adp_drv_private *adp = to_adp(drm); int err; - adp_disable_vblank(adp); - writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); + writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL); adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0); if (IS_ERR(adp->next_bridge)) { @@ -567,8 +570,6 @@ static int adp_probe(struct platform_device *pdev) if (IS_ERR(adp)) return PTR_ERR(adp); - spin_lock_init(&adp->irq_lock); - dev_set_drvdata(&pdev->dev, &adp->drm); err = adp_parse_of(pdev, adp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6d83ccfa42ee..c3641331d4de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -353,7 +353,6 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, AMDGPU_CP_KIQ_IRQ_LAST }; -#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */ #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ #define MAX_KIQ_REG_TRY 1000 @@ -1124,6 +1123,7 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; + suspend_state_t last_suspend_state; enum pp_mp1_state mp1_state; struct amdgpu_doorbell_index doorbell_index; @@ -1614,11 +1614,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); -void amdgpu_choose_low_power_state(struct amdgpu_device *adev); #else static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } -static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { } #endif void amdgpu_register_gpu_instance(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index b7f8f2ff143d..707e131f89d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) #endif /* CONFIG_AMD_PMC */ } -/** - * amdgpu_choose_low_power_state - * - * @adev: amdgpu_device_pointer - * - * Choose the target low power state for the GPU - */ -void amdgpu_choose_low_power_state(struct amdgpu_device *adev) -{ - if (adev->in_runpm) - return; - - if (amdgpu_acpi_is_s0ix_active(adev)) - adev->in_s0ix = true; - else if (amdgpu_acpi_is_s3_active(adev)) - adev->in_s3 = true; -} - #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index cfdf558b48b6..02138aa55793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_exec exec; int r; - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = amdgpu_vm_lock_pd(vm, &exec, 0); if (likely(!r)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a30111d2c3ea..f8b3e04d71ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3510,6 +3510,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_device_mem_scratch_fini(adev); amdgpu_ib_pool_fini(adev); amdgpu_seq64_fini(adev); + amdgpu_doorbell_fini(adev); } if (adev->ip_blocks[i].version->funcs->sw_fini) { r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); @@ -3643,6 +3644,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev, adev->ip_blocks[i].version->type)) continue; + /* Since we skip suspend for S0i3, we need to cancel the delayed + * idle work here as the suspend callback never gets called. 
+ */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX && + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) + cancel_delayed_work_sync(&adev->gfx.idle_work); /* skip suspend of gfx/mes and psp for S0ix * gfx is in gfxoff state, so on resume it will exit gfxoff just * like at runtime. PSP is also part of the always on hardware @@ -4851,7 +4859,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_doorbell_fini(adev); drm_dev_exit(idx); } @@ -4900,28 +4907,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) * @data: data * * This function is called when the system is about to suspend or hibernate. - * It is used to evict resources from the device before the system goes to - * sleep while there is still access to swap. + * It is used to set the appropriate flags so that eviction can be optimized + * in the pm prepare callback. */ static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, void *data) { struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb); - int r; switch (mode) { case PM_HIBERNATION_PREPARE: adev->in_s4 = true; - fallthrough; - case PM_SUSPEND_PREPARE: - r = amdgpu_device_evict_resources(adev); - /* - * This is considered non-fatal at this time because - * amdgpu_device_prepare() will also fatally evict resources. - * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781 - */ - if (r) - drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r); + break; + case PM_POST_HIBERNATION: + adev->in_s4 = false; break; } @@ -4942,15 +4941,13 @@ int amdgpu_device_prepare(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); int i, r; - amdgpu_choose_low_power_state(adev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* Evict the majority of BOs before starting suspend sequence */ r = amdgpu_device_evict_resources(adev); if (r) - goto unprepare; + return r; flush_delayed_work(&adev->gfx.gfx_off_delay_work); @@ -4961,15 +4958,10 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); if (r) - goto unprepare; + return r; } return 0; - -unprepare: - adev->in_s0ix = adev->in_s3 = adev->in_s4 = false; - - return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index dc2713ec95a5..9e738fae2b74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin"); MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin"); MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin"); MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin"); #define mmIP_DISCOVERY_VERSION 0x16A00 #define mmRCC_CONFIG_MEMSIZE 0xde3 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 9f627caedc3f..44e120f9f764 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -43,6 +43,29 @@ #include <linux/dma-fence-array.h> #include <linux/pci-p2pdma.h> +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops; + +/** + * dma_buf_attach_adev - Helper to get adev of an attachment + * + * @attach: attachment + * 
+ * Returns: + * A struct amdgpu_device * if the attaching device is an amdgpu device or + * partition, NULL otherwise. + */ +static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach) +{ + if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) { + struct drm_gem_object *obj = attach->importer_priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + return amdgpu_ttm_adev(bo->tbo.bdev); + } + + return NULL; +} + /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * @@ -54,11 +77,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { + struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach); struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) + if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) && + pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; amdgpu_vm_bo_update_shared(bo); @@ -75,11 +100,35 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, */ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) { - struct drm_gem_object *obj = attach->dmabuf->priv; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct dma_buf *dmabuf = attach->dmabuf; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv); + u32 domains = bo->allowed_domains; + + dma_resv_assert_held(dmabuf->resv); + + /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP + * support if all attachments can do P2P. If any attachment can't do + * P2P just pin into GTT instead. + * + * To avoid with conflicting pinnings between GPUs and RDMA when move + * notifiers are disabled, only allow pinning in VRAM when move + * notiers are enabled. 
+ */ + if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { + domains &= ~AMDGPU_GEM_DOMAIN_VRAM; + } else { + list_for_each_entry(attach, &dmabuf->attachments, node) + if (!attach->peer2peer) + domains &= ~AMDGPU_GEM_DOMAIN_VRAM; + } - /* pin buffer into GTT */ - return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); + if (domains & AMDGPU_GEM_DOMAIN_VRAM) + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + + if (WARN_ON(!domains)) + return -EINVAL; + + return amdgpu_bo_pin(bo, domains); } /** @@ -134,9 +183,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (r) return ERR_PTR(r); - - } else if (bo->tbo.resource->mem_type != TTM_PL_TT) { - return ERR_PTR(-EBUSY); } switch (bo->tbo.resource->mem_type) { @@ -153,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, break; case TTM_PL_VRAM: + /* XGMI-accessible memory should never be DMA-mapped */ + if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible( + dma_buf_attach_adev(attach), bo))) + return ERR_PTR(-EINVAL); + r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, bo->tbo.base.size, attach->dev, dir, &sgt); @@ -184,7 +235,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - if (sgt->sgl->page_link) { + if (sg_page(sgt->sgl)) { dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); @@ -459,6 +510,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct drm_gem_object *obj = &bo->tbo.base; struct drm_gem_object *gobj; + if (!adev) + return false; + if (obj->import_attach) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 26bf896f1444..72c807f5822e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2548,8 +2548,20 @@ static int amdgpu_pmops_suspend(struct device *dev) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) adev->in_s3 = true; - if (!adev->in_s0ix && !adev->in_s3) + if (!adev->in_s0ix && !adev->in_s3) { + /* don't allow going deep first time followed by s2idle the next time */ + if (adev->last_suspend_state != PM_SUSPEND_ON && + adev->last_suspend_state != pm_suspend_target_state) { + drm_err_once(drm_dev, "Unsupported suspend state %d\n", + pm_suspend_target_state); + return -EINVAL; + } return 0; + } + + /* cache the state last used for suspend */ + adev->last_suspend_state = pm_suspend_target_state; + return amdgpu_device_suspend(drm_dev, true); } @@ -2603,13 +2615,8 @@ static int amdgpu_pmops_freeze(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - int r; - - r = amdgpu_device_resume(drm_dev, true); - adev->in_s4 = false; - return r; + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) @@ -2622,9 +2629,6 @@ static int amdgpu_pmops_poweroff(struct device *dev) static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - - adev->in_s4 = false; return amdgpu_device_resume(drm_dev, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 72af5e5a894a..cf2df7790077 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1438,9 +1438,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct drm_gpu_scheduler *sched = &ring->sched; struct drm_sched_entity entity; + static atomic_t counter; struct dma_fence *f; struct amdgpu_job *job; struct amdgpu_ib *ib; + void *owner; int i, r; /* Initialize the scheduler entity */ @@ -1451,9 +1453,15 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) goto err; } - r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL, - 64, 0, - &job); + /* + * Use some unique dummy value as the owner to make sure we execute + * the cleaner shader on each submission. The value just need to change + * for each submission and is otherwise meaningless. + */ + owner = (void *)(unsigned long)atomic_inc_return(&counter); + + r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner, + 64, 0, &job); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 464625282872..ecb74ccf1d90 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -699,12 +699,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, bool all_hub, uint32_t inst) { - u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : - adev->usec_timeout; struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; unsigned int ndw; - int r; + int r, cnt = 0; uint32_t seq; /* @@ -761,10 +759,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, amdgpu_ring_commit(ring); spin_unlock(&adev->gfx.kiq[inst].ring_lock); - if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) { + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY && + !amdgpu_reset_pending(adev->reset_domain)) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) { dev_err(adev->dev, "timeout waiting for kiq fence\n"); r = -ETIME; - } + } else + r = 0; } error_unlock_reset: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 80cd6f5273db..0b9987781f76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) * When GTT is just an alternative to VRAM make sure that we * only use it as fallback and still try to fill up VRAM first. 
*/ - if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && - !(adev->flags & AMD_IS_APU)) + if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) && + domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) places[c].flags |= TTM_PL_FLAG_FALLBACK; c++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index cdcdae7f71ce..83adf81defc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -66,7 +66,6 @@ #define VCN_ENC_CMD_REG_WAIT 0x0000000c #define VCN_AON_SOC_ADDRESS_2_0 0x1f800 -#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define VCN_VID_IP_ADDRESS_2_0 0x0 #define VCN_AON_IP_ADDRESS_2_0 0x30000 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 6da8994e0469..2d7f82e98df9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -24,6 +24,7 @@ #include <linux/dma-mapping.h> #include <drm/ttm/ttm_range_manager.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vm.h" @@ -907,6 +908,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) struct ttm_resource_manager *man = &mgr->manager; int err; + man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size); + if (IS_ERR(man->cg)) + return PTR_ERR(man->cg); ttm_resource_manager_init(man, &adev->mman.bdev, adev->gmc.real_vram_size); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index a63ce747863f..23e6a05359c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6114,7 +6114,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -6192,7 +6192,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -6269,7 +6269,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -6644,7 +6644,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d57db42f9536..2a5c2a1ae3c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -2428,7 +2428,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -2472,7 +2472,7 @@ static int gfx_v11_0_config_pfp_cache(struct 
amdgpu_device *adev, uint64_t addr) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -2517,7 +2517,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -3153,7 +3153,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); @@ -3371,7 +3371,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); @@ -4541,7 +4541,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) if (r) return r; - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index e7b58e470292..62a257a4a3e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -2324,7 +2324,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); @@ -2468,7 +2468,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); @@ -3426,7 +3426,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev) if (r) return r; - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 95d894a231fc..809b3a882d0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* This is necessary for SRIOV as well as for GFXOFF to function * properly under bare metal @@ -969,7 +969,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) adev->hdp.funcs->init_registers(adev); /* Flush HDP after it is initialized */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index ad099f136f84..fec9a007533a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* This is necessary for SRIOV as well as for GFXOFF to function * properly under bare metal @@ -752,6 +752,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; + /* The mall_size is already calculated as mall_size_per_umc * num_umc. + * However, for gfx1151, which features a 2-to-1 UMC mapping, + * the result must be multiplied by 2 to determine the actual mall size. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 5, 1): + adev->gmc.mall_size *= 2; + break; + default: + break; + } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): @@ -899,7 +911,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) return r; /* Flush HDP after it is initialized */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index 05c026d0b0d9..c6f290704d47 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -297,7 +297,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* This is necessary for SRIOV as well as for GFXOFF to function * properly under bare metal @@ -881,7 +881,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) return r; /* Flush HDP after it is initialized */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 783e0c3b86b4..5effe8327d29 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -2435,7 +2435,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) adev->hdp.funcs->init_registers(adev); /* After HDP is initialized, flush HDP.*/ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index f1dc13b3ab38..cbbeadeb53f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -41,7 +41,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c index 43195c079748..086a647308df 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -32,7 +32,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c index fcb8dd2876bc..40940b4ab400 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c @@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, if (!ring || !ring->funcs->emit_wreg) { WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + if (amdgpu_sriov_vf(adev)) { + /* this is fine because SR_IOV doesn't remap the register */ + RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. 
+ */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); + } } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c index a88d25a06c29..6ccd31c8bc69 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c @@ -35,7 +35,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c index 49f7eb4fbd11..2c9239a22f39 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -32,7 +32,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e65916ada23b..ef9538fbbf53 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -894,6 +894,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev) { int pipe; + /* return early if we have already fetched these */ + if (adev->mes.sched_version && adev->mes.kiq_version) + return; + /* get MES scheduler/KIQ versions */ mutex_lock(&adev->srbm_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 183dd3346da5..e6ab617b9a40 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -1392,17 +1392,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev, mes_v12_0_queue_init_register(ring); } - /* get MES scheduler/KIQ versions */ - mutex_lock(&adev->srbm_mutex); - soc21_grbm_select(adev, 3, pipe, 0, 0); + if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) || + ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) { + /* get MES scheduler/KIQ versions */ + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, 3, pipe, 0, 0); - if (pipe == AMDGPU_MES_SCHED_PIPE) - adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); - else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) - adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); + if (pipe == AMDGPU_MES_SCHED_PIPE) + adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); + else if (pipe == 
AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) + adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); - soc21_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c index 2ece3ae75ec1..bed5ef4d8788 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -360,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev, *flags |= AMD_CG_SUPPORT_BIF_LS; } -#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) +#define MMIO_REG_HOLE_OFFSET 0x44000 static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index bb5dfc410a66..215543575f47 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -533,7 +533,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); vfree(buf); drm_dev_exit(idx); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index cc621064610f..afdf8ce3b4c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -610,7 +610,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); vfree(buf); drm_dev_exit(idx); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 7c49c3f3c388..256288c6cd78 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -498,7 +498,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); vfree(buf); drm_dev_exit(idx); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 8e7a36f26e9c..b8d835c9e17e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -39,6 +39,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index d716510b8dd6..3eec1b8feaee 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -39,6 +39,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 22ae1939476f..0b19f0ab4480 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -40,6 +40,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define 
mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index c6f6392c1c20..1f777c125b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -46,6 +46,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define VCN_HARVEST_MMSCH 0 @@ -614,7 +615,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( - VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG), + adev->gfx.config.gb_addr_config, 0, indirect); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 3e176b4b7c69..012f6ea928ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -45,6 +45,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = { SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index ba603b2246e2..f11df9c2ec13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -46,6 +46,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000) +#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000) #define VCN_HARVEST_MMSCH 0 @@ -1022,6 +1023,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | VCN_RB1_DB_CTRL__EN_MASK); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL); + return 0; } @@ -1204,6 +1209,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst) WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index d99d05f42f1d..b90da3d3e140 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -533,7 +533,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, /* VCN global tiling registers */ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( - VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG), + adev->gfx.config.gb_addr_config, 0, indirect); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 581d8629b9d9..e0e84ef7f568 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -503,6 +503,52 @@ static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst) } /** + * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode + * + * @vinst: VCN instance + * @new_state: pause state + * + * Pause dpg mode for VCN block + 
*/ +static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, + struct dpg_pause_state *new_state) +{ + struct amdgpu_device *adev = vinst->adev; + uint32_t reg_data = 0; + int vcn_inst; + + vcn_inst = GET_INST(VCN, vinst->inst); + + /* pause/unpause if state is changed */ + if (vinst->pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", + vinst->pause_state.fw_based, new_state->fw_based, + new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); + reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + } else { + /* unpause DPG, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + } + vinst->pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + + +/** * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode * * @vinst: VCN instance @@ -518,6 +564,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; int vcn_inst; uint32_t tmp; @@ -582,6 +629,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, if (indirect) amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + /* Pause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); @@ -775,9 +825,13 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) int inst_idx = vinst->inst; uint32_t tmp; int vcn_inst; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; vcn_inst = GET_INST(VCN, inst_idx); + /* Unpause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 86d8bc10d90a..9b3510e53112 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -239,6 +239,13 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] = .max_pixels_per_frame = 4096 * 4096, .max_level = 186, }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 0, + }, }; static const struct amdgpu_video_codecs cz_video_codecs_decode = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e477d7509646..9bbee484d57c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1983,9 +1983,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (kfd_dbg_has_ttmps_always_setup(dev->gpu)) dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; - if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) - 
dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) || KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4)) @@ -2001,7 +1998,11 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; - dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; + if (!amdgpu_sriov_vf(dev->gpu->adev)) + dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; + + if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) + dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d0d8ad5368c3..a187cdb43e7e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -372,6 +372,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { + if (new_state->stream->adjust.timing_adjust_pending) + return true; if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) @@ -673,15 +675,21 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc->dm_irq_params.stream && - acrtc->dm_irq_params.vrr_params.supported && - acrtc->dm_irq_params.freesync_config.state == - VRR_STATE_ACTIVE_VARIABLE) { + acrtc->dm_irq_params.vrr_params.supported) { + bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; + bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; + bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; + mod_freesync_handle_v_update(adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); + /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ + if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { + dc_stream_adjust_vmin_vmax(adev->dm.dc, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } } /* @@ -1726,9 +1734,30 @@ static const struct dmi_system_id dmi_quirk_table[] = { .callback = edp0_on_dp1_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), }, }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"), + }, + }, {} /* TODO: refactor this from a fixed table to a dynamic option */ }; @@ -1899,26 +1928,6 @@ static enum 
dmub_ips_disable_type dm_get_default_ips_mode( switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 5, 0): case IP_VERSION(3, 6, 0): - /* - * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to - * cause a hard hang. A fix exists for newer PMFW. - * - * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest - * IPS state in all cases, except for s0ix and all displays off (DPMS), - * where IPS2 is allowed. - * - * When checking pmfw version, use the major and minor only. - */ - if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) - ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; - else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) - /* - * Other ASICs with DCN35 that have residency issues with - * IPS2 in idle. - * We want them to use IPS2 only in display off cases. - */ - ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; - break; case IP_VERSION(3, 5, 1): ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; break; @@ -3334,16 +3343,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, for (k = 0; k < dc_state->stream_count; k++) { bundle->stream_update.stream = dc_state->streams[k]; - for (m = 0; m < dc_state->stream_status->plane_count; m++) { + for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { bundle->surface_updates[m].surface = - dc_state->stream_status->plane_states[m]; + dc_state->stream_status[k].plane_states[m]; bundle->surface_updates[m].surface->force_full_update = true; } update_planes_and_stream_adapter(dm->dc, UPDATE_TYPE_FULL, - dc_state->stream_status->plane_count, + dc_state->stream_status[k].plane_count, dc_state->streams[k], &bundle->stream_update, bundle->surface_updates); @@ -3460,11 +3469,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) return 0; } - - /* leave display off for S4 sequence */ - if (adev->in_s4) - return 0; - /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_state_release(dm_state->context); dm_state->context = dc_state_create(dm->dc, NULL); @@ -6500,12 +6504,12 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, const struct drm_display_mode *native_mode, bool scale_enabled) { - if (scale_enabled) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); - } else if (native_mode->clock == drm_mode->clock && - native_mode->htotal == drm_mode->htotal && - native_mode->vtotal == drm_mode->vtotal) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); + if (scale_enabled || ( + native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal)) { + if (native_mode->crtc_clock) + copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else { /* no scaling nor amdgpu inserted, no need to patch */ } @@ -11022,6 +11026,9 @@ static bool should_reset_plane(struct drm_atomic_state *state, state->allow_modeset) return true; + if (amdgpu_in_reset(adev) && state->allow_modeset) + return true; + /* Exit early if we know that we're adding or removing the plane. */ if (old_plane_state->crtc != new_plane_state->crtc) return true; @@ -12739,7 +12746,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( * Transient states before tunneling is enabled could * lead to this error. We can ignore this for now. 
*/ - if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { + if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", payload->address, payload->length, p_notify->result); @@ -12748,22 +12755,15 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( goto out; } + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; + if (adev->dm.dmub_notify->aux_reply.command & 0xF0) + /* The reply is stored in the top nibble of the command. */ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; - if (!payload->write && p_notify->aux_reply.length && - (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { - - if (payload->length != p_notify->aux_reply.length) { - DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", - p_notify->aux_reply.length, - payload->address, payload->length); - *operation_result = AUX_RET_ERROR_INVALID_REPLY; - goto out; - } - + /* a write request may also receive a byte indicating the number of partially written bytes */ + if (p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); - } /* success */ ret = p_notify->aux_reply.length; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 36a830a7440f..e8bdd7f0c460 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state) * * Panel Replay and PSR SU * - Enable when: + * - VRR is disabled * - vblank counter is disabled * - entry is allowed: usermode demonstrates an adequate number of fast * commits) @@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( bool is_sr_active = (link->replay_settings.replay_allow_active || link->psr_settings.psr_allow_active); bool is_crc_window_active = false; + bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc); #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY is_crc_window_active = amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); #endif - if (link->replay_settings.replay_feature_enabled && + if (link->replay_settings.replay_feature_enabled && !vrr_active && allow_sr_entry && !is_sr_active && !is_crc_window_active) { amdgpu_dm_replay_enable(vblank_work->stream, true); } else if (vblank_enabled) { if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) amdgpu_dm_psr_disable(vblank_work->stream, false); - } else if (link->psr_settings.psr_feature_enabled && + } else if (link->psr_settings.psr_feature_enabled && !vrr_active && allow_sr_entry && !is_sr_active && !is_crc_window_active) { struct amdgpu_dm_connector *aconn = @@ -244,6 +246,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); struct amdgpu_display_manager *dm = vblank_work->dm; + struct amdgpu_device *adev = drm_to_adev(dm->ddev); + int r; mutex_lock(&dm->dc_lock); @@ -271,8 +275,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) vblank_work->acrtc->dm_irq_params.allow_sr_entry); } - if (dm->active_vblank_irq_count == 0) + if (dm->active_vblank_irq_count == 0) { + r = amdgpu_dpm_pause_power_profile(adev, true); + if (r) + dev_warn(adev->dev, "failed to set default power profile mode\n"); dc_allow_idle_optimizations(dm->dc,
true); + r = amdgpu_dpm_pause_power_profile(adev, false); + if (r) + dev_warn(adev->dev, "failed to restore the power profile mode\n"); + } mutex_unlock(&dm->dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5198a079b463..8f22ad966543 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -173,6 +173,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int conn_index = aconnector->base.index; guard(mutex)(&hdcp_w->mutex); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); hdcp_w->aconnector[conn_index] = aconnector; memset(&link_adjust, 0, sizeof(link_adjust)); @@ -220,7 +223,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int conn_index = aconnector->base.index; guard(mutex)(&hdcp_w->mutex); - hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -236,7 +238,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } process_output(hdcp_w); } @@ -254,6 +259,10 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } } process_output(hdcp_w); @@ -488,6 +497,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_work = handle; struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; int link_index = aconnector->dc_link->link_index; + unsigned int conn_index = aconnector->base.index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -544,7 +554,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) guard(mutex)(&hdcp_w->mutex); mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); - + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; process_output(hdcp_w); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 2cd35392e2da..1395a748d726 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -918,7 +918,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) { struct drm_connector *connector = data; struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); - unsigned char start = block * EDID_LENGTH; + unsigned short start = block * EDID_LENGTH; struct edid *edid; int r; diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 7ceedf626d23..5cdbc86ef8f5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -51,6 +51,9 @@ #define PEAK_FACTOR_X1000 1006 +/* + * This function handles both native AUX and I2C-Over-AUX transactions. + */ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -59,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; + uint8_t copy[16]; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -74,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; + if (payload.write) { + memcpy(copy, msg->buffer, msg->size); + payload.data = copy; + } + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); @@ -87,15 +96,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (adev->dm.aux_hpd_discon_quirk) { if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && operation_result == AUX_RET_ERROR_HPD_DISCON) { - result = 0; + result = msg->size; operation_result = AUX_RET_SUCCESS; } } - if (payload.write && result >= 0) - result = msg->size; + /* + * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases + */ + if (payload.write && result >= 0) { + if (result) { + /* one byte indicating the number of partially written bytes */ + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + result = payload.data[0]; + } else if (!payload.reply[0]) + /* I2C_ACK | AUX_ACK */ + result = msg->size; + } - if (result < 0) + if (result < 0) { switch (operation_result) { case AUX_RET_SUCCESS: break; @@ -114,6 +133,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } + drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + } + + if (payload.reply[0]) + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + payload.reply[0]); + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 28d1353f403d..ba4ce8a63158 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -439,9 +439,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * Don't adjust DRR while there are bandwidth optimizations pending to * avoid conflicting with firmware updates.
*/ - if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->ctx->dce_version > DCE_VERSION_MAX) { + if (dc->optimized_required || dc->wm_optimized_required) { + stream->adjust.timing_adjust_pending = true; return false; + } + } dc_exit_ips_for_hw_access(dc); @@ -3168,7 +3171,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->crtc_timing_adjust) { if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || - stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max) + stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || + stream->adjust.timing_adjust_pending) update->crtc_timing_adjust->timing_adjust_pending = true; stream->adjust = *update->crtc_timing_adjust; update->crtc_timing_adjust->timing_adjust_pending = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 0c8ec30ea672..731fbd4bc600 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -910,7 +910,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm } //TODO : Could be possibly moved to a common helper layer. -static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id) +static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id) { int i, j; @@ -918,10 +918,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str return false; for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { - *plane_id = (i << 16) | j; - return true; + if (context->streams[i]->stream_id == stream_id) { + for (j = 0; j < context->stream_status[i].plane_count; j++) { + if (context->stream_status[i].plane_states[j] == plane) { + *plane_id = (i << 16) | j; + return true; + } } } } @@ -944,14 +946,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d return location; } -static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, +static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context) { unsigned int plane_id; int i = 0; int location = -1; - if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) { + if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) { ASSERT(false); return -1; } @@ -1037,7 +1039,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; } else { for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) { - disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context); + disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = 
dml_dispcfg->num_planes++; @@ -1048,7 +1050,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; - if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) + if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; /* apply forced pstate policy */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index be54f0e696ce..ed6584535e89 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -2,6 +2,7 @@ // // Copyright 2024 Advanced Micro Devices, Inc. +#include <linux/vmalloc.h> #include "dml2_internal_types.h" #include "dml_top.h" @@ -13,11 +14,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) { - *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + *dml_ctx = vzalloc(sizeof(struct dml2_context)); if (!(*dml_ctx)) return false; - (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); + (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance)); if (!((*dml_ctx)->v21.dml_init.dml2_instance)) return false; @@ -27,7 +28,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) (*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config; (*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config; - (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); + (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming)); if (!((*dml_ctx)->v21.mode_programming.programming)) return false; @@ -86,6 +87,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co /* Store configuration options */ (*dml_ctx)->config = *config; + DC_FP_START(); + /*Initialize SOCBB and DCNIP params */ dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc); dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc); @@ -96,6 +99,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co /*Initialize DML21 instance */ dml2_initialize_instance(&(*dml_ctx)->v21.dml_init); + + DC_FP_END(); } bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) @@ -111,8 +116,8 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s void dml21_destroy(struct dml2_context *dml2) { - kfree(dml2->v21.dml_init.dml2_instance); - kfree(dml2->v21.mode_programming.programming); + vfree(dml2->v21.dml_init.dml2_instance); + vfree(dml2->v21.mode_programming.programming); } static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state 
*context, struct resource_context *out_new_hw_state, @@ -229,7 +234,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s if (!result) return false; + DC_FP_START(); result = dml2_build_mode_programming(mode_programming); + DC_FP_END(); if (!result) return false; @@ -272,7 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co mode_support->dml2_instance = dml_init->dml2_instance; dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; + DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); + DC_FP_END(); if (!is_supported) return false; @@ -284,10 +293,11 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml bool out = false; /* Use dml_validate_only for fast_validate path */ - if (fast_validate) { + if (fast_validate) out = dml21_check_mode_support(in_dc, context, dml_ctx); - } else + else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); + return out; } @@ -426,8 +436,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming; + DC_FP_START(); + /* need to initialize copied instance for internal references to be correct */ dml2_initialize_instance(&dst_dml_ctx->v21.dml_init); + + DC_FP_END(); } bool dml21_create_copy(struct dml2_context **dst_dml_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 2061d43b92e1..ab6baf269801 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -973,7 +973,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out) +static struct scaler_data *get_scaler_data_for_plane( + const struct dc_plane_state *in, + struct dc_state *context) { int i; struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; @@ -994,7 +996,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc } ASSERT(i < MAX_PIPES); - memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out)); + return &temp_pipe->plane_res.scl_data; } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, @@ -1057,11 +1059,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out const struct dc_plane_state *in, struct dc_state *context, const struct soc_bounding_box_st *soc) { - struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL); - if (!scaler_data) - return; - - get_scaler_data_for_plane(in, context, scaler_data); + struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context); out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; @@ -1126,8 +1124,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->DynamicMetadataTransmittedBytes[location] = 0; out->NumberOfCursors[location] = 1; - - kfree(scaler_data); } static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 
939ee0708bd2..e89571874185 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -24,6 +24,8 @@ * */ +#include <linux/vmalloc.h> + #include "display_mode_core.h" #include "dml2_internal_types.h" #include "dml2_utils.h" @@ -732,17 +734,22 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2 return out; } + DC_FP_START(); + /* Use dml_validate_only for fast_validate path */ if (fast_validate) out = dml2_validate_only(context); else out = dml2_validate_and_build_resource(in_dc, context); + + DC_FP_END(); + return out; } static inline struct dml2_context *dml2_allocate_memory(void) { - return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + return (struct dml2_context *) vzalloc(sizeof(struct dml2_context)); } static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) @@ -779,11 +786,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op break; } + DC_FP_START(); + initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip); initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); + + DC_FP_END(); } bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) @@ -812,7 +823,7 @@ void dml2_destroy(struct dml2_context *dml2) if (dml2->architecture == dml2_architecture_21) dml21_destroy(dml2); - kfree(dml2); + vfree(dml2); } void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 1236e0f9a256..712aff7e17f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes( enum dc_cursor_color_format color_format = cursor_attributes->color_format; int cur_rom_en = 0; - // DCN4 should always do Cursor degamma for Cursor Color modes if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - cur_rom_en = 1; + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } } REG_UPDATE_3(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 5489f3d431f6..3af6a3402b89 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -1980,9 +1980,9 @@ void dcn401_program_pipe( dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); } - if (pipe_ctx->update_flags.raw || - (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) || - pipe_ctx->stream->update_flags.raw) + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + pipe_ctx->stream->update_flags.raw)) dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context); if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 268626e73c54..53c961f86d43 100644 --- 
a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) void link_set_all_streams_dpms_off_for_link(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_stream_state *streams[MAX_PIPES]; struct dc_state *state = link->dc->current_state; uint8_t count; int i; @@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link) link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + /* The subsequent call to dc_commit_updates_for_stream for a full update + * will release the current state and swap to a new state. Releasing the + * current state results in the stream pointers in the pipe_ctx structs + * being zeroed. Hence, cache all streams prior to dc_commit_updates_for_stream. + */ + for (i = 0; i < count; i++) + streams[i] = pipes[i]->stream; + for (i = 0; i < count; i++) { - stream_update.stream = pipes[i]->stream; + stream_update.stream = streams[i]; dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, - pipes[i]->stream, &stream_update, + streams[i], &stream_update, state); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 34d2e097ca2e..5a5d48fadbf2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -35,6 +35,17 @@ #define DC_LOGGER \ link->ctx->logger +static void get_default_8b_10b_lttpr_aux_rd_interval( + union training_aux_rd_interval *training_rd_interval) +{ + /* LTTPRs are required to program DPCD 0000Eh to 0x4 (16ms) upon AUX + * read reply to this register. Since old sinks with DPCD rev 1.1 + * and earlier may not support this register, assume the mandatory + * value is programmed by the LTTPR to avoid AUX timeout issues.
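+ * (The default of 0x4 corresponds to the 16 ms wait: the callers below multiply TRAINIG_AUX_RD_INTERVAL by 4000 us.)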
+ */ + training_rd_interval->raw = 0x4; +} + static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, const struct dc_link_settings *link_settings, enum lttpr_mode lttpr_mode) @@ -43,17 +54,22 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, uint32_t wait_in_micro_secs = 100; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT) - wait_in_micro_secs = 400; - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + else if (dp_is_lttpr_present(link)) + get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval); + + if (training_rd_interval.raw != 0) { + if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT) + wait_in_micro_secs = 400; + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } } return wait_in_micro_secs; } @@ -71,13 +87,15 @@ static uint32_t get_eq_training_aux_rd_interval( DP_128B132B_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); - } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); + } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + else if (dp_is_lttpr_present(link)) + get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval); } switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 2a59cc61ed8c..944650cb13de 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2114,8 +2114,6 @@ static bool dcn32_resource_construct( #define REG_STRUCT dccg_regs dccg_regs_init(); - DC_FP_START(); - ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_dcn32; @@ -2501,14 +2499,10 @@ static bool dcn32_resource_construct( if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; - DC_FP_END(); - return true; create_fail: - DC_FP_END(); - dcn32_resource_destruct(pool); return false; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 2a9606118d89..21dc956b5f35 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -429,6 +429,7 @@ struct amd_pm_funcs { int (*set_pp_table)(void *handle, const char *buf, 
size_t size); void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m); int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en); + int (*pause_power_profile)(void *handle, bool pause); /* export to amdgpu */ struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx); int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 81e9b443ca0a..3533d43ed1e7 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -349,6 +349,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, return ret; } +int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev, + bool pause) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (amdgpu_sriov_vf(adev)) + return 0; + + if (pp_funcs && pp_funcs->pause_power_profile) { + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->pause_power_profile( + adev->powerplay.pp_handle, pause); + mutex_unlock(&adev->pm.mutex); + } + + return ret; +} + int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, uint32_t pstate) { diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index f93d287dbf13..4c0f7ad14816 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -410,6 +410,8 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, enum PP_SMC_POWER_PROFILE type, bool en); +int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev, + bool pause); int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 033c3229b555..46cce1d2aaf3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2398,7 +2398,11 @@ static int smu_switch_power_profile(void *handle, smu_power_profile_mode_get(smu, type); else smu_power_profile_mode_put(smu, type); - ret = smu_bump_power_profile_mode(smu, NULL, 0); + /* don't switch the active workload when paused */ + if (smu->pause_workload) + ret = 0; + else + ret = smu_bump_power_profile_mode(smu, NULL, 0); if (ret) { if (enable) smu_power_profile_mode_put(smu, type); @@ -2411,6 +2415,35 @@ static int smu_switch_power_profile(void *handle, return 0; } +static int smu_pause_power_profile(void *handle, + bool pause) +{ + struct smu_context *smu = handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + int ret; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + smu->pause_workload = pause; + + /* force to bootup default profile */ + if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, + workload_mask, + NULL, + 0); + else + ret = smu_bump_power_profile_mode(smu, NULL, 0); + return ret; + } + + return 0; +} + static enum amd_dpm_forced_level smu_get_performance_level(void *handle) { struct smu_context *smu = handle; @@ -3733,6 +3766,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_pp_table = smu_sys_get_pp_table, .set_pp_table = smu_sys_set_pp_table, .switch_power_profile = 
smu_switch_power_profile, + .pause_power_profile = smu_pause_power_profile, /* export to amdgpu */ .dispatch_tasks = smu_handle_dpm_task, .load_firmware = smu_load_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 3ba169639f54..dd6d0e7aa242 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -558,6 +558,7 @@ struct smu_context { /* asic agnostic workload mask */ uint32_t workload_mask; + bool pause_workload; /* default/user workload preference */ uint32_t power_profile_mode; uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 78391d8f35a9..25fabf336a64 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; - if (speed == 0) + if (!speed || speed > UINT_MAX/8) return -EINVAL; /* * To prevent from possible overheat, some ASICs may have requirement diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 17fc5dc708f4..60e5ac179c15 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -549,7 +549,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method) if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt)) break; - len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery); + len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery); } if (recovery) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 13bc4c290b17..9edb3247c767 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -6596,6 +6596,7 @@ static void drm_reset_display_info(struct drm_connector *connector) info->has_hdmi_infoframe = false; info->rgb_quant_range_selectable = false; memset(&info->hdmi, 0, sizeof(info->hdmi)); + memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata)); info->edid_hdmi_rgb444_dc_modes = 0; info->edid_hdmi_ycbcr444_dc_modes = 0; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index c299cd94d3f7..cf2463090d3a 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -964,6 +964,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) struct drm_file *file = f->private_data; struct drm_device *dev = file->minor->dev; struct drm_printer p = drm_seq_file_printer(m); + int idx; + + if (!drm_dev_enter(dev, &idx)) + return; drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name); drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id); @@ -983,6 +987,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) if (dev->driver->show_fdinfo) dev->driver->show_fdinfo(&p, file); + + drm_dev_exit(idx); } EXPORT_SYMBOL(drm_show_fdinfo); diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 38431e8360e7..4b2f32889f00 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, lockdep_assert_held(&gpusvm->notifier_lock); if (range->flags.has_dma_mapping) { + struct drm_gpusvm_range_flags flags = { + .__flags = range->flags.__flags, + }; + for (i = 0, j = 0; i < npages; j++) { struct drm_pagemap_device_addr *addr = 
&range->dma_addr[j]; @@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, dev, *addr); i += 1 << addr->order; } - range->flags.has_devmem_pages = false; - range->flags.has_dma_mapping = false; + + /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */ + flags.has_devmem_pages = false; + flags.has_dma_mapping = false; + WRITE_ONCE(range->flags.__flags, flags.__flags); + range->dpagemap = NULL; } } @@ -1334,6 +1342,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm, int err = 0; struct dev_pagemap *pagemap; struct drm_pagemap *dpagemap; + struct drm_gpusvm_range_flags flags; retry: hmm_range.notifier_seq = mmu_interval_read_begin(notifier); @@ -1378,7 +1387,8 @@ map_pages: */ drm_gpusvm_notifier_lock(gpusvm); - if (range->flags.unmapped) { + flags.__flags = range->flags.__flags; + if (flags.unmapped) { drm_gpusvm_notifier_unlock(gpusvm); err = -EFAULT; goto err_free; @@ -1454,6 +1464,11 @@ map_pages: goto err_unmap; } + if (ctx->devmem_only) { + err = -EFAULT; + goto err_unmap; + } + addr = dma_map_page(gpusvm->drm->dev, page, 0, PAGE_SIZE << order, @@ -1469,14 +1484,17 @@ map_pages: } i += 1 << order; num_dma_mapped = i; + flags.has_dma_mapping = true; } - range->flags.has_dma_mapping = true; if (zdd) { - range->flags.has_devmem_pages = true; + flags.has_devmem_pages = true; range->dpagemap = dpagemap; } + /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */ + WRITE_ONCE(range->flags.__flags, flags.__flags); + drm_gpusvm_notifier_unlock(gpusvm); kvfree(pfns); set_seqno: @@ -1765,6 +1783,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm, goto err_finalize; /* Upon success bind devmem allocation to range and zdd */ + devmem_allocation->timeslice_expiration = get_jiffies_64() + + msecs_to_jiffies(ctx->timeslice_ms); zdd->devmem_allocation = devmem_allocation; /* Owns ref */ err_finalize: @@ -1985,6 +2005,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas, void *buf; int i, err = 0; + if (page) { + zdd = page->zone_device_data; + if (time_before64(get_jiffies_64(), + zdd->devmem_allocation->timeslice_expiration)) + return 0; + } + start = ALIGN_DOWN(fault_addr, size); end = ALIGN(fault_addr + 1, size); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 89e05a5bed1d..a4cd476f9b30 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev) u16 height = drm->mode_config.min_height; u16 width = drm->mode_config.min_width; struct mipi_dbi *dbi = &dbidev->dbi; - size_t len = width * height * 2; + const struct drm_format_info *dst_format; + size_t len; int idx; if (!drm_dev_enter(drm, &idx)) return; + dst_format = drm_format_info(dbidev->pixel_format); + len = drm_format_info_min_pitch(dst_format, 0, width) * height; + memset(dbidev->tx_buf, 0, len); mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1); diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 5170f72b0830..f91daefa9d2b 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -43,13 +43,13 @@ struct decon_data { unsigned int wincon_burstlen_shift; }; -static struct decon_data exynos7_decon_data = { +static const struct decon_data exynos7_decon_data = { .vidw_buf_start_base = 0x80, .shadowcon_win_protect_shift = 10, .wincon_burstlen_shift = 11, }; -static struct decon_data exynos7870_decon_data 
= { +static const struct decon_data exynos7870_decon_data = { .vidw_buf_start_base = 0x880, .shadowcon_win_protect_shift = 8, .wincon_burstlen_shift = 10, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index f313ae7bc3a3..6cc7bf77bcac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -355,8 +355,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); - if (drm) - drm_atomic_helper_shutdown(drm); + drm_atomic_helper_shutdown(drm); } static struct platform_driver exynos_drm_platform_driver = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index b150cfd92f6e..09e33a26caaf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -908,7 +908,7 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, u32 buf_num; u32 cfg; - DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue); + DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueue[%d]\n", buf_id, enqueue); spin_lock_irqsave(&ctx->lock, flags); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 1ad87584b1c2..c394cc702d7d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -731,7 +731,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, /* * Setting dma-burst to 16Word causes permanent tearing for very small * buffers, e.g. cursor buffer. Burst Mode switching which based on - * plane size is not recommended as plane size varies alot towards the + * plane size is not recommended as plane size varies a lot towards the * end of the screen and rapid movement causes unstable DMA, but it is * still better to change dma-burst than displaying garbage. 
*/ diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 08cf79a62025..e644e2382d77 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -312,9 +312,6 @@ static int vidi_get_modes(struct drm_connector *connector) else drm_edid = drm_edid_alloc(fake_edid_info, sizeof(fake_edid_info)); - if (!drm_edid) - return 0; - drm_edid_connector_update(connector, drm_edid); count = drm_edid_connector_add_modes(connector); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 048be2872247..98b898a1de8f 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -244,6 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->deinterleave = 4; break; case INTEL_DRAM_GDDR: + case INTEL_DRAM_GDDR_ECC: qi->channel_width = 32; break; default: @@ -398,6 +399,12 @@ static const struct intel_sa_info xe2_hpd_sa_info = { /* Other values not used by simplified algorithm */ }; +static const struct intel_sa_info xe2_hpd_ecc_sa_info = { + .derating = 45, + .deprogbwlimit = 53, + /* Other values not used by simplified algorithm */ +}; + static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; @@ -740,10 +747,15 @@ static unsigned int icl_qgv_bw(struct drm_i915_private *i915, void intel_bw_init_hw(struct drm_i915_private *dev_priv) { + const struct dram_info *dram_info = &dev_priv->dram_info; + if (!HAS_DISPLAY(dev_priv)) return; - if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv)) + if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) && + dram_info->type == INTEL_DRAM_GDDR_ECC) + xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info); + else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv)) xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info); else if (DISPLAY_VER(dev_priv) >= 14) tgl_get_bw_info(dev_priv, &mtl_sa_info); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3afb85fe8536..3b509c70fb58 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -968,7 +968,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state, old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin || old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax || old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband || - old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full; + old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full || + old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start || + old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end; } static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 717286981687..7a3bb77c7af7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -161,6 +161,7 @@ struct intel_display_platforms { #define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13) #define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb) #define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc) +#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display)) #define 
HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display)) #define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0) #define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a236b5fc7a3d..392c3653d0d7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -172,10 +172,28 @@ int intel_dp_link_symbol_clock(int rate) static int max_dprx_rate(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + int max_rate; + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) - return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel); + max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel); + else + max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); - return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); + /* + * Some broken eDP sinks illegally declare support for + * HBR3 without TPS4, and are unable to produce a stable + * output. Reject HBR3 when TPS4 is not available. + */ + if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) { + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n", + encoder->base.base.id, encoder->base.name); + max_rate = 540000; + } + + return max_rate; } static int max_dprx_lane_count(struct intel_dp *intel_dp) @@ -1032,10 +1050,11 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes; /* - * 3 DSC Slices per pipe need 3 DSC engines, - * which is supported only with Ultrajoiner. + * 3 DSC Slices per pipe need 3 DSC engines, which is supported only + * with Ultrajoiner only for some platforms. */ - if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4) + if (valid_dsc_slicecount[i] == 3 && + (!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4)) continue; if (test_slice_count > @@ -4170,6 +4189,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp) static void intel_edp_set_sink_rates(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + intel_dp->num_sink_rates = 0; if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { @@ -4180,10 +4202,7 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp) sink_rates, sizeof(sink_rates)); for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { - int val = le16_to_cpu(sink_rates[i]); - - if (val == 0) - break; + int rate; /* Value read multiplied by 200kHz gives the per-lane * link rate in kHz. The source rates are, however, @@ -4191,7 +4210,24 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp) * back to symbols is * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) */ - intel_dp->sink_rates[i] = (val * 200) / 10; + rate = le16_to_cpu(sink_rates[i]) * 200 / 10; + + if (rate == 0) + break; + + /* + * Some broken eDP sinks illegally declare support for + * HBR3 without TPS4, and are unable to produce a stable + * output. Reject HBR3 when TPS4 is not available. 
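+ * (810000 is HBR3 in the driver's link-rate units; the break below stops adding this and any higher rate, keeping only the lower rates already parsed.)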
+ */ + if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) { + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n", + encoder->base.base.id, encoder->base.name); + break; + } + + intel_dp->sink_rates[i] = rate; } intel_dp->num_sink_rates = i; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 02f95108c637..6dc2d31ccb5a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -242,7 +242,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp, to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - bool is_mst = intel_dp->is_mst; + bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int bpp_x16, slots = -EINVAL; int dsc_slice_count = 0; int max_dpt_bpp_x16; diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 4efd4f7d497a..7b240ce681a0 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -222,7 +222,9 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) * However if queried just before the start of vblank we'll get an * answer that's slightly in the future. */ - if (DISPLAY_VER(display) == 2) + if (DISPLAY_VER(display) >= 20 || display->platform.battlemage) + return 1; + else if (DISPLAY_VER(display) == 2) return -1; else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return 2; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index ae3343c81a64..5e784db9f315 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -305,36 +305,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping) .range_end = LLONG_MAX, .for_reclaim = 1, }; - unsigned long i; + struct folio *folio = NULL; + int error = 0; /* * Leave mmapings intact (GTT will have been revoked on unbinding, - * leaving only CPU mmapings around) and add those pages to the LRU + * leaving only CPU mmapings around) and add those folios to the LRU * instead of invoking writeback so they are aged and paged out * as normal. */ - - /* Begin writeback on each dirty page */ - for (i = 0; i < size >> PAGE_SHIFT; i++) { - struct page *page; - - page = find_lock_page(mapping, i); - if (!page) - continue; - - if (!page_mapped(page) && clear_page_dirty_for_io(page)) { - int ret; - - SetPageReclaim(page); - ret = mapping->a_ops->writepage(page, &wbc); - if (!PageWriteback(page)) - ClearPageReclaim(page); - if (!ret) - goto put; - } - unlock_page(page); -put: - put_page(page); + while ((folio = writeback_iter(mapping, &wbc, folio, &error))) { + if (folio_mapped(folio)) + folio_redirty_for_writepage(&wbc, folio); + else + error = shmem_writeout(folio, &wbc); } } diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 9378d5901c49..9ca42589da4d 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - /* - * BSpec 52698 - Render powergating must be off. - * FIXME BSpec is outdated, disabling powergating for MTL is just - * temporary wa and should be removed after fixing real cause - * of forcewake timeouts. 
- */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) - pg_enable = - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; - else - pg_enable = - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) { for (i = 0; i < I915_MAX_VCS; i++) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 64e9317f58fb..71ee01d9ef64 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1001,6 +1001,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps) if (rps_uses_slpc(rps)) { slpc = rps_to_slpc(rps); + /* Don't decrement num_waiters for req where increment was skipped */ + if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING) + return; + intel_guc_slpc_dec_waiters(slpc); } else { atomic_dec(&rps->num_waiters); @@ -1029,11 +1033,15 @@ void intel_rps_boost(struct i915_request *rq) if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING) return; - if (slpc->min_freq_softlimit >= slpc->boost_freq) - return; - /* Return if old value is non zero */ if (!atomic_fetch_inc(&slpc->num_waiters)) { + /* + * Skip queuing boost work if frequency is already boosted, + * but still increment num_waiters. + */ + if (slpc->min_freq_softlimit >= slpc->boost_freq) + return; + GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", rq->fence.context, rq->fence.seqno); queue_work(rps_to_gt(rps)->i915->unordered_wq, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index d791f9baa11d..456d3372eef8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc) } } +void intel_huc_fini_late(struct intel_huc *huc) +{ + delayed_huc_load_fini(huc); +} + #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy") static int check_huc_loading_mode(struct intel_huc *huc) { @@ -414,12 +419,6 @@ out: void intel_huc_fini(struct intel_huc *huc) { - /* - * the fence is initialized in init_early, so we need to clean it up - * even if HuC loading is off. 
- */ - delayed_huc_load_fini(huc); - if (huc->heci_pkt) i915_vma_unpin_and_release(&huc->heci_pkt, 0); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index d5e441b9e08d..921ad4b1687f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -55,6 +55,7 @@ struct intel_huc { int intel_huc_sanitize(struct intel_huc *huc); void intel_huc_init_early(struct intel_huc *huc); +void intel_huc_fini_late(struct intel_huc *huc); int intel_huc_init(struct intel_huc *huc); void intel_huc_fini(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 90ba1b0b4c9d..4a3493e8d433 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc) void intel_uc_driver_late_release(struct intel_uc *uc) { + intel_huc_fini_late(&uc->huc); } /** diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 509f9ccae3a9..dbad4d853d3a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) u8 *buf; struct opregion_header *header; struct vbt v; - const char opregion_signature[16] = OPREGION_SIGNATURE; gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | @@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) /* emulated opregion with VBT mailbox only */ buf = (u8 *)vgpu_opregion(vgpu)->va; header = (struct opregion_header *)buf; - memcpy(header->signature, opregion_signature, - sizeof(opregion_signature)); + + static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1); + memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature)); + header->size = 0x8; header->opregion_ver = 0x02000000; header->mboxes = MBOX_VBT; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ffc346379cc2..54538b6f85df 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -305,6 +305,7 @@ struct drm_i915_private { INTEL_DRAM_DDR5, INTEL_DRAM_LPDDR5, INTEL_DRAM_GDDR, + INTEL_DRAM_GDDR_ECC, } type; u8 num_qgv_points; u8 num_psf_gv_points; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h index 9aae779c4da3..4969d3de2bac 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h @@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp); int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id); void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); +bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); #else static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp) @@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp) return 0; } -#endif +static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) +{ + return false; +} -bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); +#endif #endif /*__INTEL_PXP_GSCCS_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index fee76c1d2f45..889281819c5b 100644 --- 
a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -23,7 +23,9 @@ #include <linux/random.h> +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_regs.h" #include "gt/uc/intel_gsc_fw.h" #include "i915_driver.h" @@ -253,11 +255,27 @@ int i915_mock_selftests(void) int i915_live_selftests(struct pci_dev *pdev) { struct drm_i915_private *i915 = pdev_to_i915(pdev); + struct intel_uncore *uncore = &i915->uncore; int err; + u32 pg_enable; + intel_wakeref_t wakeref; if (!i915_selftest.live) return 0; + /* + * FIXME Disable render powergating, this is a temporary workaround and should be + * removed after fixing the real cause of forcewake timeouts. + */ + with_intel_runtime_pm(uncore->rpm, wakeref) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) { + pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE); + if (pg_enable & GEN9_RENDER_PG_ENABLE) + intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, + pg_enable & ~GEN9_RENDER_PG_ENABLE); + } + } + __wait_gsc_proxy_completed(i915); __wait_gsc_huc_load_completed(i915); diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c index eb03b5b28bad..25b8726b9dff 100644 --- a/drivers/gpu/drm/i915/selftests/librapl.c +++ b/drivers/gpu/drm/i915/selftests/librapl.c @@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void) unsigned long long power; u32 units; - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) + if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power)) return 0; units = (power & 0x1f00) >> 8; - if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power)) + if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power)) return 0; return (1000000 * power) >> units; /* convert to uJ */ diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 9e310f4099f4..f60eedb0e92c 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -687,6 +687,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915) drm_WARN_ON(&i915->drm, !IS_DGFX(i915)); dram_info->type = INTEL_DRAM_GDDR; break; + case 9: + drm_WARN_ON(&i915->drm, !IS_DGFX(i915)); + dram_info->type = INTEL_DRAM_GDDR_ECC; + break; default: MISSING_CASE(val); return -EINVAL; diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c index 3debc9870a82..d09c4c684116 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.c +++ b/drivers/gpu/drm/imagination/pvr_fw.c @@ -732,7 +732,7 @@ pvr_fw_process(struct pvr_device *pvr_dev) fw_mem->core_data, fw_mem->core_code_alloc_size); if (err) - goto err_free_fw_core_data_obj; + goto err_free_kdata; memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size); memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size); @@ -742,10 +742,14 @@ pvr_fw_process(struct pvr_device *pvr_dev) memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size); /* We're finished with the firmware section memory on the CPU, unmap. 
*/ - if (fw_core_data_ptr) + if (fw_core_data_ptr) { pvr_fw_object_vunmap(fw_mem->core_data_obj); - if (fw_core_code_ptr) + fw_core_data_ptr = NULL; + } + if (fw_core_code_ptr) { pvr_fw_object_vunmap(fw_mem->core_code_obj); + fw_core_code_ptr = NULL; + } pvr_fw_object_vunmap(fw_mem->data_obj); fw_data_ptr = NULL; pvr_fw_object_vunmap(fw_mem->code_obj); @@ -753,7 +757,7 @@ pvr_fw_process(struct pvr_device *pvr_dev) err = pvr_fw_create_fwif_connection_ctl(pvr_dev); if (err) - goto err_free_fw_core_data_obj; + goto err_free_kdata; return 0; @@ -763,13 +767,16 @@ err_free_kdata: kfree(fw_mem->data); kfree(fw_mem->code); -err_free_fw_core_data_obj: if (fw_core_data_ptr) - pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj); + pvr_fw_object_vunmap(fw_mem->core_data_obj); + if (fw_mem->core_data_obj) + pvr_fw_object_destroy(fw_mem->core_data_obj); err_free_fw_core_code_obj: if (fw_core_code_ptr) - pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj); + pvr_fw_object_vunmap(fw_mem->core_code_obj); + if (fw_mem->core_code_obj) + pvr_fw_object_destroy(fw_mem->core_code_obj); err_free_fw_data_obj: if (fw_data_ptr) @@ -836,6 +843,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev) struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; pvr_fw_fini_fwif_connection_ctl(pvr_dev); + + kfree(fw_mem->core_data); + kfree(fw_mem->core_code); + kfree(fw_mem->data); + kfree(fw_mem->code); + if (fw_mem->core_code_obj) pvr_fw_object_destroy(fw_mem->core_code_obj); if (fw_mem->core_data_obj) diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 1cdb3cfd058d..59b334d094fa 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -671,6 +671,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count) geom_job->paired_job = frag_job; frag_job->paired_job = geom_job; + /* The geometry job pvr_job structure is used when the fragment + * job is being prepared by the GPU scheduler. Have the fragment + * job hold a reference on the geometry job to prevent it being + * freed until the fragment job has finished with it. + */ + pvr_job_get(geom_job); + /* Skip the fragment job we just paired to the geometry job. 
*/ i++; } diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index eba69309bb6c..5e9bc0992824 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job) struct pvr_job *job = container_of(sched_job, struct pvr_job, base); drm_sched_job_cleanup(sched_job); + + if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) + pvr_job_put(job->paired_job); + job->paired_job = NULL; pvr_job_put(job); } diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 81d2ee37e773..49ff9f1f16d3 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -169,7 +169,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = { /* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1,65GHz */ { .limits = { - .max_hdmi_phy_freq = 1650000, + .max_hdmi_phy_freq = 1650000000, }, .attrs = (const struct soc_device_attribute []) { { .soc_id = "GXL (S805*)", }, diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 3f9345c14f31..be4b0e4df6e1 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -37,7 +37,7 @@ struct meson_drm_match_data { }; struct meson_drm_soc_limits { - unsigned int max_hdmi_phy_freq; + unsigned long long max_hdmi_phy_freq; }; struct meson_drm { diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 6d1c9262a2cf..c08fa93e50a3 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -70,12 +70,12 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, { struct meson_drm *priv = encoder_hdmi->priv; int vic = drm_match_cea_mode(mode); - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; + unsigned long long phy_freq; + unsigned long long vclk_freq; + unsigned long long venc_freq; + unsigned long long hdmi_freq; - vclk_freq = mode->clock; + vclk_freq = mode->clock * 1000ULL; /* For 420, pixel clock is half unlike venc clock */ if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) @@ -107,7 +107,8 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, if (mode->flags & DRM_MODE_FLAG_DBLCLK) venc_freq /= 2; - dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", + dev_dbg(priv->dev, + "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n", phy_freq, vclk_freq, venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); @@ -122,10 +123,11 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); struct meson_drm *priv = encoder_hdmi->priv; bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; + unsigned long long clock = mode->clock * 1000ULL; + unsigned long long phy_freq; + unsigned long long vclk_freq; + unsigned long long venc_freq; + unsigned long long hdmi_freq; int vic = drm_match_cea_mode(mode); enum drm_mode_status status; @@ -144,12 +146,12 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri if (status != MODE_OK) return status; - return meson_vclk_dmt_supported_freq(priv, mode->clock); + return meson_vclk_dmt_supported_freq(priv, 
clock); /* Check against supported VIC modes */ } else if (!meson_venc_hdmi_supported_vic(vic)) return MODE_BAD; - vclk_freq = mode->clock; + vclk_freq = clock; /* For 420, pixel clock is half unlike venc clock */ if (drm_mode_is_420_only(display_info, mode) || @@ -179,7 +181,8 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri if (mode->flags & DRM_MODE_FLAG_DBLCLK) venc_freq /= 2; - dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", + dev_dbg(priv->dev, + "%s: vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz\n", __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index 2a942dc6a6dc..3325580d885d 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -110,7 +110,10 @@ #define HDMI_PLL_LOCK BIT(31) #define HDMI_PLL_LOCK_G12A (3 << 30) -#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001) +#define PIXEL_FREQ_1000_1001(_freq) \ + DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL) +#define PHY_FREQ_1000_1001(_freq) \ + (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10) /* VID PLL Dividers */ enum { @@ -360,11 +363,11 @@ enum { }; struct meson_vclk_params { - unsigned int pll_freq; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int pixel_freq; + unsigned long long pll_freq; + unsigned long long phy_freq; + unsigned long long vclk_freq; + unsigned long long venc_freq; + unsigned long long pixel_freq; unsigned int pll_od1; unsigned int pll_od2; unsigned int pll_od3; @@ -372,11 +375,11 @@ struct meson_vclk_params { unsigned int vclk_div; } params[] = { [MESON_VCLK_HDMI_ENCI_54000] = { - .pll_freq = 4320000, - .phy_freq = 270000, - .vclk_freq = 54000, - .venc_freq = 54000, - .pixel_freq = 54000, + .pll_freq = 4320000000, + .phy_freq = 270000000, + .vclk_freq = 54000000, + .venc_freq = 54000000, + .pixel_freq = 54000000, .pll_od1 = 4, .pll_od2 = 4, .pll_od3 = 1, @@ -384,11 +387,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_DDR_54000] = { - .pll_freq = 4320000, - .phy_freq = 270000, - .vclk_freq = 54000, - .venc_freq = 54000, - .pixel_freq = 27000, + .pll_freq = 4320000000, + .phy_freq = 270000000, + .vclk_freq = 54000000, + .venc_freq = 54000000, + .pixel_freq = 27000000, .pll_od1 = 4, .pll_od2 = 4, .pll_od3 = 1, @@ -396,11 +399,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_DDR_148500] = { - .pll_freq = 2970000, - .phy_freq = 742500, - .vclk_freq = 148500, - .venc_freq = 148500, - .pixel_freq = 74250, + .pll_freq = 2970000000, + .phy_freq = 742500000, + .vclk_freq = 148500000, + .venc_freq = 148500000, + .pixel_freq = 74250000, .pll_od1 = 4, .pll_od2 = 1, .pll_od3 = 1, @@ -408,11 +411,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_74250] = { - .pll_freq = 2970000, - .phy_freq = 742500, - .vclk_freq = 74250, - .venc_freq = 74250, - .pixel_freq = 74250, + .pll_freq = 2970000000, + .phy_freq = 742500000, + .vclk_freq = 74250000, + .venc_freq = 74250000, + .pixel_freq = 74250000, .pll_od1 = 2, .pll_od2 = 2, .pll_od3 = 2, @@ -420,11 +423,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_148500] = { - .pll_freq = 2970000, - .phy_freq = 1485000, - .vclk_freq = 148500, - .venc_freq = 148500, - .pixel_freq = 148500, + .pll_freq = 2970000000, + .phy_freq = 1485000000, + .vclk_freq = 148500000, + .venc_freq = 148500000, + .pixel_freq = 
148500000, .pll_od1 = 1, .pll_od2 = 2, .pll_od3 = 2, @@ -432,11 +435,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_297000] = { - .pll_freq = 5940000, - .phy_freq = 2970000, - .venc_freq = 297000, - .vclk_freq = 297000, - .pixel_freq = 297000, + .pll_freq = 5940000000, + .phy_freq = 2970000000, + .venc_freq = 297000000, + .vclk_freq = 297000000, + .pixel_freq = 297000000, .pll_od1 = 2, .pll_od2 = 1, .pll_od3 = 1, @@ -444,11 +447,11 @@ struct meson_vclk_params { .vclk_div = 2, }, [MESON_VCLK_HDMI_594000] = { - .pll_freq = 5940000, - .phy_freq = 5940000, - .venc_freq = 594000, - .vclk_freq = 594000, - .pixel_freq = 594000, + .pll_freq = 5940000000, + .phy_freq = 5940000000, + .venc_freq = 594000000, + .vclk_freq = 594000000, + .pixel_freq = 594000000, .pll_od1 = 1, .pll_od2 = 1, .pll_od3 = 2, @@ -456,11 +459,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_594000_YUV420] = { - .pll_freq = 5940000, - .phy_freq = 2970000, - .venc_freq = 594000, - .vclk_freq = 594000, - .pixel_freq = 297000, + .pll_freq = 5940000000, + .phy_freq = 2970000000, + .venc_freq = 594000000, + .vclk_freq = 594000000, + .pixel_freq = 297000000, .pll_od1 = 2, .pll_od2 = 1, .pll_od3 = 1, @@ -617,16 +620,16 @@ static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m, 3 << 20, pll_od_to_reg(od3) << 20); } -#define XTAL_FREQ 24000 +#define XTAL_FREQ (24 * 1000 * 1000) static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv, - unsigned int pll_freq) + unsigned long long pll_freq) { /* The GXBB PLL has a /2 pre-multiplier */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) - pll_freq /= 2; + pll_freq = DIV_ROUND_DOWN_ULL(pll_freq, 2); - return pll_freq / XTAL_FREQ; + return DIV_ROUND_DOWN_ULL(pll_freq, XTAL_FREQ); } #define HDMI_FRAC_MAX_GXBB 4096 @@ -635,12 +638,13 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv, static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, unsigned int m, - unsigned int pll_freq) + unsigned long long pll_freq) { - unsigned int parent_freq = XTAL_FREQ; + unsigned long long parent_freq = XTAL_FREQ; unsigned int frac_max = HDMI_FRAC_MAX_GXL; unsigned int frac_m; unsigned int frac; + u32 remainder; /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { @@ -652,11 +656,11 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, frac_max = HDMI_FRAC_MAX_G12A; /* We can have a perfect match !*/ - if (pll_freq / m == parent_freq && - pll_freq % m == 0) + if (div_u64_rem(pll_freq, m, &remainder) == parent_freq && + remainder == 0) return 0; - frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq); + frac = mul_u64_u64_div_u64(pll_freq, frac_max, parent_freq); frac_m = m * frac_max; if (frac_m > frac) return frac_max; @@ -666,7 +670,7 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, } static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, - unsigned int m, + unsigned long long m, unsigned int frac) { if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { @@ -694,7 +698,7 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, } static bool meson_hdmi_pll_find_params(struct meson_drm *priv, - unsigned int freq, + unsigned long long freq, unsigned int *m, unsigned int *frac, unsigned int *od) @@ -706,7 +710,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv, continue; *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od); - DRM_DEBUG_DRIVER("PLL params 
for %dkHz: m=%x frac=%x od=%d\n", + DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d\n", freq, *m, *frac, *od); if (meson_hdmi_pll_validate_params(priv, *m, *frac)) @@ -718,7 +722,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv, /* pll_freq is the frequency after the OD dividers */ enum drm_mode_status -meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq) +meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq) { unsigned int od, m, frac; @@ -741,7 +745,7 @@ EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq); /* pll_freq is the frequency after the OD dividers */ static void meson_hdmi_pll_generic_set(struct meson_drm *priv, - unsigned int pll_freq) + unsigned long long pll_freq) { unsigned int od, m, frac, od1, od2, od3; @@ -756,7 +760,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv, od1 = od / od2; } - DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n", + DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d/%d/%d\n", pll_freq, m, frac, od1, od2, od3); meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3); @@ -764,17 +768,18 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv, return; } - DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n", + DRM_ERROR("Fatal, unable to find parameters for PLL freq %lluHz\n", pll_freq); } enum drm_mode_status -meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, - unsigned int vclk_freq) +meson_vclk_vic_supported_freq(struct meson_drm *priv, + unsigned long long phy_freq, + unsigned long long vclk_freq) { int i; - DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n", + DRM_DEBUG_DRIVER("phy_freq = %lluHz vclk_freq = %lluHz\n", phy_freq, vclk_freq); /* Check against soc revision/package limits */ @@ -785,19 +790,19 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, } for (i = 0 ; params[i].pixel_freq ; ++i) { - DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n", + DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n", i, params[i].pixel_freq, - FREQ_1000_1001(params[i].pixel_freq)); - DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n", + PIXEL_FREQ_1000_1001(params[i].pixel_freq)); + DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n", i, params[i].phy_freq, - FREQ_1000_1001(params[i].phy_freq/1000)*1000); + PHY_FREQ_1000_1001(params[i].phy_freq)); /* Match strict frequency */ if (phy_freq == params[i].phy_freq && vclk_freq == params[i].vclk_freq) return MODE_OK; /* Match 1000/1001 variant */ - if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) && - vclk_freq == FREQ_1000_1001(params[i].vclk_freq)) + if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) && + vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq)) return MODE_OK; } @@ -805,8 +810,9 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, } EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq); -static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, - unsigned int od1, unsigned int od2, unsigned int od3, +static void meson_vclk_set(struct meson_drm *priv, + unsigned long long pll_base_freq, unsigned int od1, + unsigned int od2, unsigned int od3, unsigned int vid_pll_div, unsigned int vclk_div, unsigned int hdmi_tx_div, unsigned int venc_div, bool hdmi_use_enci, bool vic_alternate_clock) @@ -826,15 +832,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, meson_hdmi_pll_generic_set(priv, pll_base_freq); } 
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { switch (pll_base_freq) { - case 2970000: + case 2970000000: m = 0x3d; frac = vic_alternate_clock ? 0xd02 : 0xe00; break; - case 4320000: + case 4320000000: m = vic_alternate_clock ? 0x59 : 0x5a; frac = vic_alternate_clock ? 0xe8f : 0; break; - case 5940000: + case 5940000000: m = 0x7b; frac = vic_alternate_clock ? 0xa05 : 0xc00; break; @@ -844,15 +850,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { switch (pll_base_freq) { - case 2970000: + case 2970000000: m = 0x7b; frac = vic_alternate_clock ? 0x281 : 0x300; break; - case 4320000: + case 4320000000: m = vic_alternate_clock ? 0xb3 : 0xb4; frac = vic_alternate_clock ? 0x347 : 0; break; - case 5940000: + case 5940000000: m = 0xf7; frac = vic_alternate_clock ? 0x102 : 0x200; break; @@ -861,15 +867,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3); } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { switch (pll_base_freq) { - case 2970000: + case 2970000000: m = 0x7b; frac = vic_alternate_clock ? 0x140b4 : 0x18000; break; - case 4320000: + case 4320000000: m = vic_alternate_clock ? 0xb3 : 0xb4; frac = vic_alternate_clock ? 0x1a3ee : 0; break; - case 5940000: + case 5940000000: m = 0xf7; frac = vic_alternate_clock ? 0x8148 : 0x10000; break; @@ -1025,14 +1031,14 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, } void meson_vclk_setup(struct meson_drm *priv, unsigned int target, - unsigned int phy_freq, unsigned int vclk_freq, - unsigned int venc_freq, unsigned int dac_freq, + unsigned long long phy_freq, unsigned long long vclk_freq, + unsigned long long venc_freq, unsigned long long dac_freq, bool hdmi_use_enci) { bool vic_alternate_clock = false; - unsigned int freq; - unsigned int hdmi_tx_div; - unsigned int venc_div; + unsigned long long freq; + unsigned long long hdmi_tx_div; + unsigned long long venc_div; if (target == MESON_VCLK_TARGET_CVBS) { meson_venci_cvbs_clock_config(priv); @@ -1052,27 +1058,27 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, return; } - hdmi_tx_div = vclk_freq / dac_freq; + hdmi_tx_div = DIV_ROUND_DOWN_ULL(vclk_freq, dac_freq); if (hdmi_tx_div == 0) { - pr_err("Fatal Error, invalid HDMI-TX freq %d\n", + pr_err("Fatal Error, invalid HDMI-TX freq %lluHz\n", dac_freq); return; } - venc_div = vclk_freq / venc_freq; + venc_div = DIV_ROUND_DOWN_ULL(vclk_freq, venc_freq); if (venc_div == 0) { - pr_err("Fatal Error, invalid HDMI venc freq %d\n", + pr_err("Fatal Error, invalid HDMI venc freq %lluHz\n", venc_freq); return; } for (freq = 0 ; params[freq].pixel_freq ; ++freq) { if ((phy_freq == params[freq].phy_freq || - phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) && + phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) && (vclk_freq == params[freq].vclk_freq || - vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) { + vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) { if (vclk_freq != params[freq].vclk_freq) vic_alternate_clock = true; else @@ -1098,7 +1104,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, } if (!params[freq].pixel_freq) { - pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq); + pr_err("Fatal Error, invalid HDMI vclk freq %lluHz\n", + vclk_freq); return; } diff --git 
a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h index 60617aaf18dd..7ac55744e574 100644 --- a/drivers/gpu/drm/meson/meson_vclk.h +++ b/drivers/gpu/drm/meson/meson_vclk.h @@ -20,17 +20,18 @@ enum { }; /* 27MHz is the CVBS Pixel Clock */ -#define MESON_VCLK_CVBS 27000 +#define MESON_VCLK_CVBS (27 * 1000 * 1000) enum drm_mode_status -meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq); +meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq); enum drm_mode_status -meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, - unsigned int vclk_freq); +meson_vclk_vic_supported_freq(struct meson_drm *priv, + unsigned long long phy_freq, + unsigned long long vclk_freq); void meson_vclk_setup(struct meson_drm *priv, unsigned int target, - unsigned int phy_freq, unsigned int vclk_freq, - unsigned int venc_freq, unsigned int dac_freq, + unsigned long long phy_freq, unsigned long long vclk_freq, + unsigned long long venc_freq, unsigned long long dac_freq, bool hdmi_use_enci); #endif /* __MESON_VCLK_H */ diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fb71658c3117..6067d08aeee3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -223,7 +223,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod vsyncstr = mode->crtc_vsync_start - 1; vsyncend = mode->crtc_vsync_end - 1; vtotal = mode->crtc_vtotal - 2; - vblkstr = mode->crtc_vblank_start; + vblkstr = mode->crtc_vblank_start - 1; vblkend = vtotal + 1; linecomp = vdispend; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 06465bc2d0b4..242d02d48c0c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -242,10 +242,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; fallthrough; case MSM_SUBMIT_CMD_BUF: - OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); + OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); ibs++; break; } @@ -377,10 +377,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; fallthrough; case MSM_SUBMIT_CMD_BUF: - OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); + OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); ibs++; break; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h index 1f32807bb5e5..ad60089f18ea 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h @@ -132,7 +132,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x268, @@ -141,7 +140,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), 
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h index 42131959ff22..a1cf89a0a42d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h @@ -118,7 +118,6 @@ static const struct dpu_intf_cfg msm8917_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h index 2b4723a5c676..eea9b80e2287 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h @@ -131,7 +131,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x268, @@ -140,7 +139,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x268, @@ -149,7 +147,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h index 5cf19de71f06..ae18a354e5d2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h @@ -241,7 +241,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x268, @@ -250,7 +249,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x268, @@ -259,7 +257,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x268, @@ -267,7 +264,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), - .intr_tear_rd_ptr = -1, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h index 4f2f68b07f20..bb89da0a481d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h @@ -202,7 +202,6 @@ static const struct 
dpu_intf_cfg sdm660_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x280, @@ -211,7 +210,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x280, @@ -220,7 +218,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h index c70bef025ac4..7caf876ca3e3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h @@ -147,7 +147,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x280, @@ -156,7 +155,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 8610bbf2b87c..862e9e6bf0a5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1666,7 +1666,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, */ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) { - struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys->parent); + struct dpu_encoder_virt *dpu_enc; if (!phys) { DPU_ERROR("invalid argument(s)\n"); @@ -1678,6 +1678,8 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) return; } + dpu_enc = to_dpu_encoder_virt(phys->parent); + if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL && dpu_enc->cwb_mask) { DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent)); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index af3e541f60c3..e03d6091f736 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -729,12 +729,40 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg, - const struct msm_format *fmt, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + struct drm_plane_state *new_plane_state) { uint32_t min_src_size; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); int ret; + const struct msm_format *fmt; + uint32_t supported_rotations; + const struct dpu_sspp_cfg *pipe_hw_caps; + const struct dpu_sspp_sub_blks *sblk; + + pipe_hw_caps = pipe->sspp->cap; + sblk = pipe->sspp->cap->sblk; + + /* + * We already have verified scaling against platform limitations. + * Now check if the SSPP supports scaling at all. 
+ */ + if (!sblk->scaler_blk.len && + ((drm_rect_width(&new_plane_state->src) >> 16 != + drm_rect_width(&new_plane_state->dst)) || + (drm_rect_height(&new_plane_state->src) >> 16 != + drm_rect_height(&new_plane_state->dst)))) + return -ERANGE; + + fmt = msm_framebuffer_format(new_plane_state->fb); + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + + if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_90; + + pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, + supported_rotations); min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1; @@ -923,47 +951,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); struct dpu_sw_pipe *pipe = &pstate->pipe; struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - const struct msm_format *fmt; struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; - uint32_t supported_rotations; - const struct dpu_sspp_cfg *pipe_hw_caps; - const struct dpu_sspp_sub_blks *sblk; int ret = 0; - pipe_hw_caps = pipe->sspp->cap; - sblk = pipe->sspp->cap->sblk; - - /* - * We already have verified scaling against platform limitations. - * Now check if the SSPP supports scaling at all. - */ - if (!sblk->scaler_blk.len && - ((drm_rect_width(&new_plane_state->src) >> 16 != - drm_rect_width(&new_plane_state->dst)) || - (drm_rect_height(&new_plane_state->src) >> 16 != - drm_rect_height(&new_plane_state->dst)))) - return -ERANGE; - - fmt = msm_framebuffer_format(new_plane_state->fb); - - supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; - - if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) - supported_rotations |= DRM_MODE_ROTATE_90; - - pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, - supported_rotations); - r_pipe_cfg->rotation = pipe_cfg->rotation; - - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, - &crtc_state->adjusted_mode); + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, + &crtc_state->adjusted_mode, + new_plane_state); if (ret) return ret; if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) { - ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt, - &crtc_state->adjusted_mode); + ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, + &crtc_state->adjusted_mode, + new_plane_state); if (ret) return ret; } @@ -1059,6 +1060,9 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, struct drm_crtc_state *crtc_state; int ret; + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + if (plane_state->crtc) crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index 55a35182858c..5a6ae9fc3194 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -2259,5 +2259,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> </domain> +<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-"> + <reg64 offset="0" name="IB_BASE" type="address"/> + <reg32 offset="2" name="2"> + <bitfield name="IB_SIZE" low="0" high="19"/> + </reg32> +</domain> + </database> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index db961eade225..2016c1e7242f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) nouveau_bo_del_io_reserve_lru(bo); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); + if (bo->base.import_attach) + drm_prime_gem_destroy(&bo->base, bo->sg); + /* * If nouveau_bo_new() allocated this buffer, the GEM object was never * initialized, so don't attempt to release it. diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 7cc84472cece..edddfc036c6d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) while (!list_empty(&fctx->pending)) { fence = list_entry(fctx->pending.next, typeof(*fence), head); - if (error) + if (error && !dma_fence_is_signaled_locked(&fence->base)) dma_fence_set_error(&fence->base, error); if (nouveau_fence_signal(fence)) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 9ae2cee1c7c5..67e3c99de73a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; } - if (gem->import_attach) - drm_prime_gem_destroy(gem, nvbo->bo.sg); - ttm_bo_put(&nvbo->bo); pm_runtime_mark_last_busy(dev); diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 7d68a8acfe2e..eb0f8373258c 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -129,11 +129,11 @@ static int jadard_unprepare(struct drm_panel *panel) { struct jadard *jadard = panel_to_jadard(panel); - gpiod_set_value(jadard->reset, 1); + gpiod_set_value(jadard->reset, 0); msleep(120); if (jadard->desc->reset_before_power_off_vcioo) { - gpiod_set_value(jadard->reset, 0); + gpiod_set_value(jadard->reset, 1); usleep_range(1000, 2000); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 232b03c1a259..33a37539de57 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = { }, }; -static const struct drm_display_mode auo_g101evn010_mode = { - .clock = 68930, - .hdisplay = 1280, - .hsync_start = 1280 + 82, - .hsync_end = 1280 + 82 + 2, - .htotal = 1280 + 82 + 2 + 84, - .vdisplay = 800, - .vsync_start = 800 + 8, - .vsync_end = 800 + 8 + 2, - .vtotal = 800 + 8 + 2 + 6, +static const struct display_timing auo_g101evn010_timing = { + .pixelclock = { 64000000, 68930000, 85000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 8, 64, 256 }, + .hback_porch = { 8, 64, 256 }, + .hsync_len = { 40, 168, 767 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 4, 8, 100 }, + .vback_porch = { 4, 8, 100 }, + .vsync_len = { 8, 16, 223 }, }; static const struct panel_desc auo_g101evn010 = { - .modes = &auo_g101evn010_mode, - .num_modes = 1, + .timings = &auo_g101evn010_timing, + .num_timings = 1, .bpc = 6, .size = { .width = 216, .height = 135, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c index 3d1dddb34603..7d531b6f4c09 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c +++ 
b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -94,6 +94,7 @@ struct rockchip_hdmi_qp { struct gpio_desc *enable_gpio; struct delayed_work hpd_work; int port_id; + const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops; }; struct rockchip_hdmi_qp_ctrl_ops { @@ -461,6 +462,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, return -ENODEV; } + hdmi->ctrl_ops = cfg->ctrl_ops; hdmi->dev = &pdev->dev; hdmi->port_id = -ENODEV; @@ -600,27 +602,8 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev) static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev) { struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); - u32 val; - val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | - HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | - HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | - HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); - regmap_write(hdmi->vo_regmap, - hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3, - val); - - val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, - RK3588_SET_HPD_PATH_MASK); - regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); - - if (hdmi->port_id) - val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL, - RK3588_HDMI1_GRANT_SEL); - else - val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, - RK3588_HDMI0_GRANT_SEL); - regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + hdmi->ctrl_ops->io_init(hdmi); dw_hdmi_qp_resume(dev, hdmi->hdmi); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 14958d6b3d2e..0a2840cbe8e2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -1754,9 +1754,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags); break; case ROCKCHIP_VOP2_EP_DP1: - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); + die &= ~RK3588_SYS_DSP_INFACE_EN_DP1_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_DP1 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP1_MUX, vp->id); dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL; dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags); break; diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile index f203ac5514ae..f778a4eee7c9 100644 --- a/drivers/gpu/drm/sti/Makefile +++ b/drivers/gpu/drm/sti/Makefile @@ -7,8 +7,6 @@ sti-drm-y := \ sti_compositor.o \ sti_crtc.o \ sti_plane.o \ - sti_crtc.o \ - sti_plane.o \ sti_hdmi.o \ sti_hdmi_tx3g4c28phy.o \ sti_dvo.o \ diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c index 7516f6cb36e4..b2fdb1a774fe 100644 --- a/drivers/gpu/drm/tests/drm_client_modeset_test.c +++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c @@ -95,6 +95,9 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test) expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false); KUNIT_ASSERT_NOT_NULL(test, expected_mode); + ret = drm_kunit_add_mode_destroy_action(test, expected_mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline, connector, @@ -129,7 +132,8 @@ static void drm_test_pick_cmdline_named(struct kunit *test) struct drm_device *drm = priv->drm; struct drm_connector *connector = &priv->connector; struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode; - const struct drm_display_mode 
*expected_mode, *mode; + const struct drm_display_mode *mode; + struct drm_display_mode *expected_mode; const char *cmdline = params->cmdline; int ret; @@ -149,6 +153,9 @@ static void drm_test_pick_cmdline_named(struct kunit *test) expected_mode = params->func(drm); KUNIT_ASSERT_NOT_NULL(test, expected_mode); + ret = drm_kunit_add_mode_destroy_action(test, expected_mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode)); } diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c index 59c8408c453c..1cfcb597b088 100644 --- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c +++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c @@ -7,6 +7,7 @@ #include <kunit/test.h> #include <drm/drm_connector.h> +#include <drm/drm_kunit_helpers.h> #include <drm/drm_modes.h> static const struct drm_connector no_connector = {}; @@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test { static void drm_test_cmdline_tv_options(struct kunit *test) { const struct drm_cmdline_tv_option_test *params = test->param_value; - const struct drm_display_mode *expected_mode = params->mode_fn(NULL); + struct drm_display_mode *expected_mode; struct drm_cmdline_mode mode = { }; + int ret; + + expected_mode = params->mode_fn(NULL); + KUNIT_ASSERT_NOT_NULL(test, expected_mode); + + ret = drm_kunit_add_mode_destroy_action(test, expected_mode); + KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline, &no_connector, &mode)); diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index fd4215e2f982..925fbc2cda70 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); KUNIT_EXPECT_NULL(test, shmem->sgt); + ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); KUNIT_ASSERT_EQ(test, ret, 0); diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index a4eb68f0decc..6f6616cf4966 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -279,6 +279,28 @@ static void kunit_action_drm_mode_destroy(void *ptr) } /** + * drm_kunit_add_mode_destroy_action() - Add a drm_destroy_mode kunit action + * @test: The test context object + * @mode: The drm_display_mode to destroy eventually + * + * Registers a kunit action that will destroy the drm_display_mode at + * the end of the test. + * + * If an error occurs, the drm_display_mode will be destroyed. + * + * Returns: + * 0 on success, an error code otherwise. 
+ */ +int drm_kunit_add_mode_destroy_action(struct kunit *test, + struct drm_display_mode *mode) +{ + return kunit_add_action_or_reset(test, + kunit_action_drm_mode_destroy, + mode); +} +EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action); + +/** * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test * @test: The test context object * @dev: DRM device diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c index 6ed51f99e133..f5b20f92df8b 100644 --- a/drivers/gpu/drm/tests/drm_modes_test.c +++ b/drivers/gpu/drm/tests/drm_modes_test.c @@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *mode; + int ret; mode = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_NTSC, @@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60); KUNIT_EXPECT_EQ(test, mode->hdisplay, 720); @@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *expected, *mode; + int ret; expected = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_NTSC, @@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, expected); + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); + mode = drm_mode_analog_ntsc_480i(priv->drm); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode)); } @@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *mode; + int ret; mode = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_PAL, @@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50); KUNIT_EXPECT_EQ(test, mode->hdisplay, 720); @@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *expected, *mode; + int ret; expected = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_PAL, @@ -124,9 +140,15 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, expected); + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); + mode = drm_mode_analog_pal_576i(priv->drm); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode)); } @@ -134,6 +156,7 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *mode; + int ret; mode = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_MONOCHROME, @@ -141,6 +164,9 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = 
drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50); KUNIT_EXPECT_EQ(test, mode->hdisplay, 720); diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c index bc09ff38aca1..db0e4f5df275 100644 --- a/drivers/gpu/drm/tests/drm_probe_helper_test.c +++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c @@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test) struct drm_connector *connector = &priv->connector; struct drm_cmdline_mode *cmdline = &connector->cmdline_mode; struct drm_display_mode *mode; - const struct drm_display_mode *expected; + struct drm_display_mode *expected; size_t len; int ret; @@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test) KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected)); KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED); + + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); } if (params->num_expected_modes >= 2) { @@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test) KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected)); KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED); + + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); } mutex_unlock(&priv->drm->mode_config.mutex); diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index 0460ecaef4bd..23914a9f7fd3 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_client_setup(drm, NULL); + if (bpp == 16) + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565); + else + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888); return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c index 93c007f18855..ffaab68bd5dd 100644 --- a/drivers/gpu/drm/ttm/ttm_backup.c +++ b/drivers/gpu/drm/ttm/ttm_backup.c @@ -8,20 +8,6 @@ #include <linux/swap.h> /* - * Casting from randomized struct file * to struct ttm_backup * is fine since - * struct ttm_backup is never defined nor dereferenced. - */ -static struct file *ttm_backup_to_file(struct ttm_backup *backup) -{ - return (void *)backup; -} - -static struct ttm_backup *ttm_file_to_backup(struct file *file) -{ - return (void *)file; -} - -/* * Need to map shmem indices to handle since a handle value * of 0 means error, following the swp_entry_t convention. */ @@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle) * @backup: The struct backup pointer used to obtain the handle * @handle: The handle obtained from the @backup_page function. */ -void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle) +void ttm_backup_drop(struct file *backup, pgoff_t handle) { loff_t start = ttm_backup_handle_to_shmem_idx(handle); start <<= PAGE_SHIFT; - shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start, + shmem_truncate_range(file_inode(backup), start, start + PAGE_SIZE - 1); } @@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle) * @backup: The struct backup pointer used to back up the page. * @dst: The struct page to copy into. * @handle: The handle returned when the page was backed up. - * @intr: Try to perform waits interruptable or at least killable. 
+ * @intr: Try to perform waits interruptible or at least killable. * * Return: 0 on success, Negative error code on failure, notably * -EINTR if @intr was set to true and a signal is pending. */ -int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, +int ttm_backup_copy_page(struct file *backup, struct page *dst, pgoff_t handle, bool intr) { - struct file *filp = ttm_backup_to_file(backup); - struct address_space *mapping = filp->f_mapping; + struct address_space *mapping = backup->f_mapping; struct folio *from_folio; pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle); @@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, * the folio size- and usage. */ s64 -ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, +ttm_backup_backup_page(struct file *backup, struct page *page, bool writeback, pgoff_t idx, gfp_t page_gfp, gfp_t alloc_gfp) { - struct file *filp = ttm_backup_to_file(backup); - struct address_space *mapping = filp->f_mapping; + struct address_space *mapping = backup->f_mapping; unsigned long handle = 0; struct folio *to_folio; int ret; @@ -136,13 +120,13 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, .for_reclaim = 1, }; folio_set_reclaim(to_folio); - ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc); + ret = shmem_writeout(to_folio, &wbc); if (!folio_test_writeback(to_folio)) folio_clear_reclaim(to_folio); /* - * If writepage succeeds, it unlocks the folio. - * writepage() errors are otherwise dropped, since writepage() - * is only best effort here. + * If writeout succeeds, it unlocks the folio. errors + * are otherwise dropped, since writeout is only best + * effort here. */ if (ret) folio_unlock(to_folio); @@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, * * After a call to this function, it's illegal to use the @backup pointer. */ -void ttm_backup_fini(struct ttm_backup *backup) +void ttm_backup_fini(struct file *backup) { - fput(ttm_backup_to_file(backup)); + fput(backup); } /** @@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail); * * Create a backup utilizing shmem objects. * - * Return: A pointer to a struct ttm_backup on success, + * Return: A pointer to a struct file on success, * an error pointer on error. */ -struct ttm_backup *ttm_backup_shmem_create(loff_t size) +struct file *ttm_backup_shmem_create(loff_t size) { - struct file *filp; - - filp = shmem_file_setup("ttm shmem backup", size, 0); - - return ttm_file_to_backup(filp); + return shmem_file_setup("ttm shmem backup", size, 0); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 95b86003c50d..5bf3c969907c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1093,7 +1093,8 @@ struct ttm_bo_swapout_walk { struct ttm_lru_walk walk; /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */ gfp_t gfp_flags; - + /** @hit_low: Whether we should attempt to swap BO's with low watermark threshold */ + /** @evict_low: If we cannot swap a bo when @try_low is false (first pass) */ bool hit_low, evict_low; }; diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 83b10706ba89..c2ea865be657 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated, * if successful, populate the page-table and dma-address arrays. 
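For orientation, a minimal sketch of the API shape that results from the struct ttm_backup to struct file conversion above; the header names and the SZ_1M size are illustrative assumptions. Creation returns a plain shmem-backed file or an ERR_PTR, and ttm_backup_fini() drops the final reference.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sizes.h>

static struct file *backup_example_create(void)
{
	struct file *backup;

	/* shmem-backed backup object; returns an ERR_PTR on failure */
	backup = ttm_backup_shmem_create(SZ_1M);
	if (IS_ERR(backup))
		return backup;

	/*
	 * backup can now be handed to ttm_backup_backup_page(),
	 * ttm_backup_copy_page() and ttm_backup_drop() as shown above.
	 */
	return backup;
}

static void backup_example_destroy(struct file *backup)
{
	/* After this call the pointer must no longer be used */
	ttm_backup_fini(backup);
}
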
*/ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore, - struct ttm_backup *backup, + struct file *backup, const struct ttm_operation_ctx *ctx, struct ttm_pool_alloc_state *alloc) @@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt, pgoff_t start_page, pgoff_t end_page) { struct page **pages = &tt->pages[start_page]; - struct ttm_backup *backup = tt->backup; + struct file *backup = tt->backup; pgoff_t i, nr; for (i = start_page; i < end_page; i += nr, pages += nr) { @@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt) long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, const struct ttm_backup_flags *flags) { - struct ttm_backup *backup = tt->backup; + struct file *backup = tt->backup; struct page *page; unsigned long handle; gfp_t alloc_gfp; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index df0aa6c4b8b8..698cd4bf5e46 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit); */ int ttm_tt_setup_backup(struct ttm_tt *tt) { - struct ttm_backup *backup = + struct file *backup = ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT); if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))) diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 34c42d6e12cd..eb35482f6fb5 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -428,7 +428,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect); struct drm_v3d_submit_csd *args = &indirect_csd->job->args; - u32 *wg_counts; + struct v3d_dev *v3d = job->base.v3d; + u32 num_batches, *wg_counts; v3d_get_bo_vaddr(bo); v3d_get_bo_vaddr(indirect); @@ -441,8 +442,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT; args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT; args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT; - args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) * - (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1; + + num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) * + (wg_counts[0] * wg_counts[1] * wg_counts[2]); + + /* V3D 7.1.6 and later don't subtract 1 from the number of batches */ + if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6)) + args->cfg[4] = num_batches - 1; + else + args->cfg[4] = num_batches; + + WARN_ON(args->cfg[4] == ~0); for (int i = 0; i < 3; i++) { /* 0xffffffff indicates that the uniform rewrite is not needed */ @@ -734,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) return DRM_GPU_SCHED_STAT_NOMINAL; } -/* If the current address or return address have changed, then the GPU - * has probably made progress and we should delay the reset. This - * could fail if the GPU got in an infinite loop in the CL, but that - * is pretty unlikely outside of an i-g-t testcase. 
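As a worked example of the cfg[4] change above: with an indirect wg_size of 64 and workgroup counts of 4 x 4 x 1, DIV_ROUND_UP(64, 16) = 4 batches per workgroup and 4 * (4 * 4 * 1) = 64 batches in total, so hardware before V3D 7.1.6 is programmed with 63 while 7.1.6 and later is programmed with 64. The new WARN_ON(args->cfg[4] == ~0) then presumably flags the degenerate num_batches == 0 case, where the subtracting path would wrap to ~0.
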
- */ +static void +v3d_sched_skip_reset(struct drm_sched_job *sched_job) +{ + struct drm_gpu_scheduler *sched = sched_job->sched; + + spin_lock(&sched->job_list_lock); + list_add(&sched_job->list, &sched->pending_list); + spin_unlock(&sched->job_list_lock); +} + static enum drm_gpu_sched_stat v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 *timedout_ctca, u32 *timedout_ctra) @@ -748,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + /* If the current address or return address have changed, then the GPU + * has probably made progress and we should delay the reset. This + * could fail if the GPU got in an infinite loop in the CL, but that + * is pretty unlikely outside of an i-g-t testcase. + */ if (*timedout_ctca != ctca || *timedout_ctra != ctra) { *timedout_ctca = ctca; *timedout_ctra = ctra; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -790,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) struct v3d_dev *v3d = job->base.v3d; u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver)); - /* If we've made progress, skip reset and let the timer get - * rearmed. + /* If we've made progress, skip reset, add the job to the pending + * list, and let the timer get rearmed. */ if (job->timedout_batches != batches) { job->timedout_batches = batches; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 2d88e390feb4..e32e680c7197 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -128,6 +128,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev) drm_dev_put(dev); } +static void virtio_gpu_shutdown(struct virtio_device *vdev) +{ + /* + * drm does its own synchronization on shutdown. + * Do nothing here, opt out of device reset. 
+ */ +} + static void virtio_gpu_config_changed(struct virtio_device *vdev) { struct drm_device *dev = vdev->priv; @@ -162,6 +170,7 @@ static struct virtio_driver virtio_gpu_driver = { .id_table = id_table, .probe = virtio_gpu_probe, .remove = virtio_gpu_remove, + .shutdown = virtio_gpu_shutdown, .config_changed = virtio_gpu_config_changed }; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index dde8fc1a3689..90c99d83c4cf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -115,13 +115,14 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, if (!vgdev->has_context_init) virtio_gpu_create_context(obj->dev, file); - objs = virtio_gpu_array_alloc(1); - if (!objs) - return -ENOMEM; - virtio_gpu_array_add_obj(objs, obj); + if (vfpriv->context_created) { + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + virtio_gpu_array_add_obj(objs, obj); - if (vfpriv->ctx_id) virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs); + } out_notify: virtio_gpu_notify(vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index a6f5a78f436a..87e584add042 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -366,12 +366,6 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, return 0; obj = new_state->fb->obj[0]; - if (obj->import_attach) { - ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj); - if (ret) - return ret; - } - if (bo->dumb || obj->import_attach) { vgplane_st->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, @@ -380,7 +374,21 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, return -ENOMEM; } + if (obj->import_attach) { + ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj); + if (ret) + goto err_fence; + } + return 0; + +err_fence: + if (vgplane_st->fence) { + dma_fence_put(&vgplane_st->fence->f); + vgplane_st->fence = NULL; + } + + return ret; } static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index fe6a0b018571..4de2a63ccd18 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -321,6 +321,7 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, return ERR_PTR(-ENOMEM); obj = &bo->base.base; + obj->resv = buf->resv; obj->funcs = &virtgpu_gem_dma_buf_funcs; drm_gem_private_object_init(dev, obj, buf->size); diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h index a255946b6f77..8cfcd3360896 100644 --- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h @@ -41,6 +41,7 @@ #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2)) +#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */ #define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */ #define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 167fb0f742de..5a47991b4b81 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -47,6 +47,10 @@ #define MI_LRI_FORCE_POSTED REG_BIT(12) #define MI_LRI_LEN(x) (((x) & 0xff) + 1) +#define MI_STORE_REGISTER_MEM 
(__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4)) +#define MI_SRM_USE_GGTT REG_BIT(22) +#define MI_SRM_ADD_CS_OFFSET REG_BIT(19) + #define MI_FLUSH_DW __MI_INSTR(0x26) #define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22) #define MI_FLUSH_DW_STORE_INDEX REG_BIT(21) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index fb8ec317b6ee..891f928d80ce 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -43,6 +43,10 @@ #define XEHPC_BCS8_RING_BASE 0x3ee000 #define GSCCS_RING_BASE 0x11a000 +#define ENGINE_ID(base) XE_REG((base) + 0x8c) +#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4) +#define ENGINE_CLASS_ID REG_GENMASK(2, 0) + #define RING_TAIL(base) XE_REG((base) + 0x30) #define TAIL_ADDR REG_GENMASK(20, 3) @@ -154,6 +158,7 @@ #define STOP_RING REG_BIT(8) #define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8) +#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac) #define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc) #define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index da1f198ac107..181913967ac9 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -157,6 +157,7 @@ #define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108) #define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED) +#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12) #define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6) #define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED) diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h index 57944f90bbf6..994af591a2e8 100644 --- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h +++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h @@ -11,7 +11,9 @@ #define CTX_RING_TAIL (0x06 + 1) #define CTX_RING_START (0x08 + 1) #define CTX_RING_CTL (0x0a + 1) +#define CTX_BB_PER_CTX_PTR (0x12 + 1) #define CTX_TIMESTAMP (0x22 + 1) +#define CTX_TIMESTAMP_UDW (0x24 + 1) #define CTX_INDIRECT_RING_STATE (0x26 + 1) #define CTX_PDP0_UDW (0x30 + 1) #define CTX_PDP0_LDW (0x32 + 1) diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index ef1e5256c56a..0e502feaca81 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt, unsigned int fw_ref, i; u32 reg_val; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n"); + } for (i = 0; i < info->num_mocs_regs; i++) { if (!(i & 1)) { diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 72ef0b6fc425..0482f26aa480 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -330,6 +330,8 @@ struct xe_device { u8 has_sriov:1; /** @info.has_usm: Device has unified shared memory support */ u8 has_usm:1; + /** @info.has_64bit_timestamp: Device supports 64-bit timestamps */ + u8 has_64bit_timestamp:1; /** @info.is_dgfx: is discrete device */ u8 is_dgfx:1; /** @@ -585,6 +587,7 @@ struct xe_device { INTEL_DRAM_DDR5, INTEL_DRAM_LPDDR5, INTEL_DRAM_GDDR, + INTEL_DRAM_GDDR_ECC, } type; u8 num_qgv_points; u8 
num_psf_gv_points; diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index f67803e15a0e..f7a20264ea33 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - struct dma_buf *dma_buf = attach->dmabuf; - struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv); - - if (!xe_bo_is_vram(bo)) { + if (sg_page(sgt->sgl)) { dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index f2bb9168967c..e2bb156c71fb 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -52,6 +52,8 @@ struct xe_eu_stall_data_stream { struct xe_gt *gt; struct xe_bo *bo; + /* Lock to protect data buffer pointers */ + struct mutex xecore_buf_lock; struct per_xecore_buf *xecore_buf; struct { bool reported_to_user; @@ -208,6 +210,9 @@ int xe_eu_stall_init(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); int ret; + if (!xe_eu_stall_supported_on_platform(xe)) + return 0; + gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL); if (!gt->eu_stall) { ret = -ENOMEM; @@ -378,7 +383,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream) u16 group, instance; unsigned int xecore; - mutex_lock(>->eu_stall->stream_lock); + mutex_lock(&stream->xecore_buf_lock); for_each_dss_steering(xecore, gt, group, instance) { xecore_buf = &stream->xecore_buf[xecore]; read_ptr = xecore_buf->read; @@ -396,7 +401,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream) set_bit(xecore, stream->data_drop.mask); xecore_buf->write = write_ptr; } - mutex_unlock(>->eu_stall->stream_lock); + mutex_unlock(&stream->xecore_buf_lock); return min_data_present; } @@ -511,11 +516,13 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st unsigned int xecore; int ret = 0; + mutex_lock(&stream->xecore_buf_lock); if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) { if (!stream->data_drop.reported_to_user) { stream->data_drop.reported_to_user = true; xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n", XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask); + mutex_unlock(&stream->xecore_buf_lock); return -EIO; } stream->data_drop.reported_to_user = false; @@ -527,6 +534,7 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st if (ret || count == total_size) break; } + mutex_unlock(&stream->xecore_buf_lock); return total_size ?: (ret ?: -EAGAIN); } @@ -583,6 +591,7 @@ static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream) { struct xe_gt *gt = stream->gt; + mutex_destroy(&stream->xecore_buf_lock); gt->eu_stall->stream = NULL; kfree(stream); } @@ -718,6 +727,7 @@ static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream, } init_waitqueue_head(&stream->poll_wq); + mutex_init(&stream->xecore_buf_lock); INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn); stream->per_xecore_buf_size = per_xecore_buf_size; stream->sampling_rate_mult = props->sampling_rate_mult; diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h index ed9d0f233566..d1c76e503799 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.h +++ b/drivers/gpu/drm/xe/xe_eu_stall.h @@ -7,6 +7,7 @@ #define __XE_EU_STALL_H__ #include "xe_gt_types.h" +#include "xe_sriov.h" size_t 
xe_eu_stall_get_per_xecore_buf_size(void); size_t xe_eu_stall_data_record_size(struct xe_device *xe); @@ -19,6 +20,6 @@ int xe_eu_stall_stream_open(struct drm_device *dev, static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe) { - return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20; + return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20); } #endif diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 606922d9dd73..cd9b1c32f30f 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -830,7 +830,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) { struct xe_device *xe = gt_to_xe(q->gt); struct xe_lrc *lrc; - u32 old_ts, new_ts; + u64 old_ts, new_ts; int idx; /* diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index fd41113f8572..0bcf97063ff6 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -555,6 +555,28 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc) flush_work(&gsc->work); } +void xe_gsc_stop_prepare(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + int ret; + + if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw)) + return; + + xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC); + + /* + * If the GSC FW load or the proxy init are interrupted, the only way + * to recover it is to do an FLR and reload the GSC from scratch. + * Therefore, let's wait for the init to complete before stopping + * operations. The proxy init is the last step, so we can just wait on + * that + */ + ret = xe_gsc_wait_for_proxy_init_done(gsc); + if (ret) + xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n"); +} + /* * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a * GSC engine reset by writing a notification bit in the GS1 register and then diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h index d99f66c38075..b8b8e0810ad9 100644 --- a/drivers/gpu/drm/xe/xe_gsc.h +++ b/drivers/gpu/drm/xe/xe_gsc.h @@ -16,6 +16,7 @@ struct xe_hw_engine; int xe_gsc_init(struct xe_gsc *gsc); int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc); void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc); +void xe_gsc_stop_prepare(struct xe_gsc *gsc); void xe_gsc_load_start(struct xe_gsc *gsc); void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec); diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c index 8cf70b228ff3..d0519cd6704a 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc) HECI1_FWSTS1_PROXY_STATE_NORMAL; } +int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + + /* Proxy init can take up to 500ms, so wait double that for safety */ + return xe_mmio_wait32(>->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE), + HECI1_FWSTS1_CURRENT_STATE, + HECI1_FWSTS1_PROXY_STATE_NORMAL, + USEC_PER_SEC, NULL, false); +} + static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set) { struct xe_gt *gt = gsc_to_gt(gsc); diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h index fdef56995cd4..765602221dbc 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.h +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h @@ -12,6 +12,7 @@ struct xe_gsc; int xe_gsc_proxy_init(struct xe_gsc *gsc); bool xe_gsc_proxy_init_done(struct xe_gsc *gsc); +int 
xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc); int xe_gsc_proxy_start(struct xe_gsc *gsc); int xe_gsc_proxy_request_handler(struct xe_gsc *gsc); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 10a9e3c72b36..66198cf2662c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -857,7 +857,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - xe_uc_stop_prepare(>->uc); + xe_uc_suspend_prepare(>->uc); xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index 2d63a69cbfa3..f7005a3643e6 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p) struct xe_hw_engine *hwe; enum xe_hw_engine_id id; unsigned int fw_ref; + int ret = 0; xe_pm_runtime_get(xe); fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - xe_pm_runtime_put(xe); - xe_force_wake_put(gt_to_fw(gt), fw_ref); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto fw_put; } for_each_hw_engine(hwe, gt, id) xe_hw_engine_print(hwe, p); +fw_put: xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); - return 0; + return ret; } static int powergate_info(struct xe_gt *gt, struct drm_printer *p) diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index c5ad9a0a89c2..0c22b3a36655 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -435,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss, XE_MAX_EU_FUSE_BITS) * num_dss; - /* user can issue separate page faults per EU and per CS */ + /* + * user can issue separate page faults per EU and per CS + * + * XXX: Multiplier required as compute UMD are getting PF queue errors + * without it. Follow on why this multiplier is required. + */ +#define PF_MULTIPLIER 8 pf_queue->num_dw = - (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW; + (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; +#undef PF_MULTIPLIER pf_queue->gt = gt; pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw, diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 03072e094991..084cbdeba8ea 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -322,6 +322,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) return 0; } +/* + * Ensure that roundup_pow_of_two(length) doesn't overflow. + * Note that roundup_pow_of_two() operates on unsigned long, + * not on u64. 
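For reference, MAX_RANGE_TLB_INVALIDATION_LENGTH evaluates to rounddown_pow_of_two(ULONG_MAX), i.e. 2^63 on a 64-bit kernel, so any length at or below it can be passed to roundup_pow_of_two() without overflowing unsigned long; longer ranges fall back to the full-GT invalidation in the hunk that follows.
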
+ */ +#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX)) + /** * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an * address range @@ -346,6 +353,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_device *xe = gt_to_xe(gt); #define MAX_TLB_INVALIDATION_LEN 7 u32 action[MAX_TLB_INVALIDATION_LEN]; + u64 length = end - start; int len = 0; xe_gt_assert(gt, fence); @@ -358,11 +366,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, action[len++] = XE_GUC_ACTION_TLB_INVALIDATION; action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */ - if (!xe->info.has_range_tlb_invalidation) { + if (!xe->info.has_range_tlb_invalidation || + length > MAX_RANGE_TLB_INVALIDATION_LENGTH) { action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL); } else { u64 orig_start = start; - u64 length = end - start; u64 align; if (length < SZ_4K) diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index e7c9e095a19f..7031542a70ce 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -490,24 +490,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt, engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER)); } -static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads) +/* + * Write the offsets corresponding to the golden LRCs. The actual data is + * populated later by guc_golden_lrc_populate() + */ +static void guc_golden_lrc_init(struct xe_guc_ads *ads) { struct xe_device *xe = ads_to_xe(ads); + struct xe_gt *gt = ads_to_gt(ads); struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads), offsetof(struct __guc_ads_blob, system_info)); - u8 guc_class; + size_t alloc_size, real_size; + u32 addr_ggtt, offset; + int class; + + offset = guc_ads_golden_lrc_offset(ads); + addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset; + + for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) { + u8 guc_class; + + guc_class = xe_engine_class_to_guc_class(class); - for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) { if (!info_map_read(xe, &info_map, engine_enabled_masks[guc_class])) continue; + real_size = xe_gt_lrc_size(gt, class); + alloc_size = PAGE_ALIGN(real_size); + + /* + * This interface is slightly confusing. We need to pass the + * base address of the full golden context and the size of just + * the engine state, which is the section of the context image + * that starts after the execlists LRC registers. This is + * required to allow the GuC to restore just the engine state + * when a watchdog reset occurs. + * We calculate the engine state size by removing the size of + * what comes before it in the context image (which is identical + * on all engines). 
+ */ ads_blob_write(ads, ads.eng_state_size[guc_class], - guc_ads_golden_lrc_size(ads) - - xe_lrc_skip_size(xe)); + real_size - xe_lrc_skip_size(xe)); ads_blob_write(ads, ads.golden_context_lrca[guc_class], - xe_bo_ggtt_addr(ads->bo) + - guc_ads_golden_lrc_offset(ads)); + addr_ggtt); + + addr_ggtt += alloc_size; } } @@ -857,7 +885,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads) xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); guc_policies_init(ads); - guc_prep_golden_lrc_null(ads); + guc_golden_lrc_init(ads); guc_mapping_table_init_invalid(gt, &info_map); guc_doorbell_init(ads); @@ -883,7 +911,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) guc_policies_init(ads); fill_engine_enable_masks(gt, &info_map); guc_mmio_reg_state_init(ads); - guc_prep_golden_lrc_null(ads); + guc_golden_lrc_init(ads); guc_mapping_table_init(gt, &info_map); guc_capture_prep_lists(ads); guc_doorbell_init(ads); @@ -903,18 +931,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) guc_ads_private_data_offset(ads)); } -static void guc_populate_golden_lrc(struct xe_guc_ads *ads) +/* + * After the golden LRC's are recorded for each engine class by the first + * submission, copy them to the ADS, as initialized earlier by + * guc_golden_lrc_init(). + */ +static void guc_golden_lrc_populate(struct xe_guc_ads *ads) { struct xe_device *xe = ads_to_xe(ads); struct xe_gt *gt = ads_to_gt(ads); struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads), offsetof(struct __guc_ads_blob, system_info)); size_t total_size = 0, alloc_size, real_size; - u32 addr_ggtt, offset; + u32 offset; int class; offset = guc_ads_golden_lrc_offset(ads); - addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset; for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) { u8 guc_class; @@ -931,26 +963,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) alloc_size = PAGE_ALIGN(real_size); total_size += alloc_size; - /* - * This interface is slightly confusing. We need to pass the - * base address of the full golden context and the size of just - * the engine state, which is the section of the context image - * that starts after the execlists LRC registers. This is - * required to allow the GuC to restore just the engine state - * when a watchdog reset occurs. - * We calculate the engine state size by removing the size of - * what comes before it in the context image (which is identical - * on all engines). 
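The net effect of the split above is a two-phase flow: guc_golden_lrc_init() runs whenever the ADS is written (both xe_guc_ads_populate() and xe_guc_ads_populate_minimal(), as the later hunks show) and records only the per-class engine-state sizes and GGTT addresses, while guc_golden_lrc_populate(), invoked from xe_guc_ads_populate_post_load() once the default LRC images have been captured, copies the actual context images into the space those addresses point at.
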
- */ - ads_blob_write(ads, ads.eng_state_size[guc_class], - real_size - xe_lrc_skip_size(xe)); - ads_blob_write(ads, ads.golden_context_lrca[guc_class], - addr_ggtt); - xe_map_memcpy_to(xe, ads_to_map(ads), offset, gt->default_lrc[class], real_size); - addr_ggtt += alloc_size; offset += alloc_size; } @@ -959,7 +974,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads) { - guc_populate_golden_lrc(ads); + guc_golden_lrc_populate(ads); } static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset) diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index f6d523e4c5fe..9095618648bc 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -359,7 +359,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, ext->reg = XE_REG(extlist->reg.__reg.addr); ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1); - ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); + ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); ext->regname = extlist->name; } diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 85215313976c..43b1192ba61c 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -1070,6 +1070,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, SLPC_RESET_EXTENDED_TIMEOUT_MS)) { xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n"); + ret = -EIO; goto out; } diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 31bc2022bfc2..769781d577df 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -941,7 +941,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) return xe_sched_invalidate_job(job, 2); } - ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); + ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0])); ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); /* diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c index c3cc0fa105e8..57b71956ddf4 100644 --- a/drivers/gpu/drm/xe/xe_hmm.c +++ b/drivers/gpu/drm/xe/xe_hmm.c @@ -19,29 +19,6 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end) return (end - start) >> PAGE_SHIFT; } -/** - * xe_mark_range_accessed() - mark a range is accessed, so core mm - * have such information for memory eviction or write back to - * hard disk - * @range: the range to mark - * @write: if write to this range, we mark pages in this range - * as dirty - */ -static void xe_mark_range_accessed(struct hmm_range *range, bool write) -{ - struct page *page; - u64 i, npages; - - npages = xe_npages_in_range(range->start, range->end); - for (i = 0; i < npages; i++) { - page = hmm_pfn_to_page(range->hmm_pfns[i]); - if (write) - set_page_dirty_lock(page); - - mark_page_accessed(page); - } -} - static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st, struct hmm_range *range, struct rw_semaphore *notifier_sem) { @@ -331,7 +308,6 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, if (ret) goto out_unlock; - xe_mark_range_accessed(&hmm_range, write); userptr->sg = &userptr->sgt; xe_hmm_userptr_set_mapped(uvma); userptr->notifier_seq = hmm_range.notifier_seq; diff --git 
a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 8c05fd30b7df..93241fd0a4ba 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -389,12 +389,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe) blit_cctl_val, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, - /* Use Fixed slice CCS mode */ - { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"), - XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)), - XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE, - RCU_MODE_FIXED_SLICE_CCS_MODE)) - }, /* Disable WMTP if HW doesn't support it */ { XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"), XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)), @@ -461,6 +455,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe) XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, + /* Use Fixed slice CCS mode */ + { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"), + XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)), + XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE, + RCU_MODE_FIXED_SLICE_CCS_MODE)) + }, }; xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr); diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c index b53e8d2accdb..a440442b4d72 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c @@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max) return timeout >= min && timeout <= max; } -static void kobj_xe_hw_engine_release(struct kobject *kobj) +static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj) { kfree(kobj); } +static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct xe_device *xe = kobj_to_xe(kobj); + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->show) { + xe_pm_runtime_get(xe); + ret = kattr->show(kobj, kattr, buf); + xe_pm_runtime_put(xe); + } + + return ret; +} + +static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t count) +{ + struct xe_device *xe = kobj_to_xe(kobj); + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->store) { + xe_pm_runtime_get(xe); + ret = kattr->store(kobj, kattr, buf, count); + xe_pm_runtime_put(xe); + } + + return ret; +} + +static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = { + .show = xe_hw_engine_class_sysfs_attr_show, + .store = xe_hw_engine_class_sysfs_attr_store, +}; + static const struct kobj_type kobj_xe_hw_engine_type = { - .release = kobj_xe_hw_engine_release, - .sysfs_ops = &kobj_sysfs_ops + .release = xe_hw_engine_sysfs_kobj_release, + .sysfs_ops = &xe_hw_engine_class_sysfs_ops, +}; + +static const struct kobj_type kobj_xe_hw_engine_type_def = { + .release = xe_hw_engine_sysfs_kobj_release, + .sysfs_ops = &kobj_sysfs_ops, }; static ssize_t job_timeout_max_store(struct kobject *kobj, @@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe, if (!kobj) return -ENOMEM; - kobject_init(kobj, &kobj_xe_hw_engine_type); + kobject_init(kobj, &kobj_xe_hw_engine_type_def); err = kobject_add(kobj, parent, "%s", ".defaults"); if (err) goto err_object; @@ -559,57 +606,6 @@ err_object: return err; } -static void 
xe_hw_engine_sysfs_kobj_release(struct kobject *kobj) -{ - kfree(kobj); -} - -static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct xe_device *xe = kobj_to_xe(kobj); - struct kobj_attribute *kattr; - ssize_t ret = -EIO; - - kattr = container_of(attr, struct kobj_attribute, attr); - if (kattr->show) { - xe_pm_runtime_get(xe); - ret = kattr->show(kobj, kattr, buf); - xe_pm_runtime_put(xe); - } - - return ret; -} - -static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, - size_t count) -{ - struct xe_device *xe = kobj_to_xe(kobj); - struct kobj_attribute *kattr; - ssize_t ret = -EIO; - - kattr = container_of(attr, struct kobj_attribute, attr); - if (kattr->store) { - xe_pm_runtime_get(xe); - ret = kattr->store(kobj, kattr, buf, count); - xe_pm_runtime_put(xe); - } - - return ret; -} - -static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = { - .show = xe_hw_engine_class_sysfs_attr_show, - .store = xe_hw_engine_class_sysfs_attr_store, -}; - -static const struct kobj_type xe_hw_engine_sysfs_kobj_type = { - .release = xe_hw_engine_sysfs_kobj_release, - .sysfs_ops = &xe_hw_engine_class_sysfs_ops, -}; static void hw_engine_class_sysfs_fini(void *arg) { @@ -640,7 +636,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt) if (!kobj) return -ENOMEM; - kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type); + kobject_init(kobj, &kobj_xe_hw_engine_type); err = kobject_add(kobj, gt->sysfs, "engines"); if (err) diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index df3ceddede07..03bfba696b37 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -24,6 +24,7 @@ #include "xe_hw_fence.h" #include "xe_map.h" #include "xe_memirq.h" +#include "xe_mmio.h" #include "xe_sriov.h" #include "xe_trace_lrc.h" #include "xe_vm.h" @@ -650,6 +651,7 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc) #define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8) #define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8) #define LRC_PARALLEL_PPHWSP_OFFSET 2048 +#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096 #define LRC_PPHWSP_SIZE SZ_4K u32 xe_lrc_regs_offset(struct xe_lrc *lrc) @@ -684,7 +686,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc) static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc) { - /* The start seqno is stored in the driver-defined portion of PPHWSP */ + /* This is stored in the driver-defined portion of PPHWSP */ return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET; } @@ -694,11 +696,21 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc) return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET; } +static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc) +{ + return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET; +} + static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc) { return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32); } +static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc) +{ + return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32); +} + static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc) { /* Indirect ring state page is at the very end of LRC */ @@ -726,8 +738,10 @@ DECL_MAP_ADDR_HELPERS(regs) DECL_MAP_ADDR_HELPERS(start_seqno) DECL_MAP_ADDR_HELPERS(ctx_job_timestamp) DECL_MAP_ADDR_HELPERS(ctx_timestamp) +DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw) DECL_MAP_ADDR_HELPERS(parallel) 
DECL_MAP_ADDR_HELPERS(indirect_ring) +DECL_MAP_ADDR_HELPERS(engine_id) #undef DECL_MAP_ADDR_HELPERS @@ -743,18 +757,37 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc) } /** + * xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address + * @lrc: Pointer to the lrc. + * + * Returns: ctx timestamp udw GGTT address + */ +u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc) +{ + return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc); +} + +/** * xe_lrc_ctx_timestamp() - Read ctx timestamp value * @lrc: Pointer to the lrc. * * Returns: ctx timestamp value */ -u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc) +u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc) { struct xe_device *xe = lrc_to_xe(lrc); struct iosys_map map; + u32 ldw, udw = 0; map = __xe_lrc_ctx_timestamp_map(lrc); - return xe_map_read32(xe, &map); + ldw = xe_map_read32(xe, &map); + + if (xe->info.has_64bit_timestamp) { + map = __xe_lrc_ctx_timestamp_udw_map(lrc); + udw = xe_map_read32(xe, &map); + } + + return (u64)udw << 32 | ldw; } /** @@ -864,7 +897,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe) static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm) { - u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile); + u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt)); xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc)); xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc)); @@ -877,6 +910,65 @@ static void xe_lrc_finish(struct xe_lrc *lrc) xe_bo_unpin(lrc->bo); xe_bo_unlock(lrc->bo); xe_bo_put(lrc->bo); + xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo); +} + +/* + * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active + * context run ticks. + * @lrc: Pointer to the lrc. + * + * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the + * context, but only gets updated when the context switches out. In order to + * check how long a context has been active before it switches out, two things + * are required: + * + * (1) Determine if the context is running: + * To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in + * the LRC. The value chosen is 1 since 0 is the initial value when the LRC is + * initialized. During a query, we just check for this value to determine if the + * context is active. If the context switched out, it would overwrite this + * location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as + * the last part of context restore, so reusing this LRC location will not + * clobber anything. + * + * (2) Calculate the time that the context has been active for: + * The CTX_TIMESTAMP ticks only when the context is active. If a context is + * active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization. + * While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific + * engine instance. Since we do not know which instance the context is running + * on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and + * store it in the PPHSWP. 
+ */ +#define CONTEXT_ACTIVE 1ULL +static void xe_lrc_setup_utilization(struct xe_lrc *lrc) +{ + u32 *cmd; + + cmd = lrc->bb_per_ctx_bo->vmap.vaddr; + + *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET; + *cmd++ = ENGINE_ID(0).addr; + *cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc); + *cmd++ = 0; + + *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); + *cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc); + *cmd++ = 0; + *cmd++ = lower_32_bits(CONTEXT_ACTIVE); + + if (lrc_to_xe(lrc)->info.has_64bit_timestamp) { + *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); + *cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc); + *cmd++ = 0; + *cmd++ = upper_32_bits(CONTEXT_ACTIVE); + } + + *cmd++ = MI_BATCH_BUFFER_END; + + xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, + xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1); + } #define PVC_CTX_ASID (0x2e + 1) @@ -893,31 +985,40 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, void *init_data = NULL; u32 arb_enable; u32 lrc_size; + u32 bo_flags; int err; kref_init(&lrc->refcount); + lrc->gt = gt; lrc->flags = 0; lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class); if (xe_gt_has_indirect_ring_state(gt)) lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE; + bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE; + /* * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address * via VM bind calls. */ lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + bo_flags); if (IS_ERR(lrc->bo)) return PTR_ERR(lrc->bo); + lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, + ttm_bo_type_kernel, + bo_flags); + if (IS_ERR(lrc->bb_per_ctx_bo)) { + err = PTR_ERR(lrc->bb_per_ctx_bo); + goto err_lrc_finish; + } + lrc->size = lrc_size; - lrc->tile = gt_to_tile(hwe->gt); lrc->ring.size = ring_size; lrc->ring.tail = 0; - lrc->ctx_timestamp = 0; xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt, hwe->fence_irq, hwe->name); @@ -990,7 +1091,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) | _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE)); + lrc->ctx_timestamp = 0; xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0); + if (lrc_to_xe(lrc)->info.has_64bit_timestamp) + xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0); if (xe->info.has_asid && vm) xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid); @@ -1019,6 +1123,8 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, map = __xe_lrc_start_seqno_map(lrc); xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1); + xe_lrc_setup_utilization(lrc); + return 0; err_lrc_finish: @@ -1238,6 +1344,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc) return __xe_lrc_parallel_map(lrc); } +/** + * xe_lrc_engine_id() - Read engine id value + * @lrc: Pointer to the lrc. 
+ * + * Returns: context id value + */ +static u32 xe_lrc_engine_id(struct xe_lrc *lrc) +{ + struct xe_device *xe = lrc_to_xe(lrc); + struct iosys_map map; + + map = __xe_lrc_engine_id_map(lrc); + return xe_map_read32(xe, &map); +} + static int instr_dw(u32 cmd_header) { /* GFXPIPE "SINGLE_DW" opcodes are a single dword */ @@ -1684,7 +1805,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc); snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset; snapshot->lrc_snapshot = NULL; - snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc); + snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc)); snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc); return snapshot; } @@ -1784,22 +1905,74 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot) kfree(snapshot); } +static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts) +{ + u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id); + u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id); + struct xe_hw_engine *hwe; + u64 val; + + hwe = xe_gt_hw_engine(lrc->gt, class, instance, false); + if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe), + "Unexpected engine class:instance %d:%d for context utilization\n", + class, instance)) + return -1; + + if (lrc_to_xe(lrc)->info.has_64bit_timestamp) + val = xe_mmio_read64_2x32(&hwe->gt->mmio, + RING_CTX_TIMESTAMP(hwe->mmio_base)); + else + val = xe_mmio_read32(&hwe->gt->mmio, + RING_CTX_TIMESTAMP(hwe->mmio_base)); + + *reg_ctx_ts = val; + + return 0; +} + /** * xe_lrc_update_timestamp() - Update ctx timestamp * @lrc: Pointer to the lrc. * @old_ts: Old timestamp value * * Populate @old_ts current saved ctx timestamp, read new ctx timestamp and - * update saved value. + * update saved value. With support for active contexts, the calculation may be + * slightly racy, so follow a read-again logic to ensure that the context is + * still active before returning the right timestamp. * * Returns: New ctx timestamp value */ -u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts) +u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts) { + u64 lrc_ts, reg_ts; + u32 engine_id; + *old_ts = lrc->ctx_timestamp; - lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc); + lrc_ts = xe_lrc_ctx_timestamp(lrc); + /* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */ + if (IS_SRIOV_VF(lrc_to_xe(lrc))) { + lrc->ctx_timestamp = lrc_ts; + goto done; + } + + if (lrc_ts == CONTEXT_ACTIVE) { + engine_id = xe_lrc_engine_id(lrc); + if (!get_ctx_timestamp(lrc, engine_id, ®_ts)) + lrc->ctx_timestamp = reg_ts; + + /* read lrc again to ensure context is still active */ + lrc_ts = xe_lrc_ctx_timestamp(lrc); + } + + /* + * If context switched out, just use the lrc_ts. Note that this needs to + * be a separate if condition. 
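Spelling out why the note above insists on a separate if rather than an else: the live RING_CTX_TIMESTAMP MMIO is only sampled while the LRC still holds CONTEXT_ACTIVE, and the LRC is then read a second time so that a context switch racing with the query is caught; in that case the freshly saved LRC value, not the possibly stale MMIO sample, becomes the stored timestamp.
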
+ */ + if (lrc_ts != CONTEXT_ACTIVE) + lrc->ctx_timestamp = lrc_ts; +done: trace_xe_lrc_update_timestamp(lrc, *old_ts); return lrc->ctx_timestamp; diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index 0b40f349ab95..eb6e8de8c939 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -120,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot); u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc); -u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc); +u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc); +u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc); u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc); u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc); @@ -136,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc); * * Returns the current LRC timestamp */ -u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts); +u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts); #endif diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h index 71ecb453f811..ae24cf6f8dd9 100644 --- a/drivers/gpu/drm/xe/xe_lrc_types.h +++ b/drivers/gpu/drm/xe/xe_lrc_types.h @@ -25,8 +25,8 @@ struct xe_lrc { /** @size: size of lrc including any indirect ring state page */ u32 size; - /** @tile: tile which this LRC belongs to */ - struct xe_tile *tile; + /** @gt: gt which this LRC belongs to */ + struct xe_gt *gt; /** @flags: LRC flags */ #define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1 @@ -52,7 +52,10 @@ struct xe_lrc { struct xe_hw_fence_ctx fence_ctx; /** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */ - u32 ctx_timestamp; + u64 ctx_timestamp; + + /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */ + struct xe_bo *bb_per_ctx_bo; }; struct xe_lrc_snapshot; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index df4282c71bf0..5a3e89022c38 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1177,7 +1177,7 @@ err: err_sync: /* Sync partial copies if any. FIXME: job_mutex? 
*/ if (fence) { - dma_fence_wait(m->fence, false); + dma_fence_wait(fence, false); dma_fence_put(fence); } @@ -1547,7 +1547,7 @@ void xe_migrate_wait(struct xe_migrate *m) static u32 pte_update_cmd_size(u64 size) { u32 num_dword; - u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE); + u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE); XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER); /* @@ -1558,7 +1558,7 @@ static u32 pte_update_cmd_size(u64 size) * 2 dword for the page table's physical location * 2*n dword for value of pte to fill (each pte entry is 2 dwords) */ - num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff); + num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff); num_dword += entries * 2; return num_dword; diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 70a36e777546..46301f341773 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -75,12 +75,12 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) * is fine as it's going to the root tile's mmio, that's * guaranteed to be initialized earlier in xe_mmio_probe_early() */ - mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR); + mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR); tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1; if (tile_count < xe->info.tile_count) { drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n", - xe->info.tile_count, tile_count); + xe->info.tile_count, tile_count); xe->info.tile_count = tile_count; /* @@ -128,7 +128,7 @@ int xe_mmio_probe_early(struct xe_device *xe) */ xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR); xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0); - if (xe->mmio.regs == NULL) { + if (!xe->mmio.regs) { drm_err(&xe->drm, "failed to map registers\n"); return -EIO; } @@ -309,8 +309,8 @@ u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg) return (u64)udw << 32 | ldw; } -static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, - u32 *out_val, bool atomic, bool expect_match) +static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, + u32 timeout_us, u32 *out_val, bool atomic, bool expect_match) { ktime_t cur = ktime_get_raw(); const ktime_t end = ktime_add_us(cur, timeout_us); diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index 31dade91a089..0c737413fcb6 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -775,22 +775,23 @@ void xe_mocs_init(struct xe_gt *gt) void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); + enum xe_force_wake_domains domain; struct xe_mocs_info table; unsigned int fw_ref, flags; flags = get_mocs_settings(xe, &table); + domain = flags & HAS_LNCF_MOCS ? XE_FORCEWAKE_ALL : XE_FW_GT; xe_pm_runtime_get_noresume(xe); - fw_ref = xe_force_wake_get(gt_to_fw(gt), - flags & HAS_LNCF_MOCS ? 
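On the pte_update_cmd_size() change above: DIV_ROUND_UP expands to a plain C division, which on 32-bit builds turns a u64 dividend into a call to a libgcc helper the kernel does not provide, whereas DIV_U64_ROUND_UP routes the same rounding through div_u64() so the computation stays correct and linkable on all architectures; this reading of the motivation is an assumption, not something stated in the diff itself.
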
- XE_FORCEWAKE_ALL : XE_FW_GT); - if (!fw_ref) + fw_ref = xe_force_wake_get(gt_to_fw(gt), domain); + + if (!xe_force_wake_ref_has_domain(fw_ref, domain)) goto err_fw; table.ops->dump(&table, flags, gt, p); - xe_force_wake_put(gt_to_fw(gt), fw_ref); err_fw: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); } diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index 9f4632e39a1a..e861c694f336 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -29,9 +29,6 @@ struct xe_modparam xe_modparam = { module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600); MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2"); -module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444); -MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault"); - module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444); MODULE_PARM_DESC(force_execlist, "Force Execlist submission"); diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h index 84339e509c80..5a3bfea8b7b4 100644 --- a/drivers/gpu/drm/xe/xe_module.h +++ b/drivers/gpu/drm/xe/xe_module.h @@ -12,7 +12,6 @@ struct xe_modparam { bool force_execlist; bool probe_display; - bool always_migrate_to_vram; u32 force_vram_bar_size; int guc_log_level; char *guc_firmware_path; diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 818f023166d5..f4d108dc49b1 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -140,6 +140,7 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation = 1, \ .has_usm = 1, \ + .has_64bit_timestamp = 1, \ .va_bits = 48, \ .vm_max_level = 4, \ .hw_engine_mask = \ @@ -668,6 +669,7 @@ static int xe_info_init(struct xe_device *xe, xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation; xe->info.has_usm = graphics_desc->has_usm; + xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp; for_each_remote_tile(tile, xe, id) { int err; diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index e9b9bbc138d3..ca6b10d35573 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -21,6 +21,7 @@ struct xe_graphics_desc { u8 has_indirect_ring_state:1; u8 has_range_tlb_invalidation:1; u8 has_usm:1; + u8 has_64bit_timestamp:1; }; struct xe_media_desc { diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index ffaf0d02dc7d..856038553b81 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -2232,11 +2232,19 @@ static void op_commit(struct xe_vm *vm, } case DRM_GPUVA_OP_DRIVER: { + /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */ + if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { - op->map_range.range->tile_present |= BIT(tile->id); - op->map_range.range->tile_invalidated &= ~BIT(tile->id); + WRITE_ONCE(op->map_range.range->tile_present, + op->map_range.range->tile_present | + BIT(tile->id)); + WRITE_ONCE(op->map_range.range->tile_invalidated, + op->map_range.range->tile_invalidated & + ~BIT(tile->id)); } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) { - op->unmap_range.range->tile_present &= ~BIT(tile->id); + WRITE_ONCE(op->unmap_range.range->tile_present, + op->unmap_range.range->tile_present & + ~BIT(tile->id)); } break; } diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.c 
b/drivers/gpu/drm/xe/xe_pxp_debugfs.c index ccfbacf08efc..525a2f6bb076 100644 --- a/drivers/gpu/drm/xe/xe_pxp_debugfs.c +++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.c @@ -66,9 +66,18 @@ static int pxp_terminate(struct seq_file *m, void *data) { struct xe_pxp *pxp = node_to_pxp(m->private); struct drm_printer p = drm_seq_file_printer(m); + int ready = xe_pxp_get_readiness_status(pxp); - if (!xe_pxp_is_enabled(pxp)) - return -ENODEV; + if (ready < 0) + return ready; /* disabled or error occurred */ + else if (!ready) + return -EBUSY; /* init still in progress */ + + /* no need for a termination if PXP is not active */ + if (pxp->status != XE_PXP_ACTIVE) { + drm_printf(&p, "PXP not active\n"); + return 0; + } /* simulate a termination interrupt */ spin_lock_irq(&pxp->xe->irq.lock); diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 917fc16de866..bc1689db4cd7 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -137,7 +137,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw, int i) { - u32 flags = PIPE_CONTROL_CS_STALL | + u32 flags0 = 0; + u32 flags1 = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_COMMAND_CACHE_INVALIDATE | PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | @@ -148,11 +149,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw, PIPE_CONTROL_STORE_DATA_INDEX; if (invalidate_tlb) - flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags1 |= PIPE_CONTROL_TLB_INVALIDATE; - flags &= ~mask_flags; + flags1 &= ~mask_flags; - return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0); + if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE) + flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE; + + return emit_pipe_control(dw, i, flags0, flags1, + LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0); } static int emit_store_imm_ppgtt_posted(u64 addr, u64 value, @@ -229,13 +234,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job) static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i) { - dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT | - MI_COPY_MEM_MEM_DST_GGTT; + dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET; + dw[i++] = RING_CTX_TIMESTAMP(0).addr; dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc); dw[i++] = 0; - dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc); - dw[i++] = 0; - dw[i++] = MI_NOOP; return i; } diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c index 8184390f9c7b..86d47aaf0358 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.c +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -227,7 +227,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe) if (!shrinker) return ERR_PTR(-ENOMEM); - shrinker->shrink = shrinker_alloc(0, "xe system shrinker"); + shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique); if (!shrinker->shrink) { kfree(shrinker); return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 3e829c87d7b4..975094c1a582 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -15,8 +15,17 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range) { - /* Not reliable without notifier lock */ - return range->base.flags.has_devmem_pages; + /* + * Advisory only check whether the range is currently backed by VRAM + * memory. 
+ */ + + struct drm_gpusvm_range_flags flags = { + /* Pairs with WRITE_ONCE in drm_gpusvm.c */ + .__flags = READ_ONCE(range->base.flags.__flags), + }; + + return flags.has_devmem_pages; } static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range) @@ -79,7 +88,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm) range = kzalloc(sizeof(*range), GFP_KERNEL); if (!range) - return ERR_PTR(-ENOMEM); + return NULL; INIT_LIST_HEAD(&range->garbage_collector_link); xe_vm_get(gpusvm_to_vm(gpusvm)); @@ -645,9 +654,16 @@ void xe_svm_fini(struct xe_vm *vm) } static bool xe_svm_range_is_valid(struct xe_svm_range *range, - struct xe_tile *tile) + struct xe_tile *tile, + bool devmem_only) { - return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id); + /* + * Advisory only check whether the range currently has a valid mapping, + * READ_ONCE pairs with WRITE_ONCE in xe_pt.c + */ + return ((READ_ONCE(range->tile_present) & + ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) && + (!devmem_only || xe_svm_range_in_vram(range)); } static struct xe_vram_region *tile_to_vr(struct xe_tile *tile) @@ -696,11 +712,14 @@ retry: list_for_each_entry(block, blocks, link) block->private = vr; + xe_bo_get(bo); err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base, &bo->devmem_allocation, ctx); - xe_bo_unlock(bo); if (err) - xe_bo_put(bo); /* Creation ref */ + xe_svm_devmem_release(&bo->devmem_allocation); + + xe_bo_unlock(bo); + xe_bo_put(bo); unlock: mmap_read_unlock(mm); @@ -709,6 +728,36 @@ unlock: return err; } +static bool supports_4K_migration(struct xe_device *xe) +{ + if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) + return false; + + return true; +} + +static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, + struct xe_vma *vma) +{ + struct xe_vm *vm = range_to_vm(&range->base); + u64 range_size = xe_svm_range_size(range); + + if (!range->base.flags.migrate_devmem) + return false; + + if (xe_svm_range_in_vram(range)) { + drm_dbg(&vm->xe->drm, "Range is already in VRAM\n"); + return false; + } + + if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) { + drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); + return false; + } + + return true; +} + /** * xe_svm_handle_pagefault() - SVM handle page fault * @vm: The VM. @@ -732,11 +781,16 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), .check_pages_threshold = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0, + .devmem_only = atomic && IS_DGFX(vm->xe) && + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), + .timeslice_ms = atomic && IS_DGFX(vm->xe) && + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0, }; struct xe_svm_range *range; struct drm_gpusvm_range *r; struct drm_exec exec; struct dma_fence *fence; + int migrate_try_count = ctx.devmem_only ? 
3 : 1; ktime_t end = 0; int err; @@ -755,24 +809,31 @@ retry: if (IS_ERR(r)) return PTR_ERR(r); + if (ctx.devmem_only && !r->flags.migrate_devmem) + return -EACCES; + range = to_xe_range(r); - if (xe_svm_range_is_valid(range, tile)) + if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) return 0; range_debug(range, "PAGE FAULT"); - /* XXX: Add migration policy, for now migrate range once */ - if (!range->skip_migrate && range->base.flags.migrate_devmem && - xe_svm_range_size(range) >= SZ_64K) { - range->skip_migrate = true; - + if (--migrate_try_count >= 0 && + xe_svm_range_needs_migrate_to_vram(range, vma)) { err = xe_svm_alloc_vram(vm, tile, range, &ctx); + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ if (err) { - drm_dbg(&vm->xe->drm, - "VRAM allocation failed, falling back to " - "retrying fault, asid=%u, errno=%pe\n", - vm->usm.asid, ERR_PTR(err)); - goto retry; + if (migrate_try_count || !ctx.devmem_only) { + drm_dbg(&vm->xe->drm, + "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n", + vm->usm.asid, ERR_PTR(err)); + goto retry; + } else { + drm_err(&vm->xe->drm, + "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n", + vm->usm.asid, ERR_PTR(err)); + return err; + } } } @@ -780,15 +841,23 @@ retry: err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx); /* Corner where CPU mappings have changed */ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { - if (err == -EOPNOTSUPP) { - range_debug(range, "PAGE FAULT - EVICT PAGES"); - drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ + if (migrate_try_count > 0 || !ctx.devmem_only) { + if (err == -EOPNOTSUPP) { + range_debug(range, "PAGE FAULT - EVICT PAGES"); + drm_gpusvm_range_evict(&vm->svm.gpusvm, + &range->base); + } + drm_dbg(&vm->xe->drm, + "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n", + vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); + range_debug(range, "PAGE FAULT - RETRY PAGES"); + goto retry; + } else { + drm_err(&vm->xe->drm, + "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n", + vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); } - drm_dbg(&vm->xe->drm, - "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n", - vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); - range_debug(range, "PAGE FAULT - RETRY PAGES"); - goto retry; } if (err) { range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT"); @@ -812,6 +881,7 @@ retry_bind: drm_exec_fini(&exec); err = PTR_ERR(fence); if (err == -EAGAIN) { + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ range_debug(range, "PAGE FAULT - RETRY BIND"); goto retry; } @@ -822,9 +892,6 @@ retry_bind: } drm_exec_fini(&exec); - if (xe_modparam.always_migrate_to_vram) - range->skip_migrate = false; - dma_fence_wait(fence, false); dma_fence_put(fence); @@ -944,3 +1011,15 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) return 0; } #endif + +/** + * xe_svm_flush() - SVM flush + * @vm: The VM. + * + * Flush all SVM actions. + */ +void xe_svm_flush(struct xe_vm *vm) +{ + if (xe_vm_in_fault_mode(vm)) + flush_work(&vm->svm.garbage_collector.work); +} diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index e059590e5076..fe58ac2f4baa 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -36,11 +36,6 @@ struct xe_svm_range { * range. Protected by GPU SVM notifier lock. 
*/ u8 tile_invalidated; - /** - * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler - * locking. - */ - u8 skip_migrate :1; }; #if IS_ENABLED(CONFIG_DRM_GPUSVM) @@ -72,6 +67,9 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end); int xe_svm_bo_evict(struct xe_bo *bo); void xe_svm_range_debug(struct xe_svm_range *range, const char *operation); + +void xe_svm_flush(struct xe_vm *vm); + #else static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range) { @@ -124,6 +122,11 @@ static inline void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) { } + +static inline void xe_svm_flush(struct xe_vm *vm) +{ +} + #endif /** diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h index 5c669a0b2180..d525cbee1e34 100644 --- a/drivers/gpu/drm/xe/xe_trace_lrc.h +++ b/drivers/gpu/drm/xe/xe_trace_lrc.h @@ -19,12 +19,12 @@ #define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev) TRACE_EVENT(xe_lrc_update_timestamp, - TP_PROTO(struct xe_lrc *lrc, uint32_t old), + TP_PROTO(struct xe_lrc *lrc, uint64_t old), TP_ARGS(lrc, old), TP_STRUCT__entry( __field(struct xe_lrc *, lrc) - __field(u32, old) - __field(u32, new) + __field(u64, old) + __field(u64, new) __string(name, lrc->fence_ctx.name) __string(device_id, __dev_name_lrc(lrc)) ), @@ -36,7 +36,7 @@ TRACE_EVENT(xe_lrc_update_timestamp, __assign_str(name); __assign_str(device_id); ), - TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s", + TP_printk("lrc=:%p lrc->name=%s old=%llu new=%llu device_id:%s", __entry->lrc, __get_str(name), __entry->old, __entry->new, __get_str(device_id)) diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c index c14bd2282044..3a8751a8b92d 100644 --- a/drivers/gpu/drm/xe/xe_uc.c +++ b/drivers/gpu/drm/xe/xe_uc.c @@ -244,7 +244,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc) void xe_uc_stop_prepare(struct xe_uc *uc) { - xe_gsc_wait_for_worker_completion(&uc->gsc); + xe_gsc_stop_prepare(&uc->gsc); xe_guc_stop_prepare(&uc->guc); } @@ -278,6 +278,12 @@ again: goto again; } +void xe_uc_suspend_prepare(struct xe_uc *uc) +{ + xe_gsc_wait_for_worker_completion(&uc->gsc); + xe_guc_stop_prepare(&uc->guc); +} + int xe_uc_suspend(struct xe_uc *uc) { /* GuC submission not enabled, nothing to do */ diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h index 3813c1ede450..c23e6f5e2514 100644 --- a/drivers/gpu/drm/xe/xe_uc.h +++ b/drivers/gpu/drm/xe/xe_uc.h @@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc); void xe_uc_stop_prepare(struct xe_uc *uc); void xe_uc_stop(struct xe_uc *uc); int xe_uc_start(struct xe_uc *uc); +void xe_uc_suspend_prepare(struct xe_uc *uc); int xe_uc_suspend(struct xe_uc *uc); int xe_uc_sanitize_reset(struct xe_uc *uc); void xe_uc_declare_wedged(struct xe_uc *uc); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 60303998bd61..367c84b90e9e 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3312,8 +3312,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) } /* Ensure all UNMAPs visible */ - if (xe_vm_in_fault_mode(vm)) - flush_work(&vm->svm.garbage_collector.work); + xe_svm_flush(vm); err = down_write_killable(&vm->lock); if (err) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 24f644c0a673..2f833f0d575f 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -815,6 +815,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { 
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) }, + { XE_RTP_NAME("22021007897"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE)) + }, /* Xe3_LPG */ { XE_RTP_NAME("14021490052"), diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 0c738af24f7c..9b9e176992a8 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -32,8 +32,10 @@ GRAPHICS_VERSION(3001) 14022293748 GRAPHICS_VERSION(2001) GRAPHICS_VERSION(2004) + GRAPHICS_VERSION_RANGE(3000, 3001) 22019794406 GRAPHICS_VERSION(2001) GRAPHICS_VERSION(2004) + GRAPHICS_VERSION_RANGE(3000, 3001) 22019338487 MEDIA_VERSION(2000) GRAPHICS_VERSION(2001) MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf) diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs index 17c9660da450..ab0e5a72a059 100644 --- a/drivers/gpu/nova-core/gpu.rs +++ b/drivers/gpu/nova-core/gpu.rs @@ -93,7 +93,7 @@ impl Chipset { // For now, redirect to fmt::Debug for convenience. impl fmt::Display for Chipset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c index 25f0ebfcbd5f..0a9b44ce4904 100644 --- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c +++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c @@ -83,6 +83,9 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata) case ALS_IDX: privdata->dev_en.is_als_present = false; break; + case SRA_IDX: + privdata->dev_en.is_sra_present = false; + break; } if (cl_data->sensor_sts[i] == SENSOR_ENABLED) { @@ -134,9 +137,6 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) for (i = 0; i < cl_data->num_hid_devices; i++) { cl_data->sensor_sts[i] = SENSOR_DISABLED; - if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX) - break; - if (cl_data->sensor_idx[i] == SRA_IDX) { info.sensor_idx = cl_data->sensor_idx[i]; writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0)); @@ -145,8 +145,10 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR); cl_data->sensor_sts[i] = (status == 0) ? 
SENSOR_ENABLED : SENSOR_DISABLED; - if (cl_data->sensor_sts[i] == SENSOR_ENABLED) + if (cl_data->sensor_sts[i] == SENSOR_ENABLED) { + cl_data->is_any_sensor_enabled = true; privdata->dev_en.is_sra_present = true; + } continue; } @@ -238,6 +240,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) cleanup: amd_sfh_hid_client_deinit(privdata); for (i = 0; i < cl_data->num_hid_devices; i++) { + if (cl_data->sensor_idx[i] == SRA_IDX) + continue; devm_kfree(dev, cl_data->feature_report[i]); devm_kfree(dev, in_data->input_report[i]); devm_kfree(dev, cl_data->report_descr[i]); diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index 2e96ec6a3073..9a06f9b0e4ef 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type struct hid_bpf_ops *e; int ret; + if (unlikely(hdev->bpf.destroyed)) + return ERR_PTR(-ENODEV); + if (type >= HID_REPORT_TYPES) return ERR_PTR(-EINVAL); @@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev, struct hid_bpf_ops *e; int ret, idx; + if (unlikely(hdev->bpf.destroyed)) + return -ENODEV; + if (rtype >= HID_REPORT_TYPES) return -EINVAL; @@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev, struct hid_bpf_ops *e; int ret, idx; + if (unlikely(hdev->bpf.destroyed)) + return -ENODEV; + idx = srcu_read_lock(&hdev->bpf.srcu); list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list, srcu_read_lock_held(&hdev->bpf.srcu)) { diff --git a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c index 1a0aeea6a081..a754710fc90b 100644 --- a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c +++ b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c @@ -157,6 +157,7 @@ static const __u8 fixed_rdesc_vendor[] = { ReportCount(5) // padding Input(Const) // Byte 4 in report - just exists so we get to be a tablet pad + UsagePage_Digitizers Usage_Dig_BarrelSwitch // BTN_STYLUS ReportCount(1) ReportSize(1) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 288a2b864cc4..1062731315a2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -41,6 +41,10 @@ #define USB_VENDOR_ID_ACTIONSTAR 0x2101 #define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 +#define USB_VENDOR_ID_ADATA_XPG 0x125f +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505 +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506 + #define USB_VENDOR_ID_ADS_TECH 0x06e1 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 646171598e41..0731473cc9b1 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -27,6 +27,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL }, { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS }, diff --git 
a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index dfd9d22ed559..949d307c66a8 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -1150,11 +1150,9 @@ static void steam_client_ll_close(struct hid_device *hdev) struct steam_device *steam = hdev->driver_data; unsigned long flags; - bool connected; spin_lock_irqsave(&steam->lock, flags); steam->client_opened--; - connected = steam->connected && !steam->client_opened; spin_unlock_irqrestore(&steam->lock, flags); schedule_work(&steam->unregister_work); diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 3b81468a1df2..0bf70664c35e 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev) u8 ep_addr[2] = {b_ep, 0}; if (!usb_check_int_endpoints(usbif, ep_addr)) { + kfree(send_buf); hid_err(hdev, "Unexpected non-int endpoint\n"); return; } diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index a367df6ea01f..61a4019ddc74 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev, suffix = "System Control"; break; } - } - - if (suffix) + } else { hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, suffix); + if (!hi->input->name) + return -ENOMEM; + } return 0; } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 1556d4287fa5..eaf099b2efdb 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -70,10 +70,16 @@ static void wacom_wac_queue_flush(struct hid_device *hdev, { while (!kfifo_is_empty(fifo)) { int size = kfifo_peek_len(fifo); - u8 *buf = kzalloc(size, GFP_KERNEL); + u8 *buf; unsigned int count; int err; + buf = kzalloc(size, GFP_KERNEL); + if (!buf) { + kfifo_skip(fifo); + continue; + } + count = kfifo_out(fifo, buf, size); if (count != size) { // Hard to say what is the "right" action in this @@ -81,6 +87,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev, // to flush seems reasonable enough, however. hid_warn(hdev, "%s: removed fifo entry with unexpected size\n", __func__); + kfree(buf); continue; } err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false); @@ -2361,6 +2368,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) unsigned int connect_mask = HID_CONNECT_HIDRAW; features->pktlen = wacom_compute_pktlen(hdev); + if (!features->pktlen) + return -ENODEV; if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL)) return -ENOMEM; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index fb8cd8469328..35f26fa1ffe7 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, EXPORT_SYMBOL(vmbus_sendpacket); /* - * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer - * packets using a GPADL Direct packet type. This interface allows you - * to control notifying the host. This will be useful for sending - * batched data. Also the sender can control the send flags - * explicitly. 
- */ -int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, void *buffer, u32 bufferlen, - u64 requestid) -{ - int i; - struct vmbus_channel_packet_page_buffer desc; - u32 descsize; - u32 packetlen; - u32 packetlen_aligned; - struct kvec bufferlist[3]; - u64 aligned_data = 0; - - if (pagecount > MAX_PAGE_BUFFER_COUNT) - return -EINVAL; - - /* - * Adjust the size down since vmbus_channel_packet_page_buffer is the - * largest size we support - */ - descsize = sizeof(struct vmbus_channel_packet_page_buffer) - - ((MAX_PAGE_BUFFER_COUNT - pagecount) * - sizeof(struct hv_page_buffer)); - packetlen = descsize + bufferlen; - packetlen_aligned = ALIGN(packetlen, sizeof(u64)); - - /* Setup the descriptor */ - desc.type = VM_PKT_DATA_USING_GPA_DIRECT; - desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; - desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ - desc.length8 = (u16)(packetlen_aligned >> 3); - desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */ - desc.reserved = 0; - desc.rangecount = pagecount; - - for (i = 0; i < pagecount; i++) { - desc.range[i].len = pagebuffers[i].len; - desc.range[i].offset = pagebuffers[i].offset; - desc.range[i].pfn = pagebuffers[i].pfn; - } - - bufferlist[0].iov_base = &desc; - bufferlist[0].iov_len = descsize; - bufferlist[1].iov_base = buffer; - bufferlist[1].iov_len = bufferlen; - bufferlist[2].iov_base = &aligned_data; - bufferlist[2].iov_len = (packetlen_aligned - packetlen); - - return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL); -} -EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); - -/* - * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet + * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets * using a GPADL Direct packet type. - * The buffer includes the vmbus descriptor. + * The desc argument must include space for the VMBus descriptor. The + * rangecount field must already be set. */ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *desc, @@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, desc->length8 = (u16)(packetlen_aligned >> 3); desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */ desc->reserved = 0; - desc->rangecount = 1; bufferlist[0].iov_base = desc; bufferlist[0].iov_len = desc_size; diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index b3b11be11650..59792e00cecf 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -307,7 +307,7 @@ void __init hv_get_partition_id(void) local_irq_save(flags); output = *this_cpu_ptr(hyperv_pcpu_input_arg); - status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output); + status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output); pt_id = output->partition_id; local_irq_restore(flags); @@ -566,9 +566,11 @@ int hv_common_cpu_die(unsigned int cpu) * originally allocated memory is reused in hv_common_cpu_init(). 
*/ - synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail); - kfree(*synic_eventring_tail); - *synic_eventring_tail = NULL; + if (hv_root_partition()) { + synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail); + kfree(*synic_eventring_tail); + *synic_eventring_tail = NULL; + } return 0; } diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 29780f3a7478..0b450e53161e 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -477,4 +477,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev) #endif /* CONFIG_HYPERV_TESTING */ +/* Create and remove sysfs entry for memory mapped ring buffers for a channel */ +int hv_create_ring_sysfs(struct vmbus_channel *channel, + int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, + struct vm_area_struct *vma)); +int hv_remove_ring_sysfs(struct vmbus_channel *channel); + #endif /* _HYPERV_VMBUS_H */ diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 8d3cff42bdbb..e3d51a316316 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1802,6 +1802,26 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel, } static VMBUS_CHAN_ATTR_RO(subchannel_id); +static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj, + const struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj); + + /* + * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer + * is not NULL. + */ + return channel->mmap_ring_buffer(channel, vma); +} + +static struct bin_attribute chan_attr_ring_buffer = { + .attr = { + .name = "ring", + .mode = 0600, + }, + .mmap = hv_mmap_ring_buffer_wrapper, +}; static struct attribute *vmbus_chan_attrs[] = { &chan_attr_out_mask.attr, &chan_attr_in_mask.attr, @@ -1821,6 +1841,11 @@ static struct attribute *vmbus_chan_attrs[] = { NULL }; +static struct bin_attribute *vmbus_chan_bin_attrs[] = { + &chan_attr_ring_buffer, + NULL +}; + /* * Channel-level attribute_group callback function. Returns the permission for * each attribute, and returns 0 if an attribute is not visible. @@ -1841,9 +1866,34 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj, return attr->mode; } +static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj, + const struct bin_attribute *attr, int idx) +{ + const struct vmbus_channel *channel = + container_of(kobj, struct vmbus_channel, kobj); + + /* Hide ring attribute if channel's ring_sysfs_visible is set to false */ + if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible) + return 0; + + return attr->attr.mode; +} + +static size_t vmbus_chan_bin_size(struct kobject *kobj, + const struct bin_attribute *bin_attr, int a) +{ + const struct vmbus_channel *channel = + container_of(kobj, struct vmbus_channel, kobj); + + return channel->ringbuffer_pagecount << PAGE_SHIFT; +} + static const struct attribute_group vmbus_chan_group = { .attrs = vmbus_chan_attrs, - .is_visible = vmbus_chan_attr_is_visible + .bin_attrs = vmbus_chan_bin_attrs, + .is_visible = vmbus_chan_attr_is_visible, + .is_bin_visible = vmbus_chan_bin_attr_is_visible, + .bin_size = vmbus_chan_bin_size, }; static const struct kobj_type vmbus_chan_ktype = { @@ -1851,6 +1901,63 @@ static const struct kobj_type vmbus_chan_ktype = { .release = vmbus_chan_release, }; +/** + * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel. 
+ * @channel: Pointer to vmbus_channel structure + * @hv_mmap_ring_buffer: function pointer for initializing the function to be called on mmap of + * channel's "ring" sysfs node, which is for the ring buffer of that channel. + * Function pointer is of below type: + * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, + * struct vm_area_struct *vma)) + * This has a pointer to the channel and a pointer to vm_area_struct, + * used for mmap, as arguments. + * + * Sysfs node for ring buffer of a channel is created along with other fields, however its + * visibility is disabled by default. Sysfs creation needs to be controlled when the use-case + * is running. + * For example, HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point of + * time, and "ring" sysfs is needed only when uio_hv_generic is bound to that device. To avoid + * exposing the ring buffer by default, this function is reponsible to enable visibility of + * ring for userspace to use. + * Note: Race conditions can happen with userspace and it is not encouraged to create new + * use-cases for this. This was added to maintain backward compatibility, while solving + * one of the race conditions in uio_hv_generic while creating sysfs. + * + * Returns 0 on success or error code on failure. + */ +int hv_create_ring_sysfs(struct vmbus_channel *channel, + int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, + struct vm_area_struct *vma)) +{ + struct kobject *kobj = &channel->kobj; + + channel->mmap_ring_buffer = hv_mmap_ring_buffer; + channel->ring_sysfs_visible = true; + + return sysfs_update_group(kobj, &vmbus_chan_group); +} +EXPORT_SYMBOL_GPL(hv_create_ring_sysfs); + +/** + * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel. + * @channel: Pointer to vmbus_channel structure + * + * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group. + * + * Returns 0 on success or error code on failure. 
+ */ +int hv_remove_ring_sysfs(struct vmbus_channel *channel) +{ + struct kobject *kobj = &channel->kobj; + int ret; + + channel->ring_sysfs_visible = false; + ret = sysfs_update_group(kobj, &vmbus_chan_group); + channel->mmap_ring_buffer = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs); + /* * vmbus_add_channel_kobj - setup a sub-directory under device/channels */ diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 9ed2c4b6734e..8ecebea53651 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -143,8 +143,8 @@ static void do_read_registers_on_cu(void *_data) */ cu = topology_core_id(smp_processor_id()); - rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]); - rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]); + rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]); + rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]); data->cu_on[cu] = 1; } @@ -424,7 +424,7 @@ static int fam15h_power_init_data(struct pci_dev *f4, */ data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007); - if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) { + if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) { pr_err("Failed to read max compute unit power accumulator MSR\n"); return -ENODEV; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index 6d1175a51832..2df4956296ed 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -15,6 +15,10 @@ #include <linux/kernel.h> #include <linux/hwmon-vid.h> +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif + /* * Common code for decoding VID pins. * diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 3685906cc57c..472bcf6092f6 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -20,7 +20,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_ids.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include <asm/processor.h> MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index 4b6359326ede..4f7d2b6d79e2 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig @@ -60,6 +60,7 @@ config INTEL_TH_STH config INTEL_TH_MSU tristate "Intel(R) Trace Hub Memory Storage Unit" + depends on MMU help Memory Storage Unit (MSU) trace output device enables storing STP traces to system memory. 
It supports single diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index bf99d79a4192..7163950eb371 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/workqueue.h> #include <linux/dma-mapping.h> +#include <linux/pfn_t.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -976,7 +977,6 @@ static void msc_buffer_contig_free(struct msc *msc) for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { struct page *page = virt_to_page(msc->base + off); - page->mapping = NULL; __free_page(page); } @@ -1158,9 +1158,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) int i; for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { - struct page *page = msc_sg_page(sg); - - page->mapping = NULL; dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, sg_virt(sg), sg_dma_address(sg)); } @@ -1601,22 +1598,10 @@ static void msc_mmap_close(struct vm_area_struct *vma) { struct msc_iter *iter = vma->vm_file->private_data; struct msc *msc = iter->msc; - unsigned long pg; if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) return; - /* drop page _refcounts */ - for (pg = 0; pg < msc->nr_pages; pg++) { - struct page *page = msc_buffer_get_page(msc, pg); - - if (WARN_ON_ONCE(!page)) - continue; - - if (page->mapping) - page->mapping = NULL; - } - /* last mapping -- drop user_count */ atomic_dec(&msc->user_count); mutex_unlock(&msc->buf_mutex); @@ -1626,16 +1611,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf) { struct msc_iter *iter = vmf->vma->vm_file->private_data; struct msc *msc = iter->msc; + struct page *page; - vmf->page = msc_buffer_get_page(msc, vmf->pgoff); - if (!vmf->page) + page = msc_buffer_get_page(msc, vmf->pgoff); + if (!page) return VM_FAULT_SIGBUS; - get_page(vmf->page); - vmf->page->mapping = vmf->vma->vm_file->f_mapping; - vmf->page->index = vmf->pgoff; - - return 0; + get_page(page); + return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page)); } static const struct vm_operations_struct msc_mmap_ops = { @@ -1676,7 +1659,7 @@ out: atomic_dec(&msc->user_count); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP); vma->vm_ops = &msc_mmap_ops; return ret; } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 83c88c79afe2..bbbd6240fa6e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -200,7 +200,7 @@ config I2C_ISMT config I2C_PIIX4 tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)" - depends on PCI && HAS_IOPORT + depends on PCI && HAS_IOPORT && X86 select I2C_SMBUS help If you say yes to this option, support will be included for the Intel diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index 43bf90d90eeb..208ce4f9e782 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev) u32 remote_bus; int err; + if (!ec) + return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n"); + if (!ec->cmd_xfer) { dev_err(dev, "Missing sendrecv\n"); return -EINVAL; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 8e0267c7cc29..f21f9877c040 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ 
b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -278,9 +278,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); - if (IS_ERR(dev->slave)) + if (IS_ERR(dev->slave)) { + i2c_del_adapter(&dev->adapter); return dev_err_probe(device, PTR_ERR(dev->slave), "register UCSI failed\n"); + } } pm_runtime_set_autosuspend_delay(device, 1000); diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 0d4b3935e687..342d47e67586 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -1380,9 +1380,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev) return 0; rpm_disable: - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 16afb9ca19bb..876791d20ed5 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1454,7 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev) (1000 * omap->speed / 8); } - if (of_property_read_bool(node, "mux-states")) { + if (of_property_present(node, "mux-states")) { struct mux_state *mux_state; mux_state = devm_mux_state_get(&pdev->dev, NULL); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index dd75916157f0..59ecaa990bce 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -34,6 +34,7 @@ #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/io.h> +#include <asm/amd/fch.h> #include "i2c-piix4.h" @@ -80,12 +81,11 @@ #define SB800_PIIX4_PORT_IDX_MASK 0x06 #define SB800_PIIX4_PORT_IDX_SHIFT 1 -/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ -#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 -#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 +/* SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ +#define SB800_PIIX4_PORT_IDX_KERNCZ (FCH_PM_DECODEEN + 0x02) +#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ (FCH_PM_DECODEEN_SMBUS0SEL >> 16) #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 -#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300 #define SB800_PIIX4_FCH_PM_SIZE 8 #define SB800_ASF_ACPI_PATH "\\_SB.ASFC" @@ -162,19 +162,19 @@ int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_c if (mmio_cfg->use_mmio) { void __iomem *addr; - if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR, + if (!request_mem_region_muxed(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE, "sb800_piix4_smb")) { dev_err(dev, "SMBus base address memory region 0x%x already in use.\n", - SB800_PIIX4_FCH_PM_ADDR); + FCH_PM_BASE); return -EBUSY; } - addr = ioremap(SB800_PIIX4_FCH_PM_ADDR, + addr = ioremap(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE); if (!addr) { - release_mem_region(SB800_PIIX4_FCH_PM_ADDR, + release_mem_region(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE); dev_err(dev, "SMBus base address mapping failed.\n"); return -ENOMEM; @@ -201,7 +201,7 @@ void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_ { if (mmio_cfg->use_mmio) { iounmap(mmio_cfg->addr); - release_mem_region(SB800_PIIX4_FCH_PM_ADDR, + release_mem_region(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE); return; } diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c index 8fe9ddff8e96..783fb8df2ebe 100644 --- a/drivers/i2c/i2c-atr.c +++ b/drivers/i2c/i2c-atr.c @@ -8,12 +8,12 @@ * Originally based on i2c-mux.c */ 
-#include <linux/fwnode.h> #include <linux/i2c-atr.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/spinlock.h> diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 976f5be54e36..433d858b7be1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -51,11 +51,12 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/moduleparam.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/mwait.h> #include <asm/spec-ctrl.h> +#include <asm/msr.h> #include <asm/tsc.h> #include <asm/fpu/api.h> #include <asm/smp.h> @@ -1928,35 +1929,35 @@ static void __init bxt_idle_state_table_update(void) unsigned long long msr; unsigned int usec; - rdmsrl(MSR_PKGC6_IRTL, msr); + rdmsrq(MSR_PKGC6_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[2].exit_latency = usec; bxt_cstates[2].target_residency = usec; } - rdmsrl(MSR_PKGC7_IRTL, msr); + rdmsrq(MSR_PKGC7_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[3].exit_latency = usec; bxt_cstates[3].target_residency = usec; } - rdmsrl(MSR_PKGC8_IRTL, msr); + rdmsrq(MSR_PKGC8_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[4].exit_latency = usec; bxt_cstates[4].target_residency = usec; } - rdmsrl(MSR_PKGC9_IRTL, msr); + rdmsrq(MSR_PKGC9_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[5].exit_latency = usec; bxt_cstates[5].target_residency = usec; } - rdmsrl(MSR_PKGC10_IRTL, msr); + rdmsrq(MSR_PKGC10_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[6].exit_latency = usec; @@ -1984,7 +1985,7 @@ static void __init sklh_idle_state_table_update(void) if ((mwait_substates & (0xF << 28)) == 0) return; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); /* PC10 is not enabled in PKG C-state limit */ if ((msr & 0xF) != 8) @@ -1996,7 +1997,7 @@ static void __init sklh_idle_state_table_update(void) /* if SGX is present */ if (ebx & (1 << 2)) { - rdmsrl(MSR_IA32_FEAT_CTL, msr); + rdmsrq(MSR_IA32_FEAT_CTL, msr); /* if SGX is enabled */ if (msr & (1 << 18)) @@ -2015,7 +2016,7 @@ static void __init skx_idle_state_table_update(void) { unsigned long long msr; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); /* * 000b: C0/C1 (no package C-state support) @@ -2068,7 +2069,7 @@ static void __init spr_idle_state_table_update(void) * C6. However, if PC6 is disabled, we update the numbers to match * core C6. */ - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); /* Limit value 2 and above allow for PC6. 
*/ if ((msr & 0x7) < 2) { @@ -2082,8 +2083,8 @@ static void __init spr_idle_state_table_update(void) */ static void __init byt_cht_auto_demotion_disable(void) { - wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); - wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); } static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) @@ -2241,27 +2242,27 @@ static void auto_demotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); msr_bits &= ~auto_demotion_disable_flags; - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } static void c1e_promotion_enable(void) { unsigned long long msr_bits; - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + rdmsrq(MSR_IA32_POWER_CTL, msr_bits); msr_bits |= 0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); + wrmsrq(MSR_IA32_POWER_CTL, msr_bits); } static void c1e_promotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + rdmsrq(MSR_IA32_POWER_CTL, msr_bits); msr_bits &= ~0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); + wrmsrq(MSR_IA32_POWER_CTL, msr_bits); } /** diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index 8601b9a8b8e7..5127e58eebc7 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = { BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12), ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X, - BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, - BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), IIO_CHAN_SOFT_TIMESTAMP(7) }; diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c index e8cd21fa77a6..cbac622ef821 100644 --- a/drivers/iio/accel/adxl355_core.c +++ b/drivers/iio/accel/adxl355_core.c @@ -231,7 +231,7 @@ struct adxl355_data { u8 transf_buf[3]; struct { u8 buf[14]; - s64 ts; + aligned_s64 ts; } buffer; } __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index add4053e7a02..0c04b2bb7efb 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -601,18 +601,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr) if (ret) return ret; + st->odr = odr; + /* Activity timers depend on ODR */ ret = _adxl367_set_act_time_ms(st, st->act_time_ms); if (ret) return ret; - ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms); - if (ret) - return ret; - - st->odr = odr; - - return 0; + return _adxl367_set_inact_time_ms(st, st->inact_time_ms); } static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr) diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 48e4282964a0..bf1d3923a181 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -1226,8 +1226,11 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq) if (ret) return ret; - if (device_property_read_bool(dev, "wakeup-source")) - device_init_wakeup(dev, true); + if (device_property_read_bool(dev, "wakeup-source")) { + ret = devm_device_init_wakeup(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to 
init wakeup\n"); + } return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 18559757f908..7fef2727f89e 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -45,7 +45,7 @@ struct ad7266_state { */ struct { __be16 sample[2]; - s64 timestamp; + aligned_s64 timestamp; } data __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index 4fcb49fdf566..aef85093eb16 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -1211,6 +1211,9 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev) struct ad7380_state *st = iio_priv(indio_dev); int ret; + spi_offload_trigger_disable(st->offload, st->offload_trigger); + spi_unoptimize_message(&st->offload_msg); + if (st->seq) { ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG1, @@ -1222,10 +1225,6 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev) st->seq = false; } - spi_offload_trigger_disable(st->offload, st->offload_trigger); - - spi_unoptimize_message(&st->offload_msg); - return 0; } @@ -1611,11 +1610,25 @@ static int ad7380_write_event_config(struct iio_dev *indio_dev, return ret; } -static int ad7380_get_alert_th(struct ad7380_state *st, +static int ad7380_get_alert_th(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, enum iio_event_direction dir, int *val) { - int ret, tmp; + struct ad7380_state *st = iio_priv(indio_dev); + const struct iio_scan_type *scan_type; + int ret, tmp, shift; + + scan_type = iio_get_current_scan_type(indio_dev, chan); + if (IS_ERR(scan_type)) + return PTR_ERR(scan_type); + + /* + * The register value is 12-bits and is compared to the most significant + * bits of raw value, therefore a shift is required to convert this to + * the same scale as the raw value. 
+ */ + shift = scan_type->realbits - 12; switch (dir) { case IIO_EV_DIR_RISING: @@ -1625,7 +1638,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st, if (ret) return ret; - *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp); + *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp) << shift; return IIO_VAL_INT; case IIO_EV_DIR_FALLING: ret = regmap_read(st->regmap, @@ -1634,7 +1647,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st, if (ret) return ret; - *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp); + *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp) << shift; return IIO_VAL_INT; default: return -EINVAL; @@ -1648,7 +1661,6 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev, enum iio_event_info info, int *val, int *val2) { - struct ad7380_state *st = iio_priv(indio_dev); int ret; switch (info) { @@ -1656,7 +1668,7 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev, if (!iio_device_claim_direct(indio_dev)) return -EBUSY; - ret = ad7380_get_alert_th(st, dir, val); + ret = ad7380_get_alert_th(indio_dev, chan, dir, val); iio_device_release_direct(indio_dev); return ret; diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index 1a314fddd7eb..703556eb7257 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -1236,9 +1236,11 @@ static int ad7616_sw_mode_setup(struct iio_dev *indio_dev) st->write_scale = ad7616_write_scale_sw; st->write_os = &ad7616_write_os_sw; - ret = st->bops->sw_mode_config(indio_dev); - if (ret) - return ret; + if (st->bops->sw_mode_config) { + ret = st->bops->sw_mode_config(indio_dev); + if (ret) + return ret; + } /* Activate Burst mode and SEQEN MODE */ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER, @@ -1268,6 +1270,9 @@ static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev) st->write_scale = ad7606_write_scale_sw; st->write_os = &ad7606_write_os_sw; + if (!st->bops->sw_mode_config) + return 0; + return st->bops->sw_mode_config(indio_dev); } diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 885bf0b68e77..179115e90988 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -131,7 +131,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr) { .tx_buf = &st->d16[0], .len = 2, - .cs_change = 0, + .cs_change = 1, }, { .rx_buf = &st->d16[1], .len = 2, diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 5a863005aca6..5e0be36af0c5 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -168,7 +168,7 @@ struct ad7768_state { union { struct { __be32 chan; - s64 timestamp; + aligned_s64 timestamp; } scan; __be32 d32; u8 d8[2]; diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index a1e48a756a7b..359e26e3f5bc 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -466,7 +466,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct { __le16 values[DLN2_ADC_MAX_CHANNELS]; - int64_t timestamp_space; + aligned_s64 timestamp_space; } data; struct dln2_adc_get_all_vals dev_data; struct dln2_adc *dln2 = iio_priv(indio_dev); diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c index 7fb8b2499a1d..b64a8a407168 100644 --- a/drivers/iio/adc/qcom-spmi-iadc.c +++ b/drivers/iio/adc/qcom-spmi-iadc.c @@ -543,7 +543,9 @@ static int iadc_probe(struct platform_device *pdev) else return ret; } else { - device_init_wakeup(iadc->dev, 1); + ret = devm_device_init_wakeup(iadc->dev); + if (ret) + return 
dev_err_probe(iadc->dev, ret, "Failed to init wakeup\n"); } ret = iadc_update_offset(iadc); diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 9a099df79518..5e28bd28b81a 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -520,15 +520,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev) if (info->reset) rockchip_saradc_reset_controller(info->reset); - /* - * Use a default value for the converter clock. - * This may become user-configurable in the future. - */ - ret = clk_set_rate(info->clk, info->data->clk_rate); - if (ret < 0) - return dev_err_probe(&pdev->dev, ret, - "failed to set adc clk rate\n"); - ret = regulator_enable(info->vref); if (ret < 0) return dev_err_probe(&pdev->dev, ret, @@ -555,6 +546,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev) if (IS_ERR(info->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed to get adc clock\n"); + /* + * Use a default value for the converter clock. + * This may become user-configurable in the future. + */ + ret = clk_set_rate(info->clk, info->data->clk_rate); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "failed to set adc clk rate\n"); platform_set_drvdata(pdev, indio_dev); diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c index d0bd94912e0a..e05ce1f12065 100644 --- a/drivers/iio/chemical/pms7003.c +++ b/drivers/iio/chemical/pms7003.c @@ -5,7 +5,6 @@ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com> */ -#include <linux/unaligned.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/errno.h> @@ -19,6 +18,8 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/serdev.h> +#include <linux/types.h> +#include <linux/unaligned.h> #define PMS7003_DRIVER_NAME "pms7003" @@ -76,7 +77,7 @@ struct pms7003_state { /* Used to construct scan to push to the IIO buffer */ struct { u16 data[3]; /* PM1, PM2P5, PM10 */ - s64 ts; + aligned_s64 ts; } scan; }; diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c index 6f4f2ba2c09d..a7888146188d 100644 --- a/drivers/iio/chemical/sps30.c +++ b/drivers/iio/chemical/sps30.c @@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p) int ret; struct { s32 data[4]; /* PM1, PM2P5, PM4, PM10 */ - s64 ts; + aligned_s64 ts; } scan; mutex_lock(&state->lock); diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index ad1882f608c0..2055a03cbeb1 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -66,6 +66,10 @@ static struct { {HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0}, {HID_USAGE_SENSOR_HINGE, 0, 0, 17453293}, {HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, + + {HID_USAGE_SENSOR_HUMAN_PRESENCE, 0, 1, 0}, + {HID_USAGE_SENSOR_HUMAN_PROXIMITY, 0, 1, 0}, + {HID_USAGE_SENSOR_HUMAN_ATTENTION, 0, 1, 0}, }; static void simple_div(int dividend, int divisor, int *whole, diff --git a/drivers/iio/imu/adis16550.c b/drivers/iio/imu/adis16550.c index b14ea8937c7f..28f0dbd0226c 100644 --- a/drivers/iio/imu/adis16550.c +++ b/drivers/iio/imu/adis16550.c @@ -836,7 +836,7 @@ static irqreturn_t adis16550_trigger_handler(int irq, void *p) u16 dummy; bool valid; struct iio_poll_func *pf = p; - __be32 data[ADIS16550_MAX_SCAN_DATA]; + __be32 data[ADIS16550_MAX_SCAN_DATA] __aligned(8); struct iio_dev *indio_dev = pf->indio_dev; struct adis16550 
*st = iio_priv(indio_dev); struct adis *adis = iio_device_get_drvdata(indio_dev); diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c index a86be5af5ccb..2e4469f30d53 100644 --- a/drivers/iio/imu/bmi270/bmi270_core.c +++ b/drivers/iio/imu/bmi270/bmi270_core.c @@ -918,8 +918,7 @@ static int bmi270_configure_imu(struct bmi270_data *data) FIELD_PREP(BMI270_ACC_CONF_ODR_MSK, BMI270_ACC_CONF_ODR_100HZ) | FIELD_PREP(BMI270_ACC_CONF_BWP_MSK, - BMI270_ACC_CONF_BWP_NORMAL_MODE) | - BMI270_PWR_CONF_ADV_PWR_SAVE_MSK); + BMI270_ACC_CONF_BWP_NORMAL_MODE)); if (ret) return dev_err_probe(dev, ret, "Failed to configure accelerometer"); @@ -927,8 +926,7 @@ static int bmi270_configure_imu(struct bmi270_data *data) FIELD_PREP(BMI270_GYR_CONF_ODR_MSK, BMI270_GYR_CONF_ODR_200HZ) | FIELD_PREP(BMI270_GYR_CONF_BWP_MSK, - BMI270_GYR_CONF_BWP_NORMAL_MODE) | - BMI270_PWR_CONF_ADV_PWR_SAVE_MSK); + BMI270_GYR_CONF_BWP_NORMAL_MODE)); if (ret) return dev_err_probe(dev, ret, "Failed to configure gyroscope"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 3d3b27f28c9d..273196e647a2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -50,7 +50,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) u16 fifo_count; u32 fifo_period; s64 timestamp; - u8 data[INV_MPU6050_OUTPUT_DATA_SIZE]; + u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8); size_t i, nb; mutex_lock(&st->lock); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 0a7cd8c1aa33..8a9d2593576a 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -392,6 +392,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK)) return 0; + if (!pattern_len) + pattern_len = ST_LSM6DSX_SAMPLE_SIZE; + fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) * ST_LSM6DSX_CHAN_SIZE; fifo_len = (fifo_len / pattern_len) * pattern_len; @@ -623,6 +626,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw) if (!fifo_len) return 0; + if (!pattern_len) + pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE; + for (read_len = 0; read_len < fifo_len; read_len += pattern_len) { err = st_lsm6dsx_read_block(hw, ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR, diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 4fdcc2acc94e..96c6106b95ee 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -2719,8 +2719,11 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, } if (device_property_read_bool(dev, "wakeup-source") || - (pdata && pdata->wakeup_source)) - device_init_wakeup(dev, true); + (pdata && pdata->wakeup_source)) { + err = devm_device_init_wakeup(dev); + if (err) + return dev_err_probe(dev, err, "Failed to init wakeup\n"); + } return 0; } diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 76b76d12b388..4c65b32d34ce 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -34,9 +34,9 @@ struct prox_state { struct iio_chan_spec channels[MAX_CHANNELS]; u32 channel2usage[MAX_CHANNELS]; u32 human_presence[MAX_CHANNELS]; - int scale_pre_decml; - int scale_post_decml; - int scale_precision; + int scale_pre_decml[MAX_CHANNELS]; + int scale_post_decml[MAX_CHANNELS]; + int 
scale_precision[MAX_CHANNELS]; unsigned long scan_mask[2]; /* One entry plus one terminator. */ int num_channels; }; @@ -116,13 +116,15 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = prox_state->scale_pre_decml; - *val2 = prox_state->scale_post_decml; - ret_type = prox_state->scale_precision; + if (chan->scan_index >= prox_state->num_channels) + return -EINVAL; + + *val = prox_state->scale_pre_decml[chan->scan_index]; + *val2 = prox_state->scale_post_decml[chan->scan_index]; + ret_type = prox_state->scale_precision[chan->scan_index]; break; case IIO_CHAN_INFO_OFFSET: - *val = hid_sensor_convert_exponent( - prox_state->prox_attr[chan->scan_index].unit_expo); + *val = 0; ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: @@ -249,6 +251,10 @@ static int prox_parse_report(struct platform_device *pdev, st->prox_attr[index].size); dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr[index].index, st->prox_attr[index].report_id); + st->scale_precision[index] = + hid_sensor_format_scale(usage_id, &st->prox_attr[index], + &st->scale_pre_decml[index], + &st->scale_post_decml[index]); index++; } diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index 65b295877b41..393a3d2fbe1d 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -788,8 +788,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) int ret; bool wake_result_ready_queue = false; enum iio_chan_type chan_type = opt->chip_info->chan_type; + bool ok_to_ignore_lock = opt->ok_to_ignore_lock; - if (!opt->ok_to_ignore_lock) + if (!ok_to_ignore_lock) mutex_lock(&opt->lock); ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION); @@ -826,7 +827,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) } out: - if (!opt->ok_to_ignore_lock) + if (!ok_to_ignore_lock) mutex_unlock(&opt->lock); if (wake_result_ready_queue) diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 08975c60e325..7bc341c69697 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -535,8 +535,8 @@ static int ak8974_detect(struct ak8974 *ak8974) fab_data2, sizeof(fab_data2)); for (i = 0; i < 3; ++i) { - static const char axis[3] = "XYZ"; - static const char pgaxis[6] = "ZYZXYX"; + static const char axis[] = "XYZ"; + static const char pgaxis[] = "ZYZXYX"; unsigned offz = le16_to_cpu(fab_data2[i]) & 0x7F; unsigned fine = le16_to_cpu(fab_data1[i]); unsigned sens = le16_to_cpu(fab_data1[i + 3]); diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h index 9d5c30afa9d6..d62a018eaff3 100644 --- a/drivers/iio/pressure/mprls0025pa.h +++ b/drivers/iio/pressure/mprls0025pa.h @@ -34,16 +34,6 @@ struct iio_dev; struct mpr_data; struct mpr_ops; -/** - * struct mpr_chan - * @pres: pressure value - * @ts: timestamp - */ -struct mpr_chan { - s32 pres; - s64 ts; -}; - enum mpr_func_id { MPR_FUNCTION_A, MPR_FUNCTION_B, @@ -69,6 +59,8 @@ enum mpr_func_id { * reading in a loop until data is ready * @completion: handshake from irq to read * @chan: channel values for buffered mode + * @chan.pres: pressure value + * @chan.ts: timestamp * @buffer: raw conversion data */ struct mpr_data { @@ -87,7 +79,10 @@ struct mpr_data { struct gpio_desc *gpiod_reset; int irq; struct completion completion; - struct mpr_chan chan; + struct { + s32 pres; + aligned_s64 ts; + } chan; u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN); }; diff --git 
a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index c28a7a6dea5f..555a61e2f3fd 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = { struct maxim_thermocouple_data { struct spi_device *spi; const struct maxim_thermocouple_chip *chip; + char tc_type; u8 buffer[16] __aligned(IIO_DMA_MINALIGN); - char tc_type; }; static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index fedcdb56fb6b..ab31eefa916b 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -72,6 +72,8 @@ static const char * const cma_events[] = { static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type); +static void cma_netevent_work_handler(struct work_struct *_work); + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { size_t index = event; @@ -1047,6 +1049,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = get_net(net); id_priv->seq_num &= 0x00ffffff; + INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler); rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); if (parent) @@ -5241,7 +5244,6 @@ static int cma_netevent_callback(struct notifier_block *self, if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, neigh->ha, ETH_ALEN)) continue; - INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler); cma_id_get(current_id); queue_work(cma_wq, &current_id->id.net_work); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b4e3e4beb7f4..d4263385850a 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device) down_read(&devices_rwsem); + /* Mark for userspace that device is ready */ + kobject_uevent(&device->dev.kobj, KOBJ_ADD); + ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); if (ret) goto out; @@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name, return ret; } dev_set_uevent_suppress(&device->dev, false); - /* Mark for userspace that device is ready */ - kobject_uevent(&device->dev.kobj, KOBJ_ADD); ib_device_notify_register(device); + ib_device_put(device); return 0; diff --git a/drivers/infiniband/core/ucaps.c b/drivers/infiniband/core/ucaps.c index 6853c6d078f9..de5cb8bf0a61 100644 --- a/drivers/infiniband/core/ucaps.c +++ b/drivers/infiniband/core/ucaps.c @@ -170,7 +170,7 @@ int ib_create_ucap(enum rdma_user_cap type) ucap->dev.class = &ucaps_class; ucap->dev.devt = MKDEV(MAJOR(ucaps_base_dev), type); ucap->dev.release = ucap_dev_release; - ret = dev_set_name(&ucap->dev, ucap_names[type]); + ret = dev_set_name(&ucap->dev, "%s", ucap_names[type]); if (ret) goto err_device; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index e9fa22d31c23..c48ef6083020 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -76,12 +76,14 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, npfns = (end - start) >> PAGE_SHIFT; umem_odp->pfn_list = kvcalloc( - npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL); + npfns, sizeof(*umem_odp->pfn_list), + 
GFP_KERNEL | __GFP_NOWARN); if (!umem_odp->pfn_list) return -ENOMEM; umem_odp->dma_list = kvcalloc( - ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL); + ndmas, sizeof(*umem_odp->dma_list), + GFP_KERNEL | __GFP_NOWARN); if (!umem_odp->dma_list) { ret = -ENOMEM; goto out_pfn_list; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 9082b3fd2b47..063801384b2b 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1774,10 +1774,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) ib_srq); struct bnxt_re_dev *rdev = srq->rdev; struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; - struct bnxt_qplib_nq *nq = NULL; - if (qplib_srq->cq) - nq = qplib_srq->cq->nq; if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { free_page((unsigned long)srq->uctx_srq_page); hash_del(&srq->hash_entry); @@ -1785,8 +1782,6 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); ib_umem_release(srq->umem); atomic_dec(&rdev->stats.res.srq_count); - if (nq) - nq->budget--; return 0; } @@ -1827,7 +1822,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, struct ib_udata *udata) { struct bnxt_qplib_dev_attr *dev_attr; - struct bnxt_qplib_nq *nq = NULL; struct bnxt_re_ucontext *uctx; struct bnxt_re_dev *rdev; struct bnxt_re_srq *srq; @@ -1873,7 +1867,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id; srq->qplib_srq.sg_info.pgsize = PAGE_SIZE; srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT; - nq = &rdev->nqr->nq[0]; if (udata) { rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); @@ -1908,8 +1901,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, goto fail; } } - if (nq) - nq->budget++; active_srqs = atomic_inc_return(&rdev->stats.res.srq_count); if (active_srqs > rdev->stats.res.srq_watermark) rdev->stats.res.srq_watermark = active_srqs; @@ -3079,7 +3070,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ib_umem_release(cq->umem); atomic_dec(&rdev->stats.res.cq_count); - nq->budget--; kfree(cq->cql); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index cf89a8db4f64..8d0b63d4b50a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -763,7 +763,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) if (ret) return ret; } - dma_set_max_seg_size(dev, UINT_MAX); + dma_set_max_seg_size(dev, SZ_2G); ret = ib_register_device(ib_dev, "hns_%d", dev); if (ret) { dev_err(dev, "ib_register_device failed!\n"); diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index 1ee8969595d3..7599e31b5743 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -221,7 +221,7 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf) break; if (i < IRDMA_MIN_MSIX) { - for (; i > 0; i--) + while (--i >= 0) ice_free_rdma_qvector(pf, &rf->msix_entries[i]); kfree(rf->msix_entries); @@ -255,6 +255,8 @@ static void irdma_remove(struct auxiliary_device *aux_dev) ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false); irdma_deinit_interrupts(iwdev->rf, pf); + kfree(iwdev->rf); + pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn)); } diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 
eeb932e58730..1e8c92826de2 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -4871,5 +4871,4 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev) irdma_rt_deinit_hw(iwdev); irdma_ctrl_deinit_hw(iwdev->rf); - kfree(iwdev->rf); } diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 251246c73b33..0ff9f18a71e8 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -3461,7 +3461,6 @@ DECLARE_UVERBS_NAMED_OBJECT( &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY)); const struct uapi_definition mlx5_ib_flow_defs[] = { -#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) UAPI_DEF_CHAIN_OBJ_TREE_NAMED( MLX5_IB_OBJECT_FLOW_MATCHER), UAPI_DEF_CHAIN_OBJ_TREE( @@ -3472,7 +3471,6 @@ const struct uapi_definition mlx5_ib_flow_defs[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED( MLX5_IB_OBJECT_STEERING_ANCHOR, UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)), -#endif {}, }; diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index b9f4a2937c3a..2098de762bf5 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -90,7 +90,7 @@ static int create_file(const char *name, umode_t mode, int error; inode_lock(d_inode(parent)); - *dentry = lookup_one_len(name, parent, strlen(name)); + *dentry = lookup_noperm(&QSTR(name), parent); if (!IS_ERR(*dentry)) error = qibfs_mknod(d_inode(parent), *dentry, mode, fops, data); @@ -433,7 +433,7 @@ static int remove_device_files(struct super_block *sb, char unit[10]; snprintf(unit, sizeof(unit), "%u", dd->unit); - dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit)); + dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root); if (IS_ERR(dir)) { pr_err("Lookup of %s failed\n", unit); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 4ddcd5860e0f..11eca39b73a9 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -397,7 +397,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) if (!us_ibdev) { usnic_err("Device %s context alloc failed\n", netdev_name(pci_get_drvdata(dev))); - return ERR_PTR(-EFAULT); + return NULL; } us_ibdev->ufdev = usnic_fwd_dev_alloc(dev); @@ -517,8 +517,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic) } us_ibdev = usnic_ib_device_add(parent_pci); - if (IS_ERR_OR_NULL(us_ibdev)) { - us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT); + if (!us_ibdev) { + us_ibdev = ERR_PTR(-EFAULT); goto out; } @@ -586,10 +586,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev, } pf = usnic_ib_discover_pf(vf->vnic); - if (IS_ERR_OR_NULL(pf)) { - usnic_err("Failed to discover pf of vnic %s with err%ld\n", - pci_name(pdev), PTR_ERR(pf)); - err = pf ? PTR_ERR(pf) : -EFAULT; + if (IS_ERR(pf)) { + err = PTR_ERR(pf); + usnic_err("Failed to discover pf of vnic %s with err%d\n", + pci_name(pdev), err); goto out_clean_vnic; } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index fec87c9030ab..fffd144d509e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, err = do_mmap_info(rxe, uresp ? 
&uresp->mi : NULL, udata, cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); - if (err) { - vfree(cq->queue->buf); - kfree(cq->queue); + if (err) return err; - } cq->is_user = uresp; diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index feb386d98d1d..0bc3fbb6554f 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -140,6 +140,12 @@ static inline int qp_mtu(struct rxe_qp *qp) return IB_MTU_4096; } +static inline bool is_odp_mr(struct rxe_mr *mr) +{ + return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem && + mr->umem->is_odp; +} + void free_rd_atomic_resource(struct resp_res *res); static inline void rxe_advance_resp_resource(struct rxe_qp *qp) diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 868d2f0b74e9..432d864c3ce9 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -323,7 +323,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, return err; } - if (mr->umem->is_odp) + if (is_odp_mr(mr)) return rxe_odp_mr_copy(mr, iova, addr, length, dir); else return rxe_mr_copy_xarray(mr, iova, addr, length, dir); @@ -536,7 +536,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) u64 *va; /* ODP is not supported right now. WIP. */ - if (mr->umem->is_odp) + if (is_odp_mr(mr)) return RESPST_ERR_UNSUPPORTED_OPCODE; /* See IBA oA19-28 */ diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 54ba9ee1acc5..5d9174e408db 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -650,7 +650,7 @@ static enum resp_states process_flush(struct rxe_qp *qp, struct resp_res *res = qp->resp.res; /* ODP is not supported right now. WIP. 
*/ - if (mr->umem->is_odp) + if (is_odp_mr(mr)) return RESPST_ERR_UNSUPPORTED_OPCODE; /* oA19-14, oA19-15 */ @@ -706,7 +706,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp, if (!res->replay) { u64 iova = qp->resp.va + qp->resp.offset; - if (mr->umem->is_odp) + if (is_odp_mr(mr)) err = rxe_odp_atomic_op(mr, iova, pkt->opcode, atmeth_comp(pkt), atmeth_swap_add(pkt), diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c index 2eaa25c9c68c..7622638e5bb8 100644 --- a/drivers/input/joystick/magellan.c +++ b/drivers/input/joystick/magellan.c @@ -48,7 +48,7 @@ struct magellan { static int magellan_crunch_nibbles(unsigned char *data, int count) { - static unsigned char nibbles[16] = "0AB3D56GH9:K<MN?"; + static const unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?"; do { if (data[count] == nibbles[data[count] & 0xf]) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index c33e6f33265b..1008858f78e2 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -77,12 +77,13 @@ * xbox d-pads should map to buttons, as is required for DDR pads * but we map them to axes when possible to simplify things */ -#define MAP_DPAD_TO_BUTTONS (1 << 0) -#define MAP_TRIGGERS_TO_BUTTONS (1 << 1) -#define MAP_STICKS_TO_NULL (1 << 2) -#define MAP_SELECT_BUTTON (1 << 3) -#define MAP_PADDLES (1 << 4) -#define MAP_PROFILE_BUTTON (1 << 5) +#define MAP_DPAD_TO_BUTTONS BIT(0) +#define MAP_TRIGGERS_TO_BUTTONS BIT(1) +#define MAP_STICKS_TO_NULL BIT(2) +#define MAP_SHARE_BUTTON BIT(3) +#define MAP_PADDLES BIT(4) +#define MAP_PROFILE_BUTTON BIT(5) +#define MAP_SHARE_OFFSET BIT(6) #define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \ MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL) @@ -135,14 +136,14 @@ static const struct xpad_device { { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE }, - { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */ + { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, - { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, + { 0x044f, 0xd01e, "ThrustMaster, Inc. 
ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX }, { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, @@ -159,7 +160,7 @@ static const struct xpad_device { { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE }, { 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE }, - { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, + { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE }, { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 }, { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 }, { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 }, @@ -205,13 +206,13 @@ static const struct xpad_device { { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 }, { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, - { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, + { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 }, { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, - { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE }, + { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE }, { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, @@ -240,7 +241,7 @@ static const struct xpad_device { { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, - { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE }, + { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, @@ -281,6 +282,7 @@ static const struct xpad_device { { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, + { 0x0f0d, 0x01b2, "HORI Taiko No Tatsujin Drum Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, @@ -288,6 +290,8 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 
0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, + { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, @@ -352,7 +356,10 @@ static const struct xpad_device { { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, + { 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, @@ -384,13 +391,16 @@ static const struct xpad_device { { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE }, { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE }, + { 0x2dc8, 0x200f, "8BitDo Ultimate 3-mode Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 }, + { 0x2e24, 0x0423, "Hyperkin DuchesS Xbox One pad", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE }, - { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, + { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, @@ -714,8 +724,10 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on), + XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on), XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth), + XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth), XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), @@ -1027,7 +1039,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha * The report format was gleaned from * https://github.com/kylelemons/xbox/blob/master/xbox.go */ -static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data) +static void xpadone_process_packet(struct 
usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len) { struct input_dev *dev = xpad->dev; bool do_sync = false; @@ -1068,8 +1080,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char /* menu/view buttons */ input_report_key(dev, BTN_START, data[4] & BIT(2)); input_report_key(dev, BTN_SELECT, data[4] & BIT(3)); - if (xpad->mapping & MAP_SELECT_BUTTON) - input_report_key(dev, KEY_RECORD, data[22] & BIT(0)); + if (xpad->mapping & MAP_SHARE_BUTTON) { + if (xpad->mapping & MAP_SHARE_OFFSET) + input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0)); + else + input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0)); + } /* buttons A,B,X,Y */ input_report_key(dev, BTN_A, data[4] & BIT(4)); @@ -1217,7 +1233,7 @@ static void xpad_irq_in(struct urb *urb) xpad360w_process_packet(xpad, 0, xpad->idata); break; case XTYPE_XBOXONE: - xpadone_process_packet(xpad, 0, xpad->idata); + xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length); break; default: xpad_process_packet(xpad, 0, xpad->idata); @@ -1944,7 +1960,7 @@ static int xpad_init_input(struct usb_xpad *xpad) xpad->xtype == XTYPE_XBOXONE) { for (i = 0; xpad360_btn[i] >= 0; i++) input_set_capability(input_dev, EV_KEY, xpad360_btn[i]); - if (xpad->mapping & MAP_SELECT_BUTTON) + if (xpad->mapping & MAP_SHARE_BUTTON) input_set_capability(input_dev, EV_KEY, KEY_RECORD); } else { for (i = 0; xpad_btn[i] >= 0; i++) diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c index 5ad6be914160..061d48350df6 100644 --- a/drivers/input/keyboard/mtk-pmic-keys.c +++ b/drivers/input/keyboard/mtk-pmic-keys.c @@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys, u32 value, mask; int error; - kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs; - kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs; + kregs_home = &regs->keys_regs[MTK_PMIC_HOMEKEY_INDEX]; + kregs_pwr = &regs->keys_regs[MTK_PMIC_PWRKEY_INDEX]; error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec", &long_press_debounce); diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c index d3c293a95d32..d315017324d9 100644 --- a/drivers/input/misc/hisi_powerkey.c +++ b/drivers/input/misc/hisi_powerkey.c @@ -30,7 +30,7 @@ static irqreturn_t hi65xx_power_press_isr(int irq, void *q) { struct input_dev *input = q; - pm_wakeup_event(input->dev.parent, MAX_HELD_TIME); + pm_wakeup_dev_event(input->dev.parent, MAX_HELD_TIME, true); input_report_key(input, KEY_POWER, 1); input_sync(input); diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index 8d7303fc13bc..1cfadd73829f 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -74,9 +74,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int return -1; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + break; + case SND_TONE: + break; + default: + return -1; } if (value > 20 && value < 32767) @@ -109,9 +114,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned return -1; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + break; + case SND_TONE: + break; + default: + return -1; } if (value > 20 && value < 32767) diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 
309c360aab55..c5c88a75a019 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -164,6 +164,7 @@ static const char * const topbuttonpad_pnp_ids[] = { #ifdef CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS static const char * const smbus_pnp_ids[] = { /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ + "DLL060d", /* Dell Precision M3800 */ "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN0049", /* Yoga 11e */ @@ -190,11 +191,15 @@ static const char * const smbus_pnp_ids[] = { "LEN2054", /* E480 */ "LEN2055", /* E580 */ "LEN2068", /* T14 Gen 1 */ + "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */ + "SYN3003", /* HP EliteBook 850 G1 */ "SYN3015", /* HP EliteBook 840 G2 */ "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ + "TOS01f6", /* Dynabook Portege X30L-G */ + "TOS0213", /* Dynabook Portege X30-D */ NULL }; #endif diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index d760af4cc12e..f1947f03b06a 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -4,6 +4,7 @@ * Copyright (C) 2016 Zodiac Inflight Innovations */ +#include "linux/device.h" #include <linux/kernel.h> #include <linux/rmi.h> #include <linux/firmware.h> @@ -289,39 +290,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34, return rmi_f34_flash_firmware(f34, syn_fw); } -static int rmi_f34_status(struct rmi_function *fn) -{ - struct f34_data *f34 = dev_get_drvdata(&fn->dev); - - /* - * The status is the percentage complete, or once complete, - * zero for success or a negative return code. - */ - return f34->update_status; -} - static ssize_t rmi_driver_bootloader_id_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct rmi_driver_data *data = dev_get_drvdata(dev); - struct rmi_function *fn = data->f34_container; + struct rmi_function *fn; struct f34_data *f34; - if (fn) { - f34 = dev_get_drvdata(&fn->dev); - - if (f34->bl_version == 5) - return sysfs_emit(buf, "%c%c\n", - f34->bootloader_id[0], - f34->bootloader_id[1]); - else - return sysfs_emit(buf, "V%d.%d\n", - f34->bootloader_id[1], - f34->bootloader_id[0]); - } + fn = data->f34_container; + if (!fn) + return -ENODEV; - return 0; + f34 = dev_get_drvdata(&fn->dev); + if (!f34) + return -ENODEV; + + if (f34->bl_version == 5) + return sysfs_emit(buf, "%c%c\n", + f34->bootloader_id[0], + f34->bootloader_id[1]); + else + return sysfs_emit(buf, "V%d.%d\n", + f34->bootloader_id[1], + f34->bootloader_id[0]); } static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL); @@ -334,13 +326,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev, struct rmi_function *fn = data->f34_container; struct f34_data *f34; - if (fn) { - f34 = dev_get_drvdata(&fn->dev); + fn = data->f34_container; + if (!fn) + return -ENODEV; + + f34 = dev_get_drvdata(&fn->dev); + if (!f34) + return -ENODEV; - return sysfs_emit(buf, "%s\n", f34->configuration_id); - } - return 0; + return sysfs_emit(buf, "%s\n", f34->configuration_id); } static DEVICE_ATTR(configuration_id, 0444, @@ -356,10 +351,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data, if (!data->f34_container) { dev_warn(dev, "%s: No F34 present!\n", __func__); - return -EINVAL; + return -ENODEV; } f34 = dev_get_drvdata(&data->f34_container->dev); + if (!f34) { + dev_warn(dev, "%s: No valid F34 present!\n", __func__); + return -ENODEV; + } if (f34->bl_version >= 7) { if (data->pdt_props 
& HAS_BSR) { @@ -485,10 +484,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev, char *buf) { struct rmi_driver_data *data = dev_get_drvdata(dev); - int update_status = 0; + struct f34_data *f34; + int update_status = -ENODEV; - if (data->f34_container) - update_status = rmi_f34_status(data->f34_container); + /* + * The status is the percentage complete, or once complete, + * zero for success or a negative return code. + */ + if (data->f34_container) { + f34 = dev_get_drvdata(&data->f34_container->dev); + if (f34) + update_status = f34->update_status; + } return sysfs_emit(buf, "%d\n", update_status); } @@ -508,33 +515,21 @@ static const struct attribute_group rmi_firmware_attr_group = { .attrs = rmi_firmware_attrs, }; -static int rmi_f34_probe(struct rmi_function *fn) +static int rmi_f34v5_probe(struct f34_data *f34) { - struct f34_data *f34; - unsigned char f34_queries[9]; + struct rmi_function *fn = f34->fn; + u8 f34_queries[9]; bool has_config_id; - u8 version = fn->fd.function_version; - int ret; - - f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL); - if (!f34) - return -ENOMEM; - - f34->fn = fn; - dev_set_drvdata(&fn->dev, f34); - - /* v5 code only supported version 0, try V7 probe */ - if (version > 0) - return rmi_f34v7_probe(f34); + int error; f34->bl_version = 5; - ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, - f34_queries, sizeof(f34_queries)); - if (ret) { + error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, + f34_queries, sizeof(f34_queries)); + if (error) { dev_err(&fn->dev, "%s: Failed to query properties\n", __func__); - return ret; + return error; } snprintf(f34->bootloader_id, sizeof(f34->bootloader_id), @@ -560,11 +555,11 @@ static int rmi_f34_probe(struct rmi_function *fn) f34->v5.config_blocks); if (has_config_id) { - ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr, - f34_queries, sizeof(f34_queries)); - if (ret) { + error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr, + f34_queries, sizeof(f34_queries)); + if (error) { dev_err(&fn->dev, "Failed to read F34 config ID\n"); - return ret; + return error; } snprintf(f34->configuration_id, sizeof(f34->configuration_id), @@ -573,12 +568,34 @@ static int rmi_f34_probe(struct rmi_function *fn) f34_queries[2], f34_queries[3]); rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n", - f34->configuration_id); + f34->configuration_id); } return 0; } +static int rmi_f34_probe(struct rmi_function *fn) +{ + struct f34_data *f34; + u8 version = fn->fd.function_version; + int error; + + f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL); + if (!f34) + return -ENOMEM; + + f34->fn = fn; + + /* v5 code only supported version 0 */ + error = version == 0 ? 
rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34); + if (error) + return error; + + dev_set_drvdata(&fn->dev, f34); + + return 0; +} + int rmi_f34_create_sysfs(struct rmi_device *rmi_dev) { return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group); diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c index eafe5a9b8964..071b7c9bf566 100644 --- a/drivers/input/touchscreen/cyttsp5.c +++ b/drivers/input/touchscreen/cyttsp5.c @@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on) int rc; SET_CMD_REPORT_TYPE(cmd[0], 0); - SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP); + SET_CMD_REPORT_ID(cmd[0], state); SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER); rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd)); @@ -870,13 +870,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq, ts->input->phys = ts->phys; input_set_drvdata(ts->input, ts); - /* Reset the gpio to be in a reset state */ + /* Assert gpio to be in a reset state */ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ts->reset_gpio)) { error = PTR_ERR(ts->reset_gpio); dev_err(dev, "Failed to request reset gpio, error %d\n", error); return error; } + + fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */ + gpiod_set_value_cansleep(ts->reset_gpio, 0); /* Need a delay to have device up */ diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c index a94a1997f96b..af0fb38bcfdc 100644 --- a/drivers/input/touchscreen/stmpe-ts.c +++ b/drivers/input/touchscreen/stmpe-ts.c @@ -366,12 +366,7 @@ static struct platform_driver stmpe_ts_driver = { }; module_platform_driver(stmpe_ts_driver); -static const struct of_device_id stmpe_ts_ids[] = { - { .compatible = "st,stmpe-ts", }, - { }, -}; -MODULE_DEVICE_TABLE(of, stmpe_ts_ids); - +MODULE_ALIAS("platform:stmpe-ts"); MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>"); MODULE_DESCRIPTION("STMPEXXX touchscreen driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index dd9e26b7b718..14aa0d77df26 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3664,6 +3664,14 @@ found: while (*uid == '0' && *(uid + 1)) uid++; + if (strlen(hid) >= ACPIHID_HID_LEN) { + pr_err("Invalid command line: hid is too long\n"); + return 1; + } else if (strlen(uid) >= ACPIHID_UID_LEN) { + pr_err("Invalid command line: uid is too long\n"); + return 1; + } + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index be8761bbef0f..f34209b08b4c 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3869,6 +3869,9 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) struct irq_2_irte *irte_info = &ir_data->irq_2_irte; struct iommu_dev_data *dev_data; + if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) + return -EINVAL; + if (ir_data->iommu == NULL) return -EINVAL; @@ -3879,21 +3882,11 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) * we should not modify the IRTE */ if (!dev_data || !dev_data->use_vapic) - return 0; + return -EINVAL; ir_data->cfg = irqd_cfg(data); pi_data->ir_data = ir_data; - /* Note: - * SVM tries to set up for VAPIC mode, but we are in - * legacy mode. So, we force legacy mode instead. 
- */ - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) { - pr_debug("%s: Fall back to using intr legacy remap\n", - __func__); - pi_data->is_guest_mode = false; - } - pi_data->prev_ga_tag = ir_data->cached_ga_tag; if (pi_data->is_guest_mode) { ir_data->ga_root_ptr = (pi_data->base >> 12); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 9ba596430e7c..980cc6b33c43 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -411,6 +411,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, return ERR_CAST(smmu_domain); smmu_domain->domain.type = IOMMU_DOMAIN_SVA; smmu_domain->domain.ops = &arm_smmu_sva_domain_ops; + + /* + * Choose page_size as the leaf page size for invalidation when + * ARM_SMMU_FEAT_RANGE_INV is present + */ + smmu_domain->domain.pgsize_bitmap = PAGE_SIZE; smmu_domain->smmu = smmu; ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index b4c21aaed126..48d910399a1b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3388,6 +3388,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, mutex_lock(&smmu->streams_mutex); for (i = 0; i < fwspec->num_ids; i++) { struct arm_smmu_stream *new_stream = &master->streams[i]; + struct rb_node *existing; u32 sid = fwspec->ids[i]; new_stream->id = sid; @@ -3398,11 +3399,21 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, break; /* Insert into SID tree */ - if (rb_find_add(&new_stream->node, &smmu->streams, - arm_smmu_streams_cmp_node)) { - dev_warn(master->dev, "stream %u already in tree\n", - sid); - ret = -EINVAL; + existing = rb_find_add(&new_stream->node, &smmu->streams, + arm_smmu_streams_cmp_node); + if (existing) { + struct arm_smmu_master *existing_master = + rb_entry(existing, struct arm_smmu_stream, node) + ->master; + + /* Bridged PCI devices may end up with duplicated IDs */ + if (existing_master == master) + continue; + + dev_warn(master->dev, + "Aliasing StreamID 0x%x (from %s) unsupported, expect DMA to be broken\n", + sid, dev_name(existing_master->dev)); + ret = -ENODEV; break; } } @@ -4429,6 +4440,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); if (FIELD_GET(IDR3_RIL, reg)) smmu->features |= ARM_SMMU_FEAT_RANGE_INV; + if (FIELD_GET(IDR3_FWB, reg)) + smmu->features |= ARM_SMMU_FEAT_S2FWB; /* IDR5 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index d525ab43a4ae..dd7d030d2e89 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -487,17 +487,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu) /* VCMDQ Resource Helpers */ -static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq) -{ - struct arm_smmu_queue *q = &vcmdq->cmdq.q; - size_t nents = 1 << q->llq.max_n_shift; - size_t qsz = nents << CMDQ_ENT_SZ_SHIFT; - - if (!q->base) - return; - dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma); -} - static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq) { struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu; @@ -560,7 +549,8 @@ static void tegra241_vintf_free_lvcmdq(struct 
tegra241_vintf *vintf, u16 lidx) struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx]; char header[64]; - tegra241_vcmdq_free_smmu_cmdq(vcmdq); + /* Note that the lvcmdq queue memory space is managed by devres */ + tegra241_vintf_deinit_lvcmdq(vintf, lidx); dev_dbg(vintf->cmdqv->dev, @@ -768,13 +758,13 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu) vintf = kzalloc(sizeof(*vintf), GFP_KERNEL); if (!vintf) - goto out_fallback; + return -ENOMEM; /* Init VINTF0 for in-kernel use */ ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf); if (ret) { dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret); - goto free_vintf; + return ret; } /* Preallocate logical VCMDQs to VINTF0 */ @@ -783,24 +773,12 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu) vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx); if (IS_ERR(vcmdq)) - goto free_lvcmdq; + return PTR_ERR(vcmdq); } /* Now, we are ready to run all the impl ops */ smmu->impl_ops = &tegra241_cmdqv_impl_ops; return 0; - -free_lvcmdq: - for (lidx--; lidx >= 0; lidx--) - tegra241_vintf_free_lvcmdq(vintf, lidx); - tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx); -free_vintf: - kfree(vintf); -out_fallback: - dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n"); - smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV; - tegra241_cmdqv_remove(smmu); - return 0; } #ifdef CONFIG_IOMMU_DEBUGFS diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index cb7e29dcac15..a775e4dbe06f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1754,7 +1754,7 @@ static size_t cookie_msi_granule(const struct iommu_domain *domain) return PAGE_SIZE; default: BUG(); - }; + } } static struct list_head *cookie_msi_pages(const struct iommu_domain *domain) @@ -1766,7 +1766,7 @@ static struct list_head *cookie_msi_pages(const struct iommu_domain *domain) return &domain->msi_cookie->msi_page_list; default: BUG(); - }; + } } static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 69e23e017d9e..317266aca6e2 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev) struct exynos_iommu_owner *owner = dev_iommu_priv_get(master); mutex_lock(&owner->rpm_lock); - if (&data->domain->domain != &exynos_identity_domain) { + if (data->domain) { dev_dbg(data->sysmmu, "saving state\n"); __sysmmu_disable(data); } @@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev) struct exynos_iommu_owner *owner = dev_iommu_priv_get(master); mutex_lock(&owner->rpm_lock); - if (&data->domain->domain != &exynos_identity_domain) { + if (data->domain) { dev_dbg(data->sysmmu, "restoring state\n"); __sysmmu_enable(data); } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 6e67cc66a204..cb0b993bebb4 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3785,6 +3785,22 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) intel_iommu_debugfs_create_dev(info); + return &iommu->iommu; +free_table: + intel_pasid_free_table(dev); +clear_rbtree: + device_rbtree_remove(info); +free: + kfree(info); + + return ERR_PTR(ret); +} + +static void intel_iommu_probe_finalize(struct device *dev) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + /* * The PCIe spec, in its wisdom, declares 
that the behaviour of the * device is undefined if you enable PASID support after ATS support. @@ -3792,22 +3808,12 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) * we can't yet know if we're ever going to use it. */ if (info->pasid_supported && - !pci_enable_pasid(pdev, info->pasid_supported & ~1)) + !pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1)) info->pasid_enabled = 1; - if (sm_supported(iommu)) + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) iommu_enable_pci_ats(info); iommu_enable_pci_pri(info); - - return &iommu->iommu; -free_table: - intel_pasid_free_table(dev); -clear_rbtree: - device_rbtree_remove(info); -free: - kfree(info); - - return ERR_PTR(ret); } static void intel_iommu_release_device(struct device *dev) @@ -3835,7 +3841,6 @@ static void intel_iommu_release_device(struct device *dev) intel_pasid_free_table(dev); intel_iommu_debugfs_remove_dev(info); kfree(info); - set_dma_ops(dev, NULL); } static void intel_iommu_get_resv_regions(struct device *device, @@ -4392,6 +4397,7 @@ const struct iommu_ops intel_iommu_ops = { .domain_alloc_sva = intel_svm_domain_alloc, .domain_alloc_nested = intel_iommu_domain_alloc_nested, .probe_device = intel_iommu_probe_device, + .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .device_group = intel_iommu_device_group, @@ -4433,6 +4439,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx); +/* QM57/QS57 integrated gfx malfunctions with dmar */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx); + /* Broadwell igfx malfunctions with dmar */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx); @@ -4510,7 +4519,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index ea3ca5203919..3bc2a03cceca 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -1287,43 +1287,44 @@ static struct irq_chip intel_ir_chip = { }; /* - * With posted MSIs, all vectors are multiplexed into a single notification - * vector. Devices MSIs are then dispatched in a demux loop where - * EOIs can be coalesced as well. + * With posted MSIs, the MSI vectors are multiplexed into a single notification + * vector, and only the notification vector is sent to the APIC IRR. Device + * MSIs are then dispatched in a demux loop that harvests the MSIs from the + * CPU's Posted Interrupt Request bitmap. I.e. Posted MSIs never get sent to + * the APIC IRR, and thus do not need an EOI. The notification handler instead + * performs a single EOI after processing the PIR. * - * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack() - * function. Instead EOI is performed by the posted interrupt notification - * handler. + * Note! 
Pending SMP/CPU affinity changes, which are per MSI, must still be + * honored, only the APIC EOI is omitted. * * For the example below, 3 MSIs are coalesced into one CPU notification. Only - * one apic_eoi() is needed. + * one apic_eoi() is needed, but each MSI needs to process pending changes to + * its CPU affinity. * * __sysvec_posted_msi_notification() * irq_enter(); * handle_edge_irq() * irq_chip_ack_parent() - * dummy(); // No EOI + * irq_move_irq(); // No EOI * handle_irq_event() * driver_handler() * handle_edge_irq() * irq_chip_ack_parent() - * dummy(); // No EOI + * irq_move_irq(); // No EOI * handle_irq_event() * driver_handler() * handle_edge_irq() * irq_chip_ack_parent() - * dummy(); // No EOI + * irq_move_irq(); // No EOI * handle_irq_event() * driver_handler() * apic_eoi() * irq_exit() + * */ - -static void dummy_ack(struct irq_data *d) { } - static struct irq_chip intel_ir_chip_post_msi = { .name = "INTEL-IR-POST", - .irq_ack = dummy_ack, + .irq_ack = irq_move_irq, .irq_set_affinity = intel_ir_set_affinity, .irq_compose_msi_msg = intel_ir_compose_msi_msg, .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index c8033ca66377..9d728800a862 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -538,6 +538,9 @@ static void iommu_deinit_device(struct device *dev) dev->iommu_group = NULL; module_put(ops->owner); dev_iommu_free(dev); +#ifdef CONFIG_IOMMU_DMA + dev->dma_iommu = false; +#endif } static struct iommu_domain *pasid_array_entry_to_domain(void *entry) @@ -2717,7 +2720,8 @@ int report_iommu_fault(struct iommu_domain *domain, struct device *dev, * if upper layers showed interest and installed a fault handler, * invoke it. */ - if (domain->handler) + if (domain->cookie_type == IOMMU_COOKIE_FAULT_HANDLER && + domain->handler) ret = domain->handler(domain, dev, iova, flags, domain->handler_token); @@ -3362,10 +3366,12 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain, int ret; for_each_group_device(group, device) { - ret = domain->ops->set_dev_pasid(domain, device->dev, - pasid, old); - if (ret) - goto err_revert; + if (device->dev->iommu->max_pasids > 0) { + ret = domain->ops->set_dev_pasid(domain, device->dev, + pasid, old); + if (ret) + goto err_revert; + } } return 0; @@ -3375,15 +3381,18 @@ err_revert: for_each_group_device(group, device) { if (device == last_gdev) break; - /* - * If no old domain, undo the succeeded devices/pasid. - * Otherwise, rollback the succeeded devices/pasid to the old - * domain. And it is a driver bug to fail attaching with a - * previously good domain. - */ - if (!old || WARN_ON(old->ops->set_dev_pasid(old, device->dev, + if (device->dev->iommu->max_pasids > 0) { + /* + * If no old domain, undo the succeeded devices/pasid. + * Otherwise, rollback the succeeded devices/pasid to + * the old domain. And it is a driver bug to fail + * attaching with a previously good domain. 
+ */ + if (!old || + WARN_ON(old->ops->set_dev_pasid(old, device->dev, pasid, domain))) - iommu_remove_dev_pasid(device->dev, pasid, domain); + iommu_remove_dev_pasid(device->dev, pasid, domain); + } } return ret; } @@ -3394,8 +3403,10 @@ static void __iommu_remove_group_pasid(struct iommu_group *group, { struct group_device *device; - for_each_group_device(group, device) - iommu_remove_dev_pasid(device->dev, pasid, domain); + for_each_group_device(group, device) { + if (device->dev->iommu->max_pasids > 0) + iommu_remove_dev_pasid(device->dev, pasid, domain); + } } /* @@ -3436,7 +3447,13 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, mutex_lock(&group->mutex); for_each_group_device(group, device) { - if (pasid >= device->dev->iommu->max_pasids) { + /* + * Skip PASID validation for devices without PASID support + * (max_pasids = 0). These devices cannot issue transactions + * with PASID, so they don't affect group's PASID usage. + */ + if ((device->dev->iommu->max_pasids > 0) && + (pasid >= device->dev->iommu->max_pasids)) { ret = -EINVAL; goto out_unlock; } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 074daf1aac4e..e424b279a8cd 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1081,31 +1081,24 @@ static int ipmmu_probe(struct platform_device *pdev) } } + platform_set_drvdata(pdev, mmu); /* * Register the IPMMU to the IOMMU subsystem in the following cases: * - R-Car Gen2 IPMMU (all devices registered) * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device) */ - if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { - ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, - dev_name(&pdev->dev)); - if (ret) - return ret; - - ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev); - if (ret) - return ret; - } + if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu)) + return 0; - /* - * We can't create the ARM mapping here as it requires the bus to have - * an IOMMU, which only happens when bus_set_iommu() is called in - * ipmmu_init() after the probe function returns. 
- */ + ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev)); + if (ret) + return ret; - platform_set_drvdata(pdev, mmu); + ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev); + if (ret) + iommu_device_sysfs_remove(&mmu->iommu); - return 0; + return ret; } static void ipmmu_remove(struct platform_device *pdev) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 034b0e670384..df98d0c65f54 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -1372,15 +1372,6 @@ static int mtk_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); mutex_init(&data->mutex); - ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, - "mtk-iommu.%pa", &ioaddr); - if (ret) - goto out_link_remove; - - ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); - if (ret) - goto out_sysfs_remove; - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { list_add_tail(&data->list, data->plat_data->hw_list); data->hw_list = data->plat_data->hw_list; @@ -1390,19 +1381,28 @@ static int mtk_iommu_probe(struct platform_device *pdev) data->hw_list = &data->hw_list_head; } + ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, + "mtk-iommu.%pa", &ioaddr); + if (ret) + goto out_list_del; + + ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); + if (ret) + goto out_sysfs_remove; + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); if (ret) - goto out_list_del; + goto out_device_unregister; } return ret; -out_list_del: - list_del(&data->list); +out_device_unregister: iommu_device_unregister(&data->iommu); out_sysfs_remove: iommu_device_sysfs_remove(&data->iommu); -out_link_remove: +out_list_del: + list_del(&data->list); if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) device_link_remove(data->smicomm_dev, dev); out_runtime_disable: diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index cec05e443083..0d196e447142 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -114,8 +114,8 @@ config I8259 config BCM2712_MIP tristate "Broadcom BCM2712 MSI-X Interrupt Peripheral support" - depends on ARCH_BRCMSTB || COMPILE_TEST - default m if ARCH_BRCMSTB + depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST + default m if ARCH_BRCMSTB || ARCH_BCM2835 depends on ARM_GIC select GENERIC_IRQ_CHIP select IRQ_DOMAIN_HIERARCHY @@ -166,6 +166,11 @@ config DW_APB_ICTL select GENERIC_IRQ_CHIP select IRQ_DOMAIN_HIERARCHY +config ECONET_EN751221_INTC + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + config FARADAY_FTINTC010 bool select IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 365bcea9a61f..23ca4959e6ce 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o +obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c index c980e822a10a..8f300843bbca 100644 --- a/drivers/irqchip/irq-al-fic.c +++ b/drivers/irqchip/irq-al-fic.c @@ -65,15 +65,13 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type) struct 
irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct al_fic *fic = gc->private; enum al_fic_state new_state; - int ret = 0; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) && ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) { pr_debug("fic doesn't support flow type %d\n", flow_type); - ret = -EINVAL; - goto err; + return -EINVAL; } new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ? @@ -91,16 +89,10 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type) if (fic->state == AL_FIC_UNCONFIGURED) { al_fic_set_trigger(fic, gc, new_state); } else if (fic->state != new_state) { - pr_debug("fic %s state already configured to %d\n", - fic->name, fic->state); - ret = -EINVAL; - goto err; + pr_debug("fic %s state already configured to %d\n", fic->name, fic->state); + return -EINVAL; } - -err: - irq_gc_unlock(gc); - - return ret; + return 0; } static void al_fic_irq_handler(struct irq_desc *desc) diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 3839ad79ad31..03aeed39a4d2 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -78,9 +78,8 @@ static int aic_retrigger(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); - irq_gc_unlock(gc); return 1; } @@ -106,30 +105,27 @@ static void aic_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); - irq_gc_unlock(gc); } static void aic_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); - irq_gc_unlock(gc); } static void aic_pm_shutdown(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - irq_gc_unlock(gc); } #else #define aic_suspend NULL @@ -175,10 +171,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d, { struct irq_domain_chip_generic *dgc = d->gc; struct irq_chip_generic *gc; - unsigned long flags; unsigned smr; - int idx; - int ret; + int idx, ret; if (!dgc) return -EINVAL; @@ -194,11 +188,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d, gc = dgc->gc[idx]; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irq)(&gc->lock); smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); aic_common_set_priority(intspec[2], &smr); irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); - irq_gc_unlock_irqrestore(gc, flags); return ret; } diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index e98c2875af9e..60b00d2c3d7a 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -92,11 +92,10 @@ static void aic5_mask(struct irq_data *d) * Disable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. 
*/ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IDCR); gc->mask_cache &= ~d->mask; - irq_gc_unlock(bgc); } static void aic5_unmask(struct irq_data *d) @@ -109,11 +108,10 @@ static void aic5_unmask(struct irq_data *d) * Enable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IECR); gc->mask_cache |= d->mask; - irq_gc_unlock(bgc); } static int aic5_retrigger(struct irq_data *d) @@ -122,11 +120,9 @@ static int aic5_retrigger(struct irq_data *d) struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); /* Enable interrupt on AIC5 */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); - irq_gc_unlock(bgc); - return 1; } @@ -137,14 +133,12 @@ static int aic5_set_type(struct irq_data *d, unsigned type) unsigned int smr; int ret; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); ret = aic_common_set_type(d, type, &smr); if (!ret) irq_reg_writel(bgc, smr, AT91_AIC5_SMR); - irq_gc_unlock(bgc); - return ret; } @@ -166,7 +160,7 @@ static void aic5_suspend(struct irq_data *d) smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR); } - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); for (i = 0; i < dgc->irqs_per_chip; i++) { mask = 1 << i; if ((mask & gc->mask_cache) == (mask & gc->wake_active)) @@ -178,7 +172,6 @@ static void aic5_suspend(struct irq_data *d) else irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); } - irq_gc_unlock(bgc); } static void aic5_resume(struct irq_data *d) @@ -190,7 +183,7 @@ static void aic5_resume(struct irq_data *d) int i; u32 mask; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); if (smr_cache) { irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU); @@ -214,7 +207,6 @@ static void aic5_resume(struct irq_data *d) else irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); } - irq_gc_unlock(bgc); } static void aic5_pm_shutdown(struct irq_data *d) @@ -225,13 +217,12 @@ static void aic5_pm_shutdown(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); for (i = 0; i < dgc->irqs_per_chip; i++) { irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); } - irq_gc_unlock(bgc); } #else #define aic5_suspend NULL @@ -277,7 +268,6 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, unsigned int *out_type) { struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); - unsigned long flags; unsigned smr; int ret; @@ -289,13 +279,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, if (ret) return ret; - irq_gc_lock_irqsave(bgc, flags); + guard(raw_spinlock_irq)(&bgc->lock); irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); aic_common_set_priority(intspec[2], &smr); irq_reg_writel(bgc, smr, AT91_AIC5_SMR); - irq_gc_unlock_irqrestore(bgc, flags); - return ret; } diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c index c546a063c8d8..62722d009804 100644 --- a/drivers/irqchip/irq-bcm2712-mip.c +++ b/drivers/irqchip/irq-bcm2712-mip.c @@ -163,6 +163,7 @@ static const struct irq_domain_ops 
mip_middle_domain_ops = { static const struct msi_parent_ops mip_msi_parent_ops = { .supported_flags = MIP_MSI_FLAGS_SUPPORTED, .required_flags = MIP_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, .bus_select_token = DOMAIN_BUS_GENERIC_MSI, .bus_select_mask = MATCH_PCI_MSI, .prefix = "MIP-MSI-", diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 8a7c088d2081..ff22c3104401 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -63,16 +63,15 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc) for (idx = 0; idx < b->n_words; idx++) { int base = idx * IRQS_PER_WORD; - struct irq_chip_generic *gc = - irq_get_domain_generic_chip(b->domain, base); + struct irq_chip_generic *gc; unsigned long pending; int hwirq; - irq_gc_lock(gc); - pending = irq_reg_readl(gc, b->stat_offset[idx]) & - gc->mask_cache & - data->irq_map_mask[idx]; - irq_gc_unlock(gc); + gc = irq_get_domain_generic_chip(b->domain, base); + scoped_guard (raw_spinlock, &gc->lock) { + pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache & + data->irq_map_mask[idx]; + } for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) generic_handle_domain_irq(b->domain, base + hwirq); @@ -86,11 +85,9 @@ static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc) struct bcm7120_l2_intc_data *b = gc->private; struct irq_chip_type *ct = gc->chip_types; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (b->can_wake) - irq_reg_writel(gc, gc->mask_cache | gc->wake_active, - ct->regs.mask); - irq_gc_unlock(gc); + irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask); } static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) @@ -98,9 +95,8 @@ static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) struct irq_chip_type *ct = gc->chip_types; /* Restore the saved mask */ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } static int bcm7120_l2_intc_init_one(struct device_node *dn, diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index d15ca092742c..1bec5b2cd3f0 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -97,9 +97,8 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; - unsigned long flags; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); /* Save the current mask */ if (save) b->saved_mask = irq_reg_readl(gc, ct->regs.mask); @@ -109,7 +108,6 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save) irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); irq_reg_writel(gc, gc->wake_active, ct->regs.enable); } - irq_gc_unlock_irqrestore(gc, flags); } static void brcmstb_l2_intc_shutdown(struct irq_data *d) @@ -127,9 +125,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; - unsigned long flags; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); if (ct->chip.irq_ack) { /* Clear unmasked non-wakeup interrupts */ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, @@ -139,7 +136,6 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) /* 
Restore the saved mask */ irq_reg_writel(gc, b->saved_mask, ct->regs.disable); irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); - irq_gc_unlock_irqrestore(gc, flags); } static int __init brcmstb_l2_intc_of_init(struct device_node *np, diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c index a970f4806f31..5b7150705d29 100644 --- a/drivers/irqchip/irq-csky-apb-intc.c +++ b/drivers/irqchip/irq-csky-apb-intc.c @@ -50,11 +50,10 @@ static void irq_ck_mask_set_bit(struct irq_data *d) unsigned long ifr = ct->regs.mask - 8; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache |= mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr); - irq_gc_unlock(gc); } static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base, diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c index c8a6d62ccb47..4240a0dbf627 100644 --- a/drivers/irqchip/irq-dw-apb-ictl.c +++ b/drivers/irqchip/irq-dw-apb-ictl.c @@ -101,10 +101,9 @@ static void dw_apb_ictl_resume(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); writel_relaxed(~0, gc->reg_base + ct->regs.enable); writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask); - irq_gc_unlock(gc); } #else #define dw_apb_ictl_resume NULL diff --git a/drivers/irqchip/irq-econet-en751221.c b/drivers/irqchip/irq-econet-en751221.c new file mode 100644 index 000000000000..d83d5eb12795 --- /dev/null +++ b/drivers/irqchip/irq-econet-en751221.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * EN751221 Interrupt Controller Driver. + * + * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller + * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can + * be routed to either VPE but not both, so to support per-CPU interrupts, a + * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In + * this driver, these are called "shadow interrupts". The assignment of shadow + * interrupts is defined by the SoC integrator when wiring the interrupt lines, + * so they are configurable in the device tree. + * + * If an interrupt (say 30) needs per-CPU capability, the SoC integrator + * allocates another IRQ number (say 29) to be its shadow. The device tree + * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts" + * property. + * + * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29, + * telling the hardware to mask VPE#1's view of IRQ 30. 
+ * + * Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr> + */ + +#include <linux/cleanup.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> + +#define IRQ_COUNT 40 + +#define NOT_PERCPU 0xff +#define IS_SHADOW 0xfe + +#define REG_MASK0 0x04 +#define REG_MASK1 0x50 +#define REG_PENDING0 0x08 +#define REG_PENDING1 0x54 + +/** + * @membase: Base address of the interrupt controller registers + * @interrupt_shadows: Array of all interrupts, for each value, + * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow + * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt + * - else: This is a per-cpu interrupt whose shadow is the value + */ +static struct { + void __iomem *membase; + u8 interrupt_shadows[IRQ_COUNT]; +} econet_intc __ro_after_init; + +static DEFINE_RAW_SPINLOCK(irq_lock); + +/* IRQs must be disabled */ +static void econet_wreg(u32 reg, u32 val, u32 mask) +{ + u32 v; + + guard(raw_spinlock)(&irq_lock); + + v = ioread32(econet_intc.membase + reg); + v &= ~mask; + v |= val & mask; + iowrite32(v, econet_intc.membase + reg); +} + +/* IRQs must be disabled */ +static void econet_chmask(u32 hwirq, bool unmask) +{ + u32 reg, mask; + u8 shadow; + + /* + * If the IRQ is a shadow, it should never be manipulated directly. + * It should only be masked/unmasked as a result of the "real" per-cpu + * irq being manipulated by a thread running on VPE#1. + * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask. + * This is single processor only, so smp_processor_id() never exceeds 1. + */ + shadow = econet_intc.interrupt_shadows[hwirq]; + if (WARN_ON_ONCE(shadow == IS_SHADOW)) + return; + else if (shadow != NOT_PERCPU && smp_processor_id() == 1) + hwirq = shadow; + + if (hwirq >= 32) { + reg = REG_MASK1; + mask = BIT(hwirq - 32); + } else { + reg = REG_MASK0; + mask = BIT(hwirq); + } + + econet_wreg(reg, unmask ? mask : 0, mask); +} + +/* IRQs must be disabled */ +static void econet_intc_mask(struct irq_data *d) +{ + econet_chmask(d->hwirq, false); +} + +/* IRQs must be disabled */ +static void econet_intc_unmask(struct irq_data *d) +{ + econet_chmask(d->hwirq, true); +} + +static void econet_mask_all(void) +{ + /* IRQs are generally disabled during init, but guarding here makes it non-obligatory. 
*/ + guard(irqsave)(); + econet_wreg(REG_MASK0, 0, ~0); + econet_wreg(REG_MASK1, 0, ~0); +} + +static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset) +{ + int hwirq; + + while (pending) { + hwirq = fls(pending) - 1; + generic_handle_domain_irq(d, hwirq + offset); + pending &= ~BIT(hwirq); + } +} + +static void econet_intc_from_parent(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_domain *domain; + u32 pending0, pending1; + + chained_irq_enter(chip, desc); + + pending0 = ioread32(econet_intc.membase + REG_PENDING0); + pending1 = ioread32(econet_intc.membase + REG_PENDING1); + + if (unlikely(!(pending0 | pending1))) { + spurious_interrupt(); + } else { + domain = irq_desc_get_handler_data(desc); + econet_intc_handle_pending(domain, pending0, 0); + econet_intc_handle_pending(domain, pending1, 32); + } + + chained_irq_exit(chip, desc); +} + +static const struct irq_chip econet_irq_chip; + +static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq) +{ + int ret; + + if (hwirq >= IRQ_COUNT) { + pr_err("%s: hwirq %lu out of range\n", __func__, hwirq); + return -EINVAL; + } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) { + pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq); + return -EINVAL; + } + + if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) { + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq); + } else { + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq); + ret = irq_set_percpu_devid(irq); + if (ret) + pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret); + } + + irq_set_chip_data(irq, NULL); + return 0; +} + +static const struct irq_chip econet_irq_chip = { + .name = "en751221-intc", + .irq_unmask = econet_intc_unmask, + .irq_mask = econet_intc_mask, + .irq_mask_ack = econet_intc_mask, +}; + +static const struct irq_domain_ops econet_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = econet_intc_map +}; + +static int __init get_shadow_interrupts(struct device_node *node) +{ + const char *field = "econet,shadow-interrupts"; + int num_shadows; + + num_shadows = of_property_count_u32_elems(node, field); + + memset(econet_intc.interrupt_shadows, NOT_PERCPU, + sizeof(econet_intc.interrupt_shadows)); + + if (num_shadows <= 0) { + return 0; + } else if (num_shadows % 2) { + pr_err("%pOF: %s count is odd, ignoring\n", node, field); + return 0; + } + + u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL); + if (!shadows) + return -ENOMEM; + + if (of_property_read_u32_array(node, field, shadows, num_shadows)) { + pr_err("%pOF: Failed to read %s\n", node, field); + return -EINVAL; + } + + for (int i = 0; i < num_shadows; i += 2) { + u32 shadow = shadows[i + 1]; + u32 target = shadows[i]; + + if (shadow > IRQ_COUNT) { + pr_err("%pOF: %s[%d] shadow(%d) out of range\n", + node, field, i + 1, shadow); + continue; + } + + if (target >= IRQ_COUNT) { + pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target); + continue; + } + + if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) { + pr_err("%pOF: %s[%d] target(%d) already has a shadow\n", + node, field, i, target); + continue; + } + + if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) { + pr_err("%pOF: %s[%d] shadow(%d) already has a target\n", + node, field, i + 1, shadow); + continue; + } + + econet_intc.interrupt_shadows[target] = shadow; + econet_intc.interrupt_shadows[shadow] = 
IS_SHADOW; + } + + return 0; +} + +static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent) +{ + struct irq_domain *domain; + struct resource res; + int ret, irq; + + ret = get_shadow_interrupts(node); + if (ret) + return ret; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node); + return -EINVAL; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("%pOF: DT: Failed to get 'reg'\n", node); + ret = -EINVAL; + goto err_dispose_mapping; + } + + if (!request_mem_region(res.start, resource_size(&res), res.name)) { + pr_err("%pOF: Failed to request memory\n", node); + ret = -EBUSY; + goto err_dispose_mapping; + } + + econet_intc.membase = ioremap(res.start, resource_size(&res)); + if (!econet_intc.membase) { + pr_err("%pOF: Failed to remap membase\n", node); + ret = -ENOMEM; + goto err_release; + } + + econet_mask_all(); + + domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT, + &econet_domain_ops, NULL); + if (!domain) { + pr_err("%pOF: Failed to add irqdomain\n", node); + ret = -ENOMEM; + goto err_unmap; + } + + irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain); + + return 0; + +err_unmap: + iounmap(econet_intc.membase); +err_release: + release_mem_region(res.start, resource_size(&res)); +err_dispose_mapping: + irq_dispose_mapping(irq); + return ret; +} + +IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index c69894861866..cc6a6c1585d2 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -252,7 +252,7 @@ static void __init gicv2m_teardown(void) static struct msi_parent_ops gicv2m_msi_parent_ops = { .supported_flags = GICV2M_MSI_FLAGS_SUPPORTED, .required_flags = GICV2M_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_NEXUS, .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .prefix = "GICv2m-", @@ -421,7 +421,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, #ifdef CONFIG_ACPI static int acpi_num_msi; -static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) +static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) { struct v2m_data *data; diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c index bdb04c808148..c5a7eb1c0419 100644 --- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c +++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c @@ -203,7 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, const struct msi_parent_ops gic_v3_its_msi_parent_ops = { .supported_flags = ITS_MSI_FLAGS_SUPPORTED, .required_flags = ITS_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_NEXUS, .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .prefix = "ITS-", diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0115ad6c8259..81a44ce7619f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -125,6 +125,8 @@ struct its_node { int vlpi_redist_offset; }; +static DEFINE_PER_CPU(struct its_node *, local_4_1_its); + #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) #define is_v4_1(its) (!!((its)->typer & 
GITS_TYPER_VMAPP)) #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) @@ -2778,6 +2780,7 @@ static u64 inherit_vpe_l1_table_from_its(void) } val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + *this_cpu_ptr(&local_4_1_its) = its; return val; } @@ -2815,6 +2818,7 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu); return val; } @@ -4180,7 +4184,7 @@ static struct irq_chip its_vpe_irq_chip = { static struct its_node *find_4_1_its(void) { - static struct its_node *its = NULL; + struct its_node *its = *this_cpu_ptr(&local_4_1_its); if (!its) { list_for_each_entry(its, &its_nodes, entry) { diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index 34e9ca77a8c3..647b18e24e0c 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -197,7 +197,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain, static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = { .supported_flags = MBI_MSI_FLAGS_SUPPORTED, .required_flags = MBI_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_NEXUS, .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .prefix = "MBI-", diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index e43ff8935b82..794ecba717c9 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -52,11 +52,10 @@ static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d) struct regmap *map = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); regmap_write(map, ct->regs.ack, mask); regmap_write(map, ct->regs.enable, mask); *ct->mask_cache |= mask; - irq_gc_unlock(gc); } static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) @@ -66,10 +65,9 @@ static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) struct regmap *map = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); regmap_write(map, ct->regs.disable, mask); *ct->mask_cache &= ~mask; - irq_gc_unlock(gc); } static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) @@ -79,10 +77,9 @@ static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) struct regmap *map = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); regmap_write(map, ct->regs.ack, mask); regmap_write(map, ct->regs.disable, mask); - irq_gc_unlock(gc); } static int __init ingenic_tcu_irq_init(struct device_node *np, diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c index 9445c3a6b1b0..11d3a0ffa261 100644 --- a/drivers/irqchip/irq-lan966x-oic.c +++ b/drivers/irqchip/irq-lan966x-oic.c @@ -71,14 +71,12 @@ static unsigned int lan966x_oic_irq_startup(struct irq_data *data) struct lan966x_oic_chip_regs *chip_regs = gc->private; u32 map; - irq_gc_lock(gc); - - /* Map the source interrupt to the destination */ - map = irq_reg_readl(gc, chip_regs->reg_off_map); - map |= data->mask; - irq_reg_writel(gc, map, chip_regs->reg_off_map); - - irq_gc_unlock(gc); + scoped_guard (raw_spinlock, &gc->lock) { + /* Map the source interrupt to the destination */ + map = irq_reg_readl(gc, chip_regs->reg_off_map); + map |= data->mask; + 
irq_reg_writel(gc, map, chip_regs->reg_off_map); + } ct->chip.irq_ack(data); ct->chip.irq_unmask(data); @@ -95,14 +93,12 @@ static void lan966x_oic_irq_shutdown(struct irq_data *data) ct->chip.irq_mask(data); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); /* Unmap the interrupt */ map = irq_reg_readl(gc, chip_regs->reg_off_map); map &= ~data->mask; irq_reg_writel(gc, map, chip_regs->reg_off_map); - - irq_gc_unlock(gc); } static int lan966x_oic_irq_set_type(struct irq_data *data, diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 95cade56e0be..0033c2188abc 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -116,9 +116,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); u32 mask = data->mask; - unsigned long flags; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock)(&gc->lock); switch (type) { case IRQ_TYPE_LEVEL_HIGH: liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); @@ -137,10 +136,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type) liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); break; default: - irq_gc_unlock_irqrestore(gc, flags); return -EINVAL; } - irq_gc_unlock_irqrestore(gc, flags); irqd_set_trigger_type(data, type); return 0; @@ -157,10 +154,9 @@ static void liointc_suspend(struct irq_chip_generic *gc) static void liointc_resume(struct irq_chip_generic *gc) { struct liointc_priv *priv = gc->private; - unsigned long flags; int i; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); /* Disable all at first */ writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE); /* Restore map cache */ @@ -170,7 +166,6 @@ static void liointc_resume(struct irq_chip_generic *gc) writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE); /* Restore mask cache */ writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE); - irq_gc_unlock_irqrestore(gc, flags); } static int parent_irq[LIOINTC_NUM_PARENT]; diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c index 7b3020f480d5..8cbc191f750b 100644 --- a/drivers/irqchip/irq-mscc-ocelot.c +++ b/drivers/irqchip/irq-mscc-ocelot.c @@ -83,7 +83,7 @@ static void ocelot_irq_unmask(struct irq_data *data) unsigned int mask = data->mask; u32 val; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); /* * Clear sticky bits for edge mode interrupts. 
* Serval has only one trigger register replication, but the adjacent @@ -97,7 +97,6 @@ static void ocelot_irq_unmask(struct irq_data *data) *ct->mask_cache &= ~mask; irq_reg_writel(gc, mask, p->reg_off_ena_set); - irq_gc_unlock(gc); } static void ocelot_irq_handler(struct irq_desc *desc) diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c index d67f93f6d750..60b976286636 100644 --- a/drivers/irqchip/irq-mvebu-gicp.c +++ b/drivers/irqchip/irq-mvebu-gicp.c @@ -161,7 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = { static const struct msi_parent_ops gicp_msi_parent_ops = { .supported_flags = GICP_MSI_FLAGS_SUPPORTED, .required_flags = GICP_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_GENERIC_MSI, .bus_select_mask = MATCH_PLATFORM_MSI, .prefix = "GICP-", diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c index 28f7e81df94f..54f6f0811573 100644 --- a/drivers/irqchip/irq-mvebu-odmi.c +++ b/drivers/irqchip/irq-mvebu-odmi.c @@ -157,7 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = { static const struct msi_parent_ops odmi_msi_parent_ops = { .supported_flags = ODMI_MSI_FLAGS_SUPPORTED, .required_flags = ODMI_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_GENERIC_MSI, .bus_select_mask = MATCH_PLATFORM_MSI, .prefix = "ODMI-", diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c index 8f7c9dc45953..87a5813fd835 100644 --- a/drivers/irqchip/irq-pruss-intc.c +++ b/drivers/irqchip/irq-pruss-intc.c @@ -581,8 +581,7 @@ static int pruss_intc_probe(struct platform_device *pdev) host_data->intc = intc; host_data->host_irq = i; - irq_set_handler_data(irq, host_data); - irq_set_chained_handler(irq, pruss_intc_irq_handler); + irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data); } return 0; diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index 00c770e367d0..8d569f7c5a7a 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; + if (pin == GPIO_NO_WAKE_IRQ) + return irq_domain_disconnect_hierarchy(domain, virq); + ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, &qcom_mpm_chip, priv); if (ret) diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c index 7a61d454a1a5..1c12e6ec1370 100644 --- a/drivers/irqchip/irq-renesas-rzv2h.c +++ b/drivers/irqchip/irq-renesas-rzv2h.c @@ -170,6 +170,14 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable) else tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width); writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k)); + + /* + * A glitch in the edge detection circuit can cause a spurious + * interrupt. Clear the status flag after setting the ICU_TSSRk + * registers, which is recommended by the hardware manual as a + * countermeasure. 
+ */ + writel_relaxed(BIT(tint_nr), priv->base + priv->info->t_offs + ICU_TSCLR); } static void rzv2h_icu_irq_disable(struct irq_data *d) diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c index bdf5cd2037f2..62f76950a113 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.c +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -208,17 +208,17 @@ skip: } #ifdef CONFIG_SMP -static void __imsic_local_timer_start(struct imsic_local_priv *lpriv) +static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) { lockdep_assert_held(&lpriv->lock); if (!timer_pending(&lpriv->timer)) { lpriv->timer.expires = jiffies + 1; - add_timer_on(&lpriv->timer, smp_processor_id()); + add_timer_on(&lpriv->timer, cpu); } } #else -static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv) +static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) { } #endif @@ -233,7 +233,7 @@ void imsic_local_sync_all(bool force_all) if (force_all) bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); if (!__imsic_local_sync(lpriv)) - __imsic_local_timer_start(lpriv); + __imsic_local_timer_start(lpriv, smp_processor_id()); raw_spin_unlock_irqrestore(&lpriv->lock, flags); } @@ -278,7 +278,7 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu return; } - __imsic_local_timer_start(lpriv); + __imsic_local_timer_start(lpriv, cpu); } } #else diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c index ee682e87eb8b..f7e8429de436 100644 --- a/drivers/irqchip/irq-sg2042-msi.c +++ b/drivers/irqchip/irq-sg2042-msi.c @@ -19,21 +19,36 @@ #include "irq-msi-lib.h" -#define SG2042_MAX_MSI_VECTOR 32 +struct sg204x_msi_chip_info { + const struct irq_chip *irqchip; + const struct msi_parent_ops *parent_ops; +}; + +/** + * struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller + * @reg_clr: clear reg, see TRM, 10.1.33, GP_INTR0_CLR + * @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET + * @irq_first: First vector number that MSIs start from + * @num_irqs: Number of vectors for MSIs + * @msi_map: mapping for allocated MSI vectors. + * @msi_map_lock: Lock for msi_map + * @chip_info: chip specific information + */ +struct sg204x_msi_chipdata { + void __iomem *reg_clr; -struct sg2042_msi_chipdata { - void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR + phys_addr_t doorbell_addr; - phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET + u32 irq_first; + u32 num_irqs; - u32 irq_first; // The vector number that MSIs starts - u32 num_irqs; // The number of vectors for MSIs + unsigned long *msi_map; + struct mutex msi_map_lock; - DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR); - struct mutex msi_map_lock; // lock for msi_map + const struct sg204x_msi_chip_info *chip_info; }; -static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req) +static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req) { int first; @@ -43,7 +58,7 @@ static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_r return first >= 0 ? 
first : -ENOSPC; } -static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req) +static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req) { guard(mutex)(&data->msi_map_lock); bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req)); @@ -51,7 +66,7 @@ static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, i static void sg2042_msi_irq_ack(struct irq_data *d) { - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); int bit_off = d->hwirq; writel(1 << bit_off, data->reg_clr); @@ -61,7 +76,7 @@ static void sg2042_msi_irq_ack(struct irq_data *d) static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); msg->address_hi = upper_32_bits(data->doorbell_addr); msg->address_lo = lower_32_bits(data->doorbell_addr); @@ -79,9 +94,38 @@ static const struct irq_chip sg2042_msi_middle_irq_chip = { .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg, }; -static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) +static void sg2044_msi_irq_ack(struct irq_data *d) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + + writel(0, (u32 __iomem *)data->reg_clr + d->hwirq); + irq_chip_ack_parent(d); +} + +static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32); + + msg->address_lo = lower_32_bits(doorbell); + msg->address_hi = upper_32_bits(doorbell); + msg->data = d->hwirq % 32; +} + +static struct irq_chip sg2044_msi_middle_irq_chip = { + .name = "SG2044 MSI", + .irq_ack = sg2044_msi_irq_ack, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg, +}; + +static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) { - struct sg2042_msi_chipdata *data = domain->host_data; + struct sg204x_msi_chipdata *data = domain->host_data; struct irq_fwspec fwspec; struct irq_data *d; int ret; @@ -99,47 +143,45 @@ static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); } -static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, +static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { - struct sg2042_msi_chipdata *data = domain->host_data; + struct sg204x_msi_chipdata *data = domain->host_data; int hwirq, err, i; - hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs); + hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs); if (hwirq < 0) return hwirq; for (i = 0; i < nr_irqs; i++) { - err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i); + err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i); if (err) goto err_hwirq; irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, - &sg2042_msi_middle_irq_chip, data); + data->chip_info->irqchip, data); } - return 0; err_hwirq: - sg2042_msi_free_hwirq(data, hwirq, nr_irqs); + sg204x_msi_free_hwirq(data, hwirq, nr_irqs); 
irq_domain_free_irqs_parent(domain, virq, i); - return err; } -static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq, +static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); irq_domain_free_irqs_parent(domain, virq, nr_irqs); - sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs); + sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs); } -static const struct irq_domain_ops sg2042_msi_middle_domain_ops = { - .alloc = sg2042_msi_middle_domain_alloc, - .free = sg2042_msi_middle_domain_free, +static const struct irq_domain_ops sg204x_msi_middle_domain_ops = { + .alloc = sg204x_msi_middle_domain_alloc, + .free = sg204x_msi_middle_domain_free, .select = msi_lib_irq_domain_select, }; @@ -151,20 +193,37 @@ static const struct irq_domain_ops sg2042_msi_middle_domain_ops = { static const struct msi_parent_ops sg2042_msi_parent_ops = { .required_flags = SG2042_MSI_FLAGS_REQUIRED, .supported_flags = SG2042_MSI_FLAGS_SUPPORTED, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, .bus_select_mask = MATCH_PCI_MSI, .bus_select_token = DOMAIN_BUS_NEXUS, .prefix = "SG2042-", .init_dev_msi_info = msi_lib_init_dev_msi_info, }; -static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data, +#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) + +#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static const struct msi_parent_ops sg2044_msi_parent_ops = { + .required_flags = SG2044_MSI_FLAGS_REQUIRED, + .supported_flags = SG2044_MSI_FLAGS_SUPPORTED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .bus_select_mask = MATCH_PCI_MSI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .prefix = "SG2044-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data, struct irq_domain *plic_domain, struct device *dev) { struct fwnode_handle *fwnode = dev_fwnode(dev); struct irq_domain *middle_domain; middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode, - &sg2042_msi_middle_domain_ops, data); + &sg204x_msi_middle_domain_ops, data); if (!middle_domain) { pr_err("Failed to create the MSI middle domain\n"); return -ENOMEM; @@ -173,24 +232,29 @@ static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data, irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - middle_domain->msi_parent_ops = &sg2042_msi_parent_ops; - + middle_domain->msi_parent_ops = data->chip_info->parent_ops; return 0; } static int sg2042_msi_probe(struct platform_device *pdev) { struct fwnode_reference_args args = { }; - struct sg2042_msi_chipdata *data; + struct sg204x_msi_chipdata *data; struct device *dev = &pdev->dev; struct irq_domain *plic_domain; struct resource *res; int ret; - data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL); if (!data) return -ENOMEM; + data->chip_info = device_get_match_data(&pdev->dev); + if (!data->chip_info) { + dev_err(&pdev->dev, "Failed to get irqchip\n"); + return -EINVAL; + } + data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr"); if (IS_ERR(data->reg_clr)) { dev_err(dev, "Failed to map clear 
register\n"); @@ -231,11 +295,28 @@ static int sg2042_msi_probe(struct platform_device *pdev) mutex_init(&data->msi_map_lock); - return sg2042_msi_init_domains(data, plic_domain, dev); + data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL); + if (!data->msi_map) { + dev_err(&pdev->dev, "Unable to allocate msi mapping\n"); + return -ENOMEM; + } + + return sg204x_msi_init_domains(data, plic_domain, dev); } +static const struct sg204x_msi_chip_info sg2042_chip_info = { + .irqchip = &sg2042_msi_middle_irq_chip, + .parent_ops = &sg2042_msi_parent_ops, +}; + +static const struct sg204x_msi_chip_info sg2044_chip_info = { + .irqchip = &sg2044_msi_middle_irq_chip, + .parent_ops = &sg2044_msi_parent_ops, +}; + static const struct of_device_id sg2042_msi_of_match[] = { - { .compatible = "sophgo,sg2042-msi" }, + { .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info }, + { .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info }, { } }; diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 7cd34f9b0269..978811f2abe8 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -169,22 +169,18 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type) u32 rtsr, ftsr; int err; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst); ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst); err = stm32_exti_set_type(d, type, &rtsr, &ftsr); if (err) - goto unlock; + return err; irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst); irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst); - -unlock: - irq_gc_unlock(gc); - - return err; + return 0; } static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data, @@ -217,18 +213,16 @@ static void stm32_irq_suspend(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); stm32_chip_suspend(chip_data, gc->wake_active); - irq_gc_unlock(gc); } static void stm32_irq_resume(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); stm32_chip_resume(chip_data, gc->mask_cache); - irq_gc_unlock(gc); } static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq, @@ -265,11 +259,8 @@ static void stm32_irq_ack(struct irq_data *d) struct stm32_exti_chip_data *chip_data = gc->private; const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - irq_gc_lock(gc); - + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst); - - irq_gc_unlock(gc); } static struct diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index f521341f50d9..fe32dfdc2dd0 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -111,7 +111,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) unsigned int src_type; unsigned int i; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); switch (flow_type & IRQF_TRIGGER_MASK) { case IRQ_TYPE_EDGE_FALLING: @@ -128,9 +128,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) src_type = SUNXI_SRC_TYPE_LEVEL_LOW; break; default: - irq_gc_unlock(gc); - pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", - data->irq); + pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq); return -EBADR; } @@ -145,9 +143,6 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) 
src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK; src_type_reg |= src_type; sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg); - - irq_gc_unlock(gc); - return IRQ_SET_MASK_OK; } diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 733dbda18a82..94cbc5111d7e 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -41,11 +41,9 @@ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - uint32_t im, mod, pol; + uint32_t mod, pol, im = data->mask; - im = data->mask; - - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im; pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im; @@ -67,9 +65,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) case IRQ_TYPE_EDGE_RISING: break; default: - irq_gc_unlock(gc); - pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", - __func__, data->irq); + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq); return -EBADR; } @@ -79,9 +75,6 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod); ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol); ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im); - - irq_gc_unlock(gc); - return IRQ_SET_MASK_OK; } diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c index e17dd3a8c2d5..3b742590aec8 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/bitops.h> @@ -63,29 +64,28 @@ struct vt8500_irq_data { struct irq_domain *domain; /* Domain for this controller */ }; -/* Global variable for accessing io-mem addresses */ -static struct vt8500_irq_data intc[VT8500_INTC_MAX]; -static u32 active_cnt = 0; +/* Primary interrupt controller data */ +static struct vt8500_irq_data *primary_intc; -static void vt8500_irq_mask(struct irq_data *d) +static void vt8500_irq_ack(struct irq_data *d) { struct vt8500_irq_data *priv = d->domain->host_data; void __iomem *base = priv->base; void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 
0 : 4); - u8 edge, dctr; - u32 status; + u32 status = (1 << (d->hwirq & 0x1f)); - edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE; - if (edge) { - status = readl(stat_reg); + writel(status, stat_reg); +} - status |= (1 << (d->hwirq & 0x1f)); - writel(status, stat_reg); - } else { - dctr = readb(base + VT8500_ICDC + d->hwirq); - dctr &= ~VT8500_INT_ENABLE; - writeb(dctr, base + VT8500_ICDC + d->hwirq); - } +static void vt8500_irq_mask(struct irq_data *d) +{ + struct vt8500_irq_data *priv = d->domain->host_data; + void __iomem *base = priv->base; + u8 dctr; + + dctr = readb(base + VT8500_ICDC + d->hwirq); + dctr &= ~VT8500_INT_ENABLE; + writeb(dctr, base + VT8500_ICDC + d->hwirq); } static void vt8500_irq_unmask(struct irq_data *d) @@ -130,11 +130,11 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) } static struct irq_chip vt8500_irq_chip = { - .name = "vt8500", - .irq_ack = vt8500_irq_mask, - .irq_mask = vt8500_irq_mask, - .irq_unmask = vt8500_irq_unmask, - .irq_set_type = vt8500_irq_set_type, + .name = "vt8500", + .irq_ack = vt8500_irq_ack, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, }; static void __init vt8500_init_irq_hw(void __iomem *base) @@ -163,82 +163,89 @@ static const struct irq_domain_ops vt8500_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; +static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc) +{ + unsigned long irqnr = readl_relaxed(intc->base) & 0x3F; + unsigned long stat; + + /* + * Highest Priority register default = 63, so check that this + * is a real interrupt by checking the status register + */ + if (irqnr == 63) { + stat = readl_relaxed(intc->base + VT8500_ICIS + 4); + if (!(stat & BIT(31))) + return; + } + + generic_handle_domain_irq(intc->domain, irqnr); +} + static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) { - u32 stat, i; - int irqnr; - void __iomem *base; - - /* Loop through each active controller */ - for (i=0; i<active_cnt; i++) { - base = intc[i].base; - irqnr = readl_relaxed(base) & 0x3F; - /* - Highest Priority register default = 63, so check that this - is a real interrupt by checking the status register - */ - if (irqnr == 63) { - stat = readl_relaxed(base + VT8500_ICIS + 4); - if (!(stat & BIT(31))) - continue; - } + vt8500_handle_irq_common(primary_intc); +} - generic_handle_domain_irq(intc[i].domain, irqnr); - } +static void vt8500_handle_irq_chained(struct irq_desc *desc) +{ + struct irq_domain *d = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct vt8500_irq_data *intc = d->host_data; + + chained_irq_enter(chip, desc); + vt8500_handle_irq_common(intc); + chained_irq_exit(chip, desc); } static int __init vt8500_irq_init(struct device_node *node, struct device_node *parent) { - int irq, i; - struct device_node *np = node; + struct vt8500_irq_data *intc; + int irq, i, ret = 0; - if (active_cnt == VT8500_INTC_MAX) { - pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n", - __func__); - goto out; - } + intc = kzalloc(sizeof(*intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; - intc[active_cnt].base = of_iomap(np, 0); - intc[active_cnt].domain = irq_domain_add_linear(node, 64, - &vt8500_irq_domain_ops, &intc[active_cnt]); - - if (!intc[active_cnt].base) { + intc->base = of_iomap(node, 0); + if (!intc->base) { pr_err("%s: Unable to map IO memory\n", __func__); - goto out; + ret = -ENOMEM; + goto err_free; } - if (!intc[active_cnt].domain) { + intc->domain = 
irq_domain_create_linear(of_fwnode_handle(node), 64, + &vt8500_irq_domain_ops, intc); + if (!intc->domain) { pr_err("%s: Unable to add irq domain!\n", __func__); - goto out; + ret = -ENOMEM; + goto err_unmap; } - set_handle_irq(vt8500_handle_irq); - - vt8500_init_irq_hw(intc[active_cnt].base); + vt8500_init_irq_hw(intc->base); pr_info("vt8500-irq: Added interrupt controller\n"); - active_cnt++; - - /* check if this is a slaved controller */ - if (of_irq_count(np) != 0) { - /* check that we have the correct number of interrupts */ - if (of_irq_count(np) != 8) { - pr_err("%s: Incorrect IRQ map for slaved controller\n", - __func__); - return -EINVAL; - } - - for (i = 0; i < 8; i++) { - irq = irq_of_parse_and_map(np, i); - enable_irq(irq); + /* check if this is a chained controller */ + if (of_irq_count(node) != 0) { + for (i = 0; i < of_irq_count(node); i++) { + irq = irq_of_parse_and_map(node, i); + irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained, + intc); } pr_info("vt8500-irq: Enabled slave->parent interrupts\n"); + } else { + primary_intc = intc; + set_handle_irq(vt8500_handle_irq); } -out: return 0; + +err_unmap: + iounmap(intc->base); +err_free: + kfree(intc); + return ret; } IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init); diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 02a680c73979..bf0d7d58c8b0 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -96,7 +96,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, ret = mcb_device_register(bus, mdev); if (ret < 0) - goto err; + return ret; return 0; diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 06f809e70f15..ddb37f6670de 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -139,7 +139,7 @@ config MD_RAID456 tristate "RAID-4/RAID-5/RAID-6 mode" depends on BLK_DEV_MD select RAID6_PQ - select LIBCRC32C + select CRC32 select ASYNC_MEMCPY select ASYNC_XOR select ASYNC_PQ diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e42f1400cea9..c40db9c161c1 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -293,8 +293,7 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; bio->bi_iter.bi_sector = SB_SECTOR; - __bio_add_page(bio, virt_to_page(out), SB_SIZE, - offset_in_page(out)); + bio_add_virt_nofail(bio, out, SB_SIZE); out->offset = cpu_to_le64(sb->offset); @@ -546,7 +545,7 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) static struct uuid_entry *uuid_find_empty(struct cache_set *c) { - static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; + static const char zero_uuid[16] = { 0 }; return uuid_find(c, zero_uuid); } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9c8ed65cd87e..d098e75e3461 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -68,6 +68,8 @@ #define LIST_DIRTY 1 #define LIST_SIZE 2 +#define SCAN_RESCHED_CYCLE 16 + /*--------------------------------------------------------------*/ /* @@ -1362,7 +1364,7 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, ptr = (char *)b->data + offset; len = n_sectors << SECTOR_SHIFT; - __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr)); + bio_add_virt_nofail(bio, ptr, len); submit_bio(bio); } @@ -2424,7 +2426,12 @@ static void __scan(struct dm_bufio_client *c) atomic_long_dec(&c->need_shrink); freed++; - cond_resched(); + + if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) { + 
dm_bufio_unlock(c); + cond_resched(); + dm_bufio_lock(c); + } } } } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 2a283feb3319..1f626066e8cc 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2557,14 +2557,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w) char *mem; outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios); - - r = bio_add_page(outgoing_bio, virt_to_page(outgoing_data), ic->sectors_per_block << SECTOR_SHIFT, 0); - if (unlikely(r != (ic->sectors_per_block << SECTOR_SHIFT))) { - bio_put(outgoing_bio); - bio->bi_status = BLK_STS_RESOURCE; - bio_endio(bio); - return; - } + bio_add_virt_nofail(outgoing_bio, outgoing_data, + ic->sectors_per_block << SECTOR_SHIFT); bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1); if (IS_ERR(bip)) { @@ -3211,7 +3205,8 @@ next_chunk: bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios); bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; - __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer)); + bio_add_virt_nofail(bio, recalc_buffer, + range.n_sectors << SECTOR_SHIFT); r = submit_bio_wait(bio); bio_put(bio); if (unlikely(r)) { @@ -3228,7 +3223,8 @@ next_chunk: bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios); bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; - __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer)); + bio_add_virt_nofail(bio, recalc_buffer, + range.n_sectors << SECTOR_SHIFT); bip = bio_integrity_alloc(bio, GFP_NOIO, 1); if (unlikely(IS_ERR(bip))) { @@ -5164,7 +5160,7 @@ static void dm_integrity_dtr(struct dm_target *ti) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); BUG_ON(!list_empty(&ic->wait_list)); - if (ic->mode == 'B') + if (ic->mode == 'B' && ic->bitmap_flush_work.work.func) cancel_delayed_work_sync(&ic->bitmap_flush_work); if (ic->metadata_wq) destroy_workqueue(ic->metadata_wq); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 6adc55fd90d3..127138c61be5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -14,6 +14,7 @@ #include "raid5.h" #include "raid10.h" #include "md-bitmap.h" +#include "dm-core.h" #include <linux/device-mapper.h> @@ -3308,6 +3309,7 @@ size_check: /* Disable/enable discard support on raid set. 
*/ configure_discard_support(rs); + rs->md.dm_gendisk = ti->table->md->disk; mddev_unlock(&rs->md); return 0; @@ -3327,6 +3329,7 @@ static void raid_dtr(struct dm_target *ti) mddev_lock_nointr(&rs->md); md_stop(&rs->md); + rs->md.dm_gendisk = NULL; mddev_unlock(&rs->md); if (work_pending(&rs->md.event_work)) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 35100a435c88..6b23e777e10e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -523,8 +523,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv) gfp = GFP_NOIO; } argv = kmalloc_array(new_size, sizeof(*argv), gfp); - if (argv && old_argv) { - memcpy(argv, old_argv, *size * sizeof(*argv)); + if (argv) { + if (old_argv) + memcpy(argv, old_argv, *size * sizeof(*argv)); *size = new_size; } @@ -1049,7 +1050,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * unsigned int min_pool_size = 0, pool_size; struct dm_md_mempools *pools; unsigned int bioset_flags = 0; - bool mempool_needs_integrity = t->integrity_supported; if (unlikely(type == DM_TYPE_NONE)) { DMERR("no table type is set, can't allocate mempools"); @@ -1074,8 +1074,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * per_io_data_size = max(per_io_data_size, ti->per_io_data_size); min_pool_size = max(min_pool_size, ti->num_flush_bios); - - mempool_needs_integrity |= ti->mempool_needs_integrity; } pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); front_pad = roundup(per_io_data_size, @@ -1175,7 +1173,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, t = dm_get_live_table(md, &srcu_idx); if (!t) - return 0; + goto put_live_table; for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); @@ -1186,6 +1184,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, (void *)key); } +put_live_table: dm_put_live_table(md, srcu_idx); return 0; } diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 44ec9b17cfd3..37b08f26c62f 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2357,9 +2357,8 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) if (!bitmap) return -ENOENT; - if (bitmap->mddev->bitmap_info.external) - return -ENOENT; - if (!bitmap->storage.sb_page) /* no superblock */ + if (!bitmap->mddev->bitmap_info.external && + !bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); diff --git a/drivers/md/md.c b/drivers/md/md.c index 9daa78c5fe33..0fde115e921f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -111,32 +111,48 @@ static void md_wakeup_thread_directly(struct md_thread __rcu *thread); /* Default safemode delay: 200 msec */ #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1) /* - * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' - * is 1000 KB/sec, so the extra system load does not show up that much. - * Increase it if you want to have more _guaranteed_ speed. Note that - * the RAID driver will use the maximum available bandwidth if the IO - * subsystem is idle. There is also an 'absolute maximum' reconstruction - * speed limit - in case reconstruction slows down your system despite - * idle IO detection. + * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit' + * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load + * does not show up that much. 
Increase it if you want to have more guaranteed + * speed. Note that the RAID driver will use the maximum bandwidth + * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle. * - * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. - * or /sys/block/mdX/md/sync_speed_{min,max} + * Background sync IO speed control: + * + * - below speed min: + * no limit; + * - above speed min and below speed max: + * a) if mddev is idle, then no limit; + * b) if mddev is busy handling normal IO, then limit inflight sync IO + * to sync_io_depth; + * - above speed max: + * sync IO can't be issued; + * + * Following configurations can be changed via /proc/sys/dev/raid/ for system + * or /sys/block/mdX/md/ for one array. */ - static int sysctl_speed_limit_min = 1000; static int sysctl_speed_limit_max = 200000; -static inline int speed_min(struct mddev *mddev) +static int sysctl_sync_io_depth = 32; + +static int speed_min(struct mddev *mddev) { return mddev->sync_speed_min ? mddev->sync_speed_min : sysctl_speed_limit_min; } -static inline int speed_max(struct mddev *mddev) +static int speed_max(struct mddev *mddev) { return mddev->sync_speed_max ? mddev->sync_speed_max : sysctl_speed_limit_max; } +static int sync_io_depth(struct mddev *mddev) +{ + return mddev->sync_io_depth ? + mddev->sync_io_depth : sysctl_sync_io_depth; +} + static void rdev_uninit_serial(struct md_rdev *rdev) { if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) @@ -293,14 +309,21 @@ static const struct ctl_table raid_table[] = { .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, .maxlen = sizeof(int), - .mode = S_IRUGO|S_IWUSR, + .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "speed_limit_max", .data = &sysctl_speed_limit_max, .maxlen = sizeof(int), - .mode = S_IRUGO|S_IWUSR, + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sync_io_depth", + .data = &sysctl_sync_io_depth, + .maxlen = sizeof(int), + .mode = 0644, .proc_handler = proc_dointvec, }, }; @@ -5091,7 +5114,7 @@ static ssize_t sync_min_show(struct mddev *mddev, char *page) { return sprintf(page, "%d (%s)\n", speed_min(mddev), - mddev->sync_speed_min ? "local": "system"); + mddev->sync_speed_min ? "local" : "system"); } static ssize_t @@ -5100,7 +5123,7 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len) unsigned int min; int rv; - if (strncmp(buf, "system", 6)==0) { + if (strncmp(buf, "system", 6) == 0) { min = 0; } else { rv = kstrtouint(buf, 10, &min); @@ -5120,7 +5143,7 @@ static ssize_t sync_max_show(struct mddev *mddev, char *page) { return sprintf(page, "%d (%s)\n", speed_max(mddev), - mddev->sync_speed_max ? "local": "system"); + mddev->sync_speed_max ? "local" : "system"); } static ssize_t @@ -5129,7 +5152,7 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len) unsigned int max; int rv; - if (strncmp(buf, "system", 6)==0) { + if (strncmp(buf, "system", 6) == 0) { max = 0; } else { rv = kstrtouint(buf, 10, &max); @@ -5146,6 +5169,35 @@ static struct md_sysfs_entry md_sync_max = __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); static ssize_t +sync_io_depth_show(struct mddev *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", sync_io_depth(mddev), + mddev->sync_io_depth ? 
"local" : "system"); +} + +static ssize_t +sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len) +{ + unsigned int max; + int rv; + + if (strncmp(buf, "system", 6) == 0) { + max = 0; + } else { + rv = kstrtouint(buf, 10, &max); + if (rv < 0) + return rv; + if (max == 0) + return -EINVAL; + } + mddev->sync_io_depth = max; + return len; +} + +static struct md_sysfs_entry md_sync_io_depth = +__ATTR_RW(sync_io_depth); + +static ssize_t degraded_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", mddev->degraded); @@ -5671,6 +5723,7 @@ static struct attribute *md_redundancy_attrs[] = { &md_mismatches.attr, &md_sync_min.attr, &md_sync_max.attr, + &md_sync_io_depth.attr, &md_sync_speed.attr, &md_sync_force_parallel.attr, &md_sync_completed.attr, @@ -8572,50 +8625,55 @@ void md_cluster_stop(struct mddev *mddev) put_cluster_ops(mddev); } -static int is_mddev_idle(struct mddev *mddev, int init) +static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init) { + unsigned long last_events = rdev->last_events; + + if (!bdev_is_partition(rdev->bdev)) + return true; + + /* + * If rdev is partition, and user doesn't issue IO to the array, the + * array is still not idle if user issues IO to other partitions. + */ + rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0, + sectors) - + part_stat_read_accum(rdev->bdev, sectors); + + return init || rdev->last_events <= last_events; +} + +/* + * mddev is idle if following conditions are matched since last check: + * 1) mddev doesn't have normal IO completed; + * 2) mddev doesn't have inflight normal IO; + * 3) if any member disk is partition, and other partitions don't have IO + * completed; + * + * Noted this checking rely on IO accounting is enabled. + */ +static bool is_mddev_idle(struct mddev *mddev, int init) +{ + unsigned long last_events = mddev->normal_io_events; + struct gendisk *disk; struct md_rdev *rdev; - int idle; - int curr_events; + bool idle = true; - idle = 1; - rcu_read_lock(); - rdev_for_each_rcu(rdev, mddev) { - struct gendisk *disk = rdev->bdev->bd_disk; + disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk; + if (!disk) + return true; - if (!init && !blk_queue_io_stat(disk->queue)) - continue; + mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors); + if (!init && (mddev->normal_io_events > last_events || + bdev_count_inflight(disk->part0))) + idle = false; - curr_events = (int)part_stat_read_accum(disk->part0, sectors) - - atomic_read(&disk->sync_io); - /* sync IO will cause sync_io to increase before the disk_stats - * as sync_io is counted when a request starts, and - * disk_stats is counted when it completes. - * So resync activity will cause curr_events to be smaller than - * when there was no such activity. - * non-sync IO will cause disk_stat to increase without - * increasing sync_io so curr_events will (eventually) - * be larger than it was before. Once it becomes - * substantially larger, the test below will cause - * the array to appear non-idle, and resync will slow - * down. - * If there is a lot of outstanding resync activity when - * we set last_event to curr_events, then all that activity - * completing might cause the array to appear non-idle - * and resync will be slowed down even though there might - * not have been non-resync activity. This will only - * happen once though. 
'last_events' will soon reflect - * the state where there is little or no outstanding - * resync requests, and further resync activity will - * always make curr_events less than last_events. - * - */ - if (init || curr_events - rdev->last_events > 64) { - rdev->last_events = curr_events; - idle = 0; - } - } + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) + if (!is_rdev_holder_idle(rdev, init)) + idle = false; rcu_read_unlock(); + return idle; } @@ -8927,6 +8985,23 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action) } } +static bool sync_io_within_limit(struct mddev *mddev) +{ + int io_sectors; + + /* + * For raid456, sync IO is stripe(4k) per IO, for other levels, it's + * RESYNC_PAGES(64k) per IO. + */ + if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6) + io_sectors = 8; + else + io_sectors = 128; + + return atomic_read(&mddev->recovery_active) < + io_sectors * sync_io_depth(mddev); +} + #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) #define UPDATE_FREQUENCY (5*60*HZ) @@ -9195,7 +9270,8 @@ void md_do_sync(struct md_thread *thread) msleep(500); goto repeat; } - if (!is_mddev_idle(mddev, 0)) { + if (!sync_io_within_limit(mddev) && + !is_mddev_idle(mddev, 0)) { /* * Give other IO more of a chance. * The faster the devices, the less we wait. diff --git a/drivers/md/md.h b/drivers/md/md.h index 1cf00a04bcdd..d45a9e6ead80 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -132,7 +132,7 @@ struct md_rdev { sector_t sectors; /* Device size (in 512bytes sectors) */ struct mddev *mddev; /* RAID array if running */ - int last_events; /* IO event timestamp */ + unsigned long last_events; /* IO event timestamp */ /* * If meta_bdev is non-NULL, it means that a separate device is @@ -404,7 +404,8 @@ struct mddev { * are happening, so run/ * takeover/stop are not safe */ - struct gendisk *gendisk; + struct gendisk *gendisk; /* mdraid gendisk */ + struct gendisk *dm_gendisk; /* dm-raid gendisk */ struct kobject kobj; int hold_active; @@ -483,6 +484,7 @@ struct mddev { /* if zero, use the system-wide default */ int sync_speed_min; int sync_speed_max; + int sync_io_depth; /* resync even though the same disks are shared among md-devices */ int parallel_resync; @@ -518,6 +520,7 @@ struct mddev { * adding a spare */ + unsigned long normal_io_events; /* IO event timestamp */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t recovery_cp; @@ -714,17 +717,6 @@ static inline int mddev_trylock(struct mddev *mddev) } extern void mddev_unlock(struct mddev *mddev); -static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) -{ - if (blk_queue_io_stat(bdev->bd_disk->queue)) - atomic_add(nr_sectors, &bdev->bd_disk->sync_io); -} - -static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) -{ - md_sync_acct(bio->bi_bdev, nr_sectors); -} - struct md_personality { struct md_submodule_head head; diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index f4f948b0e173..dbb97a7233ab 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig @@ -2,7 +2,7 @@ config DM_PERSISTENT_DATA tristate depends on BLK_DEV_DM - select LIBCRC32C + select CRC32 select DM_BUFIO help Library providing immutable on-disk data structure support for diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 0efc03cea24e..657d481525be 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2200,14 +2200,9 @@ static int 
fix_sync_read_error(struct r1bio *r1_bio) if (!rdev_set_badblocks(rdev, sect, s, 0)) abort = 1; } - if (abort) { - conf->recovery_disabled = - mddev->recovery_disabled; - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_done_sync(mddev, r1_bio->sectors, 0); - put_buf(r1_bio); + if (abort) return 0; - } + /* Try next page */ sectors -= s; sect += s; @@ -2346,10 +2341,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) int disks = conf->raid_disks * 2; struct bio *wbio; - if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) - /* ouch - failed to read all of that. */ - if (!fix_sync_read_error(r1_bio)) + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* + * ouch - failed to read all of that. + * No need to fix read error for check/repair + * because all member disks are read. + */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) || + !fix_sync_read_error(r1_bio)) { + conf->recovery_disabled = mddev->recovery_disabled; + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_done_sync(mddev, r1_bio->sectors, 0); + put_buf(r1_bio); return; + } + } if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) process_checks(r1_bio); @@ -2376,7 +2382,6 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); - md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); submit_bio_noacct(wbio); } @@ -3049,7 +3054,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r1_bio->bios[i]; if (bio->bi_end_io == end_sync_read) { read_targets--; - md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; submit_bio_noacct(bio); @@ -3058,7 +3062,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, } else { atomic_set(&r1_bio->remaining, 1); bio = r1_bio->bios[r1_bio->read_disk]; - md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; submit_bio_noacct(bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 846c5f29486e..dce06bf65016 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1735,6 +1735,7 @@ retry_discard: * The discard bio returns only first r10bio finishes */ if (first_copy) { + md_account_bio(mddev, &bio); r10_bio->master_bio = bio; set_bit(R10BIO_Discard, &r10_bio->state); first_copy = false; @@ -2425,7 +2426,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&r10_bio->remaining); - md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) tbio->bi_opf |= MD_FAILFAST; @@ -2447,8 +2447,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) bio_copy_data(tbio, fbio); d = r10_bio->devs[i].devnum; atomic_inc(&r10_bio->remaining); - md_sync_acct(conf->mirrors[d].replacement->bdev, - bio_sectors(tbio)); submit_bio_noacct(tbio); } @@ -2582,13 +2580,10 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) d = r10_bio->devs[1].devnum; if (wbio->bi_end_io) { atomic_inc(&conf->mirrors[d].rdev->nr_pending); - md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); submit_bio_noacct(wbio); } if (wbio2) { atomic_inc(&conf->mirrors[d].replacement->nr_pending); - md_sync_acct(conf->mirrors[d].replacement->bdev, - bio_sectors(wbio2)); submit_bio_noacct(wbio2); } } @@ -3756,7 +3751,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t 
sector_nr, r10_bio->sectors = nr_sectors; if (bio->bi_end_io == end_sync_read) { - md_sync_acct_bio(bio, nr_sectors); bio->bi_status = 0; submit_bio_noacct(bio); } @@ -4879,7 +4873,6 @@ read_more: r10_bio->sectors = nr_sectors; /* Now submit the read */ - md_sync_acct_bio(read_bio, r10_bio->sectors); atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; submit_bio_noacct(read_bio); @@ -4939,7 +4932,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) continue; atomic_inc(&rdev->nr_pending); - md_sync_acct_bio(b, r10_bio->sectors); atomic_inc(&r10_bio->remaining); b->bi_next = NULL; submit_bio_noacct(b); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6389383166c0..ca5b0e8ba707 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1240,10 +1240,6 @@ again: } if (rdev) { - if (s->syncing || s->expanding || s->expanded - || s->replacing) - md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf)); - set_bit(STRIPE_IO_STARTED, &sh->state); bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags); @@ -1300,10 +1296,6 @@ again: submit_bio_noacct(bi); } if (rrdev) { - if (s->syncing || s->expanding || s->expanded - || s->replacing) - md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf)); - set_bit(STRIPE_IO_STARTED, &sh->state); bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags); diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig index b9d21643eef1..c31abc26f602 100644 --- a/drivers/media/cec/i2c/Kconfig +++ b/drivers/media/cec/i2c/Kconfig @@ -16,6 +16,7 @@ config CEC_CH7322 config CEC_NXP_TDA9950 tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC" + depends on I2C select CEC_NOTIFIER select CEC_CORE default DRM_I2C_NXP_TDA998X diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index e576b213084d..e45ba127069f 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -1149,8 +1149,11 @@ config VIDEO_ISL7998X config VIDEO_LT6911UXE tristate "Lontium LT6911UXE decoder" - depends on ACPI && VIDEO_DEV + depends on ACPI && VIDEO_DEV && I2C select V4L2_FWNODE + select V4L2_CCI_I2C + select MEDIA_CONTROLLER + select VIDEO_V4L2_SUBDEV_API help This is a Video4Linux2 sensor-level driver for the Lontium LT6911UXE HDMI to MIPI CSI-2 bridge. 
diff --git a/drivers/media/platform/synopsys/hdmirx/Kconfig b/drivers/media/platform/synopsys/hdmirx/Kconfig index 27e6706f84a3..4321f985f632 100644 --- a/drivers/media/platform/synopsys/hdmirx/Kconfig +++ b/drivers/media/platform/synopsys/hdmirx/Kconfig @@ -2,6 +2,7 @@ config VIDEO_SYNOPSYS_HDMIRX tristate "Synopsys DesignWare HDMI Receiver driver" + depends on ARCH_ROCKCHIP || COMPILE_TEST depends on VIDEO_DEV select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig index e95edc0f22bf..cc470070a7a5 100644 --- a/drivers/media/test-drivers/vivid/Kconfig +++ b/drivers/media/test-drivers/vivid/Kconfig @@ -32,7 +32,8 @@ config VIDEO_VIVID_CEC config VIDEO_VIVID_OSD bool "Enable Framebuffer for testing Output Overlay" - depends on VIDEO_VIVID && FB + depends on VIDEO_VIVID && FB_CORE + depends on VIDEO_VIVID=m || FB_CORE=y default y select FB_IOMEM_HELPERS help diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c index 18fc1aaa5cdd..2b6778d8d166 100644 --- a/drivers/misc/cs5535-mfgpt.c +++ b/drivers/misc/cs5535-mfgpt.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/cs5535.h> #include <linux/slab.h> +#include <asm/msr.h> #define DRV_NAME "cs5535-mfgpt" diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c index 04756302b878..98d3d123004c 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c @@ -37,6 +37,7 @@ struct pci1xxxx_gpio { struct auxiliary_device *aux_dev; void __iomem *reg_base; + raw_spinlock_t wa_lock; struct gpio_chip gpio; spinlock_t lock; int irq_base; @@ -167,7 +168,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data) unsigned long flags; spin_lock_irqsave(&priv->lock, flags); - pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true); + writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio)); spin_unlock_irqrestore(&priv->lock, flags); } @@ -257,6 +258,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) struct pci1xxxx_gpio *priv = dev_id; struct gpio_chip *gc = &priv->gpio; unsigned long int_status = 0; + unsigned long wa_flags; unsigned long flags; u8 pincount; int bit; @@ -280,7 +282,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank)); spin_unlock_irqrestore(&priv->lock, flags); irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32))); - handle_nested_irq(irq); + raw_spin_lock_irqsave(&priv->wa_lock, wa_flags); + generic_handle_irq(irq); + raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags); } } spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index a5f88ec97df7..bc40b940ae21 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -117,6 +117,7 @@ #define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ +#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ #define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ /* diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index d6ff9d82ae94..3f9c60b579ae 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -124,6 +124,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, 
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, /* required last entry */ diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index 7be1649b1972..da26a080916c 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -36,20 +36,24 @@ #define VSC_TP_XFER_TIMEOUT_BYTES 700 #define VSC_TP_PACKET_PADDING_SIZE 1 #define VSC_TP_PACKET_SIZE(pkt) \ - (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE) + (sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE) #define VSC_TP_MAX_PACKET_SIZE \ - (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE) + (sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE) #define VSC_TP_MAX_XFER_SIZE \ (VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES) #define VSC_TP_NEXT_XFER_LEN(len, offset) \ - (len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE) + (len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE) -struct vsc_tp_packet { +struct vsc_tp_packet_hdr { __u8 sync; __u8 cmd; __le16 len; __le32 seq; - __u8 buf[] __counted_by(len); +}; + +struct vsc_tp_packet { + struct vsc_tp_packet_hdr hdr; + __u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)]; }; struct vsc_tp { @@ -67,8 +71,8 @@ struct vsc_tp { u32 seq; /* command buffer */ - void *tx_buf; - void *rx_buf; + struct vsc_tp_packet *tx_buf; + struct vsc_tp_packet *rx_buf; atomic_t assert_cnt; wait_queue_head_t xfer_wait; @@ -158,12 +162,12 @@ static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt, void *ibuf, u16 ilen) { - int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet); + int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr); int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES; - u8 *src, *crc_src, *rx_buf = tp->rx_buf; + u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf; int count_down = VSC_TP_MAX_XFER_COUNT; u32 recv_crc = 0, crc = ~0; - struct vsc_tp_packet ack; + struct vsc_tp_packet_hdr ack; u8 *dst = (u8 *)&ack; bool synced = false; @@ -280,10 +284,10 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen, guard(mutex)(&tp->mutex); - pkt->sync = VSC_TP_PACKET_SYNC; - pkt->cmd = cmd; - pkt->len = cpu_to_le16(olen); - pkt->seq = cpu_to_le32(++tp->seq); + pkt->hdr.sync = VSC_TP_PACKET_SYNC; + pkt->hdr.cmd = cmd; + pkt->hdr.len = cpu_to_le16(olen); + pkt->hdr.seq = cpu_to_le32(++tp->seq); memcpy(pkt->buf, obuf, olen); crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen); @@ -320,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) guard(mutex)(&tp->mutex); /* rom xfer is big endian */ - cpu_to_be32_array(tp->tx_buf, obuf, words); + cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words); ret = read_poll_timeout(gpiod_get_value_cansleep, ret, !ret, VSC_TP_ROM_XFER_POLL_DELAY_US, @@ -336,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) return ret; if (ibuf) - be32_to_cpu_array(ibuf, tp->rx_buf, words); + be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words); return ret; } @@ -490,11 +494,11 @@ static int vsc_tp_probe(struct spi_device *spi) if (!tp) return -ENOMEM; - tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL); + tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL); if (!tp->tx_buf) return -ENOMEM; - 
tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL); + tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL); if (!tp->rx_buf) return -ENOMEM; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index d294850a35a1..c4e5e2c977be 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -122,7 +122,6 @@ struct pci_endpoint_test { struct pci_endpoint_test_data { enum pci_barno test_reg_bar; size_t alignment; - int irq_type; }; static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test, @@ -948,7 +947,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test_reg_bar = data->test_reg_bar; test->test_reg_bar = test_reg_bar; test->alignment = data->alignment; - test->irq_type = data->irq_type; } init_completion(&test->irq_raised); @@ -970,10 +968,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, pci_set_master(pdev); - ret = pci_endpoint_test_alloc_irq_vectors(test, test->irq_type); - if (ret) - goto err_disable_irq; - for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { base = pci_ioremap_bar(pdev, bar); @@ -1009,10 +1003,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, goto err_ida_remove; } - ret = pci_endpoint_test_request_irq(test); - if (ret) - goto err_kfree_test_name; - pci_endpoint_test_get_capabilities(test); misc_device = &test->miscdev; @@ -1020,7 +1010,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, misc_device->name = kstrdup(name, GFP_KERNEL); if (!misc_device->name) { ret = -ENOMEM; - goto err_release_irq; + goto err_kfree_test_name; } misc_device->parent = &pdev->dev; misc_device->fops = &pci_endpoint_test_fops; @@ -1036,9 +1026,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, err_kfree_name: kfree(misc_device->name); -err_release_irq: - pci_endpoint_test_release_irq(test); - err_kfree_test_name: kfree(test->name); @@ -1051,8 +1038,6 @@ err_iounmap: pci_iounmap(pdev, test->bar[bar]); } -err_disable_irq: - pci_endpoint_test_free_irq_vectors(test); pci_release_regions(pdev); err_disable_pdev: @@ -1092,23 +1077,19 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) static const struct pci_endpoint_test_data default_data = { .test_reg_bar = BAR_0, .alignment = SZ_4K, - .irq_type = PCITEST_IRQ_TYPE_MSI, }; static const struct pci_endpoint_test_data am654_data = { .test_reg_bar = BAR_2, .alignment = SZ_64K, - .irq_type = PCITEST_IRQ_TYPE_MSI, }; static const struct pci_endpoint_test_data j721e_data = { .alignment = 256, - .irq_type = PCITEST_IRQ_TYPE_MSI, }; static const struct pci_endpoint_test_data rk3588_data = { .alignment = SZ_64K, - .irq_type = PCITEST_IRQ_TYPE_MSI, }; /* diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 6824131b69b1..264e11fa58ea 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -691,8 +691,8 @@ config MMC_TMIO_CORE config MMC_SDHI tristate "Renesas SDHI SD/SDIO controller support" depends on SUPERH || ARCH_RENESAS || COMPILE_TEST + depends on (RESET_CONTROLLER && REGULATOR) || !OF select MMC_TMIO_CORE - select RESET_CONTROLLER if ARCH_RENESAS help This provides support for the SDHI SD/SDIO controller found in Renesas SuperH, ARM and ARM64 based SoCs diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fa6526be3638..8c83e203c516 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -1179,7 +1179,7 @@ int 
renesas_sdhi_probe(struct platform_device *pdev, if (IS_ERR(rdev)) { dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev)); ret = PTR_ERR(rdev); - goto efree; + goto edisclk; } priv->rdev = rdev; } @@ -1243,26 +1243,26 @@ int renesas_sdhi_probe(struct platform_device *pdev, num_irqs = platform_irq_count(pdev); if (num_irqs < 0) { ret = num_irqs; - goto eirq; + goto edisclk; } /* There must be at least one IRQ source */ if (!num_irqs) { ret = -ENXIO; - goto eirq; + goto edisclk; } for (i = 0; i < num_irqs; i++) { irq = platform_get_irq(pdev, i); if (irq < 0) { ret = irq; - goto eirq; + goto edisclk; } ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq; + goto edisclk; } ret = tmio_mmc_host_probe(host); @@ -1274,8 +1274,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, return ret; -eirq: - tmio_mmc_host_remove(host); edisclk: renesas_sdhi_clk_disable(host); efree: diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 09b9ab15e499..a20d03fdd6a9 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/sizes.h> @@ -745,6 +746,29 @@ static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv } } +static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) +{ + struct device *dev = mmc_dev(host->mmc); + int ret; + + /* + * This works around the design of the RK3576's power domains, which + * makes the PD_NVM power domain, which the sdhci controller on the + * RK3576 is in, never come back the same way once it's run-time + * suspended once. This can happen during early kernel boot if no driver + * is using either PD_NVM or its child power domain PD_SDGMAC for a + * short moment, leading to it being turned off to save power. By + * keeping it on, sdhci suspending won't lead to PD_NVM becoming a + * candidate for getting turned off. 
+ */ + ret = dev_pm_genpd_rpm_always_on(dev, true); + if (ret && ret != -EOPNOTSUPP) + dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n", + ERR_PTR(ret)); + + dwcmshc_rk35xx_postinit(host, dwc_priv); +} + static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -1176,6 +1200,18 @@ static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { .postinit = dwcmshc_rk35xx_postinit, }; +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_rk35xx_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, + }, + .init = dwcmshc_rk35xx_init, + .postinit = dwcmshc_rk3576_postinit, +}; + static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = { .pdata = { .ops = &sdhci_dwcmshc_th1520_ops, @@ -1275,6 +1311,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { .data = &sdhci_dwcmshc_rk35xx_pdata, }, { + .compatible = "rockchip,rk3576-dwcmshc", + .data = &sdhci_dwcmshc_rk3576_pdata, + }, + { .compatible = "rockchip,rk3568-dwcmshc", .data = &sdhci_dwcmshc_rk35xx_pdata, }, diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index f75c31815ab0..73385ff4c0f3 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -155,6 +155,7 @@ struct sdhci_am654_data { u32 tuning_loop; #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) +#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1) }; struct window { @@ -166,6 +167,7 @@ struct window { struct sdhci_am654_driver_data { const struct sdhci_pltfm_data *pdata; u32 flags; + u32 quirks; #define IOMUX_PRESENT (1 << 0) #define FREQSEL_2_BIT (1 << 1) #define STRBSEL_4_BIT (1 << 2) @@ -356,6 +358,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, sdhci_set_clock(host, clock); } +static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + int ret; + + if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) && + ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) { + pr_err("%s: Switching to 1.8V signalling voltage failed,\n", + mmc_hostname(mmc)); + return -EIO; + } + } + return 0; + } + + return sdhci_start_signal_voltage_switch(mmc, ios); +} + static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg) { writeb(val, host->ioaddr + reg); @@ -650,6 +675,12 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { .flags = IOMUX_PRESENT, }; +static const struct sdhci_am654_driver_data sdhci_am62_4bit_drvdata = { + .pdata = &sdhci_j721e_4bit_pdata, + .flags = IOMUX_PRESENT, + .quirks = SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA, +}; + static const struct soc_device_attribute sdhci_am654_devices[] = { { .family = "AM65X", .revision = "SR1.0", @@ -872,7 +903,7 @@ static const struct of_device_id sdhci_am654_of_match[] = { }, { .compatible = "ti,am62-sdhci", - .data = &sdhci_j721e_4bit_drvdata, + .data = &sdhci_am62_4bit_drvdata, }, { /* sentinel */ } }; @@ -906,6 +937,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); sdhci_am654 = 
sdhci_pltfm_priv(pltfm_host); sdhci_am654->flags = drvdata->flags; + sdhci_am654->quirks = drvdata->quirks; clk_xin = devm_clk_get(dev, "clk_xin"); if (IS_ERR(clk_xin)) { @@ -940,6 +972,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto err_pltfm_free; } + host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch; host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; pm_runtime_get_noresume(dev); diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 9739387cff8c..58c6e1743f5c 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) silly = MAX_LOOPS; while (thisEUN <= inftl->lastEUN) { - inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + - blockofs, 8, &retlen, (char *)&bci); - - status = bci.Status | bci.Status1; + if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + + blockofs, 8, &retlen, (char *)&bci) < 0) + status = SECTOR_IGNORE; + else + status = bci.Status | bci.Status1; pr_debug("INFTL: status of block %d in EUN %d is %x\n", block , writeEUN, status); diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index db516a45f0c5..44913ff1bf12 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -3,11 +3,8 @@ nandcore-objs := core.o bbt.o obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o -ifeq ($(CONFIG_SPI_QPIC_SNAND),y) obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o -else obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o -endif obj-y += onenand/ obj-y += raw/ obj-y += spi/ diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 341318024a19..ec95d787001b 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -351,20 +351,20 @@ static int __init cs553x_init(void) return -ENXIO; /* If it doesn't have the CS553[56], abort */ - rdmsrl(MSR_DIVIL_GLD_CAP, val); + rdmsrq(MSR_DIVIL_GLD_CAP, val); val &= ~0xFFULL; if (val != CAP_CS5535 && val != CAP_CS5536) return -ENXIO; /* If it doesn't have the NAND controller enabled, abort */ - rdmsrl(MSR_DIVIL_BALL_OPTS, val); + rdmsrq(MSR_DIVIL_BALL_OPTS, val); if (val & PIN_OPT_IDE) { pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n"); return -ENXIO; } for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val); + rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val); if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND)) err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF); diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index b07c2f8b4035..918974d088cf 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip) static int r852_ready(struct nand_chip *chip) { struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); + if (dev->card_unstable) + return 0; + return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 950d8e4d86f8..8ea183da8d53 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -850,8 +850,9 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting) { const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - struct ifreq ifr; struct mii_ioctl_data *mii; + struct 
ifreq ifr; + int ret; if (!reporting && !netif_running(slave_dev)) return 0; @@ -860,9 +861,13 @@ static int bond_check_dev_link(struct bonding *bond, return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; /* Try to get link status using Ethtool first. */ - if (slave_dev->ethtool_ops->get_link) - return slave_dev->ethtool_ops->get_link(slave_dev) ? - BMSR_LSTATUS : 0; + if (slave_dev->ethtool_ops->get_link) { + netdev_lock_ops(slave_dev); + ret = slave_dev->ethtool_ops->get_link(slave_dev); + netdev_unlock_ops(slave_dev); + + return ret ? BMSR_LSTATUS : 0; + } /* Ethtool can't be used, fallback to MII ioctls. */ if (slave_ops->ndo_eth_ioctl) { diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index cf0d51805272..f6921368cd14 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/timer.h> +#include <net/netdev_queues.h> MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); @@ -410,10 +411,13 @@ struct kvaser_pciefd_can { void __iomem *reg_base; struct can_berr_counter bec; u8 cmd_seq; + u8 tx_max_count; + u8 tx_idx; + u8 ack_idx; int err_rep_cnt; - int echo_idx; + unsigned int completed_tx_pkts; + unsigned int completed_tx_bytes; spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ - spinlock_t echo_lock; /* Locks the message echo buffer */ struct timer_list bec_poll_timer; struct completion start_comp, flush_comp; }; @@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev) int ret; struct kvaser_pciefd_can *can = netdev_priv(netdev); + can->tx_idx = 0; + can->ack_idx = 0; + ret = open_candev(netdev); if (ret) return ret; @@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev) timer_delete(&can->bec_poll_timer); } can->can.state = CAN_STATE_STOPPED; + netdev_reset_queue(netdev); close_candev(netdev); return ret; } +static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can) +{ + return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx)); +} + static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, - struct kvaser_pciefd_can *can, + struct can_priv *can, u8 seq, struct sk_buff *skb) { struct canfd_frame *cf = (struct canfd_frame *)skb->data; int packet_size; - int seq = can->echo_idx; memset(p, 0, sizeof(*p)); - if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT) p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; if (cf->can_id & CAN_RTR_FLAG) @@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, } else { p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, - can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode)); + can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode)); } p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); @@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct kvaser_pciefd_can *can = netdev_priv(netdev); - unsigned long irq_flags; struct kvaser_pciefd_tx_packet packet; + unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1); + unsigned int frame_len; int nr_words; - u8 count; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; + if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1)) + return NETDEV_TX_BUSY; - nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); + nr_words = 
kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb); - spin_lock_irqsave(&can->echo_lock, irq_flags); /* Prepare and save echo skb in internal slot */ - can_put_echo_skb(skb, netdev, can->echo_idx, 0); - - /* Move echo index to the next slot */ - can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + WRITE_ONCE(can->can.echo_skb[seq], NULL); + frame_len = can_skb_get_frame_len(skb); + can_put_echo_skb(skb, netdev, seq, frame_len); + netdev_sent_queue(netdev, frame_len); + WRITE_ONCE(can->tx_idx, can->tx_idx + 1); /* Write header to fifo */ iowrite32(packet.header[0], @@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, KVASER_PCIEFD_KCAN_FIFO_LAST_REG); } - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); - /* No room for a new message, stop the queue until at least one - * successful transmit - */ - if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx]) - netif_stop_queue(netdev); - spin_unlock_irqrestore(&can->echo_lock, irq_flags); + netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1); return NETDEV_TX_OK; } @@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) can->kv_pcie = pcie; can->cmd_seq = 0; can->err_rep_cnt = 0; + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; can->bec.txerr = 0; can->bec.rxerr = 0; @@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) tx_nr_packets_max = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); can->can.clock.freq = pcie->freq; - can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); - can->echo_idx = 0; - spin_lock_init(&can->echo_lock); + can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count); spin_lock_init(&can->lock); can->can.bittiming_const = &kvaser_pciefd_bittiming_const; @@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_canfd_skb(priv->dev, &cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } cf->len = can_fd_dlc2len(dlc); @@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); } @@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, priv->dev->stats.rx_packets++; kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - return netif_rx(skb); + netif_rx(skb); + + return 0; } static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, @@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, netdev_dbg(can->can.dev, "Packet was flushed\n"); } else { int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); - int len; - u8 count; + unsigned int len, frame_len = 0; struct sk_buff *skb; + if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1))) + return 0; skb = can->can.echo_skb[echo_idx]; - if (skb) - kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - len = can_get_echo_skb(can->can.dev, echo_idx, NULL); - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - 
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + if (!skb) + return 0; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len); - if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev)) - netif_wake_queue(can->can.dev); + /* Pairs with barrier in kvaser_pciefd_start_xmit() */ + smp_store_release(&can->ack_idx, can->ack_idx + 1); + can->completed_tx_pkts++; + can->completed_tx_bytes += frame_len; if (!one_shot_fail) { can->can.dev->stats.tx_bytes += len; @@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) { int pos = 0; int res = 0; + unsigned int i; do { res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + /* Report ACKs in this buffer to BQL en masse for correct periods */ + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + if (!can->completed_tx_pkts) + continue; + netif_subqueue_completed_wake(can->can.dev, 0, + can->completed_tx_pkts, + can->completed_tx_bytes, + kvaser_pciefd_tx_avail(can), 1); + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; + } + return res; } -static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { + void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { kvaser_pciefd_read_buffer(pcie, 0); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ + } - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { kvaser_pciefd_read_buffer(pcie, 1); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ + } if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); - - iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - return irq; } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); - u32 srb_irq = 0; - u32 srb_release = 0; int i; if (!(pci_irq & irq_mask->all)) return IRQ_NONE; + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + if (pci_irq & irq_mask->kcan_rx0) - srb_irq = kvaser_pciefd_receive_irq(pcie); + kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0; - - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1; - - if (srb_release) - iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); return IRQ_HANDLED; } @@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) } } +static void 
kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) +{ + unsigned int i; + + /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + for (i = 0; i < pcie->nr_channels; ++i) + iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +} + static int kvaser_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; struct kvaser_pciefd *pcie; const struct kvaser_pciefd_irq_mask *irq_mask; - void __iomem *irq_en_base; pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); /* Enable PCI interrupts */ - irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); - iowrite32(irq_mask->all, irq_en_base); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); /* Ready the DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); @@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, return 0; err_free_irq: - /* Disable PCI interrupts */ - iowrite32(0, irq_en_base); + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); err_pci_free_irq_vectors: @@ -1844,35 +1875,26 @@ err_disable_pci: return ret; } -static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) -{ - int i; - - for (i = 0; i < pcie->nr_channels; i++) { - struct kvaser_pciefd_can *can = pcie->can[i]; - - if (can) { - iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); - unregister_candev(can->can.dev); - timer_delete(&can->bec_poll_timer); - kvaser_pciefd_pwm_stop(can); - free_candev(can->can.dev); - } - } -} - static void kvaser_pciefd_remove(struct pci_dev *pdev) { struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + unsigned int i; - kvaser_pciefd_remove_all_ctrls(pcie); + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; - /* Disable interrupts */ - iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); - iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + unregister_candev(can->can.dev); + timer_delete(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + } + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); pci_free_irq_vectors(pcie->pci); + + for (i = 0; i < pcie->nr_channels; ++i) + free_candev(pcie->can[i]->can.dev); + pci_iounmap(pdev, pcie->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 884a6352c42b..c2c116ce1087 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -2379,6 +2379,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, SET_NETDEV_DEV(net_dev, dev); m_can_of_parse_mram(class_dev, mram_config_vals); + spin_lock_init(&class_dev->tx_handling_spinlock); out: return class_dev; } @@ -2462,9 +2463,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register); void m_can_class_unregister(struct m_can_classdev *cdev) { + unregister_candev(cdev->net); if (cdev->is_peripheral) can_rx_offload_del(&cdev->offload); - unregister_candev(cdev->net); } EXPORT_SYMBOL_GPL(m_can_class_unregister); diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c index 46201c126703..c3fb3176ce42 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -902,15 
+902,16 @@ static int rkcanfd_probe(struct platform_device *pdev) priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; - if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN)) - priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.do_set_mode = rkcanfd_set_mode; priv->can.do_get_berr_counter = rkcanfd_get_berr_counter; priv->ndev = ndev; match = device_get_match_data(&pdev->dev); - if (match) + if (match) { priv->devtype_data = *(struct rkcanfd_devtype_data *)match; + if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN)) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + } err = can_rx_offload_add_manual(ndev, &priv->offload, RKCANFD_NAPI_WEIGHT); @@ -936,8 +937,8 @@ static void rkcanfd_remove(struct platform_device *pdev) struct rkcanfd_priv *priv = platform_get_drvdata(pdev); struct net_device *ndev = priv->ndev; - can_rx_offload_del(&priv->offload); rkcanfd_unregister(priv); + can_rx_offload_del(&priv->offload); free_candev(ndev); } diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index 24c6622d36bd..58ff2ec1d975 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 +#define SLCAN_DATA_LENGTH_LEN 1 +#define SLCAN_ERROR_LEN 1 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 -#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ - SLCAN_STATE_BE_RXCNT_LEN + \ - SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ + SLCAN_STATE_LEN + \ + SLCAN_STATE_BE_RXCNT_LEN + \ + SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_ERROR_LEN + \ + SLCAN_DATA_LENGTH_LEN) +#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_SFF_ID_LEN + \ + SLCAN_DATA_LENGTH_LEN) struct slcan { struct can_priv can; @@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; + if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) + return; + skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; @@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl) return; } - if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) + if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) return; cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; @@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl) bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len; + if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) + return; + /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') @@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl) static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 4) + if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) slcan_bump(sl); sl->rcount = 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 3bc56517fe7a..c30b04f8fc0d 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -75,6 +75,24 @@ static const struct can_bittiming_const 
mcp251xfd_data_bittiming_const = { .brp_inc = 1, }; +/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of + * [-64,63] for TDCO, indicating a relative TDCO. + * + * Manual tests have shown that using a relative TDCO configuration + * results in bus off, while an absolute configuration works. + * + * For TDCO use the max value (63) from the data sheet, but 0 as the + * minimum. + */ +static const struct can_tdc_const mcp251xfd_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 63, + .tdco_min = 0, + .tdco_max = 63, + .tdcf_min = 0, + .tdcf_max = 0, +}; + static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) { switch (model) { @@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) { const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; - u32 val = 0; - s8 tdco; + u32 tdcmod, val = 0; int err; /* CAN Control Register @@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) return err; /* Transmitter Delay Compensation */ - tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), - -64, 63); - val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, - MCP251XFD_REG_TDC_TDCMOD_AUTO) | - FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); + if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO; + else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL; + else + tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; + + val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco); return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); } @@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; + priv->can.tdc_const = &mcp251xfd_tdc_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | - CAN_CTRLMODE_CC_LEN8_DLC; + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO | + CAN_CTRLMODE_TDC_MANUAL; set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); priv->ndev = ndev; priv->spi = spi; @@ -2174,8 +2198,8 @@ static void mcp251xfd_remove(struct spi_device *spi) struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct net_device *ndev = priv->ndev; - can_rx_offload_del(&priv->offload); mcp251xfd_unregister(priv); + can_rx_offload_del(&priv->offload); spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 61d164ffb3ae..7216eb8f9493 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -326,6 +326,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, } } +static void b53_set_eap_mode(struct b53_device *dev, int port, int mode) +{ + u64 eap_conf; + + if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID) + return; + + b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf); + + if (is63xx(dev)) { + eap_conf &= ~EAP_MODE_MASK_63XX; + eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX; + } else { + eap_conf &= ~EAP_MODE_MASK; + eap_conf |= (u64)mode << EAP_MODE_SHIFT; + } + + b53_write64(dev, B53_EAP_PAGE,
B53_PORT_EAP_CONF(port), eap_conf); +} + static void b53_set_forwarding(struct b53_device *dev, int enable) { u8 mgmt; @@ -373,15 +393,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); } + vc1 &= ~VC1_RX_MCST_FWD_EN; + if (enable) { vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; - vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; + vc1 |= VC1_RX_MCST_UNTAG_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; if (enable_filtering) { vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; vc5 |= VC5_DROP_VTABLE_MISS; } else { - vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; vc5 &= ~VC5_DROP_VTABLE_MISS; } @@ -393,7 +415,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, } else { vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); - vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); + vc1 &= ~VC1_RX_MCST_UNTAG_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; vc5 &= ~VC5_DROP_VTABLE_MISS; @@ -576,6 +598,25 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); } +int b53_setup_port(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + + b53_port_set_ucast_flood(dev, port, true); + b53_port_set_mcast_flood(dev, port, true); + b53_port_set_learning(dev, port, false); + + /* Force all traffic to go to the CPU port to prevent the ASIC from + * trying to forward to bridged ports on matching FDB entries, then + * dropping frames because it isn't allowed to forward there. + */ + if (dsa_is_user_port(ds, port)) + b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); + + return 0; +} +EXPORT_SYMBOL(b53_setup_port); + int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; @@ -588,10 +629,6 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - b53_port_set_ucast_flood(dev, port, true); - b53_port_set_mcast_flood(dev, port, true); - b53_port_set_learning(dev, port, false); - if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); if (ret) @@ -722,10 +759,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); b53_brcm_hdr_setup(dev->ds, port); - - b53_port_set_ucast_flood(dev, port, true); - b53_port_set_mcast_flood(dev, port, true); - b53_port_set_learning(dev, port, false); } static void b53_enable_mib(struct b53_device *dev) @@ -737,6 +770,15 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static void b53_enable_stp(struct b53_device *dev) +{ + u8 gc; + + b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + gc |= GC_RX_BPDU_EN; + b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); +} + static u16 b53_default_pvid(struct b53_device *dev) { if (is5325(dev) || is5365(dev)) @@ -752,6 +794,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); } +static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + struct dsa_port *dp; + + if (!dev->vlan_filtering) + return true; + + dp = dsa_to_port(ds, port); + + if (dsa_port_is_cpu(dp)) + return true; + + return dp->bridge == NULL; +} + int b53_configure_vlan(struct 
dsa_switch *ds) { struct b53_device *dev = ds->priv; @@ -770,7 +828,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering); + b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); /* Create an untagged VLAN entry for the default PVID in case * CONFIG_VLAN_8021Q is disabled and there are no calls to @@ -778,26 +836,39 @@ int b53_configure_vlan(struct dsa_switch *ds) * entry. Do this only when the tagging protocol is not * DSA_TAG_PROTO_NONE */ + v = &dev->vlans[def_vid]; b53_for_each_port(dev, i) { - v = &dev->vlans[def_vid]; - v->members |= BIT(i); + if (!b53_vlan_port_may_join_untagged(ds, i)) + continue; + + vl.members |= BIT(i); if (!b53_vlan_port_needs_forced_tagged(ds, i)) - v->untag = v->members; - b53_write16(dev, B53_VLAN_PAGE, - B53_VLAN_PORT_DEF_TAG(i), def_vid); + vl.untag = vl.members; + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), + def_vid); } + b53_set_vlan_entry(dev, def_vid, &vl); - /* Upon initial call we have not set-up any VLANs, but upon - * system resume, we need to restore all VLAN entries. - */ - for (vid = def_vid; vid < dev->num_vlans; vid++) { - v = &dev->vlans[vid]; + if (dev->vlan_filtering) { + /* Upon initial call we have not set-up any VLANs, but upon + * system resume, we need to restore all VLAN entries. + */ + for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { + v = &dev->vlans[vid]; - if (!v->members) - continue; + if (!v->members) + continue; + + b53_set_vlan_entry(dev, vid, v); + b53_fast_age_vlan(dev, vid); + } - b53_set_vlan_entry(dev, vid, v); - b53_fast_age_vlan(dev, vid); + b53_for_each_port(dev, i) { + if (!dsa_is_cpu_port(ds, i)) + b53_write16(dev, B53_VLAN_PAGE, + B53_VLAN_PORT_DEF_TAG(i), + dev->ports[i].pvid); + } } return 0; @@ -876,6 +947,7 @@ static int b53_switch_reset(struct b53_device *dev) } b53_enable_mib(dev); + b53_enable_stp(dev); return b53_flush_arl(dev, FAST_AGE_STATIC); } @@ -1115,7 +1187,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources); static int b53_setup(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; + struct b53_vlan *vl; unsigned int port; + u16 pvid; int ret; /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set @@ -1123,12 +1197,26 @@ static int b53_setup(struct dsa_switch *ds) */ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; + /* The switch does not tell us the original VLAN for untagged + * packets, so keep the CPU port always tagged. 
+ */ + ds->untag_vlan_aware_bridge_pvid = true; + ret = b53_reset_switch(dev); if (ret) { dev_err(ds->dev, "failed to reset switch\n"); return ret; } + /* setup default vlan for filtering mode */ + pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; + b53_for_each_port(dev, port) { + vl->members |= BIT(port); + if (!b53_vlan_port_needs_forced_tagged(ds, port)) + vl->untag |= BIT(port); + } + b53_reset_mib(dev); ret = b53_apply_config(dev); @@ -1482,7 +1570,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, { struct b53_device *dev = ds->priv; - b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering); + if (dev->vlan_filtering != vlan_filtering) { + dev->vlan_filtering = vlan_filtering; + b53_apply_config(dev); + } return 0; } @@ -1507,7 +1598,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid >= dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, port, true, ds->vlan_filtering); + b53_enable_vlan(dev, port, true, dev->vlan_filtering); return 0; } @@ -1520,18 +1611,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port, bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; struct b53_vlan *vl; + u16 old_pvid, new_pvid; int err; err = b53_vlan_prepare(ds, port, vlan); if (err) return err; - vl = &dev->vlans[vlan->vid]; + if (vlan->vid == 0) + return 0; + + old_pvid = dev->ports[port].pvid; + if (pvid) + new_pvid = vlan->vid; + else if (!pvid && vlan->vid == old_pvid) + new_pvid = b53_default_pvid(dev); + else + new_pvid = old_pvid; + dev->ports[port].pvid = new_pvid; - b53_get_vlan_entry(dev, vlan->vid, vl); + vl = &dev->vlans[vlan->vid]; - if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev)) - untagged = true; + if (dsa_is_cpu_port(ds, port)) + untagged = false; vl->members |= BIT(port); if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) @@ -1539,13 +1641,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port, else vl->untag &= ~BIT(port); + if (!dev->vlan_filtering) + return 0; + b53_set_vlan_entry(dev, vlan->vid, vl); b53_fast_age_vlan(dev, vlan->vid); - if (pvid && !dsa_is_cpu_port(ds, port)) { + if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), - vlan->vid); - b53_fast_age_vlan(dev, vlan->vid); + new_pvid); + b53_fast_age_vlan(dev, old_pvid); } return 0; @@ -1560,20 +1665,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port, struct b53_vlan *vl; u16 pvid; - b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + if (vlan->vid == 0) + return 0; - vl = &dev->vlans[vlan->vid]; + pvid = dev->ports[port].pvid; - b53_get_vlan_entry(dev, vlan->vid, vl); + vl = &dev->vlans[vlan->vid]; vl->members &= ~BIT(port); if (pvid == vlan->vid) pvid = b53_default_pvid(dev); + dev->ports[port].pvid = pvid; if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) vl->untag &= ~(BIT(port)); + if (!dev->vlan_filtering) + return 0; + b53_set_vlan_entry(dev, vlan->vid, vl); b53_fast_age_vlan(dev, vlan->vid); @@ -1906,8 +2016,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, struct netlink_ext_ack *extack) { struct b53_device *dev = ds->priv; + struct b53_vlan *vl; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - u16 pvlan, reg; + u16 pvlan, reg, pvid; unsigned int i; /* On 7278, port 7 which connects to the ASP should only receive @@ -1916,15 +2027,29 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) return -EINVAL; - /* Make this port leave the all VLANs join since we will have proper - * VLAN entries from now on - */ - if (is58xx(dev)) { - b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); - reg &= ~BIT(port); - if ((reg & BIT(cpu_port)) == BIT(cpu_port)) - reg &= ~BIT(cpu_port); - b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; + + if (dev->vlan_filtering) { + /* Make this port leave the all VLANs join since we will have + * proper VLAN entries from now on + */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, + &reg); + reg &= ~BIT(port); + if ((reg & BIT(cpu_port)) == BIT(cpu_port)) + reg &= ~BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, + reg); + } + + b53_get_vlan_entry(dev, pvid, vl); + vl->members &= ~BIT(port); + if (vl->members == BIT(cpu_port)) + vl->members &= ~BIT(cpu_port); + vl->untag = vl->members; + b53_set_vlan_entry(dev, pvid, vl); } b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); @@ -1944,6 +2069,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, pvlan |= BIT(i); } + /* Disable redirection of unknown SA to the CPU port */ + b53_set_eap_mode(dev, port, EAP_MODE_BASIC); + /* Configure the local port VLAN control membership to include * remote ports and update the local port bitmask */ @@ -1957,7 +2085,7 @@ EXPORT_SYMBOL(b53_br_join); void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct b53_device *dev = ds->priv; - struct b53_vlan *vl = &dev->vlans[0]; + struct b53_vlan *vl; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; unsigned int i; u16 pvlan, reg, pvid; @@ -1979,19 +2107,25 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) pvlan &= ~BIT(i); } + /* Enable redirection of unknown SA to the CPU port */ + b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; + + if (dev->vlan_filtering) { + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); + reg |= BIT(port); + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + } - /* Make this port join all VLANs without VLAN entries */ - if (is58xx(dev)) { - b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); - reg |= BIT(port); - if (!(reg & BIT(cpu_port))) - reg |= BIT(cpu_port); - b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); - } else { b53_get_vlan_entry(dev, pvid, vl); vl->members |= BIT(port) | BIT(cpu_port); vl->untag |= BIT(port) | BIT(cpu_port); @@ -2290,6 +2424,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, .phylink_get_caps = b53_phylink_get_caps, + .port_setup = b53_setup_port, .port_enable = b53_enable_port, .port_disable = b53_disable_port, .support_eee = b53_support_eee, @@ -2747,6 +2882,7 @@ struct b53_device *b53_switch_alloc(struct device *base, ds->ops = &b53_switch_ops; ds->phylink_mac_ops = &b53_phylink_mac_ops; dev->vlan_enabled = true; + dev->vlan_filtering = false; /* Let DSA handle the case where multiple bridges span the same switch * device and different VLAN awareness settings are requested, which * would be breaking
filtering semantics for any of the other bridge diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 0166c37a13a7..2cf3e6a81e37 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -96,6 +96,7 @@ struct b53_pcs { struct b53_port { u16 vlan_ctl_mask; + u16 pvid; struct ethtool_keee eee; }; @@ -147,6 +148,7 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; bool vlan_enabled; + bool vlan_filtering; unsigned int num_ports; struct b53_port *ports; @@ -382,6 +384,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mprot); void b53_mirror_del(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); +int b53_setup_port(struct dsa_switch *ds, int port); int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); void b53_disable_port(struct dsa_switch *ds, int port); void b53_brcm_hdr_setup(struct dsa_switch *ds, int port); diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index bfbcb66bef66..5f7a0e5c5709 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -50,6 +50,9 @@ /* Jumbo Frame Registers */ #define B53_JUMBO_PAGE 0x40 +/* EAP Registers */ +#define B53_EAP_PAGE 0x42 + /* EEE Control Registers Page */ #define B53_EEE_PAGE 0x92 @@ -481,6 +484,17 @@ #define JMS_MAX_SIZE 9724 /************************************************************************* + * EAP Page Registers + *************************************************************************/ +#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i)) +#define EAP_MODE_SHIFT 51 +#define EAP_MODE_SHIFT_63XX 50 +#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT) +#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX) +#define EAP_MODE_BASIC 0 +#define EAP_MODE_SIMPLIFIED 3 + +/************************************************************************* * EEE Configuration Page Registers *************************************************************************/ diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index fa2bf3fa9019..454a8c7fd7ee 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1230,6 +1230,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .resume = bcm_sf2_sw_resume, .get_wol = bcm_sf2_sw_get_wol, .set_wol = bcm_sf2_sw_set_wol, + .port_setup = b53_setup_port, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, .support_eee = b53_support_eee, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 579ee504fed5..d66da5cbbeb9 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -265,16 +265,70 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface); +/** + * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy) + * @config: phylink config structure + * + * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for + * a detailed explanation of EEE/LPI handling in KSZ switches. 
+ */ +static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config) +{ +} + +/** + * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy) + * @config: phylink config structure + * @timer: timer value before entering LPI (unused) + * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused) + * + * This function signals to phylink that the driver architecture supports + * LPI management, enabling phylink to control EEE advertisement during + * negotiation according to IEEE Std 802.3 (Clause 78). + * + * Hardware Management of EEE/LPI State: + * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2), + * observation and testing suggest that the actual EEE / Low Power Idle (LPI) + * state transitions are managed autonomously by the hardware based on + * the auto-negotiation results. (Note: While the datasheet describes EEE + * operation based on negotiation, it doesn't explicitly detail the internal + * MAC/PHY interaction, so autonomous hardware management of the MAC state + * for LPI is inferred from observed behavior). + * This hardware control, consistent with the switch's ability to operate + * autonomously via strapping, means MAC-level software intervention is not + * required or exposed for managing the LPI state once EEE is negotiated. + * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining + * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration + * Straps). + * + * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3) + * lack documented MAC-level LPI control. + * + * Therefore, this callback performs no action and serves primarily to inform + * phylink of LPI awareness and to document the inferred hardware behavior. + * + * Returns: 0 (Always success) + */ +static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config, + u32 timer, bool tx_clock_stop) +{ + return 0; +} + static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = { .mac_config = ksz88x3_phylink_mac_config, .mac_link_down = ksz_phylink_mac_link_down, .mac_link_up = ksz8_phylink_mac_link_up, + .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi, }; static const struct phylink_mac_ops ksz8_phylink_mac_ops = { .mac_config = ksz_phylink_mac_config, .mac_link_down = ksz_phylink_mac_link_down, .mac_link_up = ksz8_phylink_mac_link_up, + .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi, }; static const struct ksz_dev_ops ksz88xx_dev_ops = { @@ -358,6 +412,8 @@ static const struct phylink_mac_ops ksz9477_phylink_mac_ops = { .mac_config = ksz_phylink_mac_config, .mac_link_down = ksz_phylink_mac_link_down, .mac_link_up = ksz9477_phylink_mac_link_up, + .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi, }; static const struct ksz_dev_ops ksz9477_dev_ops = { @@ -401,6 +457,8 @@ static const struct phylink_mac_ops lan937x_phylink_mac_ops = { .mac_config = ksz_phylink_mac_config, .mac_link_down = ksz_phylink_mac_link_down, .mac_link_up = ksz9477_phylink_mac_link_up, + .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi, }; static const struct ksz_dev_ops lan937x_dev_ops = { @@ -2016,6 +2074,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port, if (dev->dev_ops->get_caps) dev->dev_ops->get_caps(dev, port, config); + + if (ds->ops->support_eee && ds->ops->support_eee(ds, 
port)) { + memcpy(config->lpi_interfaces, config->supported_interfaces, + sizeof(config->lpi_interfaces)); + + config->lpi_capabilities = MAC_100FD; + if (dev->info->gbit_capable[port]) + config->lpi_capabilities |= MAC_1000FD; + + /* EEE is fully operational */ + config->eee_enabled_default = true; + } } void ksz_r_mib_stats64(struct ksz_device *dev, int port) @@ -3009,31 +3079,6 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) if (!port) return MICREL_KSZ8_P1_ERRATA; break; - case KSZ8567_CHIP_ID: - /* KSZ8567R Errata DS80000752C Module 4 */ - case KSZ8765_CHIP_ID: - case KSZ8794_CHIP_ID: - case KSZ8795_CHIP_ID: - /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */ - case KSZ9477_CHIP_ID: - /* KSZ9477S Errata DS80000754A Module 4 */ - case KSZ9567_CHIP_ID: - /* KSZ9567S Errata DS80000756A Module 4 */ - case KSZ9896_CHIP_ID: - /* KSZ9896C Errata DS80000757A Module 3 */ - case KSZ9897_CHIP_ID: - case LAN9646_CHIP_ID: - /* KSZ9897R Errata DS80000758C Module 4 */ - /* Energy Efficient Ethernet (EEE) feature select must be manually disabled - * The EEE feature is enabled by default, but it is not fully - * operational. It must be manually disabled through register - * controls. If not disabled, the PHY ports can auto-negotiate - * to enable EEE, and this feature can cause link drops when - * linked to another device supporting EEE. - * - * The same item appears in the errata for all switches above. - */ - return MICREL_NO_EEE; } return 0; @@ -3467,6 +3512,20 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port) return -EOPNOTSUPP; } +/** + * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a + * port + * @ds: Pointer to the DSA switch structure + * @port: Port number to check + * + * This function also documents devices where EEE was initially advertised but + * later withdrawn due to reliability issues, as described in official errata + * documents. These devices are explicitly listed to record known limitations, + * even if there is no technical necessity for runtime checks. + * + * Returns: true if the internal PHY on the given port supports fully + * operational EEE, false otherwise. + */ static bool ksz_support_eee(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; @@ -3476,15 +3535,35 @@ static bool ksz_support_eee(struct dsa_switch *ds, int port) switch (dev->chip_id) { case KSZ8563_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9893_CHIP_ID: + return true; case KSZ8567_CHIP_ID: + /* KSZ8567R Errata DS80000752C Module 4 */ + case KSZ8765_CHIP_ID: + case KSZ8794_CHIP_ID: + case KSZ8795_CHIP_ID: + /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */ case KSZ9477_CHIP_ID: - case KSZ9563_CHIP_ID: + /* KSZ9477S Errata DS80000754A Module 4 */ case KSZ9567_CHIP_ID: - case KSZ9893_CHIP_ID: + /* KSZ9567S Errata DS80000756A Module 4 */ case KSZ9896_CHIP_ID: + /* KSZ9896C Errata DS80000757A Module 3 */ case KSZ9897_CHIP_ID: case LAN9646_CHIP_ID: - return true; + /* KSZ9897R Errata DS80000758C Module 4 */ + /* Energy Efficient Ethernet (EEE) feature select must be + * manually disabled + * The EEE feature is enabled by default, but it is not fully + * operational. It must be manually disabled through register + * controls. If not disabled, the PHY ports can auto-negotiate + * to enable EEE, and this feature can cause link drops when + * linked to another device supporting EEE. + * + * The same item appears in the errata for all switches above. 
+ */ + break; } return false; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index d70399bce5b9..c5d6628d7980 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2419,6 +2419,9 @@ mt7531_setup_common(struct dsa_switch *ds) struct mt7530_priv *priv = ds->priv; int ret, i; + ds->assisted_learning_on_cpu_port = true; + ds->mtu_enforcement_ingress = true; + mt753x_trap_frames(priv); /* Enable and reset MIB counters */ @@ -2571,9 +2574,6 @@ mt7531_setup(struct dsa_switch *ds) if (ret) return ret; - ds->assisted_learning_on_cpu_port = true; - ds->mtu_enforcement_ingress = true; - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index dd616eb809ea..2281d6ab8c9a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1852,6 +1852,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, if (!chip->info->ops->vtu_getnext) return -EOPNOTSUPP; + memset(entry, 0, sizeof(*entry)); + entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip); entry->valid = false; @@ -1960,7 +1962,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid) struct mv88e6xxx_mst *mst, *tmp; int err; - if (!sid) + /* If the SID is zero, it is for a VLAN mapped to the default MSTI, + * and mv88e6xxx_stu_setup() made sure it is always present, and thus, + * should not be removed here. + * + * If the chip lacks STU support, numerically the "sid" variable will + * happen to also be zero, but we don't want to rely on that fact, so + * we explicitly test that first. In that case, there is also nothing + * to do here. + */ + if (!mv88e6xxx_has_stu(chip) || !sid) return 0; list_for_each_entry_safe(mst, tmp, &chip->msts, node) { diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c index 795c8df7b6a7..195460a0a0d4 100644 --- a/drivers/net/dsa/mv88e6xxx/devlink.c +++ b/drivers/net/dsa/mv88e6xxx/devlink.c @@ -736,7 +736,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds) int i; for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) - dsa_devlink_region_destroy(chip->regions[i]); + if (chip->regions[i]) + dsa_devlink_region_destroy(chip->regions[i]); } void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 940f1b71226d..7b35d24c38d7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) struct tc_taprio_qopt_offload *taprio; struct ocelot_port *ocelot_port; struct timespec64 base_ts; - int port; + int i, port; u32 val; mutex_lock(&ocelot->fwd_domain_lock); @@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, QSYS_PARAM_CFG_REG_3); + for (i = 0; i < taprio->num_entries; i++) + vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]); + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, QSYS_TAS_PARAM_CFG_CTRL); diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index f8454f3b6f9c..f674c400f05b 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2081,6 +2081,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: case BR_STATE_BLOCKING: + case 
BR_STATE_LISTENING: /* From UM10944 description of DRPDTAG (why put this there?): * "Management traffic flows to the port regardless of the state * of the INGRESS flag". So BPDUs are still be allowed to pass. @@ -2090,11 +2091,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, mac[port].egress = false; mac[port].dyn_learn = false; break; - case BR_STATE_LISTENING: - mac[port].ingress = true; - mac[port].egress = false; - mac[port].dyn_learn = false; - break; case BR_STATE_LEARNING: mac[port].ingress = true; mac[port].egress = false; diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index d748dc6de923..1e9ab65218ff 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -614,7 +614,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) struct airoha_queue_entry *e = &q->entry[q->tail]; struct airoha_qdma_desc *desc = &q->desc[q->tail]; u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); - dma_addr_t dma_addr = le32_to_cpu(desc->addr); struct page *page = virt_to_head_page(e->buf); u32 desc_ctrl = le32_to_cpu(desc->ctrl); struct airoha_gdm_port *port; @@ -623,22 +622,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) break; - if (!dma_addr) - break; - - len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); - if (!len) - break; - q->tail = (q->tail + 1) % q->ndesc; q->queued--; - dma_sync_single_for_cpu(eth->dev, dma_addr, + dma_sync_single_for_cpu(eth->dev, e->dma_addr, SKB_WITH_OVERHEAD(q->buf_size), dir); + len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); data_len = q->skb ? q->buf_size : SKB_WITH_OVERHEAD(q->buf_size); - if (data_len < len) + if (!len || data_len < len) goto free_frag; p = airoha_qdma_get_gdm_port(eth, desc); @@ -701,9 +694,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) q->skb = NULL; continue; free_frag: - page_pool_put_full_page(q->page_pool, page, true); - dev_kfree_skb(q->skb); - q->skb = NULL; + if (q->skb) { + dev_kfree_skb(q->skb); + q->skb = NULL; + } else { + page_pool_put_full_page(q->page_pool, page, true); + } } airoha_qdma_fill_rx_queue(q); diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index 7a5710f9ccf6..ead0625e781f 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -104,12 +104,14 @@ struct ppe_mbox_data { u8 xpon_hal_api; u8 wan_xsi; u8 ct_joyme4; - int ppe_type; - int wan_mode; - int wan_sel; + u8 max_packet; + u8 rsv[3]; + u32 ppe_type; + u32 wan_mode; + u32 wan_sel; } init_info; struct { - int func_id; + u32 func_id; u32 size; u32 data; } set_info; diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index c83a0a80d533..506f682d15c1 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -5,11 +5,6 @@ #include "core.h" -struct pdsc_wait_context { - struct pdsc_qcq *qcq; - struct completion wait_completion; -}; - static int pdsc_process_notifyq(struct pdsc_qcq *qcq) { union pds_core_notifyq_comp *comp; @@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq) q_info = &q->info[q->tail_idx]; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); - /* Copy out the completion data */ - memcpy(q_info->dest, comp, sizeof(*comp)); - - complete_all(&q_info->wc->wait_completion); + if (!completion_done(&q_info->completion)) { + 
memcpy(q_info->dest, comp, sizeof(*comp)); + complete(&q_info->completion); + } if (cq->tail_idx == cq->num_descs - 1) cq->done_color = !cq->done_color; @@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data) static int __pdsc_adminq_post(struct pdsc *pdsc, struct pdsc_qcq *qcq, union pds_core_adminq_cmd *cmd, - union pds_core_adminq_comp *comp, - struct pdsc_wait_context *wc) + union pds_core_adminq_comp *comp) { struct pdsc_queue *q = &qcq->q; struct pdsc_q_info *q_info; @@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc, /* Post the request */ index = q->head_idx; q_info = &q->info[index]; - q_info->wc = wc; q_info->dest = comp; memcpy(q_info->desc, cmd, sizeof(*cmd)); + reinit_completion(&q_info->completion); dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n", q->head_idx, q->tail_idx); @@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc, union pds_core_adminq_comp *comp, bool fast_poll) { - struct pdsc_wait_context wc = { - .wait_completion = - COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion), - }; unsigned long poll_interval = 1; unsigned long poll_jiffies; unsigned long time_limit; unsigned long time_start; unsigned long time_done; unsigned long remaining; + struct completion *wc; int err = 0; int index; @@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc, return -ENXIO; } - wc.qcq = &pdsc->adminqcq; - index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc); + index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp); if (index < 0) { err = index; goto err_out; } + wc = &pdsc->adminqcq.q.info[index].completion; time_start = jiffies; time_limit = time_start + HZ * pdsc->devcmd_timeout; do { /* Timeslice the actual wait to catch IO errors etc early */ poll_jiffies = msecs_to_jiffies(poll_interval); - remaining = wait_for_completion_timeout(&wc.wait_completion, - poll_jiffies); + remaining = wait_for_completion_timeout(wc, poll_jiffies); if (remaining) break; @@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc, dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n", __func__, jiffies_to_msecs(time_done - time_start)); - /* Check the results */ - if (time_after_eq(time_done, time_limit)) + /* Check the results and clear an un-completed timeout */ + if (time_after_eq(time_done, time_limit) && !completion_done(wc)) { err = -ETIMEDOUT; + complete(wc); + } dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index); dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c index eeb72b1809ea..92f359f2b449 100644 --- a/drivers/net/ethernet/amd/pds_core/auxbus.c +++ b/drivers/net/ethernet/amd/pds_core/auxbus.c @@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev, dev_dbg(pf->dev, "%s: %s opcode %d\n", __func__, dev_name(&padev->aux_dev.dev), req->opcode); - if (pf->state) - return -ENXIO; - /* Wrap the client's request */ cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD; cmd.client_request.client_id = cpu_to_le16(padev->client_id); @@ -189,7 +186,6 @@ void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf, pds_client_unregister(pf, padev->client_id); auxiliary_device_delete(&padev->aux_dev); auxiliary_device_uninit(&padev->aux_dev); - padev->client_id = 0; *pd_ptr = NULL; mutex_unlock(&pf->config_lock); diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c index 1eb0d92786f7..9512aa4083f0 100644 --- a/drivers/net/ethernet/amd/pds_core/core.c +++ 
b/drivers/net/ethernet/amd/pds_core/core.c @@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa) q->base = base; q->base_pa = base_pa; - for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) { cur->desc = base + (i * q->desc_size); + init_completion(&cur->completion); + } } static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa) @@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc) size_t sz; int err; - /* Scale the descriptor ring length based on number of CPUs and VFs */ - numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus()); - numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev); - numdescs = roundup_pow_of_two(numdescs); + numdescs = PDSC_ADMINQ_MAX_LENGTH; err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq", PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR, numdescs, diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index 0bf320c43083..0b53a1fab46d 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -16,7 +16,7 @@ #define PDSC_WATCHDOG_SECS 5 #define PDSC_QUEUE_NAME_MAX_SZ 16 -#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */ +#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */ #define PDSC_TEARDOWN_RECOVERY false #define PDSC_TEARDOWN_REMOVING true @@ -96,7 +96,7 @@ struct pdsc_q_info { unsigned int bytes; unsigned int nbufs; struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS]; - struct pdsc_wait_context *wc; + struct completion completion; void *dest; }; diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c index ac37a4e738ae..04c5e3abd8d7 100644 --- a/drivers/net/ethernet/amd/pds_core/debugfs.c +++ b/drivers/net/ethernet/amd/pds_core/debugfs.c @@ -154,8 +154,9 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) debugfs_create_u32("index", 0400, intr_dentry, &intr->index); debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector); - intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset), - GFP_KERNEL); + intr_ctrl_regset = devm_kzalloc(pdsc->dev, + sizeof(*intr_ctrl_regset), + GFP_KERNEL); if (!intr_ctrl_regset) return; intr_ctrl_regset->regs = intr_ctrl_regs; diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index c5c787df61a4..d8dc39da4161 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -105,7 +105,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL, .fw_control.oper = PDS_CORE_FW_GET_LIST, }; - struct pds_core_fw_list_info fw_list; + struct pds_core_fw_list_info fw_list = {}; struct pdsc *pdsc = devlink_priv(dl); union pds_core_dev_comp comp; char buf[32]; @@ -118,8 +118,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, if (!err) memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); mutex_unlock(&pdsc->devcmd_lock); - if (err && err != -EIO) - return err; listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names)); for (i = 0; i < listlen; i++) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 230726d7b74f..d41b58fad37b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ 
b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, } /* Set up the header page info */ - xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, - XGBE_SKB_ALLOC_SIZE); + if (pdata->netdev->features & NETIF_F_RXCSUM) { + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + XGBE_SKB_ALLOC_SIZE); + } else { + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + pdata->rx_buf_size); + } /* Set up the buffer page info */ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index f393228d41c7..f1b0fb02b3cd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); } +static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); + } +} + static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, unsigned int index, unsigned int val) { @@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata) xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); - xgbe_config_sph_mode(pdata); - xgbe_config_rss(pdata); + + if (pdata->netdev->features & NETIF_F_RXCSUM) { + xgbe_config_sph_mode(pdata); + xgbe_config_rss(pdata); + } + desc_if->wrapper_tx_desc_init(pdata); desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); @@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->disable_vxlan = xgbe_disable_vxlan; hw_if->set_vxlan_id = xgbe_set_vxlan_id; + /* For Split Header*/ + hw_if->enable_sph = xgbe_config_sph_mode; + hw_if->disable_sph = xgbe_disable_sph_mode; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index d84a310dfcd4..8e09ad8fa022 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev, if (ret) return ret; - if ((features & NETIF_F_RXCSUM) && !rxcsum) + if ((features & NETIF_F_RXCSUM) && !rxcsum) { + hw_if->enable_sph(pdata); + hw_if->enable_vxlan(pdata); hw_if->enable_rx_csum(pdata); - else if (!(features & NETIF_F_RXCSUM) && rxcsum) + schedule_work(&pdata->restart_work); + } else if (!(features & NETIF_F_RXCSUM) && rxcsum) { + hw_if->disable_sph(pdata); + hw_if->disable_vxlan(pdata); hw_if->disable_rx_csum(pdata); + schedule_work(&pdata->restart_work); + } if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) hw_if->enable_rx_vlan_stripping(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index d85386cac8d1..ed5d43c16d0e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -865,6 +865,10 @@ struct xgbe_hw_if { void (*enable_vxlan)(struct xgbe_prv_data *); void (*disable_vxlan)(struct xgbe_prv_data *); void (*set_vxlan_id)(struct xgbe_prv_data *); + + /* For Split Header */ + void (*enable_sph)(struct xgbe_prv_data *pdata); + void (*disable_sph)(struct xgbe_prv_data *pdata); }; /* This structure represents implementation specific 
routines for an diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index eeec8bf17cf4..1bd4313215d7 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -143,7 +143,7 @@ config BNX2X depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select ZLIB_INFLATE - select LIBCRC32C + select CRC32 select MDIO help This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards. @@ -207,7 +207,7 @@ config BNXT depends on PCI depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER - select LIBCRC32C + select CRC32 select NET_DEVLINK select PAGE_POOL select DIMLIB diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8725e1e13908..6afc2ab6fad2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -787,7 +787,7 @@ tx_free: dev_kfree_skb_any(skb); tx_kick_pending: if (BNXT_TX_PTP_IS_SET(lflags)) { - txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; + txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; atomic64_inc(&bp->ptp_cfg->stats.ts_err); if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) /* set SKB to err so PTP worker will clean up */ @@ -795,7 +795,7 @@ tx_kick_pending: } if (txr->kick_pending) bnxt_txr_db_kick(bp, txr, txr->tx_prod); - txr->tx_buf_ring[txr->tx_prod].skb = NULL; + txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL; dev_core_stats_tx_dropped_inc(dev); return NETDEV_TX_OK; } @@ -2015,6 +2015,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, } return skb; vlan_err: + skb_mark_for_recycle(skb); dev_kfree_skb(skb); return NULL; } @@ -3414,6 +3415,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) bnxt_free_one_tx_ring_skbs(bp, txr, i); } + + if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) + bnxt_ptp_free_txts_skbs(bp->ptp_cfg); } static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) @@ -11599,6 +11603,9 @@ static void bnxt_init_napi(struct bnxt *bp) poll_fn = bnxt_poll_p5; else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) cp_nr_rings--; + + set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); + for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, @@ -12318,12 +12325,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { struct hwrm_func_drv_if_change_output *resp; struct hwrm_func_drv_if_change_input *req; - bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; bool caps_change = false; int rc, retry = 0; + bool fw_reset; u32 flags = 0; + fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT); + bp->fw_reset_state = 0; + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; @@ -12392,13 +12402,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return rc; } + /* IRQ will be initialized later in bnxt_request_irq()*/ bnxt_clear_int_mode(bp); - rc = bnxt_init_int_mode(bp); - if (rc) { - clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); - netdev_err(bp->dev, "init int mode failed\n"); - return rc; - } } rc = bnxt_cancel_reservations(bp, fw_reset); } @@ -12797,8 +12802,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) bnxt_vf_reps_open(bp); - if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) - WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); if 
(BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) @@ -14010,13 +14013,28 @@ static void bnxt_unlock_sp(struct bnxt *bp) netdev_unlock(bp->dev); } +/* Same as bnxt_lock_sp() with additional rtnl_lock */ +static void bnxt_rtnl_lock_sp(struct bnxt *bp) +{ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); + netdev_lock(bp->dev); +} + +static void bnxt_rtnl_unlock_sp(struct bnxt *bp) +{ + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + netdev_unlock(bp->dev); + rtnl_unlock(); +} + /* Only called from bnxt_sp_task() */ static void bnxt_reset(struct bnxt *bp, bool silent) { - bnxt_lock_sp(bp); + bnxt_rtnl_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state)) bnxt_reset_task(bp, silent); - bnxt_unlock_sp(bp); + bnxt_rtnl_unlock_sp(bp); } /* Only called from bnxt_sp_task() */ @@ -14024,9 +14042,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) { int i; - bnxt_lock_sp(bp); + bnxt_rtnl_lock_sp(bp); if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - bnxt_unlock_sp(bp); + bnxt_rtnl_unlock_sp(bp); return; } /* Disable and flush TPA before resetting the RX ring */ @@ -14065,7 +14083,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) } if (bp->flags & BNXT_FLAG_TPA) bnxt_set_tpa(bp, true); - bnxt_unlock_sp(bp); + bnxt_rtnl_unlock_sp(bp); } static void bnxt_fw_fatal_close(struct bnxt *bp) @@ -14833,7 +14851,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) bnxt_dl_health_fw_status_update(bp, false); - bp->fw_reset_state = 0; + bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT; netif_close(bp->dev); } @@ -14957,15 +14975,17 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; fallthrough; case BNXT_FW_RESET_STATE_OPENING: - while (!netdev_trylock(bp->dev)) { + while (!rtnl_trylock()) { bnxt_queue_fw_reset_work(bp, HZ / 10); return; } + netdev_lock(bp->dev); rc = bnxt_open(bp->dev); if (rc) { netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); bnxt_fw_reset_abort(bp, rc); netdev_unlock(bp->dev); + rtnl_unlock(); goto ulp_start; } @@ -14985,6 +15005,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_dl_health_fw_status_update(bp, true); } netdev_unlock(bp->dev); + rtnl_unlock(); bnxt_ulp_start(bp, 0); bnxt_reenable_sriov(bp); netdev_lock(bp->dev); @@ -15933,7 +15954,7 @@ err_reset: rc); napi_enable_locked(&bnapi->napi); bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); - bnxt_reset_task(bp, true); + netif_close(dev); return rc; } @@ -16003,8 +16024,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_rdma_aux_device_del(bp); - bnxt_ptp_clear(bp); unregister_netdev(dev); + bnxt_ptp_clear(bp); bnxt_rdma_aux_device_uninit(bp); @@ -16749,6 +16770,7 @@ static int bnxt_resume(struct device *device) struct bnxt *bp = netdev_priv(dev); int rc = 0; + rtnl_lock(); netdev_lock(dev); rc = pci_enable_device(bp->pdev); if (rc) { @@ -16793,6 +16815,7 @@ static int bnxt_resume(struct device *device) resume_exit: netdev_unlock(bp->dev); + rtnl_unlock(); bnxt_ulp_start(bp, rc); if (!rc) bnxt_reenable_sriov(bp); @@ -16931,10 +16954,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) if (!err) result = PCI_ERS_RESULT_RECOVERED; + /* IRQ will be initialized later in bnxt_io_resume */ bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); - err = bnxt_init_int_mode(bp); - bnxt_ulp_irq_restart(bp, err); } reset_exit: @@ -16959,20 +16981,25 @@ static void bnxt_io_resume(struct pci_dev *pdev) int err; netdev_info(bp->dev, "PCI 
Slot Resume\n"); + rtnl_lock(); netdev_lock(netdev); err = bnxt_hwrm_func_qcaps(bp); if (!err) { - if (netif_running(netdev)) + if (netif_running(netdev)) { err = bnxt_open(netdev); - else + } else { err = bnxt_reserve_rings(bp, true); + if (!err) + err = bnxt_init_int_mode(bp); + } } if (!err) netif_device_attach(netdev); netdev_unlock(netdev); + rtnl_unlock(); bnxt_ulp_start(bp, err); if (!err) bnxt_reenable_sriov(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 21726cf56586..bc8b3b7e915d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2614,6 +2614,7 @@ struct bnxt { #define BNXT_FW_RESET_STATE_POLL_FW 4 #define BNXT_FW_RESET_STATE_OPENING 5 #define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6 +#define BNXT_FW_RESET_STATE_ABORT 7 u16 fw_reset_min_dsecs; #define BNXT_DFLT_FW_RST_MIN_DSECS 20 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index 5576e7cf8463..a000d3f630bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -110,20 +110,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, } } + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) + info->dest_buf_size += len; + if (info->dest_buf) { if ((info->seg_start + off + len) <= BNXT_COREDUMP_BUF_LEN(info->buf_len)) { - memcpy(info->dest_buf + off, dma_buf, len); + u16 copylen = min_t(u16, len, + info->dest_buf_size - off); + + memcpy(info->dest_buf + off, dma_buf, copylen); + if (copylen < len) + break; } else { rc = -ENOBUFS; + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { + kfree(info->dest_buf); + info->dest_buf = NULL; + } break; } } - if (cmn_req->req_type == - cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) - info->dest_buf_size += len; - if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 48dd5922e4dd..f5d490bf997e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2062,6 +2062,17 @@ static int bnxt_get_regs_len(struct net_device *dev) return reg_len; } +#define BNXT_PCIE_32B_ENTRY(start, end) \ + { offsetof(struct pcie_ctx_hw_stats, start), \ + offsetof(struct pcie_ctx_hw_stats, end) } + +static const struct { + u16 start; + u16 end; +} bnxt_pcie_32b_entries[] = { + BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), +}; + static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { @@ -2094,12 +2105,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); rc = hwrm_req_send(bp, req); if (!rc) { - __le64 *src = (__le64 *)hw_pcie_stats; - u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); - int i; - - for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) - dst[i] = le64_to_cpu(src[i]); + u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); + u8 *src = (u8 *)hw_pcie_stats; + int i, j; + + for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) { + if (i >= bnxt_pcie_32b_entries[j].start && + i <= bnxt_pcie_32b_entries[j].end) { + u32 *dst32 = (u32 *)(dst + i); + + *dst32 = le32_to_cpu(*(__le32 *)(src + i)); + i += 4; + if (i > bnxt_pcie_32b_entries[j].end && + j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) + j++; + } else { + u64 *dst64 
= (u64 *)(dst + i); + + *dst64 = le64_to_cpu(*(__le64 *)(src + i)); + i += 8; + } + } } hwrm_req_drop(bp, req); } @@ -4991,6 +5017,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!bp->num_tests || !BNXT_PF(bp)) return; + memset(buf, 0, sizeof(u64) * bp->num_tests); if (etest->flags & ETH_TEST_FL_OFFLINE && bnxt_ulp_registered(bp->edev)) { etest->flags |= ETH_TEST_FL_FAILED; @@ -4998,7 +5025,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, return; } - memset(buf, 0, sizeof(u64) * bp->num_tests); if (!netif_running(dev)) { etest->flags |= ETH_TEST_FL_FAILED; return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 2d4e19b96ee7..0669d43472f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -794,6 +794,27 @@ next_slot: return HZ; } +void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp) +{ + struct bnxt_ptp_tx_req *txts_req; + u16 cons = ptp->txts_cons; + + /* make sure ptp aux worker finished with + * possible BNXT_STATE_OPEN set + */ + ptp_cancel_worker_sync(ptp->ptp_clock); + + ptp->tx_avail = BNXT_MAX_TX_TS; + while (cons != ptp->txts_prod) { + txts_req = &ptp->txts_req[cons]; + if (!IS_ERR_OR_NULL(txts_req->tx_skb)) + dev_kfree_skb_any(txts_req->tx_skb); + cons = NEXT_TXTS(cons); + } + ptp->txts_cons = cons; + ptp_schedule_worker(ptp->ptp_clock, 0); +} + int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod) { spin_lock_bh(&ptp->ptp_tx_lock); @@ -1105,7 +1126,6 @@ out: void bnxt_ptp_clear(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - int i; if (!ptp) return; @@ -1117,12 +1137,5 @@ void bnxt_ptp_clear(struct bnxt *bp) kfree(ptp->ptp_info.pin_config); ptp->ptp_info.pin_config = NULL; - for (i = 0; i < BNXT_MAX_TX_TS; i++) { - if (ptp->txts_req[i].tx_skb) { - dev_kfree_skb_any(ptp->txts_req[i].tx_skb); - ptp->txts_req[i].tx_skb = NULL; - } - } - bnxt_unmap_ptp_regs(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index a95f05e9c579..0481161d26ef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -162,6 +162,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp); void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); +void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp); int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod); void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index a8e930d5dbb0..7564705d6478 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -20,6 +20,7 @@ #include <asm/byteorder.h> #include <linux/bitmap.h> #include <linux/auxiliary_bus.h> +#include <net/netdev_lock.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -309,14 +310,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) if (!ulp->msix_requested) return; - netdev_lock(bp->dev); - ops = rcu_dereference(ulp->ulp_ops); + ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev); if (!ops || !ops->ulp_irq_stop) return; if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) reset = true; 
ops->ulp_irq_stop(ulp->handle, reset); - netdev_unlock(bp->dev); } } @@ -335,8 +334,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) if (!ulp->msix_requested) return; - netdev_lock(bp->dev); - ops = rcu_dereference(ulp->ulp_ops); + ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev); if (!ops || !ops->ulp_irq_restart) return; @@ -348,7 +346,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) bnxt_fill_msix_vecs(bp, ent); } ops->ulp_irq_restart(ulp->handle, ent); - netdev_unlock(bp->dev); kfree(ent); } } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1fe8ec37491b..e1e8bd2ec155 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -997,22 +997,15 @@ static void macb_update_stats(struct macb *bp) static int macb_halt_tx(struct macb *bp) { - unsigned long halt_time, timeout; - u32 status; + u32 status; macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); - timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); - do { - halt_time = jiffies; - status = macb_readl(bp, TSR); - if (!(status & MACB_BIT(TGO))) - return 0; - - udelay(250); - } while (time_before(halt_time, timeout)); - - return -ETIMEDOUT; + /* Poll TSR until TGO is cleared or timeout. */ + return read_poll_timeout_atomic(macb_readl, status, + !(status & MACB_BIT(TGO)), + 250, MACB_HALT_TIMEOUT, false, + bp, TSR); } static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget) diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index ca742cc146d7..7dae5aad3689 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -70,8 +70,8 @@ config LIQUIDIO depends on 64BIT && PCI depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + select CRC32 select FW_LOADER - select LIBCRC32C select LIQUIDIO_CORE select NET_DEVLINK help diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 7f3f5afa864f..1546c3db08f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -2270,6 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap) eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL); if (!eth_filter->port[i].bmap) { ret = -ENOMEM; + kvfree(eth_filter->port[i].loc_array); goto free_eth_finfo; } } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index d88fbecdab4b..232e839a9d07 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev) eth_hw_addr_set(dev, psrom->mac_addr); if (np->chip_id == CHIP_IP1000A) { - np->led_mode = psrom->led_mode; + np->led_mode = le16_to_cpu(psrom->led_mode); return 0; } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 195dc6cfd895..0e33e2eaae96 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -335,7 +335,7 @@ typedef struct t_SROM { u16 sub_system_id; /* 0x06 */ u16 pci_base_1; /* 0x08 (IP1000A only) */ u16 pci_base_2; /* 0x0a (IP1000A only) */ - u16 led_mode; /* 0x0c (IP1000A only) */ + __le16 led_mode; /* 0x0c (IP1000A only) */ u16 reserved1[9]; /* 0x0e-0x1f */ u8 mac_addr[6]; /* 0x20-0x25 */ u8 reserved2[10]; /* 0x26-0x2f */ diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 625245b0845c..eba73246f986 
100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -67,6 +67,8 @@ #define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE) #define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) #define TSNEP_TX_TYPE_XSK BIT(12) +#define TSNEP_TX_TYPE_TSTAMP BIT(13) +#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP) #define TSNEP_XDP_TX BIT(0) #define TSNEP_XDP_REDIRECT BIT(1) @@ -386,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, if (entry->skb) { entry->properties = length & TSNEP_DESC_LENGTH_MASK; entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; - if ((entry->type & TSNEP_TX_TYPE_SKB) && - (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) + if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; /* toggle user flag to prevent false acknowledge @@ -479,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry, return mapped; } -static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) +static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count, + bool do_tstamp) { struct device *dmadev = tx->adapter->dmadev; struct tsnep_tx_entry *entry; @@ -505,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) entry->type = TSNEP_TX_TYPE_SKB_INLINE; mapped = 0; } + + if (do_tstamp) + entry->type |= TSNEP_TX_TYPE_TSTAMP; } else { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; @@ -558,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, struct tsnep_tx *tx) { - int count = 1; struct tsnep_tx_entry *entry; + bool do_tstamp = false; + int count = 1; int length; - int i; int retval; + int i; if (skb_shinfo(skb)->nr_frags > 0) count += skb_shinfo(skb)->nr_frags; @@ -579,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, entry = &tx->entry[tx->write]; entry->skb = skb; - retval = tsnep_tx_map(skb, tx, count); + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + do_tstamp = true; + } + + retval = tsnep_tx_map(skb, tx, count, do_tstamp); if (retval < 0) { tsnep_tx_unmap(tx, tx->write, count); dev_kfree_skb_any(entry->skb); @@ -591,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, } length = retval; - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - for (i = 0; i < count; i++) tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, i == count - 1); @@ -844,8 +853,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) length = tsnep_tx_unmap(tx, tx->read, count); - if ((entry->type & TSNEP_TX_TYPE_SKB) && - (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && + if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) && (__le32_to_cpu(entry->desc_wb->properties) & TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { struct skb_shared_hwtstamps hwtstamps; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 2106861463e4..3ee52f4b1166 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1850,6 +1850,16 @@ static void 
enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, } } +static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } +} + static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, struct napi_struct *napi, int work_limit, struct bpf_prog *prog) @@ -1868,11 +1878,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, while (likely(rx_frm_cnt < work_limit)) { union enetc_rx_bd *rxbd, *orig_rxbd; - int orig_i, orig_cleaned_cnt; struct xdp_buff xdp_buff; struct sk_buff *skb; + int orig_i, err; u32 bd_status; - int err; rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); @@ -1887,7 +1896,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, break; orig_rxbd = rxbd; - orig_cleaned_cnt = cleaned_cnt; orig_i = i; enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, @@ -1915,15 +1923,21 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, rx_ring->stats.xdp_drops++; break; case XDP_PASS: - rxbd = orig_rxbd; - cleaned_cnt = orig_cleaned_cnt; - i = orig_i; - - skb = enetc_build_skb(rx_ring, bd_status, &rxbd, - &i, &cleaned_cnt, - ENETC_RXB_DMA_SIZE_XDP); - if (unlikely(!skb)) + skb = xdp_build_skb_from_buff(&xdp_buff); + /* Probably under memory pressure, stop NAPI */ + if (unlikely(!skb)) { + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_drops++; goto out; + } + + enetc_get_offloads(rx_ring, orig_rxbd, skb); + + /* These buffers are about to be owned by the stack. + * Update our buffer cache (the rx_swbd array elements) + * with their other page halves. + */ + enetc_bulk_flip_buff(rx_ring, orig_i, i); napi_gro_receive(napi, skb); break; @@ -1965,11 +1979,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); rx_ring->stats.xdp_redirect_failures++; } else { - while (orig_i != i) { - enetc_flip_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); - enetc_bdr_idx_inc(rx_ring, &orig_i); - } + enetc_bulk_flip_buff(rx_ring, orig_i, i); xdp_redirect_frm_cnt++; rx_ring->stats.xdp_redirect++; } @@ -3362,7 +3372,8 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i, bdr->buffer_offset = ENETC_RXB_PAD; priv->rx_ring[i] = bdr; - err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); + err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0, + ENETC_RXB_DMA_SIZE_XDP); if (err) goto free_vector; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a86cfebedaa8..17e9bddb9ddd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index f8cdab62bf85..7725cb0c5c8a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -108,14 +108,16 @@ struct hbg_irq_info { bool re_enable; bool 
need_print; bool need_reset; - u64 count; - void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info); + void (*irq_handle)(struct hbg_priv *priv, + const struct hbg_irq_info *info); }; struct hbg_vector { char name[HBG_VECTOR_NUM][32]; - struct hbg_irq_info *info_array; + + u64 *stats_array; + const struct hbg_irq_info *info_array; u32 info_array_len; }; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c index 5e0ba4d5b08d..01ad82d2f5cc 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c @@ -61,7 +61,7 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused) { struct net_device *netdev = dev_get_drvdata(s->private); struct hbg_priv *priv = netdev_priv(netdev); - struct hbg_irq_info *info; + const struct hbg_irq_info *info; u32 i; for (i = 0; i < priv->vectors.info_array_len; i++) { @@ -73,7 +73,7 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused) info->mask)), str_true_false(info->need_reset), str_true_false(info->need_print), - info->count); + priv->vectors.stats_array[i]); } return 0; @@ -106,6 +106,7 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused) { struct net_device *netdev = dev_get_drvdata(s->private); struct hbg_priv *priv = netdev_priv(netdev); + bool np_link_fail; seq_printf(s, "event handling state: %s\n", state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING)); @@ -117,8 +118,10 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused) reset_type_str[priv->reset_type]); seq_printf(s, "need reset state: %s\n", state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET)); - seq_printf(s, "np_link fail state: %s\n", - state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL)); + + np_link_fail = !hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, + HBG_REG_AN_NEG_STATE_NP_LINK_OK_B); + seq_printf(s, "np_link fail state: %s\n", str_true_false(np_link_fail)); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c index d61c03f34ff0..f23fb5920c3c 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c @@ -234,7 +234,7 @@ static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask) for (i = 0; i < vectors->info_array_len; i++) if (vectors->info_array[i].mask == mask) - return vectors->info_array[i].count; + return vectors->stats_array[i]; return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 4e8cb66f601c..ff3295b60a69 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -26,12 +26,15 @@ static void hbg_restore_mac_table(struct hbg_priv *priv) static void hbg_restore_user_def_settings(struct hbg_priv *priv) { + /* The index of host mac is always 0. 
*/ + u64 rx_pause_addr = ether_addr_to_u64(priv->filter.mac_table[0].addr); struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param; hbg_restore_mac_table(priv); hbg_hw_set_mtu(priv, priv->netdev->mtu); hbg_hw_set_pause_enable(priv, pause_param->tx_pause, pause_param->rx_pause); + hbg_hw_set_rx_pause_mac_addr(priv, rx_pause_addr); } int hbg_rebuild(struct hbg_priv *priv) @@ -58,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) return -EBUSY; } + netif_device_detach(priv->netdev); + priv->reset_type = type; set_bit(HBG_NIC_STATE_RESETTING, &priv->state); clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); @@ -88,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) return ret; } + netif_device_attach(priv->netdev); + dev_info(&priv->pdev->dev, "reset done\n"); return ret; } @@ -114,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv) if (running) dev_close(priv->netdev); - hbg_reset(priv); - - /* in hbg_pci_err_detected(), we will detach first, - * so we need to attach before open - */ - if (!netif_device_present(priv->netdev)) - netif_device_attach(priv->netdev); + if (hbg_reset(priv)) + goto err_unlock; if (running) dev_open(priv->netdev, NULL); + +err_unlock: rtnl_unlock(); } @@ -157,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); hbg_err_reset(priv); - netif_device_attach(netdev); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index 8f1107b85fbb..55520053270a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv, const struct hbg_ethtool_stats *stats; u32 i; + if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return; + for (i = 0; i < info_len; i++) { stats = &info[i]; if (!stats->reg) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index 74a18033b444..9b65eef62b3f 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -234,6 +234,10 @@ void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable) { hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR, HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable); + + /* only uc filter is supported, so set all bits of mc mask reg to 1 */ + hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX); + hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX); } void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en) @@ -242,6 +246,9 @@ void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en) HBG_REG_PAUSE_ENABLE_TX_B, tx_en); hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, HBG_REG_PAUSE_ENABLE_RX_B, rx_en); + + hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR, + HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en); } void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c index e79e9ab3e530..8af0bc4cca21 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c @@ -6,7 +6,7 @@ #include "hbg_hw.h" static void hbg_irq_handle_err(struct hbg_priv *priv, - struct hbg_irq_info *irq_info) + const 
struct hbg_irq_info *irq_info) { if (irq_info->need_print) dev_err(&priv->pdev->dev, @@ -17,30 +17,30 @@ static void hbg_irq_handle_err(struct hbg_priv *priv, } static void hbg_irq_handle_tx(struct hbg_priv *priv, - struct hbg_irq_info *irq_info) + const struct hbg_irq_info *irq_info) { napi_schedule(&priv->tx_ring.napi); } static void hbg_irq_handle_rx(struct hbg_priv *priv, - struct hbg_irq_info *irq_info) + const struct hbg_irq_info *irq_info) { napi_schedule(&priv->rx_ring.napi); } static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv, - struct hbg_irq_info *irq_info) + const struct hbg_irq_info *irq_info) { priv->stats.rx_fifo_less_empty_thrsld_cnt++; } #define HBG_IRQ_I(name, handle) \ - {#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle} + {#name, HBG_INT_MSK_##name##_B, false, false, false, handle} #define HBG_ERR_IRQ_I(name, need_print, ndde_reset) \ {#name, HBG_INT_MSK_##name##_B, true, need_print, \ - ndde_reset, 0, hbg_irq_handle_err} + ndde_reset, hbg_irq_handle_err} -static struct hbg_irq_info hbg_irqs[] = { +static const struct hbg_irq_info hbg_irqs[] = { HBG_IRQ_I(RX, hbg_irq_handle_rx), HBG_IRQ_I(TX, hbg_irq_handle_tx), HBG_ERR_IRQ_I(TX_PKT_CPL, true, true), @@ -64,7 +64,7 @@ static struct hbg_irq_info hbg_irqs[] = { static irqreturn_t hbg_irq_handle(int irq_num, void *p) { - struct hbg_irq_info *info; + const struct hbg_irq_info *info; struct hbg_priv *priv = p; u32 status; u32 i; @@ -79,7 +79,7 @@ static irqreturn_t hbg_irq_handle(int irq_num, void *p) hbg_hw_irq_enable(priv, info->mask, false); hbg_hw_irq_clear(priv, info->mask); - info->count++; + priv->vectors.stats_array[i]++; if (info->irq_handle) info->irq_handle(priv, info); @@ -132,6 +132,12 @@ int hbg_irq_init(struct hbg_priv *priv) irq_names_map[i]); } + vectors->stats_array = devm_kcalloc(&priv->pdev->dev, + ARRAY_SIZE(hbg_irqs), + sizeof(u64), GFP_KERNEL); + if (!vectors->stats_array) + return -ENOMEM; + vectors->info_array = hbg_irqs; vectors->info_array_len = ARRAY_SIZE(hbg_irqs); return 0; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 2ac5454338e4..2e64dc1ab355 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -21,7 +21,7 @@ static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled) { - struct hbg_irq_info *info; + const struct hbg_irq_info *info; u32 i; for (i = 0; i < priv->vectors.info_array_len; i++) { @@ -203,12 +203,12 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) return -EBUSY; - hbg_hw_set_mtu(priv, new_mtu); - WRITE_ONCE(netdev->mtu, new_mtu); - dev_dbg(&priv->pdev->dev, "change mtu from %u to %u\n", netdev->mtu, new_mtu); + hbg_hw_set_mtu(priv, new_mtu); + WRITE_ONCE(netdev->mtu, new_mtu); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c index f29a937ad087..42b0083c9193 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -2,6 +2,7 @@ // Copyright (c) 2024 Hisilicon Limited. 
#include <linux/phy.h> +#include <linux/rtnetlink.h> #include "hbg_common.h" #include "hbg_hw.h" #include "hbg_mdio.h" @@ -133,12 +134,17 @@ void hbg_fix_np_link_fail(struct hbg_priv *priv) { struct device *dev = &priv->pdev->dev; + rtnl_lock(); + if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) { dev_err(dev, "failed to fix the MAC link status\n"); priv->stats.np_link_fail_cnt = 0; - return; + goto unlock; } + if (!priv->mac.phydev->link) + goto unlock; + priv->stats.np_link_fail_cnt++; dev_err(dev, "failed to link between MAC and PHY, try to fix...\n"); @@ -147,6 +153,9 @@ void hbg_fix_np_link_fail(struct hbg_priv *priv) */ hbg_phy_stop(priv); hbg_phy_start(priv); + +unlock: + rtnl_unlock(); } static void hbg_phy_adjust_link(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index cc2cc612770d..a6e7f5e62b48 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -68,6 +68,7 @@ #define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5) #define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064) #define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0) +#define HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B BIT(4) #define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080) #define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084) #define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088) @@ -134,6 +135,8 @@ #define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224) #define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228) #define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C) +#define HBG_REG_STATION_ADDR_LOW_MSK_0 (HBG_REG_SGMII_BASE + 0x0230) +#define HBG_REG_STATION_ADDR_LOW_MSK_1 (HBG_REG_SGMII_BASE + 0x0238) /* PCU */ #define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 09749e9f7398..4e5d8bc39a1b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -61,7 +61,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .name = "tm_qset", .cmd = HNAE3_DBG_CMD_TM_QSET, .dentry = HNS3_DBG_DENTRY_TM, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_1MB, .init = hns3_dbg_common_file_init, }, { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9ff797fb36c4..b03b8758c777 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, writel(mask_en, tqp_vector->mask_addr); } -static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector) { napi_enable(&tqp_vector->napi); enable_irq(tqp_vector->vector_irq); - - /* enable vector */ - hns3_mask_vector_irq(tqp_vector, 1); } -static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector) { - /* disable vector */ - hns3_mask_vector_irq(tqp_vector, 0); - disable_irq(tqp_vector->vector_irq); napi_disable(&tqp_vector->napi); cancel_work_sync(&tqp_vector->rx_group.dim.work); @@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev) return 0; } 
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_enable(&priv->tqp_vector[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 1); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_enable(h->kinfo.tqp[i]); +} + +static void hns3_disable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 0); + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_disable(&priv->tqp_vector[i]); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - int i, j; int ret; ret = hns3_nic_reset_all_ring(h); @@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - /* enable the vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - /* enable rcb */ - for (j = 0; j < h->kinfo.num_tqps; j++) - hns3_tqp_enable(h->kinfo.tqp[j]); + hns3_enable_irqs_and_tqps(netdev); /* start the ae_dev */ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; if (ret) { set_bit(HNS3_NIC_STATE_DOWN, &priv->state); - while (j--) - hns3_tqp_disable(h->kinfo.tqp[j]); - - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); + hns3_disable_irqs_and_tqps(netdev); } return ret; @@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h) static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; - int i; - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - /* disable rcb */ - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(netdev); /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; @@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev, void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int i; if (!if_running) return; @@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) netif_carrier_off(ndev); netif_tx_disable(ndev); - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(ndev); /* delay ring buffer clearing to hns3_reset_notify_uninit_enet * during reset process, because driver may not be able @@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; - int i; if (!if_running) return; @@ -5912,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - for (i = 0; i < 
h->kinfo.num_tqps; i++) - hns3_tqp_enable(h->kinfo.tqp[i]); + hns3_enable_irqs_and_tqps(ndev); netif_tx_wake_all_queues(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 59cc9221185f..ec581d4b696f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) ptp->info.settime64 = hclge_ptp_settime; ptp->info.n_alarm = 0; + + spin_lock_init(&ptp->lock); + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + hdev->ptp = ptp; + ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); if (IS_ERR(ptp->clock)) { dev_err(&hdev->pdev->dev, @@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) return -ENODEV; } - spin_lock_init(&ptp->lock); - ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; - ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; - ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; - hdev->ptp = ptp; - return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9ba767740a04..dada42e7e0ec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1292,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) rtnl_unlock(); } -static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclge_vf_to_pf_msg send_msg; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, @@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } +static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable); + if (ret) + return ret; + + hdev->rxvtag_strip_en = enable; + return 0; +} + static int hclgevf_reset_tqp(struct hnae3_handle *handle) { #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U @@ -2204,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) tc_valid, tc_size); } -static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) +static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev, + bool rxvtag_strip_en) { struct hnae3_handle *nic = &hdev->nic; int ret; - ret = hclgevf_en_hw_strip_rxvtag(nic, true); + ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed to enable rx vlan offload, ret = %d\n", ret); @@ -2879,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); @@ -2994,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, true); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index cccef3228461..0208425ab594 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -253,6 +253,7 @@ struct hclgevf_dev { int *vector_irq; bool gro_en; + bool rxvtag_strip_en; unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 01a08cfd0090..66e070095d1b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only // SPDX-FileCopyrightText: Copyright Red Hat -#include <linux/bitfield.h> #include <linux/cleanup.h> #include <linux/mutex.h> #include <linux/pci.h> @@ -14,32 +13,16 @@ static DEFINE_XARRAY(ice_adapters); static DEFINE_MUTEX(ice_adapters_mutex); -/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ -#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) -#define INDEX_FIELD_DEV GENMASK(31, 16) -#define INDEX_FIELD_BUS GENMASK(12, 5) -#define INDEX_FIELD_SLOT GENMASK(4, 0) - -static unsigned long ice_adapter_index(const struct pci_dev *pdev) +static unsigned long ice_adapter_index(u64 dsn) { - unsigned int domain = pci_domain_nr(pdev->bus); - - WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); - - switch (pdev->device) { - case ICE_DEV_ID_E825C_BACKPLANE: - case ICE_DEV_ID_E825C_QSFP: - case ICE_DEV_ID_E825C_SFP: - case ICE_DEV_ID_E825C_SGMII: - return FIELD_PREP(INDEX_FIELD_DEV, pdev->device); - default: - return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | - FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | - FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); - } +#if BITS_PER_LONG == 64 + return dsn; +#else + return (u32)dsn ^ (u32)(dsn >> 32); +#endif } -static struct ice_adapter *ice_adapter_new(void) +static struct ice_adapter *ice_adapter_new(u64 dsn) { struct ice_adapter *adapter; @@ -47,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void) if (!adapter) return NULL; + adapter->device_serial_number = dsn; spin_lock_init(&adapter->ptp_gltsyn_time_lock); refcount_set(&adapter->refcount, 1); @@ -77,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter) * Return: Pointer to ice_adapter on success. * ERR_PTR() on error. -ENOMEM is the only possible error. */ -struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) { - unsigned long index = ice_adapter_index(pdev); + u64 dsn = pci_get_dsn(pdev); struct ice_adapter *adapter; + unsigned long index; int err; + index = ice_adapter_index(dsn); scoped_guard(mutex, &ice_adapters_mutex) { err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); if (err == -EBUSY) { adapter = xa_load(&ice_adapters, index); refcount_inc(&adapter->refcount); + WARN_ON_ONCE(adapter->device_serial_number != dsn); return adapter; } if (err) return ERR_PTR(err); - adapter = ice_adapter_new(); + adapter = ice_adapter_new(dsn); if (!adapter) return ERR_PTR(-ENOMEM); xa_store(&ice_adapters, index, adapter, GFP_KERNEL); @@ -110,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) * * Context: Process, may sleep. 
*/ -void ice_adapter_put(const struct pci_dev *pdev) +void ice_adapter_put(struct pci_dev *pdev) { - unsigned long index = ice_adapter_index(pdev); + u64 dsn = pci_get_dsn(pdev); struct ice_adapter *adapter; + unsigned long index; + index = ice_adapter_index(dsn); scoped_guard(mutex, &ice_adapters_mutex) { adapter = xa_load(&ice_adapters, index); if (WARN_ON(!adapter)) diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h index e233225848b3..ac15c0d2bc1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.h +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -32,6 +32,7 @@ struct ice_port_list { * @refcount: Reference count. struct ice_pf objects hold the references. * @ctrl_pf: Control PF of the adapter * @ports: Ports list + * @device_serial_number: DSN cached for collision detection on 32bit systems */ struct ice_adapter { refcount_t refcount; @@ -40,9 +41,10 @@ struct ice_adapter { struct ice_pf *ctrl_pf; struct ice_port_list ports; + u64 device_serial_number; }; -struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); -void ice_adapter_put(const struct pci_dev *pdev); +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev); +void ice_adapter_put(struct pci_dev *pdev); #endif /* _ICE_ADAPTER_H */ diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 69d5b1a28491..59323c019544 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2345,15 +2345,15 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; - if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; - } - if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + if (hw->mac_type == ICE_MAC_E810 || + hw->mac_type == ICE_MAC_GENERIC) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (status) diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 22371011c249..2410aee59fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) */ if (!primary_lag) { lag->primary = true; + if (!ice_is_switchdev_running(lag->pf)) + return; + /* Configure primary's SWID to be shared */ ice_lag_primary_swid(lag, true); primary_lag = lag; } else { u16 swid; + if (!ice_is_switchdev_running(primary_lag->pf)) + return; + swid = primary_lag->pf->hw.port_info->sw_id; ice_lag_set_swid(swid, lag, true); ice_lag_add_prune_list(primary_lag, lag->pf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 7c3006eb68dd..6446d0fcc052 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -4275,7 +4275,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) } ice_vfhw_mac_add(vf, &al->list[i]); - vf->num_mac++; break; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 7752920d7a8e..1cca9b2262e8 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -2097,6 +2097,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) pf = vf->pf; dev = ice_pf_to_dev(pf); vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err_exit; + } #define ICE_VF_MAX_FDIR_FILTERS 128 if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) || diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 66544faab710..70dbf80f3bb7 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -143,6 +143,7 @@ enum idpf_vport_state { * @vport_id: Vport identifier * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index + * @max_tx_hdr_size: Max header length hardware can support * @state: See enum idpf_vport_state * @netstats: Packet and byte stats * @stats_lock: Lock to protect stats update @@ -153,6 +154,7 @@ struct idpf_netdev_priv { u32 vport_id; u32 link_speed_mbps; u16 vport_idx; + u16 max_tx_hdr_size; enum idpf_vport_state state; struct rtnl_link_stats64 netstats; spinlock_t stats_lock; @@ -629,13 +631,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6) -#define IDPF_CAP_RX_CSUM_L4V4 (\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP) +#define IDPF_CAP_TX_CSUM_L4V4 (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP) -#define IDPF_CAP_RX_CSUM_L4V6 (\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) +#define IDPF_CAP_TX_CSUM_L4V6 (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP) #define IDPF_CAP_RX_CSUM (\ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\ @@ -644,11 +646,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) -#define IDPF_CAP_SCTP_CSUM (\ +#define IDPF_CAP_TX_SCTP_CSUM (\ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\ - VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP) + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP) #define IDPF_CAP_TUNNEL_TX_CSUM (\ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index aa755dedb41d..3a033ce19cda 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; struct idpf_vport_config *vport_config; + netdev_features_t other_offloads = 0; + netdev_features_t csum_offloads = 0; + netdev_features_t tso_offloads = 0; netdev_features_t dflt_features; - netdev_features_t offloads = 0; struct idpf_netdev_priv *np; struct net_device *netdev; u16 idx = vport->idx; @@ -721,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->vport = vport; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); vport->netdev = netdev; return idpf_init_mac_addr(vport, netdev); @@ -738,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->adapter = adapter; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); 
spin_lock_init(&np->stats_lock); @@ -766,53 +770,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) dflt_features |= NETIF_F_RXHASH; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) - dflt_features |= NETIF_F_IP_CSUM; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) - dflt_features |= NETIF_F_IPV6_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) + csum_offloads |= NETIF_F_IP_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) + csum_offloads |= NETIF_F_IPV6_CSUM; if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) - dflt_features |= NETIF_F_RXCSUM; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) - dflt_features |= NETIF_F_SCTP_CRC; + csum_offloads |= NETIF_F_RXCSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM)) + csum_offloads |= NETIF_F_SCTP_CRC; if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) - dflt_features |= NETIF_F_TSO; + tso_offloads |= NETIF_F_TSO; if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) - dflt_features |= NETIF_F_TSO6; + tso_offloads |= NETIF_F_TSO6; if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_UDP | VIRTCHNL2_CAP_SEG_IPV6_UDP)) - dflt_features |= NETIF_F_GSO_UDP_L4; + tso_offloads |= NETIF_F_GSO_UDP_L4; if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) - offloads |= NETIF_F_GRO_HW; - /* advertise to stack only if offloads for encapsulated packets is - * supported - */ - if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, - VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { - offloads |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | - NETIF_F_GSO_PARTIAL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_IPXIP4 | - NETIF_F_GSO_IPXIP6 | - 0; - - if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, - IDPF_CAP_TUNNEL_TX_CSUM)) - netdev->gso_partial_features |= - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - offloads |= NETIF_F_TSO_MANGLEID; - } + other_offloads |= NETIF_F_GRO_HW; if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) - offloads |= NETIF_F_LOOPBACK; + other_offloads |= NETIF_F_LOOPBACK; - netdev->features |= dflt_features; - netdev->hw_features |= dflt_features | offloads; - netdev->hw_enc_features |= dflt_features | offloads; + netdev->features |= dflt_features | csum_offloads | tso_offloads; + netdev->hw_features |= netdev->features | other_offloads; + netdev->vlan_features |= netdev->features | other_offloads; + netdev->hw_enc_features |= dflt_features | other_offloads; idpf_set_ethtool_ops(netdev); netif_set_affinity_auto(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); @@ -1132,11 +1115,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, num_max_q = max(max_q->max_txq, max_q->max_rxq); vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); - if (!vport->q_vector_idxs) { - kfree(vport); + if (!vport->q_vector_idxs) + goto free_vport; - return NULL; - } idpf_vport_init(vport, max_q); /* This alloc is done separate from the LUT because it's not strictly @@ -1146,11 +1127,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, */ rss_data = &adapter->vport_config[idx]->user_config.rss_data; rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); - if (!rss_data->rss_key) { - 
kfree(vport); + if (!rss_data->rss_key) + goto free_vector_idxs; - return NULL; - } /* Initialize default rss key */ netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); @@ -1163,6 +1142,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, adapter->next_vport = idpf_get_free_slot(adapter); return vport; + +free_vector_idxs: + kfree(vport->q_vector_idxs); +free_vport: + kfree(vport); + + return NULL; } /** @@ -2219,8 +2205,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_adapter *adapter = vport->adapter; + struct idpf_netdev_priv *np = netdev_priv(netdev); + u16 max_tx_hdr_size = np->max_tx_hdr_size; size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -2243,7 +2229,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, goto unsupported; len = skb_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; if (!skb->encapsulation) @@ -2256,7 +2242,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, /* IPLEN can support at most 127 dwords */ len = skb_inner_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; /* No need to validate L4LEN as TCP is the only protocol with a diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index bec4a02c5373..b35713036a54 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev) { struct idpf_adapter *adapter = pci_get_drvdata(pdev); + cancel_delayed_work_sync(&adapter->serv_task); cancel_delayed_work_sync(&adapter->vc_event_task); idpf_vc_core_deinit(adapter); idpf_deinit_dflt_mbx(adapter); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index bdf52cef3891..2d5f5c9f91ce 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -4025,6 +4025,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) return budget; } + /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that. 
+ */ + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) + return budget; + work_done = min_t(int, work_done, budget - 1); /* Exit the polling mode, but don't re-enable interrupts if stack might @@ -4035,15 +4043,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) else idpf_vport_intr_set_wb_on_itr(q_vector); - /* Switch to poll mode in the tear-down path after sending disable - * queues virtchnl message, as the interrupts will be disabled after - * that - */ - if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, - q_vector->tx[0]))) - return budget; - else - return work_done; + return work_done; } /** diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index c35cc5cb1185..2f265c0959c7 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -319,6 +319,7 @@ struct igc_adapter { struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ ktime_t ptp_reset_start; /* Reset time in clock mono */ struct system_time_snapshot snapshot; + struct mutex ptm_lock; /* Only allow one PTM transaction at a time */ char fw_version[32]; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 8e449904aa7d..d19325b0e6e0 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -574,7 +574,10 @@ #define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) -#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */ +/* A short cycle time of 1us theoretically should work, but appears to be too + * short in practice. + */ +#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */ #define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ #define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ @@ -593,6 +596,7 @@ #define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ #define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ #define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ +#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */ /* PCIe PTM Cycle Control */ #define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index f1330379e6bb..b1669d7cf435 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7231,6 +7231,7 @@ static int igc_probe(struct pci_dev *pdev, err_register: igc_release_hw_control(adapter); + igc_ptp_stop(adapter); err_eeprom: if (!igc_check_reset_block(hw)) igc_reset_phy(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 946edbad4302..efc7b30e4211 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -974,45 +974,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) } } +/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */ +static void igc_ptm_trigger(struct igc_hw *hw) +{ + u32 ctrl; + + /* To "manually" start the PTM cycle we need to set the + * trigger (TRIG) bit + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Perform flush after write to CTRL register otherwise + * 
transaction may not start + */ + wrfl(); +} + +/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */ +static void igc_ptm_reset(struct igc_hw *hw) +{ + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Write to clear all status */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL); +} + static int igc_phc_get_syncdevicetime(ktime_t *device, struct system_counterval_t *system, void *ctx) { - u32 stat, t2_curr_h, t2_curr_l, ctrl; struct igc_adapter *adapter = ctx; struct igc_hw *hw = &adapter->hw; + u32 stat, t2_curr_h, t2_curr_l; int err, count = 100; ktime_t t1, t2_curr; - /* Get a snapshot of system clocks to use as historic value. */ - ktime_get_snapshot(&adapter->snapshot); - + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ do { - /* Doing this in a loop because in the event of a - * badly timed (ha!) system clock adjustment, we may - * get PTM errors from the PCI root, but these errors - * are transitory. Repeating the process returns valid - * data eventually. - */ + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); - /* To "manually" start the PTM cycle we need to clear and - * then set again the TRIG bit. - */ - ctrl = rd32(IGC_PTM_CTRL); - ctrl &= ~IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - ctrl |= IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - - /* The cycle only starts "for real" when software notifies - * that it has read the registers, this is done by setting - * VALID bit. - */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, stat, IGC_PTM_STAT_SLEEP, IGC_PTM_STAT_TIMEOUT); + igc_ptm_reset(hw); + if (err < 0) { netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); return err; @@ -1021,15 +1038,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device, if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) break; - if (stat & ~IGC_PTM_STAT_VALID) { - /* An error occurred, log it. */ - igc_ptm_log_error(adapter, stat); - /* The STAT register is write-1-to-clear (W1C), - * so write the previous error status to clear it. 
- */ - wr32(IGC_PTM_STAT, stat); - continue; - } + igc_ptm_log_error(adapter, stat); } while (--count); if (!count) { @@ -1061,9 +1070,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, { struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, ptp_caps); + int ret; + + /* This blocks until any in progress PTM transactions complete */ + mutex_lock(&adapter->ptm_lock); + + ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); + mutex_unlock(&adapter->ptm_lock); - return get_device_system_crosststamp(igc_phc_get_syncdevicetime, - adapter, &adapter->snapshot, cts); + return ret; } static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp, @@ -1162,6 +1178,7 @@ void igc_ptp_init(struct igc_adapter *adapter) spin_lock_init(&adapter->ptp_tx_lock); spin_lock_init(&adapter->free_timer_lock); spin_lock_init(&adapter->tmreg_lock); + mutex_init(&adapter->ptm_lock); adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -1174,6 +1191,7 @@ void igc_ptp_init(struct igc_adapter *adapter) if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; netdev_err(netdev, "ptp_clock_register failed\n"); + mutex_destroy(&adapter->ptm_lock); } else if (adapter->ptp_clock) { netdev_info(netdev, "PHC added\n"); adapter->ptp_flags |= IGC_PTP_ENABLED; @@ -1203,10 +1221,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl; + mutex_lock(&adapter->ptm_lock); ctrl = rd32(IGC_PTM_CTRL); ctrl &= ~IGC_PTM_CTRL_EN; wr32(IGC_PTM_CTRL, ctrl); + mutex_unlock(&adapter->ptm_lock); } /** @@ -1237,13 +1257,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter) **/ void igc_ptp_stop(struct igc_adapter *adapter) { + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + igc_ptp_suspend(adapter); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); netdev_info(adapter->netdev, "PHC removed\n"); adapter->ptp_flags &= ~IGC_PTP_ENABLED; } + mutex_destroy(&adapter->ptm_lock); } /** @@ -1255,13 +1280,18 @@ void igc_ptp_stop(struct igc_adapter *adapter) void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; - u32 cycle_ctrl, ctrl; + u32 cycle_ctrl, ctrl, stat; unsigned long flags; u32 timadj; + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + mutex_lock(&adapter->ptm_lock); + spin_lock_irqsave(&adapter->tmreg_lock, flags); switch (adapter->hw.mac.type) { @@ -1290,14 +1320,19 @@ void igc_ptp_reset(struct igc_adapter *adapter) ctrl = IGC_PTM_CTRL_EN | IGC_PTM_CTRL_START_NOW | IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | - IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | - IGC_PTM_CTRL_TRIG; + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT); wr32(IGC_PTM_CTRL, ctrl); /* Force the first cycle to run. */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); + if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT)) + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + + igc_ptm_reset(hw); break; default: /* No work to do. 
*/ @@ -1314,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter) out: spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptm_lock); + wrfl(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 0a679e95196f..24499bb36c00 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work) miss_cnt); rtnl_lock(); if (netif_running(oct->netdev)) - octep_stop(oct->netdev); + dev_close(oct->netdev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 18c922dd5fc6..ccb69bc5c952 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct octep_vf_device *oct = netdev_priv(netdev); netdev_hold(netdev, NULL, GFP_ATOMIC); - schedule_work(&oct->tx_timeout_task); + if (!schedule_work(&oct->tx_timeout_task)) + netdev_put(netdev, NULL); + } static int octep_vf_set_mac(struct net_device *netdev, void *p) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 0b27a695008b..971993586fb4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -717,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; + + /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ + if (idx >= CGX_RX_STAT_GLOBAL_INDEX) + lmac_id = 0; + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7fa98aeb3663..4a3370a40dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * changes effective. 
Write 1 for flush and read is being used as a * barrier and sets up a data dependency. Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a1f9ec03c2ce..c827da626471 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index f3b9daffaec3..4c7e0f345cb5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, if (sw_tx_sc->encrypt) sectag_tci |= (MCS_TCI_E | MCS_TCI_C); - policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, + pfvf->netdev->mtu + OTX2_ETH_HLEN); /* Write SecTag excluding AN bits(1..0) */ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 1e88422825be..d6b4b74e4002 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -356,6 +356,7 @@ struct otx2_flow_config { struct list_head flow_list_tc; u8 ucast_flt_cnt; bool ntuple; + u16 ntuple_cnt; }; struct dev_hw_ops { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 33ec9a7f7c03..e13ae5484c19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, if (!pfvf->flow_cfg) return 0; + pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16; otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 010385b29988..45b8c9230184 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev, struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_pause_frm_cfg *req, *rsp; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return; mutex_lock(&pfvf->mbox.lock); @@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev, if (pause->autoneg) return -EOPNOTSUPP; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return -EOPNOTSUPP; if (pause->rx_pause) @@ -941,8 +941,8 @@ static u32 otx2_get_link(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); - /* LBK link is internal and always UP */ - if (is_otx2_lbkvf(pfvf->pdev)) + /* LBK and SDP links are internal and always UP */ + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 1; return pfvf->linfo.link_up; } @@ -1413,7 +1413,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); - if (is_otx2_lbkvf(pfvf->pdev)) { + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) { cmd->base.duplex = DUPLEX_FULL; cmd->base.speed = SPEED_100000; } else { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 47bfd1fb37d4..64c6d9162ef6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf) mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ - count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; @@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT; /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 7ef3ba477d49..9b28be4c4a5d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -729,9 +729,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } #ifdef CONFIG_DCB - err = otx2_dcbnl_set_ops(netdev); - if (err) - goto err_free_zc_bmap; + /* Priority flow control is not supported for LBK and SDP vf(s) */ + if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) { + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_free_zc_bmap; + } #endif otx2_qos_init(vf, qos_txqs); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 0f844c14485a..35acc07bd964 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL2) { + /* configure parent txschq */ + cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq); + cfg->regval[num_regs] = (u64)hw->tx_link << 16; + num_regs++; + /* configure link cfg */ if (level == pfvf->qos.link_cfg_lvl) { cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 04e08e06f30f..7153a71dfc86 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -67,6 +67,8 @@ static int rvu_rep_mcam_flow_init(struct rep_dev *rep) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&priv->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + goto exit; for (ent = 0; ent < rsp->count; ent++) rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 43197b28b3e7..6c92072b4c28 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = { "ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0", - "top_usxgmii0_sel", - "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel", - "top_xfi_phy0_xtal_sel", - "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel", "top_eth_refck_50m_sel", "top_eth_sys_200m_sel", @@ -734,7 +730,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, case SPEED_100: val |= MTK_QTX_SCH_MAX_RATE_EN | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3); + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); break; case SPEED_1000: @@ -757,13 +753,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, case SPEED_100: val |= MTK_QTX_SCH_MAX_RATE_EN | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5); + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); break; case SPEED_1000: val |= MTK_QTX_SCH_MAX_RATE_EN | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10); break; default: @@ -871,9 +867,25 @@ static 
const struct phylink_mac_ops mtk_phylink_ops = { .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi, }; +static void mtk_mdio_config(struct mtk_eth *eth) +{ + u32 val; + + /* Configure MDC Divider */ + val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider); + + /* Configure MDC Turbo Mode */ + if (mtk_is_netsys_v3_or_greater(eth)) + mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); + else + val |= PPSC_MDC_TURBO; + + mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); +} + static int mtk_mdio_init(struct mtk_eth *eth) { - unsigned int max_clk = 2500000, divider; + unsigned int max_clk = 2500000; struct device_node *mii_np; int ret; u32 val; @@ -908,20 +920,9 @@ static int mtk_mdio_init(struct mtk_eth *eth) } max_clk = val; } - divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); - - /* Configure MDC Turbo Mode */ - if (mtk_is_netsys_v3_or_greater(eth)) - mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); - - /* Configure MDC Divider */ - val = FIELD_PREP(PPSC_MDC_CFG, divider); - if (!mtk_is_netsys_v3_or_greater(eth)) - val |= PPSC_MDC_TURBO; - mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); - - dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); - + eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); + mtk_mdio_config(eth); + dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider); ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: @@ -2247,14 +2248,18 @@ skip_rx: ring->data[idx] = new_data; rxd->rxd1 = (unsigned int)dma_addr; release_desc: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) + addr64 = FIELD_GET(RX_DMA_ADDR64_MASK, + rxd->rxd2); + else + addr64 = RX_DMA_PREP_ADDR64(dma_addr); + } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) rxd->rxd2 = RX_DMA_LSO; else - rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && - likely(dma_addr != DMA_MAPPING_ERROR)) - rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64; ring->calc_idx = idx; done++; @@ -3181,11 +3186,19 @@ static int mtk_dma_init(struct mtk_eth *eth) static void mtk_dma_free(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; - int i; + int i, j, txqs = 1; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + txqs = MTK_QDMA_NUM_QUEUES; + + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + for (j = 0; j < txqs; j++) + netdev_tx_reset_subqueue(eth->netdev[i], j); + } - for (i = 0; i < MTK_MAX_DEVS; i++) - if (eth->netdev[i]) - netdev_reset_queue(eth->netdev[i]); if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { dma_free_coherent(eth->dma_dev, MTK_QDMA_RING_SIZE * soc->tx.desc_size, @@ -3315,7 +3328,7 @@ static int mtk_start_dma(struct mtk_eth *eth) if (mtk_is_netsys_v2_or_greater(eth)) val |= MTK_MUTLI_CNT | MTK_RESV_BUF | MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | - MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN; + MTK_CHK_DDONE_EN; else val |= MTK_RX_BT_32DWORDS; mtk_w32(eth, val, reg_map->qdma.glo_cfg); @@ -3460,9 +3473,6 @@ static int mtk_open(struct net_device *dev) } mtk_gdm_config(eth, target_mac->id, gdm_config); } - /* Reset and enable PSE */ - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); - mtk_w32(eth, 0, MTK_RST_GL); napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); @@ -3974,6 +3984,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) else mtk_hw_reset(eth); + /* No MT7628/88 support yet */ + if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + 
mtk_mdio_config(eth); + if (mtk_is_netsys_v3_or_greater(eth)) { /* Set FE to PDMAv2 if necessary */ val = mtk_r32(eth, MTK_FE_GLO_MISC); @@ -4034,11 +4048,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); if (mtk_is_netsys_v3_or_greater(eth)) { - /* PSE should not drop port1, port8 and port9 packets */ - mtk_w32(eth, 0x00000302, PSE_DROP_CFG); + /* PSE dummy page mechanism */ + mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) | + PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ); + + /* PSE free buffer drop threshold */ + mtk_w32(eth, 0x00600009, PSE_IQ_REV(8)); + + /* PSE should not drop port8, port9 and port13 packets from + * WDMA Tx + */ + mtk_w32(eth, 0x00002300, PSE_DROP_CFG); + + /* PSE should drop packets to port8, port9 and port13 on WDMA Rx + * ring full + */ + mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0)); + mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1)); + mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2)); /* GDM and CDM Threshold */ - mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); + mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES); mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); /* Disable GDM1 RX CRC stripping */ @@ -4055,7 +4085,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) mtk_w32(eth, 0x00000300, PSE_DROP_CFG); /* PSE should drop packets to port 8/9 on WDMA Rx ring full */ - mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); + mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0)); /* PSE Free Queue Flow Control */ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); @@ -4718,7 +4748,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) } if (mtk_is_netsys_v3_or_greater(mac->hw) && - MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) && + MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) && id == MTK_GMAC1_ID) { mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 90a377ab4359..88ef2e9c50fc 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -151,7 +151,15 @@ #define PSE_FQFC_CFG1 0x100 #define PSE_FQFC_CFG2 0x104 #define PSE_DROP_CFG 0x108 -#define PSE_PPE0_DROP 0x110 +#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4)) + +/* PSE Last FreeQ Page Request Control */ +#define PSE_DUMY_REQ 0x10C +/* PSE_DUMY_REQ is not a typo but actually called like that also in + * MediaTek's datasheet + */ +#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x)) +#define DUMMY_PAGE_THR 0x1 /* PSE Input Queue Reservation Register*/ #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) @@ -1271,6 +1279,7 @@ struct mtk_eth { struct clk *clks[MTK_CLK_MAX]; struct mii_bus *mii_bus; + unsigned int mdc_divider; struct work_struct pending_work; unsigned long state; diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 76f202d7f055..b175119a6a7d 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) struct net_device *ndev = priv->ndev; unsigned int head = ring->head; unsigned int entry = ring->tail; + unsigned long flags; while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) { ret = mtk_star_tx_complete_one(priv); @@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) netif_wake_queue(ndev); if (napi_complete(napi)) { - spin_lock(&priv->lock); + spin_lock_irqsave(&priv->lock, 
flags); mtk_star_enable_dma_irq(priv, false, true); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); } return 0; @@ -1341,16 +1342,16 @@ push_new_skb: static int mtk_star_rx_poll(struct napi_struct *napi, int budget) { struct mtk_star_priv *priv; + unsigned long flags; int work_done = 0; priv = container_of(napi, struct mtk_star_priv, rx_napi); work_done = mtk_star_rx(priv, budget); - if (work_done < budget) { - napi_complete_done(napi, work_done); - spin_lock(&priv->lock); + if (work_done < budget && napi_complete_done(napi, work_done)) { + spin_lock_irqsave(&priv->lock, flags); mtk_star_enable_dma_irq(priv, true, false); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); } return work_done; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 532c7fa94d17..dbd9482359e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -176,6 +176,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) priv = ptpsq->txqsq.priv; + rtnl_lock(); mutex_lock(&priv->state_lock); chs = &priv->channels; netdev = priv->netdev; @@ -183,22 +184,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) carrier_ok = netif_carrier_ok(netdev); netif_carrier_off(netdev); - rtnl_lock(); mlx5e_deactivate_priv_channels(priv); - rtnl_unlock(); mlx5e_ptp_close(chs->ptp); err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp); - rtnl_lock(); mlx5e_activate_priv_channels(priv); - rtnl_unlock(); /* return carrier back if needed */ if (carrier_ok) netif_carrier_on(netdev); mutex_unlock(&priv->state_lock); + rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 5c762a71818d..7a18a469961d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, struct flow_match_enc_keyid enc_keyid; void *misc_c, *misc_v; - misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) return 0; @@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f); if (err) return err; + + /* We can't mix custom tunnel headers with symbolic ones and we + * don't have a symbolic field name for GBP, so we use custom + * tunnel headers in this case. We need hardware support to + * match on custom tunnel headers, but we already know it's + * supported because the previous call successfully checked for + * that. + */ + misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_5); + misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_5); + + /* Shift by 8 to account for the reserved bits in the vxlan + * header after the VNI. 
+ */ + MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1, + be32_to_cpu(enc_keyid.mask->keyid) << 8); + MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1, + be32_to_cpu(enc_keyid.key->keyid) << 8); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + + return 0; } /* match on VNI is required */ @@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, be32_to_cpu(enc_keyid.mask->keyid)); MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3506024c2453..9bd166f489e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4349,6 +4349,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n"); + features &= ~NETIF_F_HW_MACSEC; + if (netdev->features & NETIF_F_HW_MACSEC) + netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n"); + return features; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9ba99609999f..f1d908f61134 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr !list_is_first(&attr->list, &flow->attrs)) return 0; - if (flow_flag_test(flow, SLOW)) - return 0; - esw_attr = attr->esw_attr; if (!esw_attr->split_count || esw_attr->split_count == esw_attr->out_count - 1) @@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { /* external dest with encap is considered as internal by firmware */ if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK && - !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) + !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)) ext_dest = true; else int_dest = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index a6a8eea5980c..0e3a977d5332 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3533,7 +3533,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) int err; mutex_init(&esw->offloads.termtbl_mutex); - mlx5_rdma_enable_roce(esw->dev); + err = mlx5_rdma_enable_roce(esw->dev); + if (err) + goto err_roce; err = mlx5_esw_host_number_init(esw); if (err) @@ -3594,6 +3596,7 @@ err_vport_metadata: esw_offloads_metadata_uninit(esw); err_metadata: mlx5_rdma_disable_roce(esw->dev); +err_roce: mutex_destroy(&esw->offloads.termtbl_mutex); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c index eb3bd9c7f66e..ca9ecec358b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -637,10 +637,6 @@ struct mlx5_ttc_table 
*mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, bool use_l4_type; int err; - ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); - if (!ttc) - return ERR_PTR(-ENOMEM); - switch (params->ns_type) { case MLX5_FLOW_NAMESPACE_PORT_SEL: use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && @@ -654,7 +650,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, return ERR_PTR(-EINVAL); } + ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); + if (!ttc) + return ERR_PTR(-ENOMEM); + ns = mlx5_get_flow_namespace(dev, params->ns_type); + if (!ns) { + kvfree(ttc); + return ERR_PTR(-EOPNOTSUPP); + } + groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] : &inner_ttc_groups[TTC_GROUPS_DEFAULT]; @@ -710,10 +715,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, bool use_l4_type; int err; - ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); - if (!ttc) - return ERR_PTR(-ENOMEM); - switch (params->ns_type) { case MLX5_FLOW_NAMESPACE_PORT_SEL: use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && @@ -727,7 +728,16 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, return ERR_PTR(-EINVAL); } + ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); + if (!ttc) + return ERR_PTR(-ENOMEM); + ns = mlx5_get_flow_namespace(dev, params->ns_type); + if (!ns) { + kvfree(ttc); + return ERR_PTR(-EOPNOTSUPP); + } + groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] : &ttc_groups[TTC_GROUPS_DEFAULT]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index a42f6cd99b74..5c552b71e371 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid * static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) { + u8 mac[ETH_ALEN] = {}; union ib_gid gid; - u8 mac[ETH_ALEN]; mlx5_rdma_make_default_gid(dev, &gid); return mlx5_core_roce_gid_set(dev, 0, @@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) mlx5_nic_vport_disable_roce(dev); } -void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) +int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { int err; if (!MLX5_CAP_GEN(dev, roce)) - return; + return 0; err = mlx5_nic_vport_enable_roce(dev); if (err) { mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err); - return; + return err; } err = mlx5_rdma_add_roce_addr(dev); @@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) goto del_roce_addr; } - return; + return err; del_roce_addr: mlx5_rdma_del_roce_addr(dev); disable_roce: mlx5_nic_vport_disable_roce(dev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h index 750cff2a71a4..3d9e76c3d42f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h @@ -8,12 +8,12 @@ #ifdef CONFIG_MLX5_ESWITCH -void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); +int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ -static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {} +static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {} #endif /* CONFIG_MLX5_ESWITCH */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 464821dd492d..a2033837182e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp, .rif = rif, }; + if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif))) + return 0; + neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms); if (rms.err) goto err_arp; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 4ca7b99ef131..de6b1a340f55 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -154,14 +154,14 @@ struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev); void fbnic_devlink_register(struct fbnic_dev *fbd); void fbnic_devlink_unregister(struct fbnic_dev *fbd); -int fbnic_fw_enable_mbx(struct fbnic_dev *fbd); -void fbnic_fw_disable_mbx(struct fbnic_dev *fbd); +int fbnic_fw_request_mbx(struct fbnic_dev *fbd); +void fbnic_fw_free_mbx(struct fbnic_dev *fbd); void fbnic_hwmon_register(struct fbnic_dev *fbd); void fbnic_hwmon_unregister(struct fbnic_dev *fbd); -int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); -void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); +int fbnic_pcs_request_irq(struct fbnic_dev *fbd); +void fbnic_pcs_free_irq(struct fbnic_dev *fbd); void fbnic_napi_name_irqs(struct fbnic_dev *fbd); int fbnic_napi_request_irq(struct fbnic_dev *fbd, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 3b12a0ab5906..51bee8072420 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -796,8 +796,10 @@ enum { /* PUL User Registers */ #define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19) #define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */ +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18) #define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \ 0x3106e /* 0xc41b8 */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 88db3dacb940..3d9636a6c968 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx, { u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + /* Write the upper 32b and then the lower 32b. Doing this the + * FW can then read lower, upper, lower to verify that the state + * of the descriptor wasn't changed mid-transaction. + */ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc)); fw_wrfl(fbd); fw_wr32(fbd, desc_offset, lower_32_bits(desc)); } +static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx, u32 desc) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + + /* For initialization we write the lower 32b of the descriptor first. + * This way we can set the state to mark it invalid before we clear the + * upper 32b. 
+ */ + fw_wr32(fbd, desc_offset, desc); + fw_wrfl(fbd); + fw_wr32(fbd, desc_offset + 1, 0); +} + static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) { u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); @@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) return desc; } -static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { int desc_idx; + /* Disable DMA transactions from the device, + * and flush any transactions triggered during cleaning + */ + switch (mbx_idx) { + case FBNIC_IPC_MBX_RX_IDX: + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH); + break; + case FBNIC_IPC_MBX_TX_IDX: + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH); + break; + } + + wrfl(fbd); + /* Initialize first descriptor to all 0s. Doing this gives us a * solid stop for the firmware to hit when it is done looping * through the ring. */ - __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0); - - fw_wrfl(fbd); + __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0); /* We then fill the rest of the ring starting at the end and moving * back toward descriptor 0 with skip descriptors that have no * length nor address, and tell the firmware that they can skip * them and just move past them to the one we initialized to 0. */ - for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) { - __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx, - FBNIC_IPC_MBX_DESC_FW_CMPL | - FBNIC_IPC_MBX_DESC_HOST_CMPL); - fw_wrfl(fbd); - } + for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) + __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx, + FBNIC_IPC_MBX_DESC_FW_CMPL | + FBNIC_IPC_MBX_DESC_HOST_CMPL); } void fbnic_mbx_init(struct fbnic_dev *fbd) @@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd) wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) - fbnic_mbx_init_desc_ring(fbd, i); + fbnic_mbx_reset_desc_ring(fbd, i); } static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, @@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { int i; - fbnic_mbx_init_desc_ring(fbd, mbx_idx); + fbnic_mbx_reset_desc_ring(fbd, mbx_idx); for (i = FBNIC_IPC_MBX_DESC_LEN; i--;) fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i); @@ -322,67 +352,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type) return err; } -/** - * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message - * @fbd: FBNIC device structure - * - * Return: NULL on failure to allocate, error pointer on error, or pointer - * to new TLV test message. - * - * Sends a single TLV header indicating the host wants the firmware to - * confirm the capabilities and version. - **/ -static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd) -{ - int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); - - /* Return 0 if we are not calling this on ASIC */ - return (err == -EOPNOTSUPP) ? 
0 : err; -} - -static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; - /* This is a one time init, so just exit if it is completed */ - if (mbx->ready) - return; - mbx->ready = true; switch (mbx_idx) { case FBNIC_IPC_MBX_RX_IDX: + /* Enable DMA writes from the device */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); + /* Make sure we have a page for the FW to write to */ fbnic_mbx_alloc_rx_msgs(fbd); break; case FBNIC_IPC_MBX_TX_IDX: - /* Force version to 1 if we successfully requested an update - * from the firmware. This should be overwritten once we get - * the actual version from the firmware in the capabilities - * request message. - */ - if (!fbnic_fw_xmit_cap_msg(fbd) && - !fbd->fw_cap.running.mgmt.version) - fbd->fw_cap.running.mgmt.version = 1; + /* Enable DMA reads from the device */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); break; } } -static void fbnic_mbx_postinit(struct fbnic_dev *fbd) +static bool fbnic_mbx_event(struct fbnic_dev *fbd) { - int i; - - /* We only need to do this on the first interrupt following init. + /* We only need to do this on the first interrupt following reset. * this primes the mailbox so that we will have cleared all the * skip descriptors. */ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY))) - return; + return false; wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); - for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) - fbnic_mbx_postinit_desc_ring(fbd, i); + return true; } /** @@ -859,7 +863,7 @@ next_page: void fbnic_mbx_poll(struct fbnic_dev *fbd) { - fbnic_mbx_postinit(fbd); + fbnic_mbx_event(fbd); fbnic_mbx_process_tx_msgs(fbd); fbnic_mbx_process_rx_msgs(fbd); @@ -867,60 +871,97 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd) int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd) { - struct fbnic_fw_mbx *tx_mbx; - int attempts = 50; + unsigned long timeout = jiffies + 10 * HZ + 1; + int err, i; - /* Immediate fail if BAR4 isn't there */ - if (!fbnic_fw_present(fbd)) - return -ENODEV; + do { + if (!time_is_after_jiffies(timeout)) + return -ETIMEDOUT; - tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - while (!tx_mbx->ready && --attempts) { /* Force the firmware to trigger an interrupt response to * avoid the mailbox getting stuck closed if the interrupt * is reset. */ - fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); + fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); - msleep(200); + /* Immediate fail if BAR4 went away */ + if (!fbnic_fw_present(fbd)) + return -ENODEV; - fbnic_mbx_poll(fbd); - } + msleep(20); + } while (!fbnic_mbx_event(fbd)); + + /* FW has shown signs of life. Enable DMA and start Tx/Rx */ + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_init_desc_ring(fbd, i); + + /* Request an update from the firmware. This should overwrite + * mgmt.version once we get the actual version from the firmware + * in the capabilities request message. 
+ */ + err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); + if (err) + goto clean_mbx; + + /* Use "1" to indicate we entered the state waiting for a response */ + fbd->fw_cap.running.mgmt.version = 1; + + return 0; +clean_mbx: + /* Cleanup Rx buffers and disable mailbox */ + fbnic_mbx_clean(fbd); + return err; +} + +static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data) +{ + cmpl_data->result = -EPIPE; + complete(&cmpl_data->done); +} - return attempts ? 0 : -ETIMEDOUT; +static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd) +{ + if (fbd->cmpl_data) { + __fbnic_fw_evict_cmpl(fbd->cmpl_data); + fbd->cmpl_data = NULL; + } } void fbnic_mbx_flush_tx(struct fbnic_dev *fbd) { + unsigned long timeout = jiffies + 10 * HZ + 1; struct fbnic_fw_mbx *tx_mbx; - int attempts = 50; - u8 count = 0; - - /* Nothing to do if there is no mailbox */ - if (!fbnic_fw_present(fbd)) - return; + u8 tail; /* Record current Rx stats */ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - /* Nothing to do if mailbox never got to ready */ - if (!tx_mbx->ready) - return; + spin_lock_irq(&fbd->fw_tx_lock); + + /* Clear ready to prevent any further attempts to transmit */ + tx_mbx->ready = false; + + /* Read tail to determine the last tail state for the ring */ + tail = tx_mbx->tail; + + /* Flush any completions as we are no longer processing Rx */ + fbnic_mbx_evict_all_cmpl(fbd); + + spin_unlock_irq(&fbd->fw_tx_lock); /* Give firmware time to process packet, - * we will wait up to 10 seconds which is 50 waits of 200ms. + * we will wait up to 10 seconds which is 500 waits of 20ms. */ do { u8 head = tx_mbx->head; - if (head == tx_mbx->tail) + /* Tx ring is empty once head == tail */ + if (head == tail) break; - msleep(200); + msleep(20); fbnic_mbx_process_tx_msgs(fbd); - - count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN; - } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts); + } while (time_is_after_jiffies(timeout)); } void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c index 1bbc0e56f3a0..1c88a2bf3a7a 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -19,69 +19,105 @@ static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data) return IRQ_HANDLED; } +static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector) +{ + int err; + + /* Initialize mailbox and attempt to poll it into ready state */ + fbnic_mbx_init(fbd); + err = fbnic_mbx_poll_tx_ready(fbd); + if (err) { + dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + return err; + } + + /* Enable interrupt and unmask the vector */ + enable_irq(vector); + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return 0; +} + /** - * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox + * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox * @fbd: Pointer to device to initialize * - * This function will initialize the firmware mailbox rings, enable the IRQ - * and initialize the communication between the Firmware and the host. The - * firmware is expected to respond to the initialization by sending an - * interrupt essentially notifying the host that it has seen the - * initialization and is now synced up. + * This function will allocate the IRQ and then reinitialize the mailbox + * starting communication between the host and firmware. * * Return: non-zero on failure. 
**/ -int fbnic_fw_enable_mbx(struct fbnic_dev *fbd) +int fbnic_fw_request_mbx(struct fbnic_dev *fbd) { - u32 vector = fbd->fw_msix_vector; - int err; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int vector, err; + + WARN_ON(fbd->fw_msix_vector); + + vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); + if (vector < 0) + return vector; /* Request the IRQ for FW Mailbox vector. */ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr, - IRQF_ONESHOT, dev_name(fbd->dev), fbd); + IRQF_ONESHOT | IRQF_NO_AUTOEN, + dev_name(fbd->dev), fbd); if (err) return err; /* Initialize mailbox and attempt to poll it into ready state */ - fbnic_mbx_init(fbd); - err = fbnic_mbx_poll_tx_ready(fbd); - if (err) { - dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + err = __fbnic_fw_enable_mbx(fbd, vector); + if (err) free_irq(vector, fbd); - return err; - } - /* Enable interrupts */ - fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + fbd->fw_msix_vector = vector; - return 0; + return err; } /** - * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state - * @fbd: Pointer to device to disable + * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state + * @fbd: Pointer to device * - * This function will disable the mailbox interrupt, free any messages still - * in the mailbox and place it into a standby state. The firmware is - * expected to see the update and assume that the host is in the reset state. + * Shutdown the mailbox by notifying the firmware to stop sending us logs, mask + * and synchronize the IRQ, and then clean up the rings. **/ -void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) +static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) { - /* Disable interrupt and free vector */ - fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); + /* Disable interrupt and synchronize the IRQ */ + disable_irq(fbd->fw_msix_vector); - /* Free the vector */ - free_irq(fbd->fw_msix_vector, fbd); + /* Mask the vector */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); /* Make sure disabling logs message is sent, must be done here to * avoid risk of completing without a running interrupt. */ fbnic_mbx_flush_tx(fbd); - - /* Reset the mailboxes to the initialized state */ fbnic_mbx_clean(fbd); } +/** + * fbnic_fw_free_mbx - Disable mailbox and place it in standby state + * @fbd: Pointer to device to disable + * + * This function will disable the mailbox interrupt, free any messages still + * in the mailbox and place it into a disabled state. The firmware is + * expected to see the update and assume that the host is in the reset state. + **/ +void fbnic_fw_free_mbx(struct fbnic_dev *fbd) +{ + /* Vector has already been freed */ + if (!fbd->fw_msix_vector) + return; + + fbnic_fw_disable_mbx(fbd); + + /* Free the vector */ + free_irq(fbd->fw_msix_vector, fbd); + fbd->fw_msix_vector = 0; +} + static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) { struct fbnic_dev *fbd = data; @@ -101,7 +137,7 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) } /** - * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link + * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link * @fbd: Pointer to device to initialize * * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ @@ -109,41 +145,61 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) * * Return: non-zero on failure. 
**/ -int fbnic_pcs_irq_enable(struct fbnic_dev *fbd) +int fbnic_pcs_request_irq(struct fbnic_dev *fbd) { - u32 vector = fbd->pcs_msix_vector; - int err; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int vector, err; - /* Request the IRQ for MAC link vector. - * Map MAC cause to it, and unmask it + WARN_ON(fbd->pcs_msix_vector); + + vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); + if (vector < 0) + return vector; + + /* Request the IRQ for PCS link vector. + * Map PCS cause to it, and unmask it */ err = request_irq(vector, &fbnic_pcs_msix_intr, 0, fbd->netdev->name, fbd); if (err) return err; + /* Map and enable interrupt, unmask vector after link is configured */ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE); + fbd->pcs_msix_vector = vector; + return 0; } /** - * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping + * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping * @fbd: Pointer to device that is stopping * - * This function undoes the work done in fbnic_pcs_irq_enable and prepares + * This function undoes the work done in fbnic_pcs_request_irq and prepares * the device to no longer receive traffic on the host interface. **/ -void fbnic_pcs_irq_disable(struct fbnic_dev *fbd) +void fbnic_pcs_free_irq(struct fbnic_dev *fbd) { + /* Vector has already been freed */ + if (!fbd->pcs_msix_vector) + return; + /* Disable interrupt */ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY); + fbnic_wrfl(fbd); + + /* Synchronize IRQ to prevent race that would unmask vector */ + synchronize_irq(fbd->pcs_msix_vector); + + /* Mask the vector */ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY); /* Free the vector */ free_irq(fbd->pcs_msix_vector, fbd); + fbd->pcs_msix_vector = 0; } void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr) @@ -226,9 +282,6 @@ void fbnic_free_irqs(struct fbnic_dev *fbd) { struct pci_dev *pdev = to_pci_dev(fbd->dev); - fbd->pcs_msix_vector = 0; - fbd->fw_msix_vector = 0; - fbd->num_irqs = 0; pci_free_irq_vectors(pdev); @@ -254,8 +307,5 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd) fbd->num_irqs = num_irqs; - fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); - fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); - return 0; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 14291401f463..dde4a37116e2 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd) fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq); fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps); fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps); - - /* Enable XALI AR/AW outbound */ - wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, - FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); - wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, - FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); } static void fbnic_mac_init_qm(struct fbnic_dev *fbd) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 79a01fdd1dd1..2524d9b88d59 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -44,9 +44,10 @@ int __fbnic_open(struct fbnic_net *fbn) if (err) goto time_stop; - err = fbnic_pcs_irq_enable(fbd); + err = fbnic_pcs_request_irq(fbd); if (err) goto time_stop; + /* Pull 
the BMC config and initialize the RPC */ fbnic_bmc_rpc_init(fbd); fbnic_rss_reinit(fbd, fbn); @@ -82,7 +83,7 @@ static int fbnic_stop(struct net_device *netdev) struct fbnic_net *fbn = netdev_priv(netdev); fbnic_down(fbn); - fbnic_pcs_irq_disable(fbn->fbd); + fbnic_pcs_free_irq(fbn->fbd); fbnic_time_stop(fbn); fbnic_fw_xmit_ownership_msg(fbn->fbd, false); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index 6cbbc2ee3e1f..4e8595239c0f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -283,7 +283,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto free_irqs; } - err = fbnic_fw_enable_mbx(fbd); + err = fbnic_fw_request_mbx(fbd); if (err) { dev_err(&pdev->dev, "Firmware mailbox initialization failure\n"); @@ -363,7 +363,7 @@ static void fbnic_remove(struct pci_dev *pdev) fbnic_hwmon_unregister(fbd); fbnic_dbg_fbd_exit(fbd); fbnic_devlink_unregister(fbd); - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); fbnic_free_irqs(fbd); fbnic_devlink_free(fbd); @@ -387,7 +387,7 @@ static int fbnic_pm_suspend(struct device *dev) rtnl_unlock(); null_uc_addr: - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); /* Free the IRQs so they aren't trying to occupy sleeping CPUs */ fbnic_free_irqs(fbd); @@ -420,7 +420,7 @@ static int __fbnic_pm_resume(struct device *dev) fbd->mac->init_regs(fbd); /* Re-enable mailbox */ - err = fbnic_fw_enable_mbx(fbd); + err = fbnic_fw_request_mbx(fbd); if (err) goto err_free_irqs; @@ -438,15 +438,15 @@ static int __fbnic_pm_resume(struct device *dev) if (netif_running(netdev)) { err = __fbnic_open(fbn); if (err) - goto err_disable_mbx; + goto err_free_mbx; } rtnl_unlock(); return 0; -err_disable_mbx: +err_free_mbx: rtnl_unlock(); - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); err_free_irqs: fbnic_free_irqs(fbd); err_invalidate_uc_addr: diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 23760b613d3e..a70b88037a20 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, if (nr_frags <= 0) { tx->frame_data0 |= TX_DESC_DATA0_LS_; tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx->frame_last = tx->frame_first; } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); @@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, tx->frame_first = 0; tx->frame_data0 = 0; tx->frame_tail = 0; + tx->frame_last = 0; return -ENOMEM; } @@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, TX_DESC_DATA0_DTYPE_DATA_) { tx->frame_data0 |= TX_DESC_DATA0_LS_; tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx->frame_last = tx->frame_tail; } - tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; - buffer_info = &tx->buffer_info[tx->frame_tail]; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last]; + buffer_info = &tx->buffer_info[tx->frame_last]; buffer_info->skb = skb; if (time_stamp) buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; if (ignore_sync) buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); tx->last_tail = tx->frame_tail; @@ -3491,6 +3495,7 @@ static int 
lan743x_hardware_init(struct lan743x_adapter *adapter, struct pci_dev *pdev) { struct lan743x_tx *tx; + u32 sgmii_ctl; int index; int ret; @@ -3503,6 +3508,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, spin_lock_init(&adapter->eth_syslock_spinlock); mutex_init(&adapter->sgmii_rw_lock); pci11x1x_set_rfe_rd_fifo_threshold(adapter); + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + if (adapter->is_sgmii_en) { + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + } else { + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + } + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); } else { adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; @@ -3554,7 +3568,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) { - u32 sgmii_ctl; int ret; adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); @@ -3566,10 +3579,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) adapter->mdiobus->priv = (void *)adapter; if (adapter->is_pci11x1x) { if (adapter->is_sgmii_en) { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "SGMII operation\n"); adapter->mdiobus->read = lan743x_mdiobus_read_c22; @@ -3580,10 +3589,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); } else { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "RGMII operation\n"); // Only C22 support when RGMII I/F diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 7f73d66854be..db5fc73e41cc 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -980,6 +980,7 @@ struct lan743x_tx { u32 frame_first; u32 frame_data0; u32 frame_tail; + u32 frame_last; struct lan743x_tx_buffer_info *buffer_info; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index ef93df520887..08bee56aea35 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; int err; /* Ignore VID 0 added to our RX filter by the 8021q module, since @@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, ocelot_bridge_vlan_find(ocelot, vid)); if (err) return err; + } else if (ocelot_port->pvid_vlan && + ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) { + err = ocelot_port_set_pvid(ocelot, port, NULL); + if (err) + return err; } /* Untagged egress vlan clasification */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 99df00c30b8c..b5d744d2586f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = { }; static 
struct qed_eth_cb_ops qede_ll_ops = { - { + .common = { #ifdef CONFIG_RFS_ACCEL .arfs_filter_op = qede_arfs_filter_op, #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 28d24d59efb8..d57b976b9040 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o } cmd_op = (cmd.rsp.arg[0] & 0xff); - if (cmd.rsp.arg[0] >> 25 == 2) - return 2; + if (cmd.rsp.arg[0] >> 25 == 2) { + ret = 2; + goto out; + } + if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) set_bit(QLC_BC_VF_STATE, &vf->state); else diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 2aacc1996796..55b8d3666153 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1925,8 +1925,8 @@ static u16 rtase_calc_time_mitigation(u32 time_us) time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME); - msb = fls(time_us); - if (msb >= RTASE_MITI_COUNT_BIT_NUM) { + if (time_us > RTASE_MITI_TIME_COUNT_MASK) { + msb = fls(time_us); time_unit = msb - RTASE_MITI_COUNT_BIT_NUM; time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM); } else { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 85723a78793a..6c7e8655a7eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY * address. No need to mask it again. */ - reg |= 1 << H3_EPHY_ADDR_SHIFT; + reg |= ret << H3_EPHY_ADDR_SHIFT; } else { /* For SoCs without internal PHY the PHY selection bit should be * set to 0 (external PHY). 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 967a16212faf..0c011a47d5a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -320,8 +320,8 @@ enum rtc_control { /* PTP and timestamping registers */ -#define GMAC3_X_ATSNS GENMASK(19, 16) -#define GMAC3_X_ATSNS_SHIFT 16 +#define GMAC3_X_ATSNS GENMASK(29, 25) +#define GMAC3_X_ATSNS_SHIFT 25 #define GMAC_PTP_TCR_ATSFC BIT(24) #define GMAC_PTP_TCR_ATSEN0 BIT(25) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index a8b901cdf5cb..56b76aaa58f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -553,7 +553,7 @@ void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) u64 ns; ns = readl(ptpaddr + GMAC_PTP_ATNR); - ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC; + ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC; *ptp_time = ns; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 0f59aa982604..e2840fa241f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -222,7 +222,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) u64 ns; ns = readl(ptpaddr + PTP_ATNR); - ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; + ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; *ptp_time = ns; } diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 73c07f10f053..379b6e90121d 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9064,6 +9064,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map) msi_vec[i].entry = i; } + pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST; + num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs); if (num_irqs < 0) { np->flags &= ~NIU_FLAGS_MSIX; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index c9fd34787c99..30665ffe78cf 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2666,7 +2666,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) of_property_read_bool(port_np, "ti,mac-only"); /* get phy/link info */ - port->slave.port_np = port_np; + port->slave.port_np = of_node_get(port_np); ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { dev_err(dev, "%pOF read phy-mode err %d\n", @@ -2685,7 +2685,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { eth_random_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + dev_info(dev, "Use random MAC address\n"); } } @@ -2720,6 +2720,17 @@ static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) } } +static void am65_cpsw_remove_dt(struct am65_cpsw_common *common) +{ + struct am65_cpsw_port *port; + int i; + + for (i = 0; i < common->port_num; i++) { + port = &common->ports[i]; + of_node_put(port->slave.port_np); + } +} + static int am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) { @@ -3622,6 +3633,7 @@ err_ndevs_clear: am65_cpsw_nuss_cleanup_ndev(common); am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); + 
am65_cpsw_remove_dt(common); err_of_clear: if (common->mdio_dev) of_platform_device_destroy(common->mdio_dev, NULL); @@ -3661,6 +3673,7 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev) am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); am65_cpsw_disable_serdes_phy(common); + am65_cpsw_remove_dt(common); if (common->mdio_dev) of_platform_device_destroy(common->mdio_dev, NULL); diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index b4a34c57b7b4..2a1c43316f46 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -412,6 +412,22 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep, int ret; u64 cmp; + if (!on) { + /* Disable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), 0); + + /* clear CMP regs */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); + + /* Disable sync */ + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); + + return 0; + } + /* Calculate width of the signal for PPS/PEROUT handling */ ts.tv_sec = req->on.sec; ts.tv_nsec = req->on.nsec; @@ -430,64 +446,39 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep, if (ret) return ret; - if (on) { - /* Configure CMP */ - regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp)); - if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) - regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp)); - /* Configure SYNC, based on req on width */ - regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, - div_u64(ns_width, iep->def_inc)); - regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); - regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, - div_u64(ns_start, iep->def_inc)); - regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */ - /* Enable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); - } else { - /* Disable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), 0); - - /* clear regs */ - regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); - if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) - regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); - } + /* Configure CMP */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp)); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp)); + /* Configure SYNC, based on req on width */ + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, + div_u64(ns_width, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, + div_u64(ns_start, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */ + /* Enable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); } else { - if (on) { - u64 start_ns; - - iep->period = ((u64)req->period.sec * NSEC_PER_SEC) + - req->period.nsec; - start_ns = ((u64)req->period.sec * NSEC_PER_SEC) - + req->period.nsec; - icss_iep_update_to_next_boundary(iep, start_ns); - - regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, - div_u64(ns_width, iep->def_inc)); - regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, - div_u64(ns_start, iep->def_inc)); - /* Enable Sync in single shot mode */ - regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, - 
IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN); - /* Enable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); - } else { - /* Disable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), 0); - - /* clear CMP regs */ - regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); - if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) - regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); - - /* Disable sync */ - regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); - } + u64 start_ns; + + iep->period = ((u64)req->period.sec * NSEC_PER_SEC) + + req->period.nsec; + start_ns = ((u64)req->period.sec * NSEC_PER_SEC) + + req->period.nsec; + icss_iep_update_to_next_boundary(iep, start_ns); + + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, + div_u64(ns_width, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, + div_u64(ns_start, iep->def_inc)); + /* Enable Sync in single shot mode */ + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, + IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN); + /* Enable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); } return 0; @@ -498,11 +489,21 @@ static int icss_iep_perout_enable(struct icss_iep *iep, { int ret = 0; + if (!on) + goto disable; + /* Reject requests with unsupported flags */ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE)) return -EOPNOTSUPP; + /* Set default "on" time (1ms) for the signal if not passed by the app */ + if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) { + req->on.sec = 0; + req->on.nsec = NSEC_PER_MSEC; + } + +disable: mutex_lock(&iep->ptp_clk_mutex); if (iep->pps_enabled) { @@ -513,12 +514,6 @@ static int icss_iep_perout_enable(struct icss_iep *iep, if (iep->perout_enabled == !!on) goto exit; - /* Set default "on" time (1ms) for the signal if not passed by the app */ - if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) { - req->on.sec = 0; - req->on.nsec = NSEC_PER_MSEC; - } - ret = icss_iep_perout_enable_hw(iep, req, on); if (!ret) iep->perout_enabled = !!on; diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 14002b026452..d88a0180294e 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -187,7 +187,6 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, xdp_return_frame(xdpf); break; default: - netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type); prueth_xmit_free(tx_chn, desc_tx); ndev->stats.tx_dropped++; continue; @@ -567,6 +566,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, { struct cppi5_host_desc_t *first_desc; struct net_device *ndev = emac->ndev; + struct netdev_queue *netif_txq; struct prueth_tx_chn *tx_chn; dma_addr_t desc_dma, buf_dma; struct prueth_swdata *swdata; @@ -583,7 +583,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); if (!first_desc) { netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n"); - goto drop_free_descs; /* drop */ + return ICSSG_XDP_CONSUMED; /* drop */ } if (page) { /* already DMA mapped by page_pool */ @@ -620,12 +620,17 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, swdata->data.xdpf = xdpf; } + /* Report BQL before sending the packet */ + netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); + netdev_tx_sent_queue(netif_txq, xdpf->len); + cppi5_hdesc_set_pktlen(first_desc, xdpf->len); desc_dma = 
k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); if (ret) { netdev_err(ndev, "xdp tx: push failed: %d\n", ret); + netdev_tx_completed_queue(netif_txq, 1, xdpf->len); goto drop_free_descs; } @@ -650,6 +655,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, struct page *page, u32 *len) { struct net_device *ndev = emac->ndev; + struct netdev_queue *netif_txq; + int cpu = smp_processor_id(); struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; u32 pkt_len = *len; @@ -669,10 +676,15 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, goto drop; } - q_idx = smp_processor_id() % emac->tx_ch_num; + q_idx = cpu % emac->tx_ch_num; + netif_txq = netdev_get_tx_queue(ndev, q_idx); + __netif_tx_lock(netif_txq, cpu); result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx); - if (result == ICSSG_XDP_CONSUMED) + __netif_tx_unlock(netif_txq); + if (result == ICSSG_XDP_CONSUMED) { + ndev->stats.tx_dropped++; goto drop; + } dev_sw_netstats_rx_add(ndev, xdpf->len); return result; @@ -977,6 +989,7 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); if (ret) { netdev_err(ndev, "tx: push failed: %d\n", ret); + netdev_tx_completed_queue(netif_txq, 1, pkt_len); goto drop_free_descs; } @@ -1215,9 +1228,6 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn, prueth_rx_cleanup); if (disable) k3_udma_glue_disable_rx_chn(chn->rx_chn); - - page_pool_destroy(chn->pg_pool); - chn->pg_pool = NULL; } EXPORT_SYMBOL_GPL(prueth_reset_rx_chan); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 443f90fa6557..86fc1278127c 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1075,17 +1075,21 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame { struct prueth_emac *emac = netdev_priv(dev); struct net_device *ndev = emac->ndev; + struct netdev_queue *netif_txq; + int cpu = smp_processor_id(); struct xdp_frame *xdpf; unsigned int q_idx; int nxmit = 0; u32 err; int i; - q_idx = smp_processor_id() % emac->tx_ch_num; + q_idx = cpu % emac->tx_ch_num; + netif_txq = netdev_get_tx_queue(ndev, q_idx); if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; + __netif_tx_lock(netif_txq, cpu); for (i = 0; i < n; i++) { xdpf = frames[i]; err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx); @@ -1095,6 +1099,7 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame } nxmit++; } + __netif_tx_unlock(netif_txq); return nxmit; } @@ -1109,11 +1114,6 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf) { struct bpf_prog *prog = bpf->prog; - xdp_features_t val; - - val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT; - xdp_set_features_flag(emac->ndev, val); if (!emac->xdpi.prog && !prog) return 0; @@ -1291,6 +1291,10 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES; + xdp_set_features_flag(ndev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT); netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); 
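
The icssg changes above take the per-queue transmit lock around emac_xmit_xdp_frame() so XDP transmits cannot interleave with the normal xmit path on the same ring, and they pair netdev_tx_sent_queue() with netdev_tx_completed_queue() for BQL accounting. A hedged sketch of the locking part only; example_xmit_one() is a stand-in for the driver's real descriptor-push helper, not an actual icssg function:

	#include <linux/netdevice.h>
	#include <linux/smp.h>
	#include <net/xdp.h>

	static int example_xmit_one(struct net_device *ndev, struct xdp_frame *xdpf,
				    unsigned int q_idx)
	{
		/* stand-in for the driver's push path, e.g. emac_xmit_xdp_frame() */
		return 0;
	}

	static int example_xdp_xmit(struct net_device *ndev, int n,
				    struct xdp_frame **frames)
	{
		int cpu = smp_processor_id();
		unsigned int q_idx = cpu % ndev->real_num_tx_queues;
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
		int i, nxmit = 0;

		/* hold the queue lock so ndo_start_xmit() cannot race on this ring */
		__netif_tx_lock(txq, cpu);
		for (i = 0; i < n; i++) {
			if (example_xmit_one(ndev, frames[i], q_idx))
				break;		/* stop on the first failed frame */
			nxmit++;
		}
		__netif_tx_unlock(txq);

		return nxmit;
	}
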
hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC, diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index 89dc4c401a8d..e4d993f31374 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> @@ -33,7 +34,7 @@ #define CMD_CTR (0x2 << CMD_SHIFT) #define CMD_MASK GENMASK(15, CMD_SHIFT) -#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) +#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0) #define DET_CMD_LEN 4 #define DET_SOF_LEN 2 @@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, } static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, - unsigned int frame_len) + unsigned int frame_len, bool drop) { struct mse102x_net_spi *mses = to_mse102x_spi(mse); struct spi_transfer *xfer = &mses->spi_xfer; @@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", __func__, ret); mse->stats.xfer_err++; + } else if (drop) { + netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__); + ret = -EINVAL; } else if (*sof != cpu_to_be16(DET_SOF)) { netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", __func__, *sof); @@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) struct sk_buff *skb; unsigned int rxalign; unsigned int rxlen; + bool drop = false; __be16 rx = 0; u16 cmd_resp; u8 *rxpkt; @@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", __func__, cmd_resp); mse->stats.invalid_rts++; - return; + drop = true; + goto drop; } net_dbg_ratelimited("%s: Unexpected response to first CMD\n", @@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) } rxlen = cmd_resp & LEN_MASK; - if (!rxlen) { - net_dbg_ratelimited("%s: No frame length defined\n", __func__); + if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) { + net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__, + rxlen); mse->stats.invalid_len++; - return; + drop = true; } + /* In case of a invalid CMD_RTS, the frame must be consumed anyway. + * So assume the maximum possible frame length. + */ +drop: + if (drop) + rxlen = VLAN_ETH_FRAME_LEN; + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); if (!skb) @@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) * They are copied, but ignored. */ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; - if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { mse->ndev->stats.rx_errors++; dev_kfree_skb(skb); return; @@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse) static int mse102x_net_open(struct net_device *ndev) { struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); int ret; ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, @@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev) netif_carrier_on(ndev); + /* The SPI interrupt can stuck in case of pending packet(s). + * So poll for possible packet(s) to re-arm the interrupt. 
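
The mse102x receive path above now trusts a length taken from CMD_RTS only if it describes a plausible Ethernet frame; anything else is still consumed from the SPI bus (at the maximum frame size) but dropped. The bounds themselves, in a small standalone sketch whose constants mirror the if_ether.h / if_vlan.h values:

	#include <stdbool.h>
	#include <stdio.h>

	#define ETH_ZLEN		60	/* minimum frame length, no FCS       */
	#define VLAN_ETH_FRAME_LEN	1518	/* max frame with one VLAN tag, no FCS */

	static bool rx_len_is_valid(unsigned int rxlen)
	{
		return rxlen >= ETH_ZLEN && rxlen <= VLAN_ETH_FRAME_LEN;
	}

	int main(void)
	{
		unsigned int samples[] = { 0, 59, 60, 1518, 1519, 4000 };

		for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("len %-4u -> %s\n", samples[i],
			       rx_len_is_valid(samples[i]) ? "accept" : "drop");
		return 0;
	}
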
+ */ + mutex_lock(&mses->lock); + mse102x_rx_pkt_spi(mse); + mutex_unlock(&mses->lock); + netif_dbg(mse, ifup, ndev, "network device up\n"); return 0; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index aed45abafb1b..490d34233d38 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -434,14 +434,20 @@ static int wx_host_interface_command_r(struct wx *wx, u32 *buffer, wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD); /* polling reply from FW */ - err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000, - true, wx, buffer, send_cmd); + err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000, + timeout * 1000, true, wx, buffer, send_cmd); if (err) { wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n", send_cmd, wx->swfw_index); goto rel_out; } + if (hdr->cmd_or_resp.ret_status == 0x80) { + wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd); + err = -EINVAL; + goto rel_out; + } + /* expect no reply from FW then return */ if (!return_data) goto rel_out; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 00b0b318df27..e69eaa65e0de 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -310,7 +310,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, return true; page = page_pool_dev_alloc_pages(rx_ring->page_pool); - WARN_ON(!page); + if (unlikely(!page)) + return false; dma = page_pool_get_dma_addr(page); bi->page_dma = dma; @@ -546,7 +547,8 @@ static void wx_rx_checksum(struct wx_ring *ring, return; /* Hardware can't guarantee csum if IPv6 Dest Header found */ - if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc)) + if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && + wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX)) return; /* if L4 checksum error */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 5b230ecbbabb..4c545b2aa997 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -513,6 +513,7 @@ enum WX_MSCA_CMD_value { #define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ #define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ #define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ +#define WX_RXD_STAT_IPV6EX BIT(12) /* IPv6 Dest Header */ #define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */ #define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */ @@ -589,8 +590,6 @@ enum wx_l2_ptypes { #define WX_RXD_PKTTYPE(_rxd) \ ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) -#define WX_RXD_IPV6EX(_rxd) \ - ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) /*********************** Transmit Descriptor Config Masks ****************/ #define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ #define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a6159214ec0a..91b3055a5a9f 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -625,7 +625,7 @@ static int ngbe_probe(struct pci_dev *pdev, /* setup the private structure */ err = ngbe_sw_init(wx); if (err) - goto err_free_mac_table; + goto err_pci_release_regions; /* check if flash load is done after hw power up */ err = wx_check_flash_load(wx, 
NGBE_SPI_ILDR_STATUS_PERST); @@ -719,6 +719,7 @@ err_register: err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); err_free_mac_table: + kfree(wx->rss_key); kfree(wx->mac_table); err_pci_release_regions: pci_release_selected_regions(pdev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 4b9921b7bb11..a054b259d435 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -99,9 +99,15 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) } local_buffer = eeprom_ptrs; - for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) { + if (wx->mac.type == wx_mac_aml) { + if (i >= TXGBE_EEPROM_I2C_SRART_PTR && + i < TXGBE_EEPROM_I2C_END_PTR) + local_buffer[i] = 0xffff; + } if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) *checksum += local_buffer[i]; + } kvfree(eeprom_ptrs); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index a2e245e3b016..38206a46693b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -611,7 +611,7 @@ static int txgbe_probe(struct pci_dev *pdev, /* setup the private structure */ err = txgbe_sw_init(wx); if (err) - goto err_free_mac_table; + goto err_pci_release_regions; /* check if flash load is done after hw power up */ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); @@ -769,6 +769,7 @@ err_release_hw: wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); err_free_mac_table: + kfree(wx->rss_key); kfree(wx->mac_table); err_pci_release_regions: pci_release_selected_regions(pdev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 9c1c26234cad..f423012dec22 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -158,6 +158,8 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 +#define TXGBE_EEPROM_I2C_SRART_PTR 0x580 +#define TXGBE_EEPROM_I2C_END_PTR 0x800 #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 70f7cb383228..cb6f5482d203 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -158,7 +158,6 @@ struct hv_netvsc_packet { u8 cp_partial; /* partial copy into send buffer */ u8 rmsg_size; /* RNDIS header and PPI size */ - u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */ u8 page_buf_cnt; u16 q_idx; @@ -893,6 +892,18 @@ struct nvsp_message { sizeof(struct nvsp_message)) #define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor) +/* Maximum # of contiguous data ranges that can make up a trasmitted packet. + * Typically it's the max SKB fragments plus 2 for the rndis packet and the + * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may + * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries + * in a GPA direct packet sent to netvsp over VMBus. 
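
The ngbe/txgbe probe fixes above follow the usual goto-unwind discipline: a step that fails jumps to the label that frees only what already exists, and every allocation (here the newly covered rss_key) needs a matching kfree() somewhere in that chain. An illustrative sketch with invented names, not the wangxun code itself:

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct example_priv {
		void *mac_table;
		void *rss_key;
	};

	static int example_hw_init(struct example_priv *priv)
	{
		return 0;	/* stand-in for the real hardware setup step */
	}

	static int example_probe_core(struct example_priv *priv)
	{
		int err;

		priv->mac_table = kzalloc(128, GFP_KERNEL);
		if (!priv->mac_table)
			return -ENOMEM;

		priv->rss_key = kzalloc(40, GFP_KERNEL);
		if (!priv->rss_key) {
			err = -ENOMEM;
			goto err_free_mac_table;	/* only mac_table exists so far */
		}

		err = example_hw_init(priv);
		if (err)
			goto err_free_rss_key;		/* both allocations exist */

		return 0;

	err_free_rss_key:
		kfree(priv->rss_key);
	err_free_mac_table:
		kfree(priv->mac_table);
		return err;
	}
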
+ */ +#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT +#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2) +#else +#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT +#endif + /* Estimated requestor size: * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d6f5b9ea3109..720104661d7f 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -953,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, + pend_size; int i; u32 padding = 0; - u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : - packet->page_buf_cnt; + u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt; u32 remain; /* Add padding */ @@ -1055,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev, return 0; } +/* Build an "array" of mpb entries describing the data to be transferred + * over VMBus. After the desc header fields, each "array" entry is variable + * size, and each entry starts after the end of the previous entry. The + * "offset" and "len" fields for each entry imply the size of the entry. + * + * The pfns are in HV_HYP_PAGE_SIZE, because all communication with Hyper-V + * uses that granularity, even if the system page size of the guest is larger. + * Each entry in the input "pb" array must describe a contiguous range of + * guest physical memory so that the pfns are sequential if the range crosses + * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE. + */ +static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb, + u32 page_buffer_count, + struct vmbus_packet_mpb_array *desc, + u32 *desc_size) +{ + struct hv_mpb_array *mpb_entry = &desc->range; + int i, j; + + for (i = 0; i < page_buffer_count; i++) { + u32 offset = pb[i].offset; + u32 len = pb[i].len; + + mpb_entry->offset = offset; + mpb_entry->len = len; + + for (j = 0; j < HVPFN_UP(offset + len); j++) + mpb_entry->pfn_array[j] = pb[i].pfn + j; + + mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j]; + } + + desc->rangecount = page_buffer_count; + *desc_size = (char *)mpb_entry - (char *)desc; +} + static inline int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, @@ -1097,8 +1132,11 @@ static inline int netvsc_send_pkt( packet->dma_range = NULL; if (packet->page_buf_cnt) { + struct vmbus_channel_packet_page_buffer desc; + u32 desc_size; + if (packet->cp_partial) - pb += packet->rmsg_pgcnt; + pb++; ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb); if (ret) { @@ -1106,11 +1144,12 @@ static inline int netvsc_send_pkt( goto exit; } - ret = vmbus_sendpacket_pagebuffer(out_channel, - pb, packet->page_buf_cnt, - &nvmsg, sizeof(nvmsg), - req_id); - + netvsc_build_mpb_array(pb, packet->page_buf_cnt, + (struct vmbus_packet_mpb_array *)&desc, + &desc_size); + ret = vmbus_sendpacket_mpb_desc(out_channel, + (struct vmbus_packet_mpb_array *)&desc, + desc_size, &nvmsg, sizeof(nvmsg), req_id); if (ret) netvsc_dma_unmap(ndev_ctx->device_ctx, packet); } else { @@ -1259,7 +1298,7 @@ int netvsc_send(struct net_device *ndev, packet->send_buf_index = section_index; if (packet->cp_partial) { - packet->page_buf_cnt -= packet->rmsg_pgcnt; + packet->page_buf_cnt--; packet->total_data_buflen = msd_len + packet->rmsg_size; } else { packet->page_buf_cnt = 0; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c51b318b8a72..d8b169ac0343 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -326,43 +326,10 @@ static 
u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, return txq; } -static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len, - struct hv_page_buffer *pb) -{ - int j = 0; - - hvpfn += offset >> HV_HYP_PAGE_SHIFT; - offset = offset & ~HV_HYP_PAGE_MASK; - - while (len > 0) { - unsigned long bytes; - - bytes = HV_HYP_PAGE_SIZE - offset; - if (bytes > len) - bytes = len; - pb[j].pfn = hvpfn; - pb[j].offset = offset; - pb[j].len = bytes; - - offset += bytes; - len -= bytes; - - if (offset == HV_HYP_PAGE_SIZE && len) { - hvpfn++; - offset = 0; - j++; - } - } - - return j + 1; -} - static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, struct hv_netvsc_packet *packet, struct hv_page_buffer *pb) { - u32 slots_used = 0; - char *data = skb->data; int frags = skb_shinfo(skb)->nr_frags; int i; @@ -371,28 +338,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, * 2. skb linear data * 3. skb fragment data */ - slots_used += fill_pg_buf(virt_to_hvpfn(hdr), - offset_in_hvpage(hdr), - len, - &pb[slots_used]); + pb[0].offset = offset_in_hvpage(hdr); + pb[0].len = len; + pb[0].pfn = virt_to_hvpfn(hdr); packet->rmsg_size = len; - packet->rmsg_pgcnt = slots_used; - slots_used += fill_pg_buf(virt_to_hvpfn(data), - offset_in_hvpage(data), - skb_headlen(skb), - &pb[slots_used]); + pb[1].offset = offset_in_hvpage(skb->data); + pb[1].len = skb_headlen(skb); + pb[1].pfn = virt_to_hvpfn(skb->data); for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; + struct hv_page_buffer *cur_pb = &pb[i + 2]; + u64 pfn = page_to_hvpfn(skb_frag_page(frag)); + u32 offset = skb_frag_off(frag); - slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)), - skb_frag_off(frag), - skb_frag_size(frag), - &pb[slots_used]); + cur_pb->offset = offset_in_hvpage(offset); + cur_pb->len = skb_frag_size(frag); + cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT); } - return slots_used; + return frags + 2; } static int count_skb_frag_slots(struct sk_buff *skb) @@ -483,7 +449,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) struct net_device *vf_netdev; u32 rndis_msg_size; u32 hash; - struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; + struct hv_page_buffer pb[MAX_DATA_RANGES]; /* If VF is present and up then redirect packets to it. * Skip the VF if it is marked down or has no carrier. 
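
With the rewrite above, init_page_array() records one hv_page_buffer per data range (RNDIS header, linear data, each fragment) instead of one entry per Hyper-V page. The per-range arithmetic reduces to a starting PFN plus an in-page offset; a tiny userspace sketch, assuming 4 KiB Hyper-V pages and made-up values:

	#include <stdint.h>
	#include <stdio.h>

	#define HV_HYP_PAGE_SHIFT	12
	#define HV_HYP_PAGE_SIZE	(1u << HV_HYP_PAGE_SHIFT)
	#define HV_HYP_PAGE_MASK	(HV_HYP_PAGE_SIZE - 1)

	int main(void)
	{
		uint64_t frag_base_pfn = 0x12345;	/* page backing the fragment   */
		uint32_t frag_off = 9000;		/* fragment offset within it   */
		uint32_t frag_len = 1400;

		/* whole pages in the offset fold into the PFN, the rest stays as offset */
		uint64_t pfn = frag_base_pfn + (frag_off >> HV_HYP_PAGE_SHIFT);
		uint32_t offset = frag_off & HV_HYP_PAGE_MASK;

		printf("range: pfn=0x%llx offset=%u len=%u\n",
		       (unsigned long long)pfn, offset, frag_len);
		return 0;
	}
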
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 82747dfacd70..9e73959e61ee 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { struct hv_netvsc_packet *packet; - struct hv_page_buffer page_buf[2]; - struct hv_page_buffer *pb = page_buf; + struct hv_page_buffer pb; int ret; /* Setup the packet to send it */ @@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev, packet->total_data_buflen = req->request_msg.msg_len; packet->page_buf_cnt = 1; - pb[0].pfn = virt_to_phys(&req->request_msg) >> - HV_HYP_PAGE_SHIFT; - pb[0].len = req->request_msg.msg_len; - pb[0].offset = offset_in_hvpage(&req->request_msg); - - /* Add one page_buf when request_msg crossing page boundary */ - if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) { - packet->page_buf_cnt++; - pb[0].len = HV_HYP_PAGE_SIZE - - pb[0].offset; - pb[1].pfn = virt_to_phys((void *)&req->request_msg - + pb[0].len) >> HV_HYP_PAGE_SHIFT; - pb[1].offset = 0; - pb[1].len = req->request_msg.msg_len - - pb[0].len; - } + pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT; + pb.len = req->request_msg.msg_len; + pb.offset = offset_in_hvpage(&req->request_msg); trace_rndis_send(dev->ndev, 0, &req->request_msg); rcu_read_lock_bh(); - ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false); + ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false); rcu_read_unlock_bh(); return ret; diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c index 00c66240136b..3dd12a8c8b03 100644 --- a/drivers/net/mdio/mdio-mux-meson-gxl.c +++ b/drivers/net/mdio/mdio-mux-meson-gxl.c @@ -17,6 +17,7 @@ #define REG2_LEDACT GENMASK(23, 22) #define REG2_LEDLINK GENMASK(25, 24) #define REG2_DIV4SEL BIT(27) +#define REG2_REVERSED BIT(28) #define REG2_ADCBYPASS BIT(30) #define REG2_CLKINSEL BIT(31) #define ETH_REG3 0x4 @@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv) * The only constraint is that it must match the one in * drivers/net/phy/meson-gxl.c to properly match the PHY. */ - writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), + writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), priv->regs + ETH_REG2); /* Enable the internal phy */ diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 14f361549638..e32013eb0186 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -730,7 +730,7 @@ static int dp83822_phy_reset(struct phy_device *phydev) return phydev->drv->config_init(phydev); } -#ifdef CONFIG_OF_MDIO +#if IS_ENABLED(CONFIG_OF_MDIO) static const u32 tx_amplitude_100base_tx_gain[] = { 80, 82, 83, 85, 87, 88, 90, 92, 93, 95, 97, 98, 100, 102, 103, 105, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 24882d30f685..e2c6569d8c45 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2027,12 +2027,6 @@ static int ksz9477_config_init(struct phy_device *phydev) return err; } - /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes - * in this switch shall be regarded as broken. 
- */ - if (phydev->dev_flags & MICREL_NO_EEE) - phy_disable_eee(phydev); - return kszphy_config_init(phydev); } @@ -5705,7 +5699,6 @@ static struct phy_driver ksphy_driver[] = { .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, .resume = ksz9477_resume, - .get_features = ksz9477_get_features, } }; module_phy_driver(ksphy_driver); diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0e17cc458efd..93de88c1c8fd 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -37,47 +37,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page) return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); } -static int lan88xx_phy_config_intr(struct phy_device *phydev) -{ - int rc; - - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - /* unmask all source and clear them before enable */ - rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF); - rc = phy_read(phydev, LAN88XX_INT_STS); - rc = phy_write(phydev, LAN88XX_INT_MASK, - LAN88XX_INT_MASK_MDINTPIN_EN_ | - LAN88XX_INT_MASK_LINK_CHANGE_); - } else { - rc = phy_write(phydev, LAN88XX_INT_MASK, 0); - if (rc) - return rc; - - /* Ack interrupts after they have been disabled */ - rc = phy_read(phydev, LAN88XX_INT_STS); - } - - return rc < 0 ? rc : 0; -} - -static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev) -{ - int irq_status; - - irq_status = phy_read(phydev, LAN88XX_INT_STS); - if (irq_status < 0) { - phy_error(phydev); - return IRQ_NONE; - } - - if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_)) - return IRQ_NONE; - - phy_trigger_machine(phydev); - - return IRQ_HANDLED; -} - static int lan88xx_suspend(struct phy_device *phydev) { struct lan88xx_priv *priv = phydev->priv; @@ -528,8 +487,9 @@ static struct phy_driver microchip_phy_driver[] = { .config_aneg = lan88xx_config_aneg, .link_change_notify = lan88xx_link_change_notify, - .config_intr = lan88xx_phy_config_intr, - .handle_interrupt = lan88xx_handle_interrupt, + /* Interrupt handling is broken, do not define related + * functions to force polling. + */ .suspend = lan88xx_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 675fbd225378..cc1bfd22fb81 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -244,6 +244,46 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev) return wol.wolopts != 0; } +static void phy_link_change(struct phy_device *phydev, bool up) +{ + struct net_device *netdev = phydev->attached_dev; + + if (up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); + phydev->adjust_link(netdev); + if (phydev->mii_ts && phydev->mii_ts->link_state) + phydev->mii_ts->link_state(phydev->mii_ts, phydev); +} + +/** + * phy_uses_state_machine - test whether consumer driver uses PAL state machine + * @phydev: the target PHY device structure + * + * Ultimately, this aims to indirectly determine whether the PHY is attached + * to a consumer which uses the state machine by calling phy_start() and + * phy_stop(). + * + * When the PHY driver consumer uses phylib, it must have previously called + * phy_connect_direct() or one of its derivatives, so that phy_prepare_link() + * has set up a hook for monitoring state changes. + * + * When the PHY driver is used by the MAC driver consumer through phylink (the + * only other provider of a phy_link_change() method), using the PHY state + * machine is not optional. + * + * Return: true if consumer calls phy_start() and phy_stop(), false otherwise. 
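
The lan88xx hunk above drops the interrupt callbacks on purpose: when a phy_driver provides neither .config_intr nor .handle_interrupt, phylib cannot select PHY_INTERRUPT_ENABLED and the state machine polls the link instead. A hedged sketch of such a polled-only driver entry; the PHY ID and names below are made up for illustration:

	#include <linux/module.h>
	#include <linux/phy.h>

	static struct phy_driver example_phy_driver[] = {
		{
			PHY_ID_MATCH_EXACT(0x00112233),	/* hypothetical PHY ID */
			.name		= "Example polled PHY",
			/* no .config_intr / .handle_interrupt: phylib polls the link */
			.suspend	= genphy_suspend,
			.resume		= genphy_resume,
		},
	};
	module_phy_driver(example_phy_driver);

	MODULE_DESCRIPTION("Polled-only PHY driver sketch");
	MODULE_LICENSE("GPL");
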
+ */ +static bool phy_uses_state_machine(struct phy_device *phydev) +{ + if (phydev->phy_link_change == phy_link_change) + return phydev->attached_dev && phydev->adjust_link; + + /* phydev->phy_link_change is implicitly phylink_phy_change() */ + return true; +} + static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { struct device_driver *drv = phydev->mdio.dev.driver; @@ -310,7 +350,7 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev) * may call phy routines that try to grab the same lock, and that may * lead to a deadlock. */ - if (phydev->attached_dev && phydev->adjust_link) + if (phy_uses_state_machine(phydev)) phy_stop_machine(phydev); if (!mdio_bus_phy_may_suspend(phydev)) @@ -364,7 +404,7 @@ no_resume: } } - if (phydev->attached_dev && phydev->adjust_link) + if (phy_uses_state_machine(phydev)) phy_start_machine(phydev); return 0; @@ -1055,19 +1095,6 @@ struct phy_device *phy_find_first(struct mii_bus *bus) } EXPORT_SYMBOL(phy_find_first); -static void phy_link_change(struct phy_device *phydev, bool up) -{ - struct net_device *netdev = phydev->attached_dev; - - if (up) - netif_carrier_on(netdev); - else - netif_carrier_off(netdev); - phydev->adjust_link(netdev); - if (phydev->mii_ts && phydev->mii_ts->link_state) - phydev->mii_ts->link_state(phydev->mii_ts, phydev); -} - /** * phy_prepare_link - prepares the PHY layer to monitor link status * @phydev: target phy_device struct diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index bd3c9554f6ac..60893691d4c3 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -93,9 +93,8 @@ int phy_led_triggers_register(struct phy_device *phy) if (!phy->phy_num_led_triggers) return 0; - phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev, - sizeof(*phy->led_link_trigger), - GFP_KERNEL); + phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger), + GFP_KERNEL); if (!phy->led_link_trigger) { err = -ENOMEM; goto out_clear; @@ -105,10 +104,9 @@ int phy_led_triggers_register(struct phy_device *phy) if (err) goto out_free_link; - phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev, - phy->phy_num_led_triggers, - sizeof(struct phy_led_trigger), - GFP_KERNEL); + phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers, + sizeof(struct phy_led_trigger), + GFP_KERNEL); if (!phy->phy_led_triggers) { err = -ENOMEM; goto out_unreg_link; @@ -129,11 +127,11 @@ int phy_led_triggers_register(struct phy_device *phy) out_unreg: while (i--) phy_led_trigger_unregister(&phy->phy_led_triggers[i]); - devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); + kfree(phy->phy_led_triggers); out_unreg_link: phy_led_trigger_unregister(phy->led_link_trigger); out_free_link: - devm_kfree(&phy->mdio.dev, phy->led_link_trigger); + kfree(phy->led_link_trigger); phy->led_link_trigger = NULL; out_clear: phy->phy_num_led_triggers = 0; @@ -147,8 +145,13 @@ void phy_led_triggers_unregister(struct phy_device *phy) for (i = 0; i < phy->phy_num_led_triggers; i++) phy_led_trigger_unregister(&phy->phy_led_triggers[i]); + kfree(phy->phy_led_triggers); + phy->phy_led_triggers = NULL; - if (phy->led_link_trigger) + if (phy->led_link_trigger) { phy_led_trigger_unregister(phy->led_link_trigger); + kfree(phy->led_link_trigger); + phy->led_link_trigger = NULL; + } } EXPORT_SYMBOL_GPL(phy_led_triggers_unregister); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b68369e2342b..1bdd5d8bb5b0 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -81,6 +81,7 
@@ struct phylink { unsigned int pcs_state; bool link_failed; + bool suspend_link_up; bool major_config_failed; bool mac_supports_eee_ops; bool mac_supports_eee; @@ -2545,14 +2546,16 @@ void phylink_suspend(struct phylink *pl, bool mac_wol) /* Stop the resolver bringing the link up */ __set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state); - /* Disable the carrier, to prevent transmit timeouts, - * but one would hope all packets have been sent. This - * also means phylink_resolve() will do nothing. - */ - if (pl->netdev) - netif_carrier_off(pl->netdev); - else + pl->suspend_link_up = phylink_link_is_up(pl); + if (pl->suspend_link_up) { + /* Disable the carrier, to prevent transmit timeouts, + * but one would hope all packets have been sent. This + * also means phylink_resolve() will do nothing. + */ + if (pl->netdev) + netif_carrier_off(pl->netdev); pl->old_link_state = false; + } /* We do not call mac_link_down() here as we want the * link to remain up to receive the WoL packets. @@ -2603,15 +2606,18 @@ void phylink_resume(struct phylink *pl) if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) { /* Wake-on-Lan enabled, MAC handling */ - /* Call mac_link_down() so we keep the overall state balanced. - * Do this under the state_mutex lock for consistency. This - * will cause a "Link Down" message to be printed during - * resume, which is harmless - the true link state will be - * printed when we run a resolve. - */ - mutex_lock(&pl->state_mutex); - phylink_link_down(pl); - mutex_unlock(&pl->state_mutex); + if (pl->suspend_link_up) { + /* Call mac_link_down() so we keep the overall state + * balanced. Do this under the state_mutex lock for + * consistency. This will cause a "Link Down" message + * to be printed during resume, which is harmless - + * the true link state will be printed when we run a + * resolve. + */ + mutex_lock(&pl->state_mutex); + phylink_link_down(pl); + mutex_unlock(&pl->state_mutex); + } /* Re-apply the link parameters so that all the settings get * restored to the MAC. diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 644e99fc3623..9c4932198931 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) unsigned char *data; int islcp; + /* Ensure we can safely access protocol field and LCP code */ + if (!pskb_may_pull(skb, 3)) { + kfree_skb(skb); + return NULL; + } data = skb->data; proto = get_unaligned_be16(data); diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index d8fc0c79745d..b75ceb90359f 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -1778,8 +1778,8 @@ static void team_change_rx_flags(struct net_device *dev, int change) struct team_port *port; int inc; - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) { + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) { if (change & IFF_PROMISC) { inc = dev->flags & IFF_PROMISC ? 
1 : -1; dev_set_promiscuity(port->dev, inc); @@ -1789,7 +1789,7 @@ static void team_change_rx_flags(struct net_device *dev, int change) dev_set_allmulti(port->dev, inc); } } - rcu_read_unlock(); + mutex_unlock(&team->lock); } static void team_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index bb0bf1415872..7b3739b29c8f 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -630,16 +630,6 @@ static const struct driver_info zte_rndis_info = { .tx_fixup = rndis_tx_fixup, }; -static const struct driver_info wwan_rndis_info = { - .description = "Mobile Broadband RNDIS device", - .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, - .bind = rndis_bind, - .unbind = rndis_unbind, - .status = rndis_status, - .rx_fixup = rndis_rx_fixup, - .tx_fixup = rndis_tx_fixup, -}; - /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { @@ -676,11 +666,9 @@ static const struct usb_device_id products [] = { USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { - /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and - * Telit FN990A (RNDIS) - */ + /* Novatel Verizon USB730L */ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), - .driver_info = (unsigned long)&wwan_rndis_info, + .driver_info = (unsigned long) &rndis_info, }, { }, // END }; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7e4617216a4b..e53ba600605a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3342,7 +3342,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) +static void __virtnet_rx_pause(struct virtnet_info *vi, + struct receive_queue *rq) { bool running = netif_running(vi->dev); @@ -3352,15 +3353,64 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) } } -static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) +static void virtnet_rx_pause_all(struct virtnet_info *vi) { - bool running = netif_running(vi->dev); + int i; - if (!try_fill_recv(vi, rq, GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); + /* + * Make sure refill_work does not run concurrently to + * avoid napi_disable race which leads to deadlock. + */ + disable_delayed_refill(vi); + cancel_delayed_work_sync(&vi->refill); + for (i = 0; i < vi->max_queue_pairs; i++) + __virtnet_rx_pause(vi, &vi->rq[i]); +} +static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) +{ + /* + * Make sure refill_work does not run concurrently to + * avoid napi_disable race which leads to deadlock. 
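
The ppp_synctty fix above guards the two protocol bytes and the LCP code byte with pskb_may_pull() before they are read. A minimal sketch of that pattern, using a hypothetical example_peek_proto() helper rather than the ppp code itself:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int example_peek_proto(struct sk_buff *skb, u16 *proto)
	{
		/* need the 2-byte protocol field plus 1 byte of LCP code */
		if (!pskb_may_pull(skb, 3))
			return -EINVAL;	/* caller decides whether to free the skb */

		/* safe now: at least 3 bytes are linear at skb->data */
		*proto = ((u16)skb->data[0] << 8) | skb->data[1];
		return 0;
	}
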
+ */ + disable_delayed_refill(vi); + cancel_delayed_work_sync(&vi->refill); + __virtnet_rx_pause(vi, rq); +} + +static void __virtnet_rx_resume(struct virtnet_info *vi, + struct receive_queue *rq, + bool refill) +{ + bool running = netif_running(vi->dev); + bool schedule_refill = false; + + if (refill && !try_fill_recv(vi, rq, GFP_KERNEL)) + schedule_refill = true; if (running) virtnet_napi_enable(rq); + + if (schedule_refill) + schedule_delayed_work(&vi->refill, 0); +} + +static void virtnet_rx_resume_all(struct virtnet_info *vi) +{ + int i; + + enable_delayed_refill(vi); + for (i = 0; i < vi->max_queue_pairs; i++) { + if (i < vi->curr_queue_pairs) + __virtnet_rx_resume(vi, &vi->rq[i], true); + else + __virtnet_rx_resume(vi, &vi->rq[i], false); + } +} + +static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) +{ + enable_delayed_refill(vi); + __virtnet_rx_resume(vi, rq, true); } static int virtnet_rx_resize(struct virtnet_info *vi, @@ -3681,8 +3731,10 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) succ: vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. */ - if (dev->flags & IFF_UP) + spin_lock_bh(&vi->refill_lock); + if (dev->flags & IFF_UP && vi->refill_enabled) schedule_delayed_work(&vi->refill, 0); + spin_unlock_bh(&vi->refill_lock); return 0; } @@ -5626,6 +5678,10 @@ static void virtnet_get_base_stats(struct net_device *dev, if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) tx->hw_drop_ratelimits = 0; + + netdev_stat_queue_sum(dev, + dev->real_num_rx_queues, vi->max_queue_pairs, rx, + dev->real_num_tx_queues, vi->max_queue_pairs, tx); } static const struct netdev_stat_ops virtnet_stat_ops = { @@ -5838,8 +5894,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev, hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, DMA_TO_DEVICE, 0); - if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) - return -ENOMEM; + if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) { + err = -ENOMEM; + goto err_free_buffs; + } err = xsk_pool_dma_map(pool, dma_dev, 0); if (err) @@ -5867,6 +5925,8 @@ err_rq: err_xsk_map: virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, DMA_TO_DEVICE, 0); +err_free_buffs: + kvfree(rq->xsk_buffs); return err; } @@ -5959,12 +6019,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (prog) bpf_prog_add(prog, vi->max_queue_pairs - 1); + virtnet_rx_pause_all(vi); + /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ if (netif_running(dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_disable(&vi->rq[i]); + for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_tx_disable(&vi->sq[i]); - } } if (!prog) { @@ -5996,13 +6056,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, vi->xdp_enabled = false; } + virtnet_rx_resume_all(vi); for (i = 0; i < vi->max_queue_pairs; i++) { if (old_prog) bpf_prog_put(old_prog); - if (netif_running(dev)) { - virtnet_napi_enable(&vi->rq[i]); + if (netif_running(dev)) virtnet_napi_tx_enable(&vi->sq[i]); - } } return 0; @@ -6014,11 +6073,10 @@ err: rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); } + virtnet_rx_resume_all(vi); if (netif_running(dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_enable(&vi->rq[i]); + for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_tx_enable(&vi->sq[i]); - } } if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 3df6aabc7e33..2440e30c5bd1 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -27,6 +27,10 @@ #include <linux/module.h> #include <net/ip6_checksum.h> +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif + #include "vmxnet3_int.h" #include "vmxnet3_xdp.h" @@ -3607,8 +3611,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) struct vmxnet3_adapter *adapter = netdev_priv(netdev); int err = 0; - WRITE_ONCE(netdev->mtu, new_mtu); - /* * Reset_work may be in the middle of resetting the device, wait for its * completion. @@ -3622,6 +3624,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) /* we need to re-create the rx queue based on the new mtu */ vmxnet3_rq_destroy_all(adapter); + WRITE_ONCE(netdev->mtu, new_mtu); vmxnet3_adjust_rx_ring_size(adapter); err = vmxnet3_rq_create_all(adapter); if (err) { @@ -3638,6 +3641,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) "Closing it\n", err); goto out; } + } else { + WRITE_ONCE(netdev->mtu, new_mtu); } out: diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c index 616ecc38d172..5f470499e600 100644 --- a/drivers/net/vmxnet3/vmxnet3_xdp.c +++ b/drivers/net/vmxnet3/vmxnet3_xdp.c @@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq); xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset, - rbi->len, false); + rcd->len, false); xdp_buff_clear_frags_flag(&xdp); xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog); diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index 6e6e9f05509a..06d19e90eadb 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, * default dst remote_ip previously added for this vni */ if (!vxlan_addr_any(&vninode->remote_ip) || - !vxlan_addr_any(&dst->remote_ip)) + !vxlan_addr_any(&dst->remote_ip)) { + u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, + vninode->vni); + + spin_lock_bh(&vxlan->hash_lock[hash_index]); __vxlan_fdb_delete(vxlan, all_zeros_mac, (vxlan_addr_any(&vninode->remote_ip) ? 
dst->remote_ip : vninode->remote_ip), @@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, vninode->vni, vninode->vni, dst->remote_ifindex, true); + spin_unlock_bh(&vxlan->hash_lock[hash_index]); + } if (vxlan->dev->flags & IFF_UP) { if (vxlan_addr_multicast(&vninode->remote_ip) && diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 4c1aecd1163c..419f5530f885 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -15,7 +15,7 @@ #include "fwcmd.h" #include "version.h" -static const u8 otus_magic[4] = { OTUS_MAGIC }; +static const u8 otus_magic[4] __nonstring = { OTUS_MAGIC }; static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4], const unsigned int len, const u8 compatible_revision) diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 4f01189b7c4b..6842c2b02b39 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2552,7 +2552,7 @@ static void at76_disconnect(struct usb_interface *interface) wiphy_info(priv->hw->wiphy, "disconnecting\n"); at76_delete_device(priv); - usb_put_dev(priv->udev); + usb_put_dev(interface_to_usbdev(interface)); dev_info(&interface->dev, "disconnected\n"); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index cfcf01eb0daa..f26e4679e4ff 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -561,8 +561,10 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, if (!found) { /* No platform data for this device, try OF and DMI data */ brcmf_dmi_probe(settings, chip, chiprev); - if (brcmf_of_probe(dev, bus_type, settings) == -EPROBE_DEFER) + if (brcmf_of_probe(dev, bus_type, settings) == -EPROBE_DEFER) { + kfree(settings); return ERR_PTR(-EPROBE_DEFER); + } brcmf_acpi_probe(dev, bus_type, settings); } return settings; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 2821c27f317e..d06c724f63d9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) } /* 1) Prepare USB boot loader for runtime image */ - brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); + err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); + if (err) + goto fail; rdlstate = le32_to_cpu(state.state); rdlbytes = le32_to_cpu(state.bytes); /* 2) Check we are in the Waiting state */ if (rdlstate != DL_WAITING) { - brcmf_err("Failed to DL_START\n"); + brcmf_err("Invalid DL state: %u\n", rdlstate); err = -EINVAL; goto fail; } diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 670031fd60dc..59af36960f9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -142,8 +142,6 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz"; - const struct iwl_cfg iwl_cfg_sc = { .fw_name_mac = "sc", IWL_DEVICE_SC, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h 
b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index b9bd89bfdd74..acafee538b8a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2025 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -451,8 +451,11 @@ struct iwl_cfg { #define IWL_CFG_RF_ID_HR 0x7 #define IWL_CFG_RF_ID_HR1 0x4 -#define IWL_CFG_BW_NO_LIM (U16_MAX - 1) -#define IWL_CFG_BW_ANY U16_MAX +#define IWL_CFG_NO_160 0x1 +#define IWL_CFG_160 0x0 + +#define IWL_CFG_NO_320 0x1 +#define IWL_CFG_320 0x0 #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 @@ -464,7 +467,7 @@ struct iwl_cfg { #define IWL_CFG_IS_JACKET 0x1 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) -#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9) +#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { @@ -472,10 +475,10 @@ struct iwl_dev_info { u16 subdevice; u16 mac_type; u16 rf_type; - u16 bw_limit; u8 mac_step; u8 rf_step; u8 rf_id; + u8 no_160; u8 cores; u8 cdb; u8 jacket; @@ -489,7 +492,7 @@ extern const unsigned int iwl_dev_info_table_size; const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step); + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); extern const struct pci_device_id iwl_hw_card_ids[]; #endif @@ -550,7 +553,6 @@ extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; extern const char iwl_fm_name[]; extern const char iwl_wh_name[]; -extern const char iwl_sp_name[]; extern const char iwl_gl_name[]; extern const char iwl_mtp_name[]; extern const char iwl_dr_name[]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index be9e464c9b7b..3ff493e920d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -148,6 +148,7 @@ * during a error FW error. 
*/ #define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) +#define CSR_FUNC_SCRATCH_POWER_OFF_MASK 0xFFFF /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index cd1b0048bb6d..c381511e9ec6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -944,8 +944,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); break; case NL80211_BAND_6GHZ: - if (!trans->reduced_cap_sku && - trans->bw_limit >= 320) { + if (!trans->reduced_cap_sku) { iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= @@ -1095,22 +1094,19 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0; } - if (trans->bw_limit < 160) + if (trans->no_160) iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; - if (trans->bw_limit < 320 || trans->reduced_cap_sku) { + if (trans->reduced_cap_sku) { memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0, sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320)); - iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= - ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; - } - - if (trans->reduced_cap_sku) { iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &= ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= + ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; } } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index c1607b6d0759..e7b2e08645ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -21,6 +21,7 @@ struct iwl_trans_dev_restart_data { struct list_head list; unsigned int restart_count; time64_t last_error; + bool backoff; char name[]; }; @@ -125,13 +126,20 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) if (!data) return at_least; - if (ktime_get_boottime_seconds() - data->last_error >= + if (!data->backoff && + ktime_get_boottime_seconds() - data->last_error >= IWL_TRANS_RESET_OK_TIME) data->restart_count = 0; index = data->restart_count; - if (index >= ARRAY_SIZE(escalation_list)) + if (index >= ARRAY_SIZE(escalation_list)) { index = ARRAY_SIZE(escalation_list) - 1; + if (!data->backoff) { + data->backoff = true; + return IWL_RESET_MODE_BACKOFF; + } + data->backoff = false; + } return max(at_least, escalation_list[index]); } @@ -140,7 +148,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) static void iwl_trans_restart_wk(struct work_struct *wk) { - struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk); + struct iwl_trans *trans = container_of(wk, typeof(*trans), + restart.wk.work); struct iwl_trans_reprobe *reprobe; enum iwl_reset_mode mode; @@ -168,6 
+177,12 @@ static void iwl_trans_restart_wk(struct work_struct *wk) return; mode = iwl_trans_determine_restart_mode(trans); + if (mode == IWL_RESET_MODE_BACKOFF) { + IWL_ERR(trans, "Too many device errors - delay next reset\n"); + queue_delayed_work(system_unbound_wq, &trans->restart.wk, + IWL_TRANS_RESET_DELAY); + return; + } iwl_trans_inc_restart_count(trans->dev); @@ -227,7 +242,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, trans->dev = dev; trans->num_rx_queues = 1; - INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk); + INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk); return trans; } @@ -271,7 +286,7 @@ int iwl_trans_init(struct iwl_trans *trans) void iwl_trans_free(struct iwl_trans *trans) { - cancel_work_sync(&trans->restart.wk); + cancel_delayed_work_sync(&trans->restart.wk); kmem_cache_destroy(trans->dev_cmd_pool); } @@ -403,7 +418,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans) iwl_trans_pcie_op_mode_leave(trans); - cancel_work_sync(&trans->restart.wk); + cancel_delayed_work_sync(&trans->restart.wk); trans->op_mode = NULL; @@ -540,7 +555,6 @@ void __releases(nic_access) iwl_trans_release_nic_access(struct iwl_trans *trans) { iwl_trans_pcie_release_nic_access(trans); - __release(nic_access); } IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 25fb4c50e38b..ce4954b0d524 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -876,7 +876,7 @@ struct iwl_txq { * only valid for discrete (not integrated) NICs * @invalid_tx_cmd: invalid TX command buffer * @reduced_cap_sku: reduced capability supported SKU - * @bw_limit: the max bandwidth + * @no_160: device not supporting 160 MHz * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz * @restart: restart worker data * @restart.wk: restart worker @@ -911,8 +911,7 @@ struct iwl_trans { char hw_id_str[52]; u32 sku_id[3]; bool reduced_cap_sku; - u16 bw_limit; - bool step_urm; + u8 no_160:1, step_urm:1; u8 dsbr_urm_fw_dependent:1, dsbr_urm_permanent:1; @@ -962,7 +961,7 @@ struct iwl_trans { struct iwl_dma_ptr invalid_tx_cmd; struct { - struct work_struct wk; + struct delayed_work wk; struct iwl_fw_error_dump_mode mode; bool during_reset; } restart; @@ -1163,7 +1162,7 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans, */ trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET, &trans->status); - queue_work(system_unbound_wq, &trans->restart.wk); + queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0); } static inline void iwl_trans_fw_error(struct iwl_trans *trans, @@ -1262,6 +1261,9 @@ enum iwl_reset_mode { IWL_RESET_MODE_RESCAN, IWL_RESET_MODE_FUNC_RESET, IWL_RESET_MODE_PROD_RESET, + + /* keep last - special backoff value */ + IWL_RESET_MODE_BACKOFF, }; void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c index db9e0f04f4b7..687a9450ac98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c @@ -124,9 +124,9 @@ 
void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld, rcu_read_lock(); baid_data = rcu_dereference(mld->fw_id_to_ba[baid]); - if (!IWL_FW_CHECK(mld, !baid_data, - "Got valid BAID %d but not allocated, invalid BAR release!\n", - baid)) + if (IWL_FW_CHECK(mld, !baid_data, + "Got valid BAID %d but not allocated, invalid BAR release!\n", + baid)) goto out_unlock; if (IWL_FW_CHECK(mld, tid != baid_data->tid || diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c index 5a7207accd86..ee99298eebf5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c @@ -1099,7 +1099,8 @@ iwl_mld_set_netdetect_info(struct iwl_mld *mld, if (!match) return; - netdetect_info->matches[netdetect_info->n_matches++] = match; + netdetect_info->matches[netdetect_info->n_matches] = match; + netdetect_info->n_matches++; /* We inverted the order of the SSIDs in the scan * request, so invert the index here. @@ -1116,9 +1117,11 @@ iwl_mld_set_netdetect_info(struct iwl_mld *mld, for_each_set_bit(j, (unsigned long *)&matches[i].matching_channels[0], - sizeof(matches[i].matching_channels)) - match->channels[match->n_channels++] = + sizeof(matches[i].matching_channels)) { + match->channels[match->n_channels] = netdetect_cfg->channels[j]->center_freq; + match->n_channels++; + } } } @@ -1895,7 +1898,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld) int link_id; int ret; bool fw_err = false; - bool keep_connection; lockdep_assert_wiphy(mld->wiphy); @@ -1965,7 +1967,7 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld) iwl_mld_process_netdetect_res(mld, bss_vif, &resume_data); mld->netdetect = false; } else { - keep_connection = + bool keep_connection = iwl_mld_process_wowlan_status(mld, bss_vif, resume_data.wowlan_status); @@ -1973,11 +1975,10 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld) if (keep_connection) iwl_mld_unblock_emlsr(mld, bss_vif, IWL_MLD_EMLSR_BLOCKED_WOWLAN); + else + ieee80211_resume_disconnect(bss_vif); } - if (!mld->netdetect && !keep_connection) - ieee80211_resume_disconnect(bss_vif); - goto out; err: diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c index 453ce2ba39d1..93f9f78e4276 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c @@ -396,8 +396,8 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct iwl_mld *mld, char *buf, .data[0] = &cmd, }; struct iwl_dhc_tas_status_resp *resp = NULL; + u32 resp_len = 0; ssize_t pos = 0; - u32 resp_len; u32 status; int ret; @@ -949,8 +949,9 @@ void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw, snprintf(name, sizeof(name), "%pd", vif->debugfs_dir); snprintf(target, sizeof(target), "../../../%pd3/iwlmld", vif->debugfs_dir); - mld_vif->dbgfs_slink = - debugfs_create_symlink(name, mld->debugfs_dir, target); + if (!mld_vif->dbgfs_slink) + mld_vif->dbgfs_slink = + debugfs_create_symlink(name, mld->debugfs_dir, target); if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && vif->type == NL80211_IFTYPE_STATION) { diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c index 62da137e1024..4b083d447ee2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c @@ -333,19 +333,22 @@ int iwl_mld_load_fw(struct iwl_mld *mld) ret = iwl_trans_start_hw(mld->trans); if (ret) - return ret; + goto err; ret = iwl_mld_run_fw_init_sequence(mld); if 
(ret) - return ret; + goto err; ret = iwl_mld_init_mcc(mld); if (ret) - return ret; + goto err; mld->fw_status.running = true; return 0; +err: + iwl_mld_stop_fw(mld); + return ret; } void iwl_mld_stop_fw(struct iwl_mld *mld) @@ -358,6 +361,10 @@ void iwl_mld_stop_fw(struct iwl_mld *mld) iwl_trans_stop_device(mld->trans); + wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk); + + iwl_mld_purge_async_handlers_list(mld); + mld->fw_status.running = false; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h index d1d56b081bf6..ec14d0736cee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h @@ -166,7 +166,7 @@ struct iwl_mld_vif { struct iwl_mld_emlsr emlsr; -#if CONFIG_PM_SLEEP +#ifdef CONFIG_PM_SLEEP struct iwl_mld_wowlan_data wowlan_data; #endif #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c index 6851064b82da..68d97d3b8f02 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c @@ -475,8 +475,8 @@ static int iwl_mld_mac80211_start(struct ieee80211_hw *hw) { struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); - int ret; bool in_d3 = false; + int ret = 0; lockdep_assert_wiphy(mld->wiphy); @@ -537,7 +537,8 @@ void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend) /* if the suspend flow fails the fw is in error. Stop it here, and it * will be started upon wakeup */ - if (!suspend || iwl_mld_no_wowlan_suspend(mld)) + if (!suspend || + (IS_ENABLED(CONFIG_PM_SLEEP) && iwl_mld_no_wowlan_suspend(mld))) iwl_mld_stop_fw(mld); /* HW is stopped, no more coming RX. OTOH, the worker can't run as the @@ -650,6 +651,7 @@ void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw, #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink); + iwl_mld_vif_from_mac80211(vif)->dbgfs_slink = NULL; #endif iwl_mld_rm_vif(mld, vif); @@ -1943,6 +1945,7 @@ static void iwl_mld_sta_rc_update(struct ieee80211_hw *hw, } } +#ifdef CONFIG_PM_SLEEP static void iwl_mld_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); @@ -1994,6 +1997,7 @@ static int iwl_mld_resume(struct ieee80211_hw *hw) return 0; } +#endif static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c index d4a99ae64074..73d2166a4c25 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c @@ -75,6 +75,7 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, /* Setup async RX handling */ spin_lock_init(&mld->async_handlers_lock); + INIT_LIST_HEAD(&mld->async_handlers_list); wiphy_work_init(&mld->async_handlers_wk, iwl_mld_async_handlers_wk); @@ -414,9 +415,14 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, wiphy_unlock(mld->wiphy); rtnl_unlock(); iwl_fw_flush_dumps(&mld->fwrt); - goto free_hw; + goto err; } + /* We are about to stop the FW. Notifications may require an + * operational FW, so handle them all here before we stop. 
+ */ + wiphy_work_flush(mld->wiphy, &mld->async_handlers_wk); + iwl_mld_stop_fw(mld); wiphy_unlock(mld->wiphy); @@ -455,7 +461,8 @@ leds_exit: iwl_mld_leds_exit(mld); free_nvm: kfree(mld->nvm_data); -free_hw: +err: + iwl_trans_op_mode_leave(mld->trans); ieee80211_free_hw(mld->hw); return ERR_PTR(ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h index 5eceaaf7696d..a4a16da6ebf3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h @@ -298,11 +298,6 @@ iwl_cleanup_mld(struct iwl_mld *mld) #endif iwl_mld_low_latency_restart_cleanup(mld); - - /* Empty the list of async notification handlers so we won't process - * notifications from the dead fw after the reconfig flow. - */ - iwl_mld_purge_async_handlers_list(mld); } enum iwl_power_scheme { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 93446c374008..00056e76ea3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2025 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -552,17 +552,16 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \ + _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ - .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \ + .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \ - _cfg, _name) + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, _cfg, _name) VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -589,6 +588,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name), @@ -725,66 +726,66 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, 
IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ @@ -792,132 +793,132 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, 
IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, 
IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu with Hr */ @@ -925,189 +926,189 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax231_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, 
IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), #endif /* CONFIG_IWLMVM */ @@ -1116,13 +1117,13 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1134,119 +1135,104 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_wh_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_wh_name), /* Ga (Gl) */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_gl, iwl_gl_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_gl, iwl_mtp_name), /* Sc */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_wh_name), 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc, iwl_sp_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2, iwl_wh_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2, iwl_sp_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2f, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2f, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2f, iwl_wh_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2f, iwl_sp_name), /* Dr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_dr, iwl_dr_name), /* Br */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_br, iwl_br_name), #endif /* CONFIG_IWLMLD */ }; @@ -1398,7 +1384,7 @@ out: VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step) + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) { int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; @@ -1441,15 +1427,8 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, dev_info->rf_id != rf_id) continue; - /* - * Check that bw_limit have the same "boolean" value since - * IWL_SUBDEVICE_BW_LIM can only return a boolean value and - * dev_info->bw_limit encodes a non-boolean value. - * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to - * !bw_limit to have a match. 
- */ - if (dev_info->bw_limit != IWL_CFG_BW_ANY && - (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit) + if (dev_info->no_160 != (u8)IWL_CFG_ANY && + dev_info->no_160 != no_160) continue; if (dev_info->cores != (u8)IWL_CFG_ANY && @@ -1587,13 +1566,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), - IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device), + IWL_SUBDEVICE_NO_160(pdev->subsystem_device), IWL_SUBDEVICE_CORES(pdev->subsystem_device), CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; - iwl_trans->bw_limit = dev_info->bw_limit; + iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; } #if IS_ENABLED(CONFIG_IWLMVM) @@ -1759,11 +1738,27 @@ static int _iwl_pci_resume(struct device *device, bool restore) * Scratch value was altered, this means the device was powered off, we * need to reset it completely. * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, - * so assume that any bits there mean that the device is usable. + * but not bits [15:8]. So if we have bits set in lower word, assume + * the device is alive. + * For older devices, just try silently to grab the NIC. */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && - !iwl_read32(trans, CSR_FUNC_SCRATCH)) - device_was_powered_off = true; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) & + CSR_FUNC_SCRATCH_POWER_OFF_MASK)) + device_was_powered_off = true; + } else { + /* + * bh are re-enabled by iwl_trans_pcie_release_nic_access, + * so re-enable them if _iwl_trans_pcie_grab_nic_access fails. 
+ */ + local_bh_disable(); + if (_iwl_trans_pcie_grab_nic_access(trans, true)) { + iwl_trans_pcie_release_nic_access(trans); + } else { + device_was_powered_off = true; + local_bh_enable(); + } + } if (restore || device_was_powered_off) { trans->state = IWL_TRANS_NO_FW; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 45460f93d24a..114a9195ad7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -558,10 +558,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, struct device *dev); -bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); -#define _iwl_trans_pcie_grab_nic_access(trans) \ +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent); +#define _iwl_trans_pcie_grab_nic_access(trans, silent) \ __cond_lock(nic_access_nobh, \ - likely(__iwl_trans_pcie_grab_nic_access(trans))) + likely(__iwl_trans_pcie_grab_nic_access(trans, silent))) void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev); void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev); @@ -1105,7 +1105,8 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, u32 *val); bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); -void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); +void __releases(nic_access_nobh) +iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); /* transport gen 1 exported functions */ void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 3ece34e30d58..472f26f83ba8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -147,8 +147,14 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) return; if (trans->state >= IWL_TRANS_FW_STARTED && - trans_pcie->fw_reset_handshake) + trans_pcie->fw_reset_handshake) { + /* + * Reset handshake can dump firmware on timeout, but that + * should assume that the firmware is already dead. + */ + trans->state = IWL_TRANS_NO_FW; iwl_trans_pcie_fw_reset_handshake(trans); + } trans_pcie->is_down = true; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index c917ed4c19bc..102a6123bba0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2351,7 +2351,8 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) struct iwl_trans_pcie_removal *removal; char _msg = 0, *msg = &_msg; - if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY)) + if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY || + mode == IWL_RESET_MODE_BACKOFF)) return; if (test_bit(STATUS_TRANS_DEAD, &trans->status)) @@ -2405,7 +2406,7 @@ EXPORT_SYMBOL(iwl_trans_pcie_reset); * This version doesn't disable BHs but rather assumes they're * already disabled. 
*/ -bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2457,6 +2458,11 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); + if (silent) { + spin_unlock(&trans_pcie->reg_lock); + return false; + } + WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl); @@ -2488,7 +2494,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) bool ret; local_bh_disable(); - ret = __iwl_trans_pcie_grab_nic_access(trans); + ret = __iwl_trans_pcie_grab_nic_access(trans, false); if (ret) { /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ return ret; @@ -2497,7 +2503,8 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) return false; } -void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) +void __releases(nic_access_nobh) +iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2524,6 +2531,7 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) * scheduled on different CPUs (after we drop reg_lock). */ out: + __release(nic_access_nobh); spin_unlock_bh(&trans_pcie->reg_lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index bb90bcfc6763..9fc4e98693eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1021,7 +1021,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set (see above.) 
*/ - if (!_iwl_trans_pcie_grab_nic_access(trans)) + if (!_iwl_trans_pcie_grab_nic_access(trans, false)) return -EIO; /* diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c index 7ef5e89c6af2..d0bda23c628a 100644 --- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -2,7 +2,7 @@ /* * KUnit tests for the iwlwifi device info table * - * Copyright (C) 2023-2025 Intel Corporation + * Copyright (C) 2023-2024 Intel Corporation */ #include <kunit/test.h> #include <linux/pci.h> @@ -13,9 +13,9 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di) { - printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n", + printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n", pfx, di->device, di->subdevice, di->mac_type, di->mac_step, - di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit, + di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160, di->cores); } @@ -31,13 +31,8 @@ static void devinfo_table_order(struct kunit *test) di->mac_type, di->mac_step, di->rf_type, di->cdb, di->jacket, di->rf_id, - di->bw_limit != IWL_CFG_BW_NO_LIM, - di->cores, di->rf_step); - if (!ret) { - iwl_pci_print_dev_info("No entry found for: ", di); - KUNIT_FAIL(test, - "No entry found for entry at index %d\n", idx); - } else if (ret != di) { + di->no_160, di->cores, di->rf_step); + if (ret != di) { iwl_pci_print_dev_info("searched: ", di); iwl_pci_print_dev_info("found: ", ret); KUNIT_FAIL(test, diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 844af16ee551..35b4ec91979e 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -1011,6 +1011,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev) int i; mt76_worker_disable(&dev->tx_worker); + napi_disable(&dev->tx_napi); netif_napi_del(&dev->tx_napi); for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index e61da76b2097..14b1f603fb62 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -1924,14 +1924,14 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy, mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta); mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta); } - - mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); } if (!info->enable) { mt7925_mcu_sta_remove_tlv(skb); mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, sizeof(struct tlv)); + } else { + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); } return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c index eae93efa6150..82d1bf7edba2 100644 --- a/drivers/net/wireless/purelifi/plfxlc/mac.c +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -102,7 +102,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw) void plfxlc_mac_release(struct plfxlc_mac *mac) { plfxlc_chip_release(&mac->chip); - lockdep_assert_held(&mac->lock); } int plfxlc_op_start(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index 
474b603c121c..adb4840b0489 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -342,8 +342,10 @@ void wl1251_tx_work(struct work_struct *work) while ((skb = skb_dequeue(&wl->tx_queue))) { if (!woken_up) { ret = wl1251_ps_elp_wakeup(wl); - if (ret < 0) + if (ret < 0) { + skb_queue_head(&wl->tx_queue, skb); goto out; + } woken_up = true; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index fc52d5c4c69b..5091e1fa4a0d 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_TX: - get_page(pdata); xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + trace_xdp_exception(queue->info->netdev, prog, act); + break; + } + get_page(pdata); err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); - if (unlikely(!err)) + if (unlikely(err <= 0)) { + if (err < 0) + trace_xdp_exception(queue->info->netdev, prog, act); xdp_return_frame_rx_napi(xdpf); - else if (unlikely(err < 0)) - trace_xdp_exception(queue->info->netdev, prog, act); + } break; case XDP_REDIRECT: get_page(pdata); err = xdp_do_redirect(queue->info->netdev, xdp, prog); *need_xdp_flush = true; - if (unlikely(err)) + if (unlikely(err)) { trace_xdp_exception(queue->info->netdev, prog, act); + xdp_return_buff(xdp); + } break; case XDP_PASS: case XDP_DROP: diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index 2c092ec8c0a9..3b6d759bcdf2 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -242,7 +242,7 @@ struct nvme_dhchap_key *nvme_auth_transform_key( { const char *hmac_name; struct crypto_shash *key_tfm; - struct shash_desc *shash; + SHASH_DESC_ON_STACK(shash, key_tfm); struct nvme_dhchap_key *transformed_key; int ret, key_len; @@ -267,19 +267,11 @@ struct nvme_dhchap_key *nvme_auth_transform_key( if (IS_ERR(key_tfm)) return ERR_CAST(key_tfm); - shash = kmalloc(sizeof(struct shash_desc) + - crypto_shash_descsize(key_tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto out_free_key; - } - key_len = crypto_shash_digestsize(key_tfm); transformed_key = nvme_auth_alloc_key(key_len, key->hash); if (!transformed_key) { ret = -ENOMEM; - goto out_free_shash; + goto out_free_key; } shash->tfm = key_tfm; @@ -299,15 +291,12 @@ struct nvme_dhchap_key *nvme_auth_transform_key( if (ret < 0) goto out_free_transformed_key; - kfree(shash); crypto_free_shash(key_tfm); return transformed_key; out_free_transformed_key: nvme_auth_free_key(transformed_key); -out_free_shash: - kfree(shash); out_free_key: crypto_free_shash(key_tfm); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index d47dfa80fb95..4d64b6935bb9 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -102,6 +102,7 @@ config NVME_TCP_TLS depends on NVME_TCP select NET_HANDSHAKE select KEYS + select TLS help Enables TLS encryption for NVMe TCP using the netlink handshake API. 
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 6115fef74c1e..f6ddbe553289 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -31,6 +31,7 @@ struct nvme_dhchap_queue_context { u32 s1; u32 s2; bool bi_directional; + bool authenticated; u16 transaction; u8 status; u8 dhgroup_id; @@ -682,6 +683,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap) { nvme_auth_reset_dhchap(chap); + chap->authenticated = false; if (chap->shash_tfm) crypto_free_shash(chap->shash_tfm); if (chap->dh_tfm) @@ -930,12 +932,14 @@ static void nvme_queue_auth_work(struct work_struct *work) } if (!ret) { chap->error = 0; + chap->authenticated = true; if (ctrl->opts->concat && (ret = nvme_auth_secure_concat(ctrl, chap))) { dev_warn(ctrl->device, "%s: qid %d failed to enable secure concatenation\n", __func__, chap->qid); chap->error = ret; + chap->authenticated = false; } return; } @@ -1023,13 +1027,16 @@ static void nvme_ctrl_auth_work(struct work_struct *work) return; for (q = 1; q < ctrl->queue_count; q++) { - ret = nvme_auth_negotiate(ctrl, q); - if (ret) { - dev_warn(ctrl->device, - "qid %d: error %d setting up authentication\n", - q, ret); - break; - } + struct nvme_dhchap_queue_context *chap = + &ctrl->dhchap_ctxs[q]; + /* + * Skip re-authentication if the queue had + * not been authenticated initially. + */ + if (!chap->authenticated) + continue; + cancel_work_sync(&chap->auth_work); + queue_work(nvme_auth_wq, &chap->auth_work); } /* @@ -1037,7 +1044,13 @@ static void nvme_ctrl_auth_work(struct work_struct *work) * the controller terminates the connection. */ for (q = 1; q < ctrl->queue_count; q++) { - ret = nvme_auth_wait(ctrl, q); + struct nvme_dhchap_queue_context *chap = + &ctrl->dhchap_ctxs[q]; + if (!chap->authenticated) + continue; + flush_work(&chap->auth_work); + ret = chap->error; + nvme_auth_reset_dhchap(chap); if (ret) dev_warn(ctrl->device, "qid %d: authentication failed\n", q); @@ -1076,6 +1089,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) chap = &ctrl->dhchap_ctxs[i]; chap->qid = i; chap->ctrl = ctrl; + chap->authenticated = false; INIT_WORK(&chap->auth_work, nvme_queue_auth_work); } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index cc23035148b4..f69a232a000a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -38,6 +38,8 @@ struct nvme_ns_info { u32 nsid; __le32 anagrpid; u8 pi_offset; + u16 endgid; + u64 runs; bool is_shared; bool is_readonly; bool is_ready; @@ -150,6 +152,8 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid); static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, struct nvme_command *cmd); +static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, + u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi); void nvme_queue_scan(struct nvme_ctrl *ctrl) { @@ -664,10 +668,11 @@ static void nvme_free_ns_head(struct kref *ref) struct nvme_ns_head *head = container_of(ref, struct nvme_ns_head, ref); - nvme_mpath_remove_disk(head); + nvme_mpath_put_disk(head); ida_free(&head->subsys->ns_ida, head->instance); cleanup_srcu_struct(&head->srcu); nvme_put_subsystem(head->subsys); + kfree(head->plids); kfree(head); } @@ -991,6 +996,18 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + if (op == nvme_cmd_write && ns->head->nr_plids) { + u16 write_stream = 
req->bio->bi_write_stream; + + if (WARN_ON_ONCE(write_stream > ns->head->nr_plids)) + return BLK_STS_INVAL; + + if (write_stream) { + dsmgmt |= ns->head->plids[write_stream - 1] << 16; + control |= NVME_RW_DTYPE_DPLCMT; + } + } + if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req)) return BLK_STS_INVAL; @@ -1157,7 +1174,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, req->cmd_flags &= ~REQ_FAILFAST_DRIVER; if (buffer && bufflen) { - ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL); if (ret) goto out; } @@ -1609,6 +1626,7 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; info->is_ready = true; + info->endgid = le16_to_cpu(id->endgid); if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { dev_info(ctrl->device, "Ignoring bogus Namespace Identifiers\n"); @@ -1649,6 +1667,7 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, info->is_ready = id->nstat & NVME_NSTAT_NRDY; info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL; info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT; + info->endgid = le16_to_cpu(id->endgid); } kfree(id); return ret; @@ -1674,7 +1693,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result) + void *result) { return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, buflen, result); @@ -1683,7 +1702,7 @@ EXPORT_SYMBOL_GPL(nvme_set_features); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result) + void *result) { return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, buflen, result); @@ -2059,7 +2078,21 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else - atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + atomic_bs = (1 + ns->ctrl->awupf) * bs; + + /* + * Set subsystem atomic bs. + */ + if (ns->ctrl->subsys->atomic_bs) { + if (atomic_bs != ns->ctrl->subsys->atomic_bs) { + dev_err_ratelimited(ns->ctrl->device, + "%s: Inconsistent Atomic Write Size, Namespace will not be added: Subsystem=%d bytes, Controller/Namespace=%d bytes\n", + ns->disk ? 
ns->disk->disk_name : "?", + ns->ctrl->subsys->atomic_bs, + atomic_bs); + } + } else + ns->ctrl->subsys->atomic_bs = atomic_bs; nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs); } @@ -2153,6 +2186,148 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns, return ret; } +static int nvme_query_fdp_granularity(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info, u8 fdp_idx) +{ + struct nvme_fdp_config_log hdr, *h; + struct nvme_fdp_config_desc *desc; + size_t size = sizeof(hdr); + void *log, *end; + int i, n, ret; + + ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0, + NVME_CSI_NVM, &hdr, size, 0, info->endgid); + if (ret) { + dev_warn(ctrl->device, + "FDP configs log header status:0x%x endgid:%d\n", ret, + info->endgid); + return ret; + } + + size = le32_to_cpu(hdr.sze); + if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) { + dev_warn(ctrl->device, "FDP config size too large:%zu\n", + size); + return 0; + } + + h = kvmalloc(size, GFP_KERNEL); + if (!h) + return -ENOMEM; + + ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0, + NVME_CSI_NVM, h, size, 0, info->endgid); + if (ret) { + dev_warn(ctrl->device, + "FDP configs log status:0x%x endgid:%d\n", ret, + info->endgid); + goto out; + } + + n = le16_to_cpu(h->numfdpc) + 1; + if (fdp_idx > n) { + dev_warn(ctrl->device, "FDP index:%d out of range:%d\n", + fdp_idx, n); + /* Proceed without registering FDP streams */ + ret = 0; + goto out; + } + + log = h + 1; + desc = log; + end = log + size - sizeof(*h); + for (i = 0; i < fdp_idx; i++) { + log += le16_to_cpu(desc->dsze); + desc = log; + if (log >= end) { + dev_warn(ctrl->device, + "FDP invalid config descriptor list\n"); + ret = 0; + goto out; + } + } + + if (le32_to_cpu(desc->nrg) > 1) { + dev_warn(ctrl->device, "FDP NRG > 1 not supported\n"); + ret = 0; + goto out; + } + + info->runs = le64_to_cpu(desc->runs); +out: + kvfree(h); + return ret; +} + +static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + struct nvme_ns_head *head = ns->head; + struct nvme_ctrl *ctrl = ns->ctrl; + struct nvme_fdp_ruh_status *ruhs; + struct nvme_fdp_config fdp; + struct nvme_command c = {}; + size_t size; + int i, ret; + + /* + * The FDP configuration is static for the lifetime of the namespace, + * so return immediately if we've already registered this namespace's + * streams. 
+ */ + if (head->nr_plids) + return 0; + + ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0, + &fdp); + if (ret) { + dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret); + return ret; + } + + if (!(fdp.flags & FDPCFG_FDPE)) + return 0; + + ret = nvme_query_fdp_granularity(ctrl, info, fdp.fdpcidx); + if (!info->runs) + return ret; + + size = struct_size(ruhs, ruhsd, S8_MAX - 1); + ruhs = kzalloc(size, GFP_KERNEL); + if (!ruhs) + return -ENOMEM; + + c.imr.opcode = nvme_cmd_io_mgmt_recv; + c.imr.nsid = cpu_to_le32(head->ns_id); + c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS; + c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size)); + ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size); + if (ret) { + dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret); + goto free; + } + + head->nr_plids = le16_to_cpu(ruhs->nruhsd); + if (!head->nr_plids) + goto free; + + head->plids = kcalloc(head->nr_plids, sizeof(*head->plids), + GFP_KERNEL); + if (!head->plids) { + dev_warn(ctrl->device, + "failed to allocate %u FDP placement IDs\n", + head->nr_plids); + head->nr_plids = 0; + ret = -ENOMEM; + goto free; + } + + for (i = 0; i < head->nr_plids; i++) + head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid); +free: + kfree(ruhs); + return ret; +} + static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { @@ -2190,6 +2365,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, goto out; } + if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) { + ret = nvme_query_fdp_info(ns, info); + if (ret < 0) + goto out; + } + lim = queue_limits_start_update(ns->disk->queue); memflags = blk_mq_freeze_queue(ns->disk->queue); @@ -2201,6 +2382,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, nvme_set_chunk_sectors(ns, id, &lim); if (!nvme_update_disk_info(ns, id, &lim)) capacity = 0; + + /* + * Validate the max atomic write size fits within the subsystem's + * atomic write capabilities. 
+ */ + if (lim.atomic_write_hw_max > ns->ctrl->subsys->atomic_bs) { + blk_mq_unfreeze_queue(ns->disk->queue, memflags); + ret = -ENXIO; + goto out; + } + nvme_config_discard(ns, &lim); if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && ns->head->ids.csi == NVME_CSI_ZNS) @@ -2223,6 +2415,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if (!nvme_init_integrity(ns->head, &lim, info)) capacity = 0; + lim.max_write_streams = ns->head->nr_plids; + if (lim.max_write_streams) + lim.write_stream_granularity = min(info->runs, U32_MAX); + else + lim.write_stream_granularity = 0; + ret = queue_limits_commit_update(ns->disk->queue, &lim); if (ret) { blk_mq_unfreeze_queue(ns->disk->queue, memflags); @@ -2326,6 +2524,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) ns->head->disk->flags |= GENHD_FL_HIDDEN; else nvme_init_integrity(ns->head, &lim, info); + lim.max_write_streams = ns_lim->max_write_streams; + lim.write_stream_granularity = ns_lim->write_stream_granularity; ret = queue_limits_commit_update(ns->head->disk->queue, &lim); set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); @@ -3031,7 +3231,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) kfree(subsys); return -EINVAL; } - subsys->awupf = le16_to_cpu(id->awupf); nvme_mpath_default_iopolicy(subsys); subsys->dev.class = &nvme_subsys_class; @@ -3084,8 +3283,8 @@ out_unlock: return ret; } -int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, - void *log, size_t size, u64 offset) +static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, + u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi) { struct nvme_command c = { }; u32 dwlen = nvme_bytes_to_numd(size); @@ -3099,10 +3298,18 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); c.get_log_page.csi = csi; + c.get_log_page.lsi = cpu_to_le16(lsi); return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, + void *log, size_t size, u64 offset) +{ + return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size, + offset, 0); +} + static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, struct nvme_effects_log **log) { @@ -3441,7 +3648,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) dev_pm_qos_expose_latency_tolerance(ctrl->device); else if (!ctrl->apst_enabled && prev_apst_enabled) dev_pm_qos_hide_latency_tolerance(ctrl->device); - + ctrl->awupf = le16_to_cpu(id->awupf); out_free: kfree(id); return ret; @@ -3560,7 +3767,7 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, */ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; - if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) + if (nvme_tryget_ns_head(h)) return h; } @@ -3804,7 +4011,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) } } else { ret = -EINVAL; - if (!info->is_shared || !head->shared) { + if ((!info->is_shared || !head->shared) && + !list_empty(&head->list)) { dev_err(ctrl->device, "Duplicate unshared namespace %d\n", info->nsid); @@ -4008,7 +4216,8 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); if (list_empty(&ns->head->list)) { - list_del_init(&ns->head->entry); + if (!nvme_mpath_queue_if_no_path(ns->head)) + 
list_del_init(&ns->head->entry); last_path = true; } mutex_unlock(&ns->ctrl->subsys->lock); @@ -4029,7 +4238,7 @@ static void nvme_ns_remove(struct nvme_ns *ns) synchronize_srcu(&ns->ctrl->srcu); if (last_path) - nvme_mpath_shutdown_disk(ns->head); + nvme_mpath_remove_disk(ns->head); nvme_put_ns(ns); } @@ -4295,6 +4504,15 @@ static void nvme_scan_work(struct work_struct *work) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); + + /* Requeue if we have missed AENs */ + if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) + nvme_queue_scan(ctrl); +#ifdef CONFIG_NVME_MULTIPATH + else if (ctrl->ana_log_buf) + /* Re-read the ANA log page to not miss updates */ + queue_work(nvme_wq, &ctrl->ana_work); +#endif } /* @@ -4484,7 +4702,8 @@ static void nvme_fw_act_work(struct work_struct *work) msleep(100); } - if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) || + !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) return; nvme_unquiesce_io_queues(ctrl); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 2257c3c96dd2..fdafa3e9e66f 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1410,9 +1410,8 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) } static void -nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +nvme_fc_xmt_ls_rsp_free(struct nvmefc_ls_rcv_op *lsop) { - struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; struct nvme_fc_rport *rport = lsop->rport; struct nvme_fc_lport *lport = rport->lport; unsigned long flags; @@ -1434,6 +1433,14 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) } static void +nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +{ + struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; + + nvme_fc_xmt_ls_rsp_free(lsop); +} + +static void nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) { struct nvme_fc_rport *rport = lsop->rport; @@ -1450,7 +1457,7 @@ nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) dev_warn(lport->dev, "LLDD rejected LS RSP xmt: LS %d status %d\n", w0->ls_cmd, ret); - nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); + nvme_fc_xmt_ls_rsp_free(lsop); return; } } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 89be5911b25d..878ea8b1a0ac 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -10,10 +10,61 @@ #include "nvme.h" bool multipath = true; -module_param(multipath, bool, 0444); +static bool multipath_always_on; + +static int multipath_param_set(const char *val, const struct kernel_param *kp) +{ + int ret; + bool *arg = kp->arg; + + ret = param_set_bool(val, kp); + if (ret) + return ret; + + if (multipath_always_on && !*arg) { + pr_err("Can't disable multipath when multipath_always_on is configured.\n"); + *arg = true; + return -EINVAL; + } + + return 0; +} + +static const struct kernel_param_ops multipath_param_ops = { + .set = multipath_param_set, + .get = param_get_bool, +}; + +module_param_cb(multipath, &multipath_param_ops, &multipath, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +static int multipath_always_on_set(const char *val, + const struct kernel_param *kp) +{ + int ret; + bool *arg = kp->arg; + + ret = param_set_bool(val, kp); + if (ret < 0) + return ret; + + if (*arg) + multipath = true; + + return 0; +} + +static const struct kernel_param_ops multipath_always_on_ops = { + .set = multipath_always_on_set, + .get = param_get_bool, +}; + +module_param_cb(multipath_always_on, &multipath_always_on_ops, + 
&multipath_always_on, 0444); +MODULE_PARM_DESC(multipath_always_on, + "create multipath node always except for private namespace with non-unique nsid; note that this also implicitly enables native multipath support"); + static const char *nvme_iopolicy_names[] = { [NVME_IOPOLICY_NUMA] = "numa", [NVME_IOPOLICY_RR] = "round-robin", @@ -427,7 +478,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) struct nvme_ns *ns; if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) - return NULL; + return false; list_for_each_entry_srcu(ns, &head->list, siblings, srcu_read_lock_held(&head->srcu)) { @@ -442,7 +493,17 @@ static bool nvme_available_path(struct nvme_ns_head *head) break; } } - return false; + + /* + * If "head->delayed_removal_secs" is configured (i.e., non-zero), do + * not immediately fail I/O. Instead, requeue the I/O for the configured + * duration, anticipating that if there's a transient link failure then + * it may recover within this time window. This parameter is exported to + * userspace via sysfs, and its default value is zero. It is internally + * mapped to NVME_NSHEAD_QUEUE_IF_NO_PATH. When delayed_removal_secs is + * non-zero, this flag is set to true. When zero, the flag is cleared. + */ + return nvme_mpath_queue_if_no_path(head); } static void nvme_ns_head_submit_bio(struct bio *bio) @@ -617,6 +678,40 @@ static void nvme_requeue_work(struct work_struct *work) } } +static void nvme_remove_head(struct nvme_ns_head *head) +{ + if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared + * to allow multipath to fail all I/O. + */ + kblockd_schedule_work(&head->requeue_work); + + nvme_cdev_del(&head->cdev, &head->cdev_device); + synchronize_srcu(&head->srcu); + del_gendisk(head->disk); + nvme_put_ns_head(head); + } +} + +static void nvme_remove_head_work(struct work_struct *work) +{ + struct nvme_ns_head *head = container_of(to_delayed_work(work), + struct nvme_ns_head, remove_work); + bool remove = false; + + mutex_lock(&head->subsys->lock); + if (list_empty(&head->list)) { + list_del_init(&head->entry); + remove = true; + } + mutex_unlock(&head->subsys->lock); + if (remove) + nvme_remove_head(head); + + module_put(THIS_MODULE); +} + int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { struct queue_limits lim; @@ -626,19 +721,31 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); + INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work); + head->delayed_removal_secs = 0; /* - * Add a multipath node if the subsystems supports multiple controllers. - * We also do this for private namespaces as the namespace sharing flag - * could change after a rescan. + * If "multipath_always_on" is enabled, a multipath node is added + * regardless of whether the disk is single/multi ported, and whether + * the namespace is shared or private. If "multipath_always_on" is not + * enabled, a multipath node is added only if the subsystem supports + * multiple controllers and the "multipath" option is configured. In + * either case, for private namespaces, we ensure that the NSID is + * unique. 
*/ - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || - !nvme_is_unique_nsid(ctrl, head) || !multipath) + if (!multipath_always_on) { + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + !multipath) + return 0; + } + + if (!nvme_is_unique_nsid(ctrl, head)) return 0; blk_set_stacking_limits(&lim); lim.dma_alignment = 3; - lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; + lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | + BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES; if (head->ids.csi == NVME_CSI_ZNS) lim.features |= BLK_FEAT_ZONED; @@ -659,6 +766,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); + nvme_tryget_ns_head(head); return 0; } @@ -1015,6 +1123,49 @@ static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr } DEVICE_ATTR_RO(numa_nodes); +static ssize_t delayed_removal_secs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns_head *head = disk->private_data; + int ret; + + mutex_lock(&head->subsys->lock); + ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs); + mutex_unlock(&head->subsys->lock); + return ret; +} + +static ssize_t delayed_removal_secs_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns_head *head = disk->private_data; + unsigned int sec; + int ret; + + ret = kstrtouint(buf, 0, &sec); + if (ret < 0) + return ret; + + mutex_lock(&head->subsys->lock); + head->delayed_removal_secs = sec; + if (sec) + set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); + else + clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); + mutex_unlock(&head->subsys->lock); + /* + * Ensure that update to NVME_NSHEAD_QUEUE_IF_NO_PATH is seen + * by its reader. + */ + synchronize_srcu(&head->srcu); + + return count; +} + +DEVICE_ATTR_RW(delayed_removal_secs); + static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { @@ -1051,6 +1202,13 @@ void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head) list_for_each_entry_rcu(ns, &head->list, siblings) { /* + * Ensure that ns path disk node is already added otherwise we + * may get invalid kobj name for target + */ + if (!test_bit(GD_ADDED, &ns->disk->state)) + continue; + + /* * Avoid creating link if it already exists for the given path. 
* When path ana state transitions from optimized to non- * optimized or vice-versa, the nvme_mpath_set_live() is @@ -1065,13 +1223,6 @@ void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head) if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags)) continue; - /* - * Ensure that ns path disk node is already added otherwise we - * may get invalid kobj name for target - */ - if (!test_bit(GD_ADDED, &ns->disk->state)) - continue; - target = disk_to_dev(ns->disk); /* * Create sysfs link from head gendisk kobject @kobj to the @@ -1136,23 +1287,43 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) #endif } -void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +void nvme_mpath_remove_disk(struct nvme_ns_head *head) { - if (!head->disk) - return; - if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { - nvme_cdev_del(&head->cdev, &head->cdev_device); + bool remove = false; + + mutex_lock(&head->subsys->lock); + /* + * We are called when all paths have been removed, and at that point + * head->list is expected to be empty. However, nvme_remove_ns() and + * nvme_init_ns_head() can run concurrently and so if head->delayed_ + * removal_secs is configured, it is possible that by the time we reach + * this point, head->list may no longer be empty. Therefore, we recheck + * head->list here. If it is no longer empty then we skip enqueuing the + * delayed head removal work. + */ + if (!list_empty(&head->list)) + goto out; + + if (head->delayed_removal_secs) { /* - * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared - * to allow multipath to fail all I/O. + * Ensure that no one could remove this module while the head + * remove work is pending. */ - synchronize_srcu(&head->srcu); - kblockd_schedule_work(&head->requeue_work); - del_gendisk(head->disk); + if (!try_module_get(THIS_MODULE)) + goto out; + queue_delayed_work(nvme_wq, &head->remove_work, + head->delayed_removal_secs * HZ); + } else { + list_del_init(&head->entry); + remove = true; } +out: + mutex_unlock(&head->subsys->lock); + if (remove) + nvme_remove_head(head); } -void nvme_mpath_remove_disk(struct nvme_ns_head *head) +void nvme_mpath_put_disk(struct nvme_ns_head *head) { if (!head->disk) return; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 51e078642127..ad0c1f834f09 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -410,6 +410,7 @@ struct nvme_ctrl { enum nvme_ctrl_type cntrltype; enum nvme_dctype dctype; + u16 awupf; /* 0's based value. */ }; static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) @@ -442,11 +443,11 @@ struct nvme_subsystem { u8 cmic; enum nvme_subsys_type subtype; u16 vendor_id; - u16 awupf; /* 0's based awupf value. 
*/ struct ida ns_ida; #ifdef CONFIG_NVME_MULTIPATH enum nvme_iopolicy iopolicy; #endif + u32 atomic_bs; }; /* @@ -496,6 +497,9 @@ struct nvme_ns_head { struct device cdev_device; struct gendisk *disk; + + u16 nr_plids; + u16 *plids; #ifdef CONFIG_NVME_MULTIPATH struct bio_list requeue_list; spinlock_t requeue_lock; @@ -503,7 +507,10 @@ struct nvme_ns_head { struct work_struct partition_scan_work; struct mutex lock; unsigned long flags; -#define NVME_NSHEAD_DISK_LIVE 0 + struct delayed_work remove_work; + unsigned int delayed_removal_secs; +#define NVME_NSHEAD_DISK_LIVE 0 +#define NVME_NSHEAD_QUEUE_IF_NO_PATH 1 struct nvme_ns __rcu *current_path[]; #endif }; @@ -896,10 +903,10 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, int qid, nvme_submit_flags_t flags); int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result); + void *result); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result); + void *result); int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); @@ -960,7 +967,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns); void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns); void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid); -void nvme_mpath_remove_disk(struct nvme_ns_head *head); +void nvme_mpath_put_disk(struct nvme_ns_head *head); int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); void nvme_mpath_update(struct nvme_ctrl *ctrl); @@ -969,7 +976,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_revalidate_paths(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); -void nvme_mpath_shutdown_disk(struct nvme_ns_head *head); +void nvme_mpath_remove_disk(struct nvme_ns_head *head); void nvme_mpath_start_request(struct request *rq); void nvme_mpath_end_request(struct request *rq); @@ -986,12 +993,19 @@ extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; extern struct device_attribute dev_attr_queue_depth; extern struct device_attribute dev_attr_numa_nodes; +extern struct device_attribute dev_attr_delayed_removal_secs; extern struct device_attribute subsys_attr_iopolicy; static inline bool nvme_disk_is_ns_head(struct gendisk *disk) { return disk->fops == &nvme_ns_head_ops; } +static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head) +{ + if (test_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags)) + return true; + return false; +} #else #define multipath false static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) @@ -1012,7 +1026,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) { } -static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_put_disk(struct nvme_ns_head *head) { } static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns) @@ -1031,7 +1045,7 @@ static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns) static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { } -static inline void 
nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) { } static inline void nvme_trace_bio_complete(struct request *req) @@ -1079,6 +1093,10 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk) { return false; } +static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head) +{ + return false; +} #endif /* CONFIG_NVME_MULTIPATH */ int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b178d52eac1b..e0bfe04a2bc2 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/nodemask.h> #include <linux/once.h> #include <linux/pci.h> #include <linux/suspend.h> @@ -34,16 +35,31 @@ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) -#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) +/* Optimisation for I/Os between 4k and 128k */ +#define NVME_SMALL_POOL_SIZE 256 /* * These can be higher, but we need to ensure that any command doesn't * require an sg allocation that needs more than a page of data. */ #define NVME_MAX_KB_SZ 8192 -#define NVME_MAX_SEGS 128 -#define NVME_MAX_META_SEGS 15 -#define NVME_MAX_NR_ALLOCATIONS 5 +#define NVME_MAX_NR_DESCRIPTORS 5 + +/* + * For data SGLs we support a single descriptors worth of SGL entries, but for + * now we also limit it to avoid an allocation larger than PAGE_SIZE for the + * scatterlist. + */ +#define NVME_MAX_SEGS \ + min(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc), \ + (PAGE_SIZE / sizeof(struct scatterlist))) + +/* + * For metadata SGLs, only the small descriptor is supported, and the first + * entry is the segment descriptor, which for the data pointer sits in the SQE. + */ +#define NVME_MAX_META_SEGS \ + ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1) static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -112,6 +128,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); static void nvme_delete_io_queues(struct nvme_dev *dev); static void nvme_update_attrs(struct nvme_dev *dev); +struct nvme_descriptor_pools { + struct dma_pool *large; + struct dma_pool *small; +}; + /* * Represents an NVM Express device. Each nvme_dev is a PCI function. 
*/ @@ -121,8 +142,6 @@ struct nvme_dev { struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; struct device *dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; unsigned online_queues; unsigned max_qid; unsigned io_queues[HCTX_MAX_TYPES]; @@ -162,6 +181,7 @@ struct nvme_dev { unsigned int nr_allocated_queues; unsigned int nr_write_queues; unsigned int nr_poll_queues; + struct nvme_descriptor_pools descriptor_pools[]; }; static int io_queue_depth_set(const char *val, const struct kernel_param *kp) @@ -191,6 +211,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) */ struct nvme_queue { struct nvme_dev *dev; + struct nvme_descriptor_pools descriptor_pools; spinlock_t sq_lock; void *sq_cmds; /* only used for poll queues: */ @@ -219,30 +240,30 @@ struct nvme_queue { struct completion delete_done; }; -union nvme_descriptor { - struct nvme_sgl_desc *sg_list; - __le64 *prp_list; +/* bits for iod->flags */ +enum nvme_iod_flags { + /* this command has been aborted by the timeout handler */ + IOD_ABORTED = 1U << 0, + + /* uses the small descriptor pool */ + IOD_SMALL_DESCRIPTOR = 1U << 1, }; /* * The nvme_iod describes the data in an I/O. - * - * The sg pointer contains the list of PRP/SGL chunk allocations in addition - * to the actual struct scatterlist. */ struct nvme_iod { struct nvme_request req; struct nvme_command cmd; - bool aborted; - s8 nr_allocations; /* PRP list pool allocations. 0 means small - pool in use */ + u8 flags; + u8 nr_descriptors; unsigned int dma_len; /* length of single DMA segment mapping */ dma_addr_t first_dma; dma_addr_t meta_dma; struct sg_table sgt; struct sg_table meta_sgt; - union nvme_descriptor meta_list; - union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; + struct nvme_sgl_desc *meta_descriptor; + void *descriptors[NVME_MAX_NR_DESCRIPTORS]; }; static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) @@ -390,37 +411,85 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, * as it only leads to a small amount of wasted memory for the lifetime of * the I/O. 
*/ -static int nvme_pci_npages_prp(void) +static __always_inline int nvme_pci_npages_prp(void) { unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); } -static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) +static struct nvme_descriptor_pools * +nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node) { - struct nvme_dev *dev = to_nvme_dev(data); - struct nvme_queue *nvmeq = &dev->queues[0]; + struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node]; + size_t small_align = NVME_SMALL_POOL_SIZE; - WARN_ON(hctx_idx != 0); - WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); + if (pools->small) + return pools; /* already initialized */ - hctx->driver_data = nvmeq; - return 0; + pools->large = dma_pool_create_node("nvme descriptor page", dev->dev, + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node); + if (!pools->large) + return ERR_PTR(-ENOMEM); + + if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) + small_align = 512; + + pools->small = dma_pool_create_node("nvme descriptor small", dev->dev, + NVME_SMALL_POOL_SIZE, small_align, 0, numa_node); + if (!pools->small) { + dma_pool_destroy(pools->large); + pools->large = NULL; + return ERR_PTR(-ENOMEM); + } + + return pools; } -static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) +static void nvme_release_descriptor_pools(struct nvme_dev *dev) +{ + unsigned i; + + for (i = 0; i < nr_node_ids; i++) { + struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i]; + + dma_pool_destroy(pools->large); + dma_pool_destroy(pools->small); + } +} + +static int nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data, + unsigned qid) { struct nvme_dev *dev = to_nvme_dev(data); - struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; + struct nvme_queue *nvmeq = &dev->queues[qid]; + struct nvme_descriptor_pools *pools; + struct blk_mq_tags *tags; - WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); + tags = qid ? 
dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0]; + WARN_ON(tags != hctx->tags); + pools = nvme_setup_descriptor_pools(dev, hctx->numa_node); + if (IS_ERR(pools)) + return PTR_ERR(pools); + + nvmeq->descriptor_pools = *pools; hctx->driver_data = nvmeq; return 0; } +static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + WARN_ON(hctx_idx != 0); + return nvme_init_hctx_common(hctx, data, 0); +} + +static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + return nvme_init_hctx_common(hctx, data, hctx_idx + 1); +} + static int nvme_pci_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) @@ -537,23 +606,39 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, return true; } -static void nvme_free_prps(struct nvme_dev *dev, struct request *req) +static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq, + struct nvme_iod *iod) +{ + if (iod->flags & IOD_SMALL_DESCRIPTOR) + return nvmeq->descriptor_pools.small; + return nvmeq->descriptor_pools.large; +} + +static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req) { const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_addr_t dma_addr = iod->first_dma; int i; - for (i = 0; i < iod->nr_allocations; i++) { - __le64 *prp_list = iod->list[i].prp_list; + if (iod->nr_descriptors == 1) { + dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0], + dma_addr); + return; + } + + for (i = 0; i < iod->nr_descriptors; i++) { + __le64 *prp_list = iod->descriptors[i]; dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); - dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_pool_free(nvmeq->descriptor_pools.large, prp_list, + dma_addr); dma_addr = next_dma_addr; } } -static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq, + struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -566,15 +651,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) WARN_ON_ONCE(!iod->sgt.nents); dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); - - if (iod->nr_allocations == 0) - dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, - iod->first_dma); - else if (iod->nr_allocations == 1) - dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, - iod->first_dma); - else - nvme_free_prps(dev, req); + nvme_free_descriptors(nvmeq, req); mempool_free(iod->sgt.sgl, dev->iod_mempool); } @@ -592,11 +669,10 @@ static void nvme_print_sgl(struct scatterlist *sgl, int nents) } } -static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, +static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq, struct request *req, struct nvme_rw_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct dma_pool *pool; int length = blk_rq_payload_bytes(req); struct scatterlist *sg = iod->sgt.sgl; int dma_len = sg_dma_len(sg); @@ -604,7 +680,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); __le64 *prp_list; dma_addr_t prp_dma; - int nprps, i; + int i; length -= (NVME_CTRL_PAGE_SIZE - offset); if (length <= 0) { @@ -626,30 +702,26 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, goto done; } - nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); - if (nprps <= (256 / 8)) { - pool = 
dev->prp_small_pool; - iod->nr_allocations = 0; - } else { - pool = dev->prp_page_pool; - iod->nr_allocations = 1; - } + if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <= + NVME_SMALL_POOL_SIZE / sizeof(__le64)) + iod->flags |= IOD_SMALL_DESCRIPTOR; - prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); - if (!prp_list) { - iod->nr_allocations = -1; + prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, + &prp_dma); + if (!prp_list) return BLK_STS_RESOURCE; - } - iod->list[0].prp_list = prp_list; + iod->descriptors[iod->nr_descriptors++] = prp_list; iod->first_dma = prp_dma; i = 0; for (;;) { if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; - prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + + prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large, + GFP_ATOMIC, &prp_dma); if (!prp_list) goto free_prps; - iod->list[iod->nr_allocations++].prp_list = prp_list; + iod->descriptors[iod->nr_descriptors++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); i = 1; @@ -673,7 +745,7 @@ done: cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); return BLK_STS_OK; free_prps: - nvme_free_prps(dev, req); + nvme_free_descriptors(nvmeq, req); return BLK_STS_RESOURCE; bad_sgl: WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), @@ -698,11 +770,10 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } -static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, +static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq, struct request *req, struct nvme_rw_command *cmd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct dma_pool *pool; struct nvme_sgl_desc *sg_list; struct scatterlist *sg = iod->sgt.sgl; unsigned int entries = iod->sgt.nents; @@ -717,21 +788,14 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, return BLK_STS_OK; } - if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { - pool = dev->prp_small_pool; - iod->nr_allocations = 0; - } else { - pool = dev->prp_page_pool; - iod->nr_allocations = 1; - } + if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list)) + iod->flags |= IOD_SMALL_DESCRIPTOR; - sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); - if (!sg_list) { - iod->nr_allocations = -1; + sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, + &sgl_dma); + if (!sg_list) return BLK_STS_RESOURCE; - } - - iod->list[0].sg_list = sg_list; + iod->descriptors[iod->nr_descriptors++] = sg_list; iod->first_dma = sgl_dma; nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); @@ -785,12 +849,12 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret = BLK_STS_RESOURCE; int rc; if (blk_rq_nr_phys_segments(req) == 1) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct bio_vec bv = req_bvec(req); if (!is_pci_p2pdma_page(bv.bv_page)) { @@ -825,9 +889,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, } if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) - ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); + ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw); else - ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); + ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw); if (ret != BLK_STS_OK) goto out_unmap_sg; return BLK_STS_OK; @@ -842,6 +906,7 @@ out_free_sg: 
static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, struct request *req) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_rw_command *cmnd = &iod->cmd.rw; struct nvme_sgl_desc *sg_list; @@ -865,12 +930,13 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, if (rc) goto out_free_sg; - sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma); + sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC, + &sgl_dma); if (!sg_list) goto out_unmap_sg; entries = iod->meta_sgt.nents; - iod->meta_list.sg_list = sg_list; + iod->meta_descriptor = sg_list; iod->meta_dma = sgl_dma; cmnd->flags = NVME_CMD_SGL_METASEG; @@ -912,7 +978,10 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev, static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req) { - if (nvme_pci_metadata_use_sgls(dev, req)) + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && + nvme_pci_metadata_use_sgls(dev, req)) return nvme_pci_setup_meta_sgls(dev, req); return nvme_pci_setup_meta_mptr(dev, req); } @@ -922,8 +991,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; - iod->aborted = false; - iod->nr_allocations = -1; + iod->flags = 0; + iod->nr_descriptors = 0; iod->sgt.nents = 0; iod->meta_sgt.nents = 0; @@ -947,7 +1016,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) return BLK_STS_OK; out_unmap_data: if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, req); + nvme_unmap_data(dev, req->mq_hctx->driver_data, req); out_free_cmd: nvme_cleanup_cmd(req); return ret; @@ -1037,6 +1106,7 @@ static void nvme_queue_rqs(struct rq_list *rqlist) } static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, + struct nvme_queue *nvmeq, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -1048,8 +1118,8 @@ static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, return; } - dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list, - iod->meta_dma); + dma_pool_free(nvmeq->descriptor_pools.small, iod->meta_descriptor, + iod->meta_dma); dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0); mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool); } @@ -1060,10 +1130,10 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) struct nvme_dev *dev = nvmeq->dev; if (blk_integrity_rq(req)) - nvme_unmap_metadata(dev, req); + nvme_unmap_metadata(dev, nvmeq, req); if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, req); + nvme_unmap_data(dev, nvmeq, req); } static void nvme_pci_complete_rq(struct request *req) @@ -1202,7 +1272,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + spin_lock(&nvmeq->cq_poll_lock); nvme_poll_cq(nvmeq, NULL); + spin_unlock(&nvmeq->cq_poll_lock); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } @@ -1488,7 +1560,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * returned to the driver, or if this is the admin queue. 
*/ opcode = nvme_req(req)->cmd->common.opcode; - if (!nvmeq->qid || iod->aborted) { + if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) { dev_warn(dev->ctrl.device, "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", req->tag, nvme_cid(req), opcode, @@ -1501,7 +1573,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } - iod->aborted = true; + iod->flags |= IOD_ABORTED; cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.cid = nvme_cid(req); @@ -2840,35 +2912,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) return 0; } -static int nvme_setup_prp_pools(struct nvme_dev *dev) -{ - size_t small_align = 256; - - dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, - NVME_CTRL_PAGE_SIZE, - NVME_CTRL_PAGE_SIZE, 0); - if (!dev->prp_page_pool) - return -ENOMEM; - - if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) - small_align = 512; - - /* Optimisation for I/Os between 4k and 128k */ - dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, - 256, small_align, 0); - if (!dev->prp_small_pool) { - dma_pool_destroy(dev->prp_page_pool); - return -ENOMEM; - } - return 0; -} - -static void nvme_release_prp_pools(struct nvme_dev *dev) -{ - dma_pool_destroy(dev->prp_page_pool); - dma_pool_destroy(dev->prp_small_pool); -} - static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) { size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1); @@ -3183,7 +3226,8 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, struct nvme_dev *dev; int ret = -ENOMEM; - dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); + dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids), + GFP_KERNEL, node); if (!dev) return ERR_PTR(-ENOMEM); INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); @@ -3258,13 +3302,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto out_uninit_ctrl; - result = nvme_setup_prp_pools(dev); - if (result) - goto out_dev_unmap; - result = nvme_pci_alloc_iod_mempool(dev); if (result) - goto out_release_prp_pools; + goto out_dev_unmap; dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); @@ -3340,8 +3380,6 @@ out_disable: out_release_iod_mempool: mempool_destroy(dev->iod_mempool); mempool_destroy(dev->iod_meta_mempool); -out_release_prp_pools: - nvme_release_prp_pools(dev); out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: @@ -3406,7 +3444,7 @@ static void nvme_remove(struct pci_dev *pdev) nvme_free_queues(dev, 0); mempool_destroy(dev->iod_mempool); mempool_destroy(dev->iod_meta_mempool); - nvme_release_prp_pools(dev); + nvme_release_descriptor_pools(dev); nvme_dev_unmap(dev); nvme_uninit_ctrl(&dev->ctrl); } @@ -3575,7 +3613,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) dev_info(dev->ctrl.device, "restart after slot reset\n"); pci_restore_state(pdev); - if (!nvme_try_sched_reset(&dev->ctrl)) + if (nvme_try_sched_reset(&dev->ctrl)) nvme_unquiesce_io_queues(&dev->ctrl); return PCI_ERS_RESULT_RECOVERED; } @@ -3623,6 +3661,9 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, + { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ .driver_data = 
NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_BOGUS_NID, }, @@ -3646,6 +3687,9 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ .driver_data = NVME_QUIRK_BROKEN_MSI }, + { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ + .driver_data = NVME_QUIRK_BROKEN_MSI | + NVME_QUIRK_NO_DEEPEST_PS }, { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ @@ -3731,6 +3775,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ @@ -3799,9 +3845,7 @@ static int __init nvme_init(void) BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); - BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); - BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); - BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); + BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS); return pci_register_driver(&nvme_driver); } diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index 6d31226f7a4f..29430949ce2f 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -260,6 +260,7 @@ static struct attribute *nvme_ns_attrs[] = { &dev_attr_ana_state.attr, &dev_attr_queue_depth.attr, &dev_attr_numa_nodes.attr, + &dev_attr_delayed_removal_secs.attr, #endif &dev_attr_io_passthru_err_log_enabled.attr, NULL, @@ -296,6 +297,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, if (nvme_disk_is_ns_head(dev_to_disk(dev))) return 0; } + if (a == &dev_attr_delayed_removal_secs.attr) { + struct gendisk *disk = dev_to_disk(dev); + + if (!nvme_disk_is_ns_head(disk)) + return 0; + } #endif return a->mode; } @@ -306,13 +313,41 @@ static const struct attribute_group nvme_ns_attr_group = { }; #ifdef CONFIG_NVME_MULTIPATH +/* + * NOTE: The dummy attribute does not appear in sysfs. It exists solely to allow + * control over the visibility of the multipath sysfs node. Without at least one + * attribute defined in nvme_ns_mpath_attrs[], the sysfs implementation does not + * invoke the multipath_sysfs_group_visible() method. As a result, we would not + * be able to control the visibility of the multipath sysfs node. 
+ */ +static struct attribute dummy_attr = { + .name = "dummy", +}; + static struct attribute *nvme_ns_mpath_attrs[] = { + &dummy_attr, NULL, }; +static bool multipath_sysfs_group_visible(struct kobject *kobj) +{ + struct device *dev = container_of(kobj, struct device, kobj); + + return nvme_disk_is_ns_head(dev_to_disk(dev)); +} + +static bool multipath_sysfs_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + return false; +} + +DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs) + const struct attribute_group nvme_ns_mpath_attr_group = { .name = "multipath", .attrs = nvme_ns_mpath_attrs, + .is_visible = SYSFS_GROUP_VISIBLE(multipath_sysfs), }; #endif diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 26c459f0198d..853bc67d045c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -403,7 +403,7 @@ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) } static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, - bool sync, bool last) + bool last) { struct nvme_tcp_queue *queue = req->queue; bool empty; @@ -417,7 +417,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, * are on the same cpu, so we don't introduce contention. */ if (queue->io_cpu == raw_smp_processor_id() && - sync && empty && mutex_trylock(&queue->send_mutex)) { + empty && mutex_trylock(&queue->send_mutex)) { nvme_tcp_send_all(queue); mutex_unlock(&queue->send_mutex); } @@ -770,7 +770,9 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, req->ttag = pdu->ttag; nvme_tcp_setup_h2c_data_pdu(req); - nvme_tcp_queue_request(req, false, true); + + llist_add(&req->lentry, &queue->req_list); + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); return 0; } @@ -1803,6 +1805,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, ret = PTR_ERR(sock_file); goto err_destroy_mutex; } + + sk_net_refcnt_upgrade(queue->sock->sk); nvme_tcp_reclassify_socket(queue->sock); /* Single syn retry */ @@ -1944,7 +1948,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) cancel_work_sync(&queue->io_work); } -static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; @@ -1963,6 +1967,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) mutex_unlock(&queue->queue_lock); } +static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + int timeout = 100; + + while (timeout > 0) { + if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || + !sk_wmem_alloc_get(queue->sock->sk)) + return; + msleep(2); + timeout -= 2; + } + dev_warn(nctrl->device, + "qid %d: timeout draining sock wmem allocation expired\n", + qid); +} + +static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +{ + nvme_tcp_stop_queue_nowait(nctrl, qid); + nvme_tcp_wait_queue(nctrl, qid); +} + + static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) { write_lock_bh(&queue->sock->sk->sk_callback_lock); @@ -2030,7 +2059,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) int i; for (i = 1; i < ctrl->queue_count; i++) - nvme_tcp_stop_queue(ctrl, i); + nvme_tcp_stop_queue_nowait(ctrl, i); + for (i = 1; i < ctrl->queue_count; i++) + nvme_tcp_wait_queue(ctrl, i); } static int nvme_tcp_start_io_queues(struct 
nvme_ctrl *ctrl, @@ -2356,7 +2387,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) if (ret) return ret; - if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) { + if (ctrl->opts->concat && !ctrl->tls_pskid) { /* See comments for nvme_tcp_key_revoke_needed() */ dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n"); nvme_stop_keep_alive(ctrl); @@ -2608,7 +2639,7 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) ctrl->async_req.curr_bio = NULL; ctrl->async_req.data_len = 0; - nvme_tcp_queue_request(&ctrl->async_req, true, true); + nvme_tcp_queue_request(&ctrl->async_req, true); } static void nvme_tcp_complete_timed_out(struct request *rq) @@ -2760,7 +2791,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_start_request(rq); - nvme_tcp_queue_request(req, true, bd->last); + nvme_tcp_queue_request(req, bd->last); return BLK_STS_OK; } diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index fb7446d6d682..4c253b433bf7 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -98,6 +98,7 @@ config NVME_TARGET_TCP_TLS bool "NVMe over Fabrics TCP target TLS encryption support" depends on NVME_TARGET_TCP select NET_HANDSHAKE + select TLS help Enables TLS encryption for the NVMe TCP target using the netlink handshake API. diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index acc138bbf8f2..c7317299078d 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -63,14 +63,9 @@ static void nvmet_execute_create_sq(struct nvmet_req *req) if (status != NVME_SC_SUCCESS) goto complete; - /* - * Note: The NVMe specification allows multiple SQs to use the same CQ. - * However, the target code does not really support that. So for now, - * prevent this and fail the command if sqid and cqid are different. 
- */ - if (!cqid || cqid != sqid) { - pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid); - status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR; + status = nvmet_check_io_cqid(ctrl, cqid, false); + if (status != NVME_SC_SUCCESS) { + pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid); goto complete; } @@ -79,7 +74,7 @@ static void nvmet_execute_create_sq(struct nvmet_req *req) goto complete; } - status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1); + status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1); complete: nvmet_req_complete(req, status); @@ -96,14 +91,15 @@ static void nvmet_execute_delete_cq(struct nvmet_req *req) goto complete; } - if (!cqid) { - status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + status = nvmet_check_io_cqid(ctrl, cqid, false); + if (status != NVME_SC_SUCCESS) goto complete; - } - status = nvmet_check_cqid(ctrl, cqid); - if (status != NVME_SC_SUCCESS) + if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) { + /* Some SQs are still using this CQ */ + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; goto complete; + } status = ctrl->ops->delete_cq(ctrl, cqid); @@ -127,12 +123,7 @@ static void nvmet_execute_create_cq(struct nvmet_req *req) goto complete; } - if (!cqid) { - status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; - goto complete; - } - - status = nvmet_check_cqid(ctrl, cqid); + status = nvmet_check_io_cqid(ctrl, cqid, true); if (status != NVME_SC_SUCCESS) goto complete; diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 0b0645ac5df4..b340380f3892 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -240,7 +240,7 @@ void nvmet_auth_sq_free(struct nvmet_sq *sq) { cancel_delayed_work(&sq->auth_expired_work); #ifdef CONFIG_NVME_TARGET_TCP_TLS - sq->tls_key = 0; + sq->tls_key = NULL; #endif kfree(sq->dhchap_c1); sq->dhchap_c1 = NULL; @@ -280,9 +280,12 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) bool nvmet_check_auth_status(struct nvmet_req *req) { - if (req->sq->ctrl->host_key && - !req->sq->authenticated) - return false; + if (req->sq->ctrl->host_key) { + if (req->sq->qid > 0) + return true; + if (!req->sq->authenticated) + return false; + } return true; } @@ -290,7 +293,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, unsigned int shash_len) { struct crypto_shash *shash_tfm; - struct shash_desc *shash; + SHASH_DESC_ON_STACK(shash, shash_tfm); struct nvmet_ctrl *ctrl = req->sq->ctrl; const char *hash_name; u8 *challenge = req->sq->dhchap_c1; @@ -342,19 +345,13 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c1, challenge, shash_len); if (ret) - goto out_free_challenge; + goto out; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, req->sq->dhchap_tid); - shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto out_free_challenge; - } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); if (ret) @@ -389,8 +386,6 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: - kfree(shash); -out_free_challenge: if (challenge != req->sq->dhchap_c1) kfree(challenge); out_free_response: @@ -600,13 +595,12 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq) pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key)); tls_key = NULL; - kfree_sensitive(tls_psk); } if (sq->ctrl->tls_key) 
key_put(sq->ctrl->tls_key); sq->ctrl->tls_key = tls_key; #endif - + kfree_sensitive(tls_psk); out_free_digest: kfree_sensitive(digest); out_free_psk: diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 71f8d06998d6..db7b17d1094e 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -324,6 +324,9 @@ int nvmet_enable_port(struct nvmet_port *port) lockdep_assert_held(&nvmet_config_sem); + if (port->disc_addr.trtype == NVMF_TRTYPE_MAX) + return -EINVAL; + ops = nvmet_transports[port->disc_addr.trtype]; if (!ops) { up_write(&nvmet_config_sem); @@ -810,11 +813,43 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status) } EXPORT_SYMBOL_GPL(nvmet_req_complete); +void nvmet_cq_init(struct nvmet_cq *cq) +{ + refcount_set(&cq->ref, 1); +} +EXPORT_SYMBOL_GPL(nvmet_cq_init); + +bool nvmet_cq_get(struct nvmet_cq *cq) +{ + return refcount_inc_not_zero(&cq->ref); +} +EXPORT_SYMBOL_GPL(nvmet_cq_get); + +void nvmet_cq_put(struct nvmet_cq *cq) +{ + if (refcount_dec_and_test(&cq->ref)) + nvmet_cq_destroy(cq); +} +EXPORT_SYMBOL_GPL(nvmet_cq_put); + void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { cq->qid = qid; cq->size = size; + + ctrl->cqs[qid] = cq; +} + +void nvmet_cq_destroy(struct nvmet_cq *cq) +{ + struct nvmet_ctrl *ctrl = cq->ctrl; + + if (ctrl) { + ctrl->cqs[cq->qid] = NULL; + nvmet_ctrl_put(cq->ctrl); + cq->ctrl = NULL; + } } void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, @@ -834,37 +869,47 @@ static void nvmet_confirm_sq(struct percpu_ref *ref) complete(&sq->confirm_done); } -u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid) +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) { - if (!ctrl->sqs) + if (!ctrl->cqs) return NVME_SC_INTERNAL | NVME_STATUS_DNR; if (cqid > ctrl->subsys->max_qid) return NVME_SC_QID_INVALID | NVME_STATUS_DNR; - /* - * Note: For PCI controllers, the NVMe specifications allows multiple - * SQs to share a single CQ. However, we do not support this yet, so - * check that there is no SQ defined for a CQ. If one exist, then the - * CQ ID is invalid for creation as well as when the CQ is being - * deleted (as that would mean that the SQ was not deleted before the - * CQ). 
- */ - if (ctrl->sqs[cqid]) + if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid])) return NVME_SC_QID_INVALID | NVME_STATUS_DNR; return NVME_SC_SUCCESS; } +u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) +{ + if (!cqid) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + return nvmet_check_cqid(ctrl, cqid, create); +} + +bool nvmet_cq_in_use(struct nvmet_cq *cq) +{ + return refcount_read(&cq->ref) > 1; +} +EXPORT_SYMBOL_GPL(nvmet_cq_in_use); + u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { u16 status; - status = nvmet_check_cqid(ctrl, qid); + status = nvmet_check_cqid(ctrl, qid, true); if (status != NVME_SC_SUCCESS) return status; + if (!kref_get_unless_zero(&ctrl->ref)) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + cq->ctrl = ctrl; + + nvmet_cq_init(cq); nvmet_cq_setup(ctrl, cq, qid, size); return NVME_SC_SUCCESS; @@ -888,7 +933,7 @@ u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, } u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, - u16 sqid, u16 size) + struct nvmet_cq *cq, u16 sqid, u16 size) { u16 status; int ret; @@ -900,7 +945,7 @@ u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, if (status != NVME_SC_SUCCESS) return status; - ret = nvmet_sq_init(sq); + ret = nvmet_sq_init(sq, cq); if (ret) { status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto ctrl_put; @@ -932,6 +977,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); nvmet_auth_sq_free(sq); + nvmet_cq_put(sq->cq); /* * we must reference the ctrl again after waiting for inflight IO @@ -964,18 +1010,23 @@ static void nvmet_sq_free(struct percpu_ref *ref) complete(&sq->free_done); } -int nvmet_sq_init(struct nvmet_sq *sq) +int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq) { int ret; + if (!nvmet_cq_get(cq)) + return -EINVAL; + ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); if (ret) { pr_err("percpu_ref init failed!\n"); + nvmet_cq_put(cq); return ret; } init_completion(&sq->free_done); init_completion(&sq->confirm_done); nvmet_auth_sq_init(sq); + sq->cq = cq; return 0; } @@ -1105,13 +1156,13 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return ret; } -bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) +bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq, + const struct nvmet_fabrics_ops *ops) { u8 flags = req->cmd->common.flags; u16 status; - req->cq = cq; + req->cq = sq->cq; req->sq = sq; req->ops = ops; req->sg = NULL; @@ -1609,12 +1660,17 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args) if (!ctrl->sqs) goto out_free_changed_ns_list; + ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *), + GFP_KERNEL); + if (!ctrl->cqs) + goto out_free_sqs; + ret = ida_alloc_range(&cntlid_ida, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; - goto out_free_sqs; + goto out_free_cqs; } ctrl->cntlid = ret; @@ -1673,6 +1729,8 @@ init_pr_fail: mutex_unlock(&subsys->lock); nvmet_stop_keep_alive_timer(ctrl); ida_free(&cntlid_ida, ctrl->cntlid); +out_free_cqs: + kfree(ctrl->cqs); out_free_sqs: kfree(ctrl->sqs); out_free_changed_ns_list: @@ -1709,6 +1767,7 @@ static void nvmet_ctrl_free(struct kref *ref) nvmet_async_events_free(ctrl); kfree(ctrl->sqs); + kfree(ctrl->cqs); kfree(ctrl->changed_ns_list); kfree(ctrl); diff --git 
a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index df7207640506..c06f3e04296c 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -119,7 +119,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE); memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); - strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); + strscpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); } /* diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index f012bdf89850..7b8d8b397802 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -208,6 +208,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; } + kref_get(&ctrl->ref); + old = cmpxchg(&req->cq->ctrl, NULL, ctrl); + if (old) { + pr_warn("queue already connected!\n"); + req->error_loc = offsetof(struct nvmf_connect_command, opcode); + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; + } + /* note: convert queue size from 0's-based value to 1's-based value */ nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); @@ -239,8 +247,8 @@ static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq) bool needs_auth = nvmet_has_auth(ctrl, sq); key_serial_t keyid = nvmet_queue_tls_keyid(sq); - /* Do not authenticate I/O queues for secure concatenation */ - if (ctrl->concat && sq->qid) + /* Do not authenticate I/O queues */ + if (sq->qid) needs_auth = false; if (keyid) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 7318b736d414..254537b93e63 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -816,7 +816,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_fail_iodlist; @@ -826,6 +827,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, return queue; out_fail_iodlist: + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); out_free_queue: @@ -934,6 +936,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) flush_workqueue(queue->work_q); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_tgt_q_put(queue); } @@ -995,16 +998,6 @@ nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) return kref_get_unless_zero(&hostport->ref); } -static void -nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) -{ - /* if LLDD not implemented, leave as NULL */ - if (!hostport || !hostport->hosthandle) - return; - - nvmet_fc_hostport_put(hostport); -} - static struct nvmet_fc_hostport * nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { @@ -1028,33 +1021,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) struct nvmet_fc_hostport *newhost, *match = NULL; unsigned long flags; + /* + * Caller holds a reference on tgtport. 
+ */ + /* if LLDD not implemented, leave as NULL */ if (!hosthandle) return NULL; - /* - * take reference for what will be the newly allocated hostport if - * we end up using a new allocation - */ - if (!nvmet_fc_tgtport_get(tgtport)) - return ERR_PTR(-EINVAL); - spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); spin_unlock_irqrestore(&tgtport->lock, flags); - if (match) { - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); + if (match) return match; - } newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); - if (!newhost) { - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); + if (!newhost) return ERR_PTR(-ENOMEM); - } spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); @@ -1063,6 +1047,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) kfree(newhost); newhost = match; } else { + nvmet_fc_tgtport_get(tgtport); newhost->tgtport = tgtport; newhost->hosthandle = hosthandle; INIT_LIST_HEAD(&newhost->host_list); @@ -1076,20 +1061,14 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) } static void -nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) -{ - nvmet_fc_delete_target_assoc(assoc); - nvmet_fc_tgt_a_put(assoc); -} - -static void nvmet_fc_delete_assoc_work(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = container_of(work, struct nvmet_fc_tgt_assoc, del_work); struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - nvmet_fc_delete_assoc(assoc); + nvmet_fc_delete_target_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); nvmet_fc_tgtport_put(tgtport); } @@ -1097,7 +1076,8 @@ static void nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) { nvmet_fc_tgtport_get(assoc->tgtport); - queue_work(nvmet_wq, &assoc->del_work); + if (!queue_work(nvmet_wq, &assoc->del_work)) + nvmet_fc_tgtport_put(assoc->tgtport); } static bool @@ -1143,6 +1123,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) goto out_ida; assoc->tgtport = tgtport; + nvmet_fc_tgtport_get(tgtport); assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); @@ -1190,7 +1171,7 @@ nvmet_fc_target_assoc_free(struct kref *ref) /* Send Disconnect now that all i/o has completed */ nvmet_fc_xmt_disconnect_assoc(assoc); - nvmet_fc_free_hostport(assoc->hostport); + nvmet_fc_hostport_put(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); @@ -1244,6 +1225,8 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) dev_info(tgtport->dev, "{%d:%d} Association deleted\n", tgtport->fc_target_port.port_num, assoc->a_id); + + nvmet_fc_tgtport_put(tgtport); } static struct nvmet_fc_tgt_assoc * @@ -1274,6 +1257,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, { lockdep_assert_held(&nvmet_fc_tgtlock); + nvmet_fc_tgtport_get(tgtport); pe->tgtport = tgtport; tgtport->pe = pe; @@ -1293,8 +1277,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - if (pe->tgtport) + if (pe->tgtport) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport->pe = NULL; + } list_del(&pe->pe_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -1312,8 +1298,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) spin_lock_irqsave(&nvmet_fc_tgtlock, flags); pe = tgtport->pe; - if (pe) + if (pe) { + 
nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport = NULL; + } tgtport->pe = NULL; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -1336,6 +1324,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { if (tgtport->fc_target_port.node_name == pe->node_name && tgtport->fc_target_port.port_name == pe->port_name) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + WARN_ON(pe->tgtport); tgtport->pe = pe; pe->tgtport = tgtport; @@ -1455,11 +1446,6 @@ nvmet_fc_free_tgtport(struct kref *ref) struct nvmet_fc_tgtport *tgtport = container_of(ref, struct nvmet_fc_tgtport, ref); struct device *dev = tgtport->dev; - unsigned long flags; - - spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - list_del(&tgtport->tgt_list); - spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_free_ls_iodlist(tgtport); @@ -1605,6 +1591,39 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } +static void +nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct nvmet_fc_ls_iod *iod; + int i; + + iod = tgtport->iod; + for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) + cancel_work(&iod->work); + + /* + * After this point the connection is lost and thus any pending + * request can't be processed by the normal completion path. This + * is likely a request from nvmet_fc_send_ls_req_async. + */ + while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, + struct nvmet_fc_ls_req_op, lsreq_list))) { + list_del(&lsop->lsreq_list); + + if (!lsop->req_queued) + continue; + + lsreq = &lsop->ls_req; + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + nvmet_fc_tgtport_put(tgtport); + kfree(lsop); + } +} + /** * nvmet_fc_unregister_targetport - transport entry point called by an * LLDD to deregister/remove a previously @@ -1620,6 +1639,11 @@ int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_del(&tgtport->tgt_list); + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind_tgt(tgtport); @@ -1628,13 +1652,7 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) flush_workqueue(nvmet_wq); - /* - * should terminate LS's as well. However, LS's will be generated - * at the tail end of association termination, so they likely don't - * exist yet. And even if they did, it's worthwhile to just let - * them finish and targetport ref counting will clean things up. - */ - + nvmet_fc_free_pending_reqs(tgtport); nvmet_fc_tgtport_put(tgtport); return 0; @@ -2551,10 +2569,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->data_sg = NULL; fod->data_sg_cnt = 0; - ret = nvmet_req_init(&fod->req, - &fod->queue->nvme_cq, - &fod->queue->nvme_sq, - &nvmet_fc_tgt_fcp_ops); + ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq, + &nvmet_fc_tgt_fcp_ops); if (!ret) { /* bad SQE content or invalid ctrl state */ /* nvmet layer has already called op done to send rsp. 
*/ @@ -2880,12 +2896,17 @@ nvmet_fc_add_port(struct nvmet_port *port) list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { if ((tgtport->fc_target_port.node_name == traddr.nn) && (tgtport->fc_target_port.port_name == traddr.pn)) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + /* a FC port can only be 1 nvmet port id */ if (!tgtport->pe) { nvmet_fc_portentry_bind(tgtport, pe, port); ret = 0; } else ret = -EALREADY; + + nvmet_fc_tgtport_put(tgtport); break; } } @@ -2901,11 +2922,21 @@ static void nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind(pe); - /* terminate any outstanding associations */ - __nvmet_fc_free_assocs(pe->tgtport); + if (tgtport) { + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(tgtport); + nvmet_fc_tgtport_put(tgtport); + } kfree(pe); } @@ -2914,10 +2945,21 @@ static void nvmet_fc_discovery_chg(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; - struct nvmet_fc_tgtport *tgtport = pe->tgtport; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (!tgtport) + return; if (tgtport && tgtport->ops->discovery_event) tgtport->ops->discovery_event(&tgtport->fc_target_port); + + nvmet_fc_tgtport_put(tgtport); } static ssize_t diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index e1abb27927ff..257b497d515a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -207,13 +207,16 @@ static LIST_HEAD(fcloop_nports); struct fcloop_lport { struct nvme_fc_local_port *localport; struct list_head lport_list; - struct completion unreg_done; + refcount_t ref; }; struct fcloop_lport_priv { struct fcloop_lport *lport; }; +/* The port is already being removed, avoid double free */ +#define PORT_DELETED 0 + struct fcloop_rport { struct nvme_fc_remote_port *remoteport; struct nvmet_fc_target_port *targetport; @@ -222,6 +225,7 @@ struct fcloop_rport { spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; + unsigned long flags; }; struct fcloop_tport { @@ -232,6 +236,7 @@ struct fcloop_tport { spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; + unsigned long flags; }; struct fcloop_nport { @@ -239,7 +244,7 @@ struct fcloop_nport { struct fcloop_tport *tport; struct fcloop_lport *lport; struct list_head nport_list; - struct kref ref; + refcount_t ref; u64 node_name; u64 port_name; u32 port_role; @@ -274,7 +279,7 @@ struct fcloop_fcpreq { u32 inistate; bool active; bool aborted; - struct kref ref; + refcount_t ref; struct work_struct fcp_rcv_work; struct work_struct abort_rcv_work; struct work_struct tio_done_work; @@ -287,6 +292,9 @@ struct fcloop_ini_fcpreq { spinlock_t inilock; }; +/* SLAB cache for fcloop_lsreq structures */ +static struct kmem_cache *lsreq_cache; + static inline struct fcloop_lsreq * ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp) { @@ -337,6 +345,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work) * callee may free memory containing tls_req. * do not reference lsreq after this. 
*/ + kmem_cache_free(lsreq_cache, tls_req); spin_lock(&rport->lock); } @@ -348,10 +357,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_req *lsreq) { - struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_rport *rport = remoteport->private; + struct fcloop_lsreq *tls_req; int ret = 0; + tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL); + if (!tls_req) + return -ENOMEM; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); @@ -388,14 +400,17 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, lsrsp->done(lsrsp); - if (remoteport) { - rport = remoteport->private; - spin_lock(&rport->lock); - list_add_tail(&tls_req->ls_list, &rport->ls_list); - spin_unlock(&rport->lock); - queue_work(nvmet_wq, &rport->ls_work); + if (!remoteport) { + kmem_cache_free(lsreq_cache, tls_req); + return 0; } + rport = remoteport->private; + spin_lock(&rport->lock); + list_add_tail(&tls_req->ls_list, &rport->ls_list); + spin_unlock(&rport->lock); + queue_work(nvmet_wq, &rport->ls_work); + return 0; } @@ -421,6 +436,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work) * callee may free memory containing tls_req. * do not reference lsreq after this. */ + kmem_cache_free(lsreq_cache, tls_req); spin_lock(&tport->lock); } @@ -431,8 +447,8 @@ static int fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq) { - struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_tport *tport = targetport->private; + struct fcloop_lsreq *tls_req; int ret = 0; /* @@ -440,6 +456,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, * hosthandle ignored as fcloop currently is * 1:1 tgtport vs remoteport */ + + tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL); + if (!tls_req) + return -ENOMEM; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); @@ -456,6 +476,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp, lsreq->rqstaddr, lsreq->rqstlen); + if (ret) + kmem_cache_free(lsreq_cache, tls_req); + return ret; } @@ -470,18 +493,30 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, struct nvmet_fc_target_port *targetport = rport->targetport; struct fcloop_tport *tport; + if (!targetport) { + /* + * The target port is gone. The target doesn't expect any + * response anymore and the ->done call is not valid + * because the resources have been freed by + * nvmet_fc_free_pending_reqs. + * + * We end up here from delete association exchange: + * nvmet_fc_xmt_disconnect_assoc sends an async request. + */ + kmem_cache_free(lsreq_cache, tls_req); + return 0; + } + memcpy(lsreq->rspaddr, lsrsp->rspbuf, ((lsreq->rsplen < lsrsp->rsplen) ? 
lsreq->rsplen : lsrsp->rsplen)); lsrsp->done(lsrsp); - if (targetport) { - tport = targetport->private; - spin_lock(&tport->lock); - list_add_tail(&tport->ls_list, &tls_req->ls_list); - spin_unlock(&tport->lock); - queue_work(nvmet_wq, &tport->ls_work); - } + tport = targetport->private; + spin_lock(&tport->lock); + list_add_tail(&tls_req->ls_list, &tport->ls_list); + spin_unlock(&tport->lock); + queue_work(nvmet_wq, &tport->ls_work); return 0; } @@ -534,24 +569,18 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport) } static void -fcloop_tfcp_req_free(struct kref *ref) +fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) { - struct fcloop_fcpreq *tfcp_req = - container_of(ref, struct fcloop_fcpreq, ref); + if (!refcount_dec_and_test(&tfcp_req->ref)) + return; kfree(tfcp_req); } -static void -fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) -{ - kref_put(&tfcp_req->ref, fcloop_tfcp_req_free); -} - static int fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req) { - return kref_get_unless_zero(&tfcp_req->ref); + return refcount_inc_not_zero(&tfcp_req->ref); } static void @@ -571,7 +600,8 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq, } /* release original io reference on tgt struct */ - fcloop_tfcp_req_put(tfcp_req); + if (tfcp_req) + fcloop_tfcp_req_put(tfcp_req); } static bool drop_fabric_opcode; @@ -623,12 +653,13 @@ fcloop_fcp_recv_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, fcp_rcv_work); - struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + struct nvmefc_fcp_req *fcpreq; unsigned long flags; int ret = 0; bool aborted = false; spin_lock_irqsave(&tfcp_req->reqlock, flags); + fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_START: tfcp_req->inistate = INI_IO_ACTIVE; @@ -643,16 +674,19 @@ fcloop_fcp_recv_work(struct work_struct *work) } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - if (unlikely(aborted)) - ret = -ECANCELED; - else { - if (likely(!check_for_drop(tfcp_req))) - ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, - &tfcp_req->tgt_fcp_req, - fcpreq->cmdaddr, fcpreq->cmdlen); - else - pr_info("%s: dropped command ********\n", __func__); + if (unlikely(aborted)) { + /* the abort handler will call fcloop_call_host_done */ + return; + } + + if (unlikely(check_for_drop(tfcp_req))) { + pr_info("%s: dropped command ********\n", __func__); + return; } + + ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, + &tfcp_req->tgt_fcp_req, + fcpreq->cmdaddr, fcpreq->cmdlen); if (ret) fcloop_call_host_done(fcpreq, tfcp_req, ret); } @@ -667,15 +701,17 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); - fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_ABORTED: + fcpreq = tfcp_req->fcpreq; + tfcp_req->fcpreq = NULL; break; case INI_IO_COMPLETED: completed = true; break; default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); + fcloop_tfcp_req_put(tfcp_req); WARN_ON(1); return; } @@ -691,10 +727,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req); - spin_lock_irqsave(&tfcp_req->reqlock, flags); - tfcp_req->fcpreq = NULL; - spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); /* call_host_done releases reference for abort downcall */ } @@ -748,7 +780,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport, INIT_WORK(&tfcp_req->fcp_rcv_work, 
fcloop_fcp_recv_work); INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work); INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); - kref_init(&tfcp_req->ref); + refcount_set(&tfcp_req->ref, 1); queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); @@ -963,13 +995,16 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, spin_lock(&inireq->inilock); tfcp_req = inireq->tfcp_req; - if (tfcp_req) - fcloop_tfcp_req_get(tfcp_req); + if (tfcp_req) { + if (!fcloop_tfcp_req_get(tfcp_req)) + tfcp_req = NULL; + } spin_unlock(&inireq->inilock); - if (!tfcp_req) + if (!tfcp_req) { /* abort has already been called */ - return; + goto out_host_done; + } /* break initiator/target relationship for io */ spin_lock_irqsave(&tfcp_req->reqlock, flags); @@ -984,7 +1019,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); - return; + goto out_host_done; } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); @@ -998,27 +1033,56 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, */ fcloop_tfcp_req_put(tfcp_req); } + + return; + +out_host_done: + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); } static void -fcloop_nport_free(struct kref *ref) +fcloop_lport_put(struct fcloop_lport *lport) { - struct fcloop_nport *nport = - container_of(ref, struct fcloop_nport, ref); + unsigned long flags; - kfree(nport); + if (!refcount_dec_and_test(&lport->ref)) + return; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&lport->lport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(lport); +} + +static int +fcloop_lport_get(struct fcloop_lport *lport) +{ + return refcount_inc_not_zero(&lport->ref); } static void fcloop_nport_put(struct fcloop_nport *nport) { - kref_put(&nport->ref, fcloop_nport_free); + unsigned long flags; + + if (!refcount_dec_and_test(&nport->ref)) + return; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (nport->lport) + fcloop_lport_put(nport->lport); + + kfree(nport); } static int fcloop_nport_get(struct fcloop_nport *nport) { - return kref_get_unless_zero(&nport->ref); + return refcount_inc_not_zero(&nport->ref); } static void @@ -1027,26 +1091,45 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport) struct fcloop_lport_priv *lport_priv = localport->private; struct fcloop_lport *lport = lport_priv->lport; - /* release any threads waiting for the unreg to complete */ - complete(&lport->unreg_done); + fcloop_lport_put(lport); } static void fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; + bool put_port = false; + unsigned long flags; flush_work(&rport->ls_work); - fcloop_nport_put(rport->nport); + + spin_lock_irqsave(&fcloop_lock, flags); + if (!test_and_set_bit(PORT_DELETED, &rport->flags)) + put_port = true; + rport->nport->rport = NULL; + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (put_port) + fcloop_nport_put(rport->nport); } static void fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; + bool put_port = false; + unsigned long flags; flush_work(&tport->ls_work); - fcloop_nport_put(tport->nport); + + spin_lock_irqsave(&fcloop_lock, flags); + if (!test_and_set_bit(PORT_DELETED, &tport->flags)) + put_port = true; + tport->nport->tport = NULL; + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (put_port) + fcloop_nport_put(tport->nport); } 
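The fcloop rework above replaces kref with refcount_t for the lport, nport and tfcp_req objects and folds list unlinking into the final put (fcloop_lport_put, fcloop_nport_put), so every lookup must take a reference under fcloop_lock before returning a pointer. Below is a minimal, kernel-style sketch of that lookup/get/put idiom, not code from the patch itself: the demo_port structure and the demo_list/demo_lock names are hypothetical, while refcount_t, the list helpers and the spinlock calls are the real kernel primitives the patch relies on.

/*
 * Illustrative sketch only (hypothetical demo_* names): release-on-last-put
 * with lookup-takes-a-reference, mirroring the fcloop conversion above.
 */
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_port {
	struct list_head	entry;		/* linked on demo_list */
	refcount_t		ref;		/* set to 1 when the port is created */
	u64			node_name;
	u64			port_name;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Take a reference only if the object is still live (ref > 0). */
static bool demo_port_get(struct demo_port *p)
{
	return refcount_inc_not_zero(&p->ref);
}

/* The last put unlinks and frees; there is no separate "unlink" step. */
static void demo_port_put(struct demo_port *p)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&p->ref))
		return;

	spin_lock_irqsave(&demo_lock, flags);
	list_del(&p->entry);
	spin_unlock_irqrestore(&demo_lock, flags);

	kfree(p);
}

/* Lookup returns a referenced object or NULL, never a bare pointer. */
static struct demo_port *demo_port_lookup(u64 node_name, u64 port_name)
{
	struct demo_port *p, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_for_each_entry(p, &demo_list, entry) {
		if (p->node_name != node_name || p->port_name != port_name)
			continue;
		if (demo_port_get(p))
			found = p;
		break;
	}
	spin_unlock_irqrestore(&demo_lock, flags);

	return found;
}

Callers then pair each successful lookup with a put once they are done with the object, which is the shape fcloop_delete_local_port and fcloop_delete_target_port take after the patch.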
#define FCLOOP_HW_QUEUES 4 @@ -1070,7 +1153,6 @@ static struct nvme_fc_port_template fctemplate = { /* sizes of additional private data for data structures */ .local_priv_sz = sizeof(struct fcloop_lport_priv), .remote_priv_sz = sizeof(struct fcloop_rport), - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), }; @@ -1093,7 +1175,6 @@ static struct nvmet_fc_target_template tgttemplate = { .target_features = 0, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct fcloop_tport), - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), }; static ssize_t @@ -1140,6 +1221,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, lport->localport = localport; INIT_LIST_HEAD(&lport->lport_list); + refcount_set(&lport->ref, 1); spin_lock_irqsave(&fcloop_lock, flags); list_add_tail(&lport->lport_list, &fcloop_lports); @@ -1156,60 +1238,94 @@ out_free_lport: return ret ? ret : count; } +static int +__localport_unreg(struct fcloop_lport *lport) +{ + return nvme_fc_unregister_localport(lport->localport); +} -static void -__unlink_local_port(struct fcloop_lport *lport) +static struct fcloop_nport * +__fcloop_nport_lookup(u64 node_name, u64 port_name) { - list_del(&lport->lport_list); + struct fcloop_nport *nport; + + list_for_each_entry(nport, &fcloop_nports, nport_list) { + if (nport->node_name != node_name || + nport->port_name != port_name) + continue; + + if (fcloop_nport_get(nport)) + return nport; + + break; + } + + return NULL; } -static int -__wait_localport_unreg(struct fcloop_lport *lport) +static struct fcloop_nport * +fcloop_nport_lookup(u64 node_name, u64 port_name) { - int ret; + struct fcloop_nport *nport; + unsigned long flags; - init_completion(&lport->unreg_done); + spin_lock_irqsave(&fcloop_lock, flags); + nport = __fcloop_nport_lookup(node_name, port_name); + spin_unlock_irqrestore(&fcloop_lock, flags); - ret = nvme_fc_unregister_localport(lport->localport); + return nport; +} - if (!ret) - wait_for_completion(&lport->unreg_done); +static struct fcloop_lport * +__fcloop_lport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_lport *lport; - kfree(lport); + list_for_each_entry(lport, &fcloop_lports, lport_list) { + if (lport->localport->node_name != node_name || + lport->localport->port_name != port_name) + continue; - return ret; + if (fcloop_lport_get(lport)) + return lport; + + break; + } + + return NULL; } +static struct fcloop_lport * +fcloop_lport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_lport *lport; + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + lport = __fcloop_lport_lookup(node_name, port_name); + spin_unlock_irqrestore(&fcloop_lock, flags); + + return lport; +} static ssize_t fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_lport *tlport, *lport = NULL; + struct fcloop_lport *lport; u64 nodename, portname; - unsigned long flags; int ret; ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tlport, &fcloop_lports, lport_list) { - if (tlport->localport->node_name == nodename && - tlport->localport->port_name == portname) { - lport = tlport; - __unlink_local_port(lport); - break; - } - } - spin_unlock_irqrestore(&fcloop_lock, flags); - + lport = fcloop_lport_lookup(nodename, portname); if (!lport) return -ENOENT; - ret = __wait_localport_unreg(lport); + ret = 
__localport_unreg(lport); + fcloop_lport_put(lport); return ret ? ret : count; } @@ -1217,8 +1333,8 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { - struct fcloop_nport *newnport, *nport = NULL; - struct fcloop_lport *tmplport, *lport = NULL; + struct fcloop_nport *newnport, *nport; + struct fcloop_lport *lport; struct fcloop_ctrl_options *opts; unsigned long flags; u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS; @@ -1233,10 +1349,8 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) goto out_free_opts; /* everything there ? */ - if ((opts->mask & opts_mask) != opts_mask) { - ret = -EINVAL; + if ((opts->mask & opts_mask) != opts_mask) goto out_free_opts; - } newnport = kzalloc(sizeof(*newnport), GFP_KERNEL); if (!newnport) @@ -1249,63 +1363,64 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) newnport->port_role = opts->roles; if (opts->mask & NVMF_OPT_FCADDR) newnport->port_id = opts->fcaddr; - kref_init(&newnport->ref); + refcount_set(&newnport->ref, 1); spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmplport, &fcloop_lports, lport_list) { - if (tmplport->localport->node_name == opts->wwnn && - tmplport->localport->port_name == opts->wwpn) - goto out_invalid_opts; - - if (tmplport->localport->node_name == opts->lpwwnn && - tmplport->localport->port_name == opts->lpwwpn) - lport = tmplport; + lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn); + if (lport) { + /* invalid configuration */ + fcloop_lport_put(lport); + goto out_free_newnport; } if (remoteport) { - if (!lport) - goto out_invalid_opts; - newnport->lport = lport; - } - - list_for_each_entry(nport, &fcloop_nports, nport_list) { - if (nport->node_name == opts->wwnn && - nport->port_name == opts->wwpn) { - if ((remoteport && nport->rport) || - (!remoteport && nport->tport)) { - nport = NULL; - goto out_invalid_opts; - } - - fcloop_nport_get(nport); - - spin_unlock_irqrestore(&fcloop_lock, flags); - - if (remoteport) - nport->lport = lport; - if (opts->mask & NVMF_OPT_ROLES) - nport->port_role = opts->roles; - if (opts->mask & NVMF_OPT_FCADDR) - nport->port_id = opts->fcaddr; + lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn); + if (!lport) { + /* invalid configuration */ goto out_free_newnport; } } - list_add_tail(&newnport->nport_list, &fcloop_nports); + nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn); + if (nport) { + if ((remoteport && nport->rport) || + (!remoteport && nport->tport)) { + /* invalid configuration */ + goto out_put_nport; + } + + /* found existing nport, discard the new nport */ + kfree(newnport); + } else { + list_add_tail(&newnport->nport_list, &fcloop_nports); + nport = newnport; + } + if (opts->mask & NVMF_OPT_ROLES) + nport->port_role = opts->roles; + if (opts->mask & NVMF_OPT_FCADDR) + nport->port_id = opts->fcaddr; + if (lport) { + if (!nport->lport) + nport->lport = lport; + else + fcloop_lport_put(lport); + } spin_unlock_irqrestore(&fcloop_lock, flags); kfree(opts); - return newnport; + return nport; -out_invalid_opts: - spin_unlock_irqrestore(&fcloop_lock, flags); +out_put_nport: + if (lport) + fcloop_lport_put(lport); + fcloop_nport_put(nport); out_free_newnport: + spin_unlock_irqrestore(&fcloop_lock, flags); kfree(newnport); out_free_opts: kfree(opts); - return nport; + return NULL; } static ssize_t @@ -1346,6 +1461,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute 
*attr, rport->nport = nport; rport->lport = nport->lport; nport->rport = rport; + rport->flags = 0; spin_lock_init(&rport->lock); INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work); INIT_LIST_HEAD(&rport->ls_list); @@ -1359,21 +1475,18 @@ __unlink_remote_port(struct fcloop_nport *nport) { struct fcloop_rport *rport = nport->rport; + lockdep_assert_held(&fcloop_lock); + if (rport && nport->tport) nport->tport->remoteport = NULL; nport->rport = NULL; - list_del(&nport->nport_list); - return rport; } static int __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { - if (!rport) - return -EALREADY; - return nvme_fc_unregister_remoteport(rport->remoteport); } @@ -1381,8 +1494,8 @@ static ssize_t fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_nport *nport = NULL, *tmpport; - static struct fcloop_rport *rport; + struct fcloop_nport *nport; + struct fcloop_rport *rport; u64 nodename, portname; unsigned long flags; int ret; @@ -1391,24 +1504,24 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmpport, &fcloop_nports, nport_list) { - if (tmpport->node_name == nodename && - tmpport->port_name == portname && tmpport->rport) { - nport = tmpport; - rport = __unlink_remote_port(nport); - break; - } - } + nport = fcloop_nport_lookup(nodename, portname); + if (!nport) + return -ENOENT; + spin_lock_irqsave(&fcloop_lock, flags); + rport = __unlink_remote_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); - if (!nport) - return -ENOENT; + if (!rport) { + ret = -ENOENT; + goto out_nport_put; + } ret = __remoteport_unreg(nport, rport); +out_nport_put: + fcloop_nport_put(nport); + return ret ? 
ret : count; } @@ -1446,6 +1559,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr, tport->nport = nport; tport->lport = nport->lport; nport->tport = tport; + tport->flags = 0; spin_lock_init(&tport->lock); INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work); INIT_LIST_HEAD(&tport->ls_list); @@ -1459,6 +1573,8 @@ __unlink_target_port(struct fcloop_nport *nport) { struct fcloop_tport *tport = nport->tport; + lockdep_assert_held(&fcloop_lock); + if (tport && nport->rport) nport->rport->targetport = NULL; nport->tport = NULL; @@ -1469,9 +1585,6 @@ __unlink_target_port(struct fcloop_nport *nport) static int __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { - if (!tport) - return -EALREADY; - return nvmet_fc_unregister_targetport(tport->targetport); } @@ -1479,8 +1592,8 @@ static ssize_t fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_nport *nport = NULL, *tmpport; - struct fcloop_tport *tport = NULL; + struct fcloop_nport *nport; + struct fcloop_tport *tport; u64 nodename, portname; unsigned long flags; int ret; @@ -1489,24 +1602,24 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmpport, &fcloop_nports, nport_list) { - if (tmpport->node_name == nodename && - tmpport->port_name == portname && tmpport->tport) { - nport = tmpport; - tport = __unlink_target_port(nport); - break; - } - } + nport = fcloop_nport_lookup(nodename, portname); + if (!nport) + return -ENOENT; + spin_lock_irqsave(&fcloop_lock, flags); + tport = __unlink_target_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); - if (!nport) - return -ENOENT; + if (!tport) { + ret = -ENOENT; + goto out_nport_put; + } ret = __targetport_unreg(nport, tport); +out_nport_put: + fcloop_nport_put(nport); + return ret ? 
ret : count; } @@ -1572,15 +1685,20 @@ static const struct class fcloop_class = { }; static struct device *fcloop_device; - static int __init fcloop_init(void) { int ret; + lsreq_cache = kmem_cache_create("lsreq_cache", + sizeof(struct fcloop_lsreq), 0, + 0, NULL); + if (!lsreq_cache) + return -ENOMEM; + ret = class_register(&fcloop_class); if (ret) { pr_err("couldn't register class fcloop\n"); - return ret; + goto out_destroy_cache; } fcloop_device = device_create_with_groups( @@ -1598,13 +1716,15 @@ static int __init fcloop_init(void) out_destroy_class: class_unregister(&fcloop_class); +out_destroy_cache: + kmem_cache_destroy(lsreq_cache); return ret; } static void __exit fcloop_exit(void) { - struct fcloop_lport *lport = NULL; - struct fcloop_nport *nport = NULL; + struct fcloop_lport *lport; + struct fcloop_nport *nport; struct fcloop_tport *tport; struct fcloop_rport *rport; unsigned long flags; @@ -1615,7 +1735,7 @@ static void __exit fcloop_exit(void) for (;;) { nport = list_first_entry_or_null(&fcloop_nports, typeof(*nport), nport_list); - if (!nport) + if (!nport || !fcloop_nport_get(nport)) break; tport = __unlink_target_port(nport); @@ -1623,13 +1743,21 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __targetport_unreg(nport, tport); - if (ret) - pr_warn("%s: Failed deleting target port\n", __func__); + if (tport) { + ret = __targetport_unreg(nport, tport); + if (ret) + pr_warn("%s: Failed deleting target port\n", + __func__); + } - ret = __remoteport_unreg(nport, rport); - if (ret) - pr_warn("%s: Failed deleting remote port\n", __func__); + if (rport) { + ret = __remoteport_unreg(nport, rport); + if (ret) + pr_warn("%s: Failed deleting remote port\n", + __func__); + } + + fcloop_nport_put(nport); spin_lock_irqsave(&fcloop_lock, flags); } @@ -1637,17 +1765,17 @@ static void __exit fcloop_exit(void) for (;;) { lport = list_first_entry_or_null(&fcloop_lports, typeof(*lport), lport_list); - if (!lport) + if (!lport || !fcloop_lport_get(lport)) break; - __unlink_local_port(lport); - spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __wait_localport_unreg(lport); + ret = __localport_unreg(lport); if (ret) pr_warn("%s: Failed deleting local port\n", __func__); + fcloop_lport_put(lport); + spin_lock_irqsave(&fcloop_lock, flags); } @@ -1657,6 +1785,7 @@ static void __exit fcloop_exit(void) device_destroy(&fcloop_class, MKDEV(0, 0)); class_unregister(&fcloop_class); + kmem_cache_destroy(lsreq_cache); } module_init(fcloop_init); diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index a5c41144667c..f85a8441bcc6 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -33,10 +33,12 @@ struct nvme_loop_ctrl { struct list_head list; struct blk_mq_tag_set tag_set; - struct nvme_loop_iod async_event_iod; struct nvme_ctrl ctrl; struct nvmet_port *port; + + /* Must be last --ends in a flexible-array member. 
*/ + struct nvme_loop_iod async_event_iod; }; static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) @@ -148,8 +150,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_start_request(req); iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; iod->req.port = queue->ctrl->port; - if (!nvmet_req_init(&iod->req, &queue->nvme_cq, - &queue->nvme_sq, &nvme_loop_ops)) + if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) return BLK_STS_OK; if (blk_rq_nr_phys_segments(req)) { @@ -181,8 +182,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg) iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH; iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; - if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, - &nvme_loop_ops)) { + if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) { dev_err(ctrl->ctrl.device, "failed async event work\n"); return; } @@ -273,6 +273,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) nvme_unquiesce_admin_queue(&ctrl->ctrl); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + nvmet_cq_put(&ctrl->queues[0].nvme_cq); nvme_remove_admin_tag_set(&ctrl->ctrl); } @@ -302,6 +303,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) for (i = 1; i < ctrl->ctrl.queue_count; i++) { clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvmet_cq_put(&ctrl->queues[i].nvme_cq); } ctrl->ctrl.queue_count = 1; /* @@ -327,9 +329,13 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) for (i = 1; i <= nr_io_queues; i++) { ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) + nvmet_cq_init(&ctrl->queues[i].nvme_cq); + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq, + &ctrl->queues[i].nvme_cq); + if (ret) { + nvmet_cq_put(&ctrl->queues[i].nvme_cq); goto out_destroy_queues; + } ctrl->ctrl.queue_count++; } @@ -360,9 +366,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) int error; ctrl->queues[0].ctrl = ctrl; - error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); - if (error) + nvmet_cq_init(&ctrl->queues[0].nvme_cq); + error = nvmet_sq_init(&ctrl->queues[0].nvme_sq, + &ctrl->queues[0].nvme_cq); + if (error) { + nvmet_cq_put(&ctrl->queues[0].nvme_cq); return error; + } ctrl->ctrl.queue_count = 1; error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, @@ -401,6 +411,7 @@ out_cleanup_tagset: nvme_remove_admin_tag_set(&ctrl->ctrl); out_free_sq: nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + nvmet_cq_put(&ctrl->queues[0].nvme_cq); return error; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index b6db8b74dc4a..df69a9dee71c 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -141,13 +141,16 @@ static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns) } struct nvmet_cq { + struct nvmet_ctrl *ctrl; u16 qid; u16 size; + refcount_t ref; }; struct nvmet_sq { struct nvmet_ctrl *ctrl; struct percpu_ref ref; + struct nvmet_cq *cq; u16 qid; u16 size; u32 sqhd; @@ -247,6 +250,7 @@ struct nvmet_pr_log_mgr { struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_sq **sqs; + struct nvmet_cq **cqs; void *drvdata; @@ -424,7 +428,7 @@ struct nvmet_fabrics_ops { u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl); /* Operations mandatory for PCI target controllers */ - u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags, + u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags, 
u16 qsize, u64 prp1); u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid); u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags, @@ -557,8 +561,8 @@ u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req); u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req); -bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); +bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq, + const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); size_t nvmet_req_transfer_len(struct nvmet_req *req); bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len); @@ -571,18 +575,24 @@ void nvmet_execute_set_features(struct nvmet_req *req); void nvmet_execute_get_features(struct nvmet_req *req); void nvmet_execute_keep_alive(struct nvmet_req *req); -u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid); +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create); +u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create); +void nvmet_cq_init(struct nvmet_cq *cq); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size); u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size); +void nvmet_cq_destroy(struct nvmet_cq *cq); +bool nvmet_cq_get(struct nvmet_cq *cq); +void nvmet_cq_put(struct nvmet_cq *cq); +bool nvmet_cq_in_use(struct nvmet_cq *cq); u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create); void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size); -u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, - u16 size); +u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, + struct nvmet_cq *cq, u16 qid, u16 size); void nvmet_sq_destroy(struct nvmet_sq *sq); -int nvmet_sq_init(struct nvmet_sq *sq); +int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq); void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl); diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c index 51c27b32248d..a4295a5b8d28 100644 --- a/drivers/nvme/target/pci-epf.c +++ b/drivers/nvme/target/pci-epf.c @@ -62,8 +62,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1) enum nvmet_pci_epf_queue_flags { - NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */ - NVMET_PCI_EPF_Q_LIVE, /* The queue is live */ + NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */ NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */ }; @@ -596,9 +595,6 @@ static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_irq_vector *iv = cq->iv; bool ret; - if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) - return false; - /* IRQ coalescing for the admin queue is not allowed. 
*/ if (!cq->qid) return true; @@ -625,7 +621,8 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, struct pci_epf *epf = nvme_epf->epf; int ret = 0; - if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) || + !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) return; mutex_lock(&ctrl->irq_lock); @@ -636,14 +633,16 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, switch (nvme_epf->irq_type) { case PCI_IRQ_MSIX: case PCI_IRQ_MSI: + /* + * If we fail to raise an MSI or MSI-X interrupt, it is likely + * because the host is using legacy INTX IRQs (e.g. BIOS, + * grub), but we can fallback to the INTX type only if the + * endpoint controller supports this type. + */ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, nvme_epf->irq_type, cq->vector + 1); - if (!ret) + if (!ret || !nvme_epf->epc_features->intx_capable) break; - /* - * If we got an error, it is likely because the host is using - * legacy IRQs (e.g. BIOS, grub). - */ fallthrough; case PCI_IRQ_INTX: ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, @@ -656,7 +655,9 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, } if (ret) - dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret); + dev_err_ratelimited(ctrl->dev, + "CQ[%u]: Failed to raise IRQ (err=%d)\n", + cq->qid, ret); unlock: mutex_unlock(&ctrl->irq_lock); @@ -1319,8 +1320,14 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); - dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", - cqid, qsize, cq->qes, cq->vector); + if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + dev_dbg(ctrl->dev, + "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", + cqid, qsize, cq->qes, cq->vector); + else + dev_dbg(ctrl->dev, + "CQ[%u]: %u entries of %zu B, IRQ disabled\n", + cqid, qsize, cq->qes); return NVME_SC_SUCCESS; @@ -1344,17 +1351,20 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) cancel_delayed_work_sync(&cq->work); nvmet_pci_epf_drain_queue(cq); - nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); + if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); + nvmet_cq_put(&cq->nvme_cq); return NVME_SC_SUCCESS; } static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, - u16 sqid, u16 flags, u16 qsize, u64 pci_addr) + u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr) { struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; u16 status; if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) @@ -1377,7 +1387,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, sq->qes = ctrl->io_sqes; sq->pci_size = sq->qes * sq->depth; - status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth); + status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid, + sq->depth); if (status != NVME_SC_SUCCESS) return status; @@ -1533,7 +1544,6 @@ static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, if (sq) { queue = &ctrl->sq[qid]; - set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags); } else { queue = &ctrl->cq[qid]; INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); @@ -1594,8 +1604,7 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) goto complete; } - if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq, - 
&nvmet_pci_epf_fabrics_ops)) + if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops)) goto complete; iod->data_len = nvmet_req_transfer_len(req); @@ -1648,16 +1657,17 @@ static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, { struct nvmet_pci_epf_iod *iod; int ret, n = 0; + u16 head = sq->head; sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); - while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { + while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { iod = nvmet_pci_epf_alloc_iod(sq); if (!iod) break; /* Get the NVMe command submitted by the host. */ ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd, - sq->pci_addr + sq->head * sq->qes, + sq->pci_addr + head * sq->qes, sq->qes, DMA_FROM_DEVICE); if (ret) { /* Not much we can do... */ @@ -1666,12 +1676,13 @@ static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, } dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n", - sq->qid, sq->head, sq->tail, + sq->qid, head, sq->tail, nvmet_pci_epf_iod_name(iod)); - sq->head++; - if (sq->head == sq->depth) - sq->head = 0; + head++; + if (head == sq->depth) + head = 0; + WRITE_ONCE(sq->head, head); n++; queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work); @@ -1761,8 +1772,17 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work) if (!iod) break; - /* Post the IOD completion entry. */ + /* + * Post the IOD completion entry. If the IOD request was + * executed (req->execute() called), the CQE is already + * initialized. However, the IOD may have been failed before + * that, leaving the CQE not properly initialized. So always + * initialize it here. + */ cqe = &iod->cqe; + cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head)); + cqe->sq_id = cpu_to_le16(iod->sq->qid); + cqe->command_id = iod->cmd.common.command_id; cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); dev_dbg(ctrl->dev, @@ -1800,6 +1820,21 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work) NVMET_PCI_EPF_CQ_RETRY_INTERVAL); } +static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl) +{ + struct nvmet_ctrl *tctrl = ctrl->tctrl; + + /* Initialize controller status. */ + tctrl->csts = 0; + ctrl->csts = 0; + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); + + /* Initialize controller configuration and start polling. */ + tctrl->cc = 0; + ctrl->cc = 0; + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); +} + static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) { u64 pci_addr, asq, acq; @@ -1846,8 +1881,8 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) qsize = aqa & 0x00000fff; pci_addr = asq & GENMASK_ULL(63, 12); - status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG, - qsize, pci_addr); + status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0, + NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr); if (status != NVME_SC_SUCCESS) { dev_err(ctrl->dev, "Failed to create admin submission queue\n"); nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); @@ -1865,18 +1900,20 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) return 0; err: - ctrl->csts = 0; + nvmet_pci_epf_clear_ctrl_config(ctrl); return -EINVAL; } -static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl, + bool shutdown) { int qid; if (!ctrl->enabled) return; - dev_info(ctrl->dev, "Disabling controller\n"); + dev_info(ctrl->dev, "%s controller\n", + shutdown ? 
"Shutting down" : "Disabling"); ctrl->enabled = false; cancel_delayed_work_sync(&ctrl->poll_sqs); @@ -1893,6 +1930,11 @@ static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); ctrl->csts &= ~NVME_CSTS_RDY; + if (shutdown) { + ctrl->csts |= NVME_CSTS_SHST_CMPLT; + ctrl->cc &= ~NVME_CC_ENABLE; + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); + } } static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) @@ -1919,12 +1961,10 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) } if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) - nvmet_pci_epf_disable_ctrl(ctrl); + nvmet_pci_epf_disable_ctrl(ctrl, false); - if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) { - nvmet_pci_epf_disable_ctrl(ctrl); - ctrl->csts |= NVME_CSTS_SHST_CMPLT; - } + if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) + nvmet_pci_epf_disable_ctrl(ctrl, true); if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc)) ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; @@ -1963,16 +2003,10 @@ static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) /* Clear Controller Memory Buffer Supported (CMBS). */ ctrl->cap &= ~(0x1ULL << 57); - /* Controller configuration. */ - ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE); - - /* Controller status. */ - ctrl->csts = ctrl->tctrl->csts; - nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); - nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); - nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); + + nvmet_pci_epf_clear_ctrl_config(ctrl); } static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf, @@ -2070,14 +2104,22 @@ out_mempool_exit: static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl) { + + dev_info(ctrl->dev, "PCI link up\n"); + ctrl->link_up = true; + schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); } static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl) { + dev_info(ctrl->dev, "PCI link down\n"); + ctrl->link_up = false; + cancel_delayed_work_sync(&ctrl->poll_cc); - nvmet_pci_epf_disable_ctrl(ctrl); + nvmet_pci_epf_disable_ctrl(ctrl, false); + nvmet_pci_epf_clear_ctrl_config(ctrl); } static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl) @@ -2300,10 +2342,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf) if (ret) goto out_clear_bar; - if (!epc_features->linkup_notifier) { - ctrl->link_up = true; + if (!epc_features->linkup_notifier) nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl); - } return 0; @@ -2319,7 +2359,6 @@ static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf) struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; - ctrl->link_up = false; nvmet_pci_epf_destroy_ctrl(ctrl); nvmet_pci_epf_deinit_dma(nvme_epf); @@ -2331,7 +2370,6 @@ static int nvmet_pci_epf_link_up(struct pci_epf *epf) struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; - ctrl->link_up = true; nvmet_pci_epf_start_ctrl(ctrl); return 0; @@ -2342,7 +2380,6 @@ static int nvmet_pci_epf_link_down(struct pci_epf *epf) struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; - ctrl->link_up = false; nvmet_pci_epf_stop_ctrl(ctrl); return 0; diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 2a4536ef6184..432bdf7cd49e 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -976,8 
+976,7 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, cmd->send_sge.addr, cmd->send_sge.length, DMA_TO_DEVICE); - if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, - &queue->nvme_sq, &nvmet_rdma_ops)) + if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops)) return; status = nvmet_rdma_map_sgl(cmd); @@ -1353,6 +1352,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) pr_debug("freeing queue %d\n", queue->idx); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_rdma_destroy_queue_ib(queue); if (!queue->nsrq) { @@ -1436,7 +1436,8 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, goto out_reject; } - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_queue; @@ -1517,6 +1518,7 @@ out_ida_remove: out_destroy_sq: nvmet_sq_destroy(&queue->nvme_sq); out_free_queue: + nvmet_cq_put(&queue->nvme_cq); kfree(queue); out_reject: nvmet_rdma_cm_reject(cm_id, ret); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index f2d0c920269b..c6603bd9c95e 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -7,6 +7,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/crc32c.h> #include <linux/err.h> #include <linux/nvme-tcp.h> #include <linux/nvme-keyring.h> @@ -17,7 +18,6 @@ #include <net/handshake.h> #include <linux/inet.h> #include <linux/llist.h> -#include <crypto/hash.h> #include <trace/events/sock.h> #include "nvmet.h" @@ -172,8 +172,6 @@ struct nvmet_tcp_queue { /* digest state */ bool hdr_digest; bool data_digest; - struct ahash_request *snd_hash; - struct ahash_request *rcv_hash; /* TLS state */ key_serial_t tls_pskid; @@ -294,14 +292,9 @@ static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; } -static inline void nvmet_tcp_hdgst(struct ahash_request *hash, - void *pdu, size_t len) +static inline void nvmet_tcp_hdgst(void *pdu, size_t len) { - struct scatterlist sg; - - sg_init_one(&sg, pdu, len); - ahash_request_set_crypt(hash, &sg, pdu + len, len); - crypto_ahash_digest(hash); + put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len); } static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, @@ -318,7 +311,7 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, } recv_digest = *(__le32 *)(pdu + hdr->hlen); - nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); + nvmet_tcp_hdgst(pdu, len); exp_digest = *(__le32 *)(pdu + hdr->hlen); if (recv_digest != exp_digest) { pr_err("queue %d: header digest error: recv %#x expected %#x\n", @@ -441,12 +434,24 @@ err: return NVME_SC_INTERNAL; } -static void nvmet_tcp_calc_ddgst(struct ahash_request *hash, - struct nvmet_tcp_cmd *cmd) +static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) { - ahash_request_set_crypt(hash, cmd->req.sg, - (void *)&cmd->exp_ddgst, cmd->req.transfer_len); - crypto_ahash_digest(hash); + size_t total_len = cmd->req.transfer_len; + struct scatterlist *sg = cmd->req.sg; + u32 crc = ~0; + + while (total_len) { + size_t len = min_t(size_t, total_len, sg->length); + + /* + * Note that the scatterlist does not contain any highmem pages, + * as it was allocated by sgl_alloc() with GFP_KERNEL. 
+ */ + crc = crc32c(crc, sg_virt(sg), len); + total_len -= len; + sg = sg_next(sg); + } + cmd->exp_ddgst = cpu_to_le32(~crc); } static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) @@ -473,19 +478,18 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; - nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); + nvmet_tcp_calc_ddgst(cmd); } if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; - struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; @@ -503,14 +507,13 @@ static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; - struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; @@ -523,7 +526,7 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } @@ -857,42 +860,6 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); } -static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); - - ahash_request_free(queue->rcv_hash); - ahash_request_free(queue->snd_hash); - crypto_free_ahash(tfm); -} - -static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) -{ - struct crypto_ahash *tfm; - - tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->snd_hash) - goto free_tfm; - ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); - - queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->rcv_hash) - goto free_snd_hash; - ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); - - return 0; -free_snd_hash: - ahash_request_free(queue->snd_hash); -free_tfm: - crypto_free_ahash(tfm); - return -ENOMEM; -} - - static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) { struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; @@ -921,11 +888,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); - if (queue->hdr_digest || queue->data_digest) { - ret = nvmet_tcp_alloc_crypto(queue); - if (ret) - return ret; - } memset(icresp, 0, sizeof(*icresp)); icresp->hdr.type = nvme_tcp_icresp; @@ -1077,8 +1039,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) req = &queue->cmd->req; memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); - if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, - &queue->nvme_sq, &nvmet_tcp_ops))) { + if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) { pr_err("failed cmd %p id %d opcode 
%d, data_len: %d, status: %04x\n", req->cmd, req->cmd->common.command_id, req->cmd->common.opcode, @@ -1247,7 +1208,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; - nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); + nvmet_tcp_calc_ddgst(cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; @@ -1560,6 +1521,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; + if (!queue->state_change) + return; + write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; @@ -1612,13 +1576,12 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) nvmet_sq_put_tls_key(&queue->nvme_sq); nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); cancel_work_sync(&queue->io_work); nvmet_tcp_free_cmd_data_in_buffers(queue); /* ->sock will be released by fput() */ fput(queue->sock->file); nvmet_tcp_free_cmds(queue); - if (queue->hdr_digest || queue->data_digest) - nvmet_tcp_free_crypto(queue); ida_free(&nvmet_tcp_queue_ida, queue->idx); page_frag_cache_drain(&queue->pf_cache); kfree(queue); @@ -1947,7 +1910,8 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, if (ret) goto out_ida_remove; - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_free_connect; @@ -1990,6 +1954,7 @@ out_destroy_sq: mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_sq_destroy(&queue->nvme_sq); out_free_connect: + nvmet_cq_put(&queue->nvme_cq); nvmet_tcp_free_cmd(&queue->connect); out_ida_remove: ida_free(&nvmet_tcp_queue_ida, queue->idx); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index fff85bbf0ecd..e206efc29a00 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -594,9 +594,11 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, cell->nbits = info->nbits; cell->np = info->np; - if (cell->nbits) + if (cell->nbits) { cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, BITS_PER_BYTE); + cell->raw_len = ALIGN(cell->bytes, nvmem->word_size); + } if (!IS_ALIGNED(cell->offset, nvmem->stride)) { dev_err(&nvmem->dev, @@ -605,6 +607,18 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, return -EINVAL; } + if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) { + dev_err(&nvmem->dev, + "cell %s raw len %zd unaligned to nvmem word size %d\n", + cell->name ?: "<unknown>", cell->raw_len, + nvmem->word_size); + + if (info->raw_len) + return -EINVAL; + + cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size); + } + return 0; } @@ -837,7 +851,9 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod if (addr && len == (2 * sizeof(u32))) { info.bit_offset = be32_to_cpup(addr++); info.nbits = be32_to_cpup(addr); - if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) { + if (info.bit_offset >= BITS_PER_BYTE * info.bytes || + info.nbits < 1 || + info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) { dev_err(dev, "nvmem: invalid bits on %pOF\n", child); of_node_put(child); return -EINVAL; @@ -1630,21 +1646,29 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put); static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf) { u8 *p, *b; - int i, extra, bit_offset = cell->bit_offset; + int i, extra, 
bytes_offset; + int bit_offset = cell->bit_offset; p = b = buf; - if (bit_offset) { + + bytes_offset = bit_offset / BITS_PER_BYTE; + b += bytes_offset; + bit_offset %= BITS_PER_BYTE; + + if (bit_offset % BITS_PER_BYTE) { /* First shift */ - *b++ >>= bit_offset; + *p = *b++ >> bit_offset; /* setup rest of the bytes if any */ for (i = 1; i < cell->bytes; i++) { /* Get bits from next byte and shift them towards msb */ - *p |= *b << (BITS_PER_BYTE - bit_offset); + *p++ |= *b << (BITS_PER_BYTE - bit_offset); - p = b; - *b++ >>= bit_offset; + *p = *b++ >> bit_offset; } + } else if (p != b) { + memmove(p, b, cell->bytes - bytes_offset); + p += cell->bytes - 1; } else { /* point to the msb */ p += cell->bytes - 1; diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 116a39e804c7..a872c640b8c5 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -321,19 +321,32 @@ static int qfprom_reg_read(void *context, unsigned int reg, void *_val, size_t bytes) { struct qfprom_priv *priv = context; - u8 *val = _val; - int i = 0, words = bytes; + u32 *val = _val; void __iomem *base = priv->qfpcorrected; + int words = DIV_ROUND_UP(bytes, sizeof(u32)); + int i; if (read_raw_data && priv->qfpraw) base = priv->qfpraw; - while (words--) - *val++ = readb(base + reg + i++); + for (i = 0; i < words; i++) + *val++ = readl(base + reg + i * sizeof(u32)); return 0; } +/* Align reads to word boundary */ +static void qfprom_fixup_dt_cell_info(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell) +{ + unsigned int byte_offset = cell->offset % sizeof(u32); + + cell->bit_offset += byte_offset * BITS_PER_BYTE; + cell->offset -= byte_offset; + if (byte_offset && !cell->nbits) + cell->nbits = cell->bytes * BITS_PER_BYTE; +} + static void qfprom_runtime_disable(void *data) { pm_runtime_disable(data); @@ -358,10 +371,11 @@ static int qfprom_probe(struct platform_device *pdev) struct nvmem_config econfig = { .name = "qfprom", .add_legacy_fixed_of_cells = true, - .stride = 1, - .word_size = 1, + .stride = 4, + .word_size = 4, .id = NVMEM_DEVID_AUTO, .reg_read = qfprom_reg_read, + .fixup_dt_cell_info = qfprom_fixup_dt_cell_info, }; struct device *dev = &pdev->dev; struct resource *res; diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c index ebc3f0b24166..d88f12c53242 100644 --- a/drivers/nvmem/rockchip-otp.c +++ b/drivers/nvmem/rockchip-otp.c @@ -59,7 +59,6 @@ #define RK3588_OTPC_AUTO_EN 0x08 #define RK3588_OTPC_INT_ST 0x84 #define RK3588_OTPC_DOUT0 0x20 -#define RK3588_NO_SECURE_OFFSET 0x300 #define RK3588_NBYTES 4 #define RK3588_BURST_NUM 1 #define RK3588_BURST_SHIFT 8 @@ -69,6 +68,7 @@ struct rockchip_data { int size; + int read_offset; const char * const *clks; int num_clks; nvmem_reg_read_t reg_read; @@ -196,7 +196,7 @@ static int rk3588_otp_read(void *context, unsigned int offset, addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES; addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES; addr_len = addr_end - addr_start; - addr_start += RK3588_NO_SECURE_OFFSET; + addr_start += otp->data->read_offset / RK3588_NBYTES; buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL); if (!buf) @@ -274,12 +274,21 @@ static const struct rockchip_data px30_data = { .reg_read = px30_otp_read, }; +static const struct rockchip_data rk3576_data = { + .size = 0x100, + .read_offset = 0x700, + .clks = px30_otp_clocks, + .num_clks = ARRAY_SIZE(px30_otp_clocks), + .reg_read = rk3588_otp_read, +}; + static const char * const rk3588_otp_clocks[] = { "otp", "apb_pclk", 
"phy", "arb", }; static const struct rockchip_data rk3588_data = { .size = 0x400, + .read_offset = 0xc00, .clks = rk3588_otp_clocks, .num_clks = ARRAY_SIZE(rk3588_otp_clocks), .reg_read = rk3588_otp_read, @@ -295,6 +304,10 @@ static const struct of_device_id rockchip_otp_match[] = { .data = &px30_data, }, { + .compatible = "rockchip,rk3576-otp", + .data = &rk3576_data, + }, + { .compatible = "rockchip,rk3588-otp", .data = &rk3588_data, }, diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 055518ee354d..d9996516f49e 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -59,16 +59,15 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); if (pdev && pci_num_vf(pdev)) { - pci_dev_put(pdev); rc = -EBUSY; goto out; } rc = zpci_deconfigure_device(zdev); out: - mutex_unlock(&zdev->state_lock); if (pdev) pci_dev_put(pdev); + mutex_unlock(&zdev->state_lock); return rc; } diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 6569ba3577fe..8b8848788618 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -615,6 +615,9 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc) void __iomem *addr = pci_msix_desc_addr(desc); desc->pci.msi_attrib.can_mask = 1; + /* Workaround for SUN NIU insanity, which requires write before read */ + if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST) + writel(0, addr + PCI_MSIX_ENTRY_DATA); desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4d7c9f64ea24..e77d5b53c0ce 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5429,8 +5429,6 @@ static bool pci_bus_resettable(struct pci_bus *bus) return false; list_for_each_entry(dev, &bus->devices, bus_list) { - if (!pci_reset_supported(dev)) - return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; @@ -5507,8 +5505,6 @@ static bool pci_slot_resettable(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - if (!pci_reset_supported(dev)) - return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8d610c17e0f2..94daca15a096 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1990,12 +1990,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev) device_create_managed_software_node(&pdev->dev, properties, NULL)) pci_warn(pdev, "could not add stall property"); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva); 
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva); /* * It's possible for the MSI to get corrupted if SHPC and ACPI are used diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 54d6f4fa3ce1..e994c546422c 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -187,6 +187,9 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) panic("%s: kzalloc() failed!\n", __func__); tmp->res = r; tmp->dev = dev; + tmp->start = r->start; + tmp->end = r->end; + tmp->flags = r->flags; /* Fallback is smallest one or list is empty */ n = head; @@ -545,6 +548,7 @@ assign: pci_dbg(dev, "%s %pR: releasing\n", res_name, res); release_resource(res); + restore_dev_resource(dev_res); } /* Restore start/end/flags from saved list */ list_for_each_entry(save_res, &save_head, list) diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index df9a28ba69dc..81b6f1a62349 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -474,8 +474,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - m1_pmu_disable_event(event); + perf_event_overflow(event, &data, regs); } cpu_pmu->start(cpu_pmu); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index e506d59654e7..3db9f4ed17e8 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -887,8 +887,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) * an irq_work which will be taken care of in the handling of * IPI_IRQ_WORK. */ - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } armv8pmu_start(cpu_pmu); diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c index b09615bb2bb2..7cb12c8e06c7 100644 --- a/drivers/perf/arm_v6_pmu.c +++ b/drivers/perf/arm_v6_pmu.c @@ -276,8 +276,7 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } /* diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c index 17831e1920bd..a1e438101114 100644 --- a/drivers/perf/arm_v7_pmu.c +++ b/drivers/perf/arm_v7_pmu.c @@ -930,8 +930,7 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } /* diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c index 638fea9b1263..c2ac41dd9e19 100644 --- a/drivers/perf/arm_xscale_pmu.c +++ b/drivers/perf/arm_xscale_pmu.c @@ -186,8 +186,7 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } irq_work_run(); @@ -519,8 +518,7 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } irq_work_run(); diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c index 2bec70615449..f59caff4b3d4 100644 --- a/drivers/phy/phy-can-transceiver.c 
+++ b/drivers/phy/phy-can-transceiver.c @@ -93,6 +93,16 @@ static const struct of_device_id can_transceiver_phy_ids[] = { }; MODULE_DEVICE_TABLE(of, can_transceiver_phy_ids); +/* Temporary wrapper until the multiplexer subsystem supports optional muxes */ +static inline struct mux_state * +devm_mux_state_get_optional(struct device *dev, const char *mux_name) +{ + if (!of_property_present(dev->of_node, "mux-states")) + return NULL; + + return devm_mux_state_get(dev, mux_name); +} + static int can_transceiver_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; @@ -114,13 +124,11 @@ static int can_transceiver_phy_probe(struct platform_device *pdev) match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node); drvdata = match->data; - mux_state = devm_mux_state_get(dev, NULL); - if (IS_ERR(mux_state)) { - if (PTR_ERR(mux_state) == -EPROBE_DEFER) - return PTR_ERR(mux_state); - } else { - can_transceiver_phy->mux_state = mux_state; - } + mux_state = devm_mux_state_get_optional(dev, NULL); + if (IS_ERR(mux_state)) + return PTR_ERR(mux_state); + + can_transceiver_phy->mux_state = mux_state; phy = devm_phy_create(dev, dev->of_node, &can_transceiver_phy_ops); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c index 45b3b792696e..b33e2e2b5014 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c @@ -1754,7 +1754,8 @@ static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg qmp_ufs_init_all(qmp, &cfg->tbls_hs_overlay[i]); } - qmp_ufs_init_all(qmp, &cfg->tbls_hs_b); + if (qmp->mode == PHY_MODE_UFS_HS_B) + qmp_ufs_init_all(qmp, &cfg->tbls_hs_b); } static int qmp_ufs_com_init(struct qmp_ufs *qmp) diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 775f4f973a6c..9fdf17e0848a 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -9,6 +9,7 @@ * Copyright (C) 2014 Cogent Embedded, Inc. */ +#include <linux/cleanup.h> #include <linux/extcon-provider.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -107,7 +108,6 @@ struct rcar_gen3_phy { struct rcar_gen3_chan *ch; u32 int_enable_bits; bool initialized; - bool otg_initialized; bool powered; }; @@ -119,9 +119,8 @@ struct rcar_gen3_chan { struct regulator *vbus; struct reset_control *rstc; struct work_struct work; - struct mutex lock; /* protects rphys[...].powered */ + spinlock_t lock; /* protects access to hardware and driver data structure. 
*/ enum usb_dr_mode dr_mode; - int irq; u32 obint_enable_bits; bool extcon_host; bool is_otg_channel; @@ -320,16 +319,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch) return false; } -static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch) +static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch) { - int i; - - for (i = 0; i < NUM_OF_PHYS; i++) { - if (ch->rphys[i].otg_initialized) - return false; + for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI; + i++) { + if (ch->rphys[i].initialized) + return true; } - return true; + return false; } static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch) @@ -351,7 +349,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, bool is_b_device; enum phy_mode cur_mode, new_mode; - if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) + guard(spinlock_irqsave)(&ch->lock); + + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO; if (sysfs_streq(buf, "host")) @@ -389,7 +389,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr, { struct rcar_gen3_chan *ch = dev_get_drvdata(dev); - if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO; return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" : @@ -402,6 +402,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) void __iomem *usb2_base = ch->base; u32 val; + if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch)) + return; + /* Should not use functions of read-modify-write a register */ val = readl(usb2_base + USB2_LINECTRL1); val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN | @@ -415,7 +418,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) val = readl(usb2_base + USB2_ADPCTRL); writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); } - msleep(20); + mdelay(20); writel(0xffffffff, usb2_base + USB2_OBINTSTA); writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN); @@ -427,16 +430,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) { struct rcar_gen3_chan *ch = _ch; void __iomem *usb2_base = ch->base; - u32 status = readl(usb2_base + USB2_OBINTSTA); + struct device *dev = ch->dev; irqreturn_t ret = IRQ_NONE; + u32 status; + + pm_runtime_get_noresume(dev); + + if (pm_runtime_suspended(dev)) + goto rpm_put; - if (status & ch->obint_enable_bits) { - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); - rcar_gen3_device_recognition(ch); - ret = IRQ_HANDLED; + scoped_guard(spinlock, &ch->lock) { + status = readl(usb2_base + USB2_OBINTSTA); + if (status & ch->obint_enable_bits) { + dev_vdbg(dev, "%s: %08x\n", __func__, status); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } } +rpm_put: + pm_runtime_put_noidle(dev); return ret; } @@ -446,32 +460,23 @@ static int rcar_gen3_phy_usb2_init(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; void __iomem *usb2_base = channel->base; u32 val; - int ret; - if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); - ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq, - IRQF_SHARED, dev_name(channel->dev), channel); - if (ret < 0) { - dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); - return ret; 
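The phy-rcar-gen3-usb2 hunks in this chunk and the next replace the channel mutex with a spinlock taken through the scope-based lock guards from <linux/cleanup.h> (guard() and scoped_guard(), as used below). A self-contained sketch of that pattern, with purely illustrative names, might look like this:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_count;

/* guard() acquires the lock and releases it when the scope ends. */
static unsigned int demo_bump(void)
{
	guard(spinlock_irqsave)(&demo_lock);
	return ++demo_count;
}

/* scoped_guard() confines the critical section to the guarded statement. */
static void demo_reset(void)
{
	scoped_guard(spinlock_irqsave, &demo_lock)
		demo_count = 0;
}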
- } - } + guard(spinlock_irqsave)(&channel->lock); /* Initialize USB2 part */ val = readl(usb2_base + USB2_INT_ENABLE); val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits; writel(val, usb2_base + USB2_INT_ENABLE); - writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); - writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); - - /* Initialize otg part */ - if (channel->is_otg_channel) { - if (rcar_gen3_needs_init_otg(channel)) - rcar_gen3_init_otg(channel); - rphy->otg_initialized = true; + + if (!rcar_gen3_is_any_rphy_initialized(channel)) { + writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); + writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); } + /* Initialize otg part (only if we initialize a PHY with IRQs). */ + if (rphy->int_enable_bits) + rcar_gen3_init_otg(channel); + rphy->initialized = true; return 0; @@ -484,10 +489,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) void __iomem *usb2_base = channel->base; u32 val; - rphy->initialized = false; + guard(spinlock_irqsave)(&channel->lock); - if (channel->is_otg_channel) - rphy->otg_initialized = false; + rphy->initialized = false; val = readl(usb2_base + USB2_INT_ENABLE); val &= ~rphy->int_enable_bits; @@ -495,9 +499,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) val &= ~USB2_INT_ENABLE_UCOM_INTEN; writel(val, usb2_base + USB2_INT_ENABLE); - if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) - free_irq(channel->irq, channel); - return 0; } @@ -509,16 +510,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) u32 val; int ret = 0; - mutex_lock(&channel->lock); - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; - if (channel->vbus) { ret = regulator_enable(channel->vbus); if (ret) - goto out; + return ret; } + guard(spinlock_irqsave)(&channel->lock); + + if (!rcar_gen3_are_all_rphys_power_off(channel)) + goto out; + val = readl(usb2_base + USB2_USBCTR); val |= USB2_USBCTR_PLL_RST; writel(val, usb2_base + USB2_USBCTR); @@ -528,7 +530,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) out: /* The powered flag should be set for any other phys anyway */ rphy->powered = true; - mutex_unlock(&channel->lock); return 0; } @@ -539,18 +540,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; int ret = 0; - mutex_lock(&channel->lock); - rphy->powered = false; + scoped_guard(spinlock_irqsave, &channel->lock) { + rphy->powered = false; - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; + if (rcar_gen3_are_all_rphys_power_off(channel)) { + u32 val = readl(channel->base + USB2_USBCTR); + + val |= USB2_USBCTR_PLL_RST; + writel(val, channel->base + USB2_USBCTR); + } + } if (channel->vbus) ret = regulator_disable(channel->vbus); -out: - mutex_unlock(&channel->lock); - return ret; } @@ -703,7 +706,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rcar_gen3_chan *channel; struct phy_provider *provider; - int ret = 0, i; + int ret = 0, i, irq; if (!dev->of_node) { dev_err(dev, "This driver needs device tree\n"); @@ -719,8 +722,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) return PTR_ERR(channel->base); channel->obint_enable_bits = USB2_OBINT_BITS; - /* get irq number here and request_irq for OTG in phy_init */ - channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { channel->is_otg_channel = true; @@ -763,7 
+764,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (phy_data->no_adp_ctrl) channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; - mutex_init(&channel->lock); + spin_lock_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { channel->rphys[i].phy = devm_phy_create(dev, NULL, phy_data->phy_usb2_ops); @@ -789,6 +790,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->vbus = NULL; } + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) { + ret = irq; + goto error; + } else if (irq > 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, + IRQF_SHARED, dev_name(dev), channel); + if (ret < 0) { + dev_err(dev, "Failed to request irq (%d)\n", irq); + goto error; + } + } + provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); if (IS_ERR(provider)) { dev_err(dev, "Failed to register PHY provider\n"); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c index 08c78c1bafc9..28a052e17366 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c @@ -1653,7 +1653,7 @@ static __maybe_unused int samsung_mipi_dcphy_runtime_resume(struct device *dev) return ret; } - clk_prepare_enable(samsung->ref_clk); + ret = clk_prepare_enable(samsung->ref_clk); if (ret) { dev_err(samsung->dev, "Failed to enable reference clock, %d\n", ret); clk_disable_unprepare(samsung->pclk); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index fe7c05748356..77236f012a1f 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -476,6 +476,8 @@ static const struct ropll_config ropll_tmds_cfg[] = { 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, + { 502500, 84, 84, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 11, 1, 4, 5, + 4, 11, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5, 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0, diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c index cb5454fbe2c8..b505d89860b4 100644 --- a/drivers/phy/starfive/phy-jh7110-usb.c +++ b/drivers/phy/starfive/phy-jh7110-usb.c @@ -18,6 +18,8 @@ #include <linux/usb/of.h> #define USB_125M_CLK_RATE 125000000 +#define USB_CLK_MODE_OFF 0x0 +#define USB_CLK_MODE_RX_NORMAL_PWR BIT(1) #define USB_LS_KEEPALIVE_OFF 0x4 #define USB_LS_KEEPALIVE_ENABLE BIT(4) @@ -78,6 +80,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy) { struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy); int ret; + unsigned int val; ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE); if (ret) @@ -87,6 +90,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy) if (ret) return ret; + val = readl(phy->regs + USB_CLK_MODE_OFF); + val |= USB_CLK_MODE_RX_NORMAL_PWR; + writel(val, phy->regs + USB_CLK_MODE_OFF); + return 0; } diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index fae6242aa730..23a23f2d64e5 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -237,6 +237,8 @@ #define DATA0_VAL_PD BIT(1) #define USE_XUSB_AO BIT(4) +#define TEGRA_UTMI_PAD_MAX 4 + #define 
TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \ { \ .name = _name, \ @@ -269,7 +271,7 @@ struct tegra186_xusb_padctl { /* UTMI bias and tracking */ struct clk *usb2_trk_clk; - unsigned int bias_pad_enable; + DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX); /* padctl context */ struct tegra186_xusb_padctl_context context; @@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) u32 value; int err; - mutex_lock(&padctl->lock); - - if (priv->bias_pad_enable++ > 0) { - mutex_unlock(&padctl->lock); + if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX)) return; - } err = clk_prepare_enable(priv->usb2_trk_clk); if (err < 0) @@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) } else { clk_disable_unprepare(priv->usb2_trk_clk); } - - mutex_unlock(&padctl->lock); } static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) @@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); u32 value; - mutex_lock(&padctl->lock); - - if (WARN_ON(priv->bias_pad_enable == 0)) { - mutex_unlock(&padctl->lock); - return; - } - - if (--priv->bias_pad_enable > 0) { - mutex_unlock(&padctl->lock); + if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX)) return; - } value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1); value |= USB2_PD_TRK; @@ -690,13 +677,13 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) clk_disable_unprepare(priv->usb2_trk_clk); } - mutex_unlock(&padctl->lock); } static void tegra186_utmi_pad_power_on(struct phy *phy) { struct tegra_xusb_lane *lane = phy_get_drvdata(phy); struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); struct tegra_xusb_usb2_port *port; struct device *dev = padctl->dev; unsigned int index = lane->index; @@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy) if (!phy) return; + mutex_lock(&padctl->lock); + if (test_bit(index, priv->utmi_pad_enabled)) { + mutex_unlock(&padctl->lock); + return; + } + port = tegra_xusb_find_usb2_port(padctl, index); if (!port) { dev_err(dev, "no port found for USB2 lane %u\n", index); + mutex_unlock(&padctl->lock); return; } @@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy) value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index)); value &= ~USB2_OTG_PD_DR; padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index)); + + set_bit(index, priv->utmi_pad_enabled); + mutex_unlock(&padctl->lock); } static void tegra186_utmi_pad_power_down(struct phy *phy) { struct tegra_xusb_lane *lane = phy_get_drvdata(phy); struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); unsigned int index = lane->index; u32 value; if (!phy) return; + mutex_lock(&padctl->lock); + if (!test_bit(index, priv->utmi_pad_enabled)) { + mutex_unlock(&padctl->lock); + return; + } + dev_dbg(padctl->dev, "power down UTMI pad %u\n", index); value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index)); @@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy) udelay(2); + clear_bit(index, priv->utmi_pad_enabled); + tegra186_utmi_bias_pad_power_off(padctl); + + mutex_unlock(&padctl->lock); } static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, diff 
--git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 79d4814d758d..c89df95aa6ca 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port, err = dev_set_name(&port->dev, "%s-%u", name, index); if (err < 0) - goto unregister; + goto put_device; err = device_add(&port->dev); if (err < 0) - goto unregister; + goto put_device; return 0; -unregister: - device_unregister(&port->dev); +put_device: + put_device(&port->dev); return err; } diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 842a1e6cbfc4..18de31328540 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -37,16 +37,16 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name( struct pinctrl_dev *pctldev, const char *name) { - const struct group_desc *grp = NULL; + const struct group_desc *grp; int i; for (i = 0; i < pctldev->num_groups; i++) { grp = pinctrl_generic_get_group(pctldev, i); if (grp && !strcmp(grp->grp.name, name)) - break; + return grp; } - return grp; + return NULL; } static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index 8bbe2aa74861..e20aaba0a33a 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -449,7 +449,7 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num, return -EOPNOTSUPP; virq = irq_find_mapping(eint->domain, eint_num); - eint_offset = (eint_num % 4) * 8; + eint_offset = (idx % 4) * 8; d = irq_get_irq_data(virq); set_offset = (idx / 4) * 4 + eint->regs->dbnc_set; diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c index 547a798b71c8..5d84a778683d 100644 --- a/drivers/pinctrl/mediatek/pinctrl-airoha.c +++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c @@ -6,6 +6,7 @@ */ #include <dt-bindings/pinctrl/mt65xx.h> +#include <linux/bitfield.h> #include <linux/bits.h> #include <linux/cleanup.h> #include <linux/gpio/driver.h> @@ -112,39 +113,19 @@ #define REG_LAN_LED1_MAPPING 0x0280 #define LAN4_LED_MAPPING_MASK GENMASK(18, 16) -#define LAN4_PHY4_LED_MAP BIT(18) -#define LAN4_PHY2_LED_MAP BIT(17) -#define LAN4_PHY1_LED_MAP BIT(16) -#define LAN4_PHY0_LED_MAP 0 -#define LAN4_PHY3_LED_MAP GENMASK(17, 16) +#define LAN4_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN4_LED_MAPPING_MASK, (_n)) #define LAN3_LED_MAPPING_MASK GENMASK(14, 12) -#define LAN3_PHY4_LED_MAP BIT(14) -#define LAN3_PHY2_LED_MAP BIT(13) -#define LAN3_PHY1_LED_MAP BIT(12) -#define LAN3_PHY0_LED_MAP 0 -#define LAN3_PHY3_LED_MAP GENMASK(13, 12) +#define LAN3_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN3_LED_MAPPING_MASK, (_n)) #define LAN2_LED_MAPPING_MASK GENMASK(10, 8) -#define LAN2_PHY4_LED_MAP BIT(12) -#define LAN2_PHY2_LED_MAP BIT(11) -#define LAN2_PHY1_LED_MAP BIT(10) -#define LAN2_PHY0_LED_MAP 0 -#define LAN2_PHY3_LED_MAP GENMASK(11, 10) +#define LAN2_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN2_LED_MAPPING_MASK, (_n)) #define LAN1_LED_MAPPING_MASK GENMASK(6, 4) -#define LAN1_PHY4_LED_MAP BIT(6) -#define LAN1_PHY2_LED_MAP BIT(5) -#define LAN1_PHY1_LED_MAP BIT(4) -#define LAN1_PHY0_LED_MAP 0 -#define LAN1_PHY3_LED_MAP GENMASK(5, 4) +#define LAN1_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN1_LED_MAPPING_MASK, (_n)) #define LAN0_LED_MAPPING_MASK GENMASK(2, 0) -#define LAN0_PHY4_LED_MAP BIT(3) -#define LAN0_PHY2_LED_MAP BIT(2) -#define 
LAN0_PHY1_LED_MAP BIT(1) -#define LAN0_PHY0_LED_MAP 0 -#define LAN0_PHY3_LED_MAP GENMASK(2, 1) +#define LAN0_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN0_LED_MAPPING_MASK, (_n)) /* CONF */ #define REG_I2C_SDA_E2 0x001c @@ -1476,8 +1457,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY1_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1491,8 +1472,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY1_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1506,8 +1487,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY1_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1521,8 +1502,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY1_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(0) }, .regmap_size = 2, }, @@ -1540,8 +1521,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY2_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1555,8 +1536,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY2_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1570,8 +1551,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY2_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1585,8 +1566,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY2_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(1) }, .regmap_size = 2, }, @@ -1604,8 +1585,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY3_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1619,8 +1600,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY3_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1634,8 +1615,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY3_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1649,8 +1630,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY3_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(2) }, .regmap_size = 2, }, @@ -1668,8 +1649,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, 
REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY4_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1683,8 +1664,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY4_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1698,8 +1679,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY4_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1713,8 +1694,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY4_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(3) }, .regmap_size = 2, }, @@ -1732,8 +1713,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY1_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1747,8 +1728,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY1_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1762,8 +1743,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY1_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1777,8 +1758,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY1_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(0) }, .regmap_size = 2, }, @@ -1796,8 +1777,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY2_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1811,8 +1792,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY2_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1826,8 +1807,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY2_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1841,8 +1822,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY2_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(1) }, .regmap_size = 2, }, @@ -1860,8 +1841,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY3_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1875,8 +1856,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY3_LED_MAP + 
LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1890,8 +1871,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY3_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1905,8 +1886,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY3_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(2) }, .regmap_size = 2, }, @@ -1924,8 +1905,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY4_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1939,8 +1920,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY4_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1954,8 +1935,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY4_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1969,8 +1950,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY4_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(3) }, .regmap_size = 2, }, diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 91edb539925a..8596f3541265 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1015,9 +1015,15 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev) if (!pctl->eint) return -ENOMEM; - pctl->eint->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(pctl->eint->base)) - return PTR_ERR(pctl->eint->base); + pctl->eint->nbase = 1; + /* mtk-eint expects an array */ + pctl->eint->base = devm_kzalloc(pctl->dev, sizeof(pctl->eint->base), GFP_KERNEL); + if (!pctl->eint->base) + return -ENOMEM; + + pctl->eint->base[0] = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pctl->eint->base[0])) + return PTR_ERR(pctl->eint->base[0]); pctl->eint->irq = irq_of_parse_and_map(np, 0); if (!pctl->eint->irq) diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 253a0cc57e39..e5a32a0532ee 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_UP: if (meson_pinconf_get_pull(pc, pin) == param) - arg = 1; + arg = 60000; else return -EINVAL; break; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 82f0cc43bbf4..0eb816395dc6 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -44,7 +44,6 @@ * @pctrl: pinctrl handle. * @chip: gpiochip handle. * @desc: pin controller descriptor - * @restart_nb: restart notifier block. * @irq: parent irq for the TLMM irq_chip. 
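The pinctrl-msm hunk a little further on converts the PS_HOLD reset hook from a restart notifier_block to the sys-off handler API. A hedged sketch of that registration pattern follows; the demo_* names are placeholders, and only devm_register_sys_off_handler(), SYS_OFF_MODE_RESTART and struct sys_off_data are taken from the change itself.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct demo_reset_ctx {
	void __iomem *reg;		/* PS_HOLD-style reset register */
};

static int demo_restart_handler(struct sys_off_data *data)
{
	struct demo_reset_ctx *ctx = data->cb_data;

	writel(0, ctx->reg);		/* drop the hold line */
	mdelay(1000);
	return NOTIFY_DONE;
}

/* Typically called from probe; priority 128 matches the old notifier. */
static int demo_register_restart(struct device *dev, struct demo_reset_ctx *ctx)
{
	return devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, 128,
					     demo_restart_handler, ctx);
}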
* @intr_target_use_scm: route irq to application cpu using scm calls * @lock: Spinlock to protect register resources as well @@ -64,7 +63,6 @@ struct msm_pinctrl { struct pinctrl_dev *pctrl; struct gpio_chip chip; struct pinctrl_desc desc; - struct notifier_block restart_nb; int irq; @@ -1471,10 +1469,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return 0; } -static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, - void *data) +static int msm_ps_hold_restart(struct sys_off_data *data) { - struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); + struct msm_pinctrl *pctrl = data->cb_data; writel(0, pctrl->regs[0] + PS_HOLD_OFFSET); mdelay(1000); @@ -1485,7 +1482,11 @@ static struct msm_pinctrl *poweroff_pctrl; static void msm_ps_hold_poweroff(void) { - msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL); + struct sys_off_data data = { + .cb_data = poweroff_pctrl, + }; + + msm_ps_hold_restart(&data); } static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) @@ -1495,9 +1496,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) for (i = 0; i < pctrl->soc->nfunctions; i++) if (!strcmp(func[i].name, "ps_hold")) { - pctrl->restart_nb.notifier_call = msm_ps_hold_restart; - pctrl->restart_nb.priority = 128; - if (register_restart_handler(&pctrl->restart_nb)) + if (devm_register_sys_off_handler(pctrl->dev, + SYS_OFF_MODE_RESTART, + 128, + msm_ps_hold_restart, + pctrl)) dev_err(pctrl->dev, "failed to setup restart handler.\n"); poweroff_pctrl = pctrl; @@ -1599,8 +1602,6 @@ void msm_pinctrl_remove(struct platform_device *pdev) struct msm_pinctrl *pctrl = platform_get_drvdata(pdev); gpiochip_remove(&pctrl->chip); - - unregister_restart_handler(&pctrl->restart_nb); } EXPORT_SYMBOL(msm_pinctrl_remove); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c index 1af11cd95fb0..b94fb4ee0ec3 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8750.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c @@ -46,7 +46,9 @@ .out_bit = 1, \ .intr_enable_bit = 0, \ .intr_status_bit = 0, \ - .intr_target_bit = 5, \ + .intr_wakeup_present_bit = 6, \ + .intr_wakeup_enable_bit = 7, \ + .intr_target_bit = 8, \ .intr_target_kpss_val = 3, \ .intr_raw_status_bit = 4, \ .intr_polarity_bit = 1, \ diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c index b95dcb8d483c..c18a5b96de5c 100644 --- a/drivers/platform/mellanox/mlxbf-bootctl.c +++ b/drivers/platform/mellanox/mlxbf-bootctl.c @@ -333,9 +333,9 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev, else status = valid ? 
"Invalid" : "Free"; } - buf_len += sysfs_emit(buf + buf_len, "%d:%s ", key, status); + buf_len += sysfs_emit_at(buf, buf_len, "%d:%s ", key, status); } - buf_len += sysfs_emit(buf + buf_len, "\n"); + buf_len += sysfs_emit_at(buf, buf_len, "\n"); return buf_len; } diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c index c1eccb3c80c5..73ca3f48c5cf 100644 --- a/drivers/platform/x86/amd/hsmp/acpi.c +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -9,7 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/amd_hsmp.h> +#include <asm/amd/hsmp.h> #include <linux/acpi.h> #include <linux/device.h> @@ -23,13 +23,12 @@ #include <uapi/asm-generic/errno-base.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "hsmp.h" -#define DRIVER_NAME "amd_hsmp" +#define DRIVER_NAME "hsmp_acpi" #define DRIVER_VERSION "2.3" -#define ACPI_HSMP_DEVICE_HID "AMDI0097" /* These are the strings specified in ACPI table */ #define MSG_IDOFF_STR "MsgIdOffset" diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c index a3ac09a90de4..e262e8a97b45 100644 --- a/drivers/platform/x86/amd/hsmp/hsmp.c +++ b/drivers/platform/x86/amd/hsmp/hsmp.c @@ -7,7 +7,7 @@ * This file provides a device implementation for HSMP interface */ -#include <asm/amd_hsmp.h> +#include <asm/amd/hsmp.h> #include <linux/acpi.h> #include <linux/delay.h> diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h index af8b21f821d6..d58d4f0c20d5 100644 --- a/drivers/platform/x86/amd/hsmp/hsmp.h +++ b/drivers/platform/x86/amd/hsmp/hsmp.h @@ -23,6 +23,7 @@ #define HSMP_CDEV_NAME "hsmp_cdev" #define HSMP_DEVNODE_NAME "hsmp" +#define ACPI_HSMP_DEVICE_HID "AMDI0097" struct hsmp_mbaddr_info { u32 base_addr; diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c index b9782a078dbd..62bf9547631e 100644 --- a/drivers/platform/x86/amd/hsmp/plat.c +++ b/drivers/platform/x86/amd/hsmp/plat.c @@ -9,8 +9,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/amd_hsmp.h> +#include <asm/amd/hsmp.h> +#include <linux/acpi.h> #include <linux/build_bug.h> #include <linux/device.h> #include <linux/module.h> @@ -18,7 +19,7 @@ #include <linux/platform_device.h> #include <linux/sysfs.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "hsmp.h" @@ -266,7 +267,7 @@ static bool legacy_hsmp_support(void) } case 0x1A: switch (boot_cpu_data.x86_model) { - case 0x00 ... 0x1F: + case 0x00 ... 
0x0F: return true; default: return false; @@ -288,6 +289,9 @@ static int __init hsmp_plt_init(void) return ret; } + if (acpi_dev_present(ACPI_HSMP_DEVICE_HID, NULL, -1)) + return -ENODEV; + hsmp_pdev = get_hsmp_pdev(); if (!hsmp_pdev) return -ENOMEM; diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c index c005f00988f7..3b9b9f30faa3 100644 --- a/drivers/platform/x86/amd/pmc/mp1_stb.c +++ b/drivers/platform/x86/amd/pmc/mp1_stb.c @@ -11,7 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index b4f49720c87f..5c7c01f66cde 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -11,6 +11,7 @@ #include <linux/dmi.h> #include <linux/io.h> #include <linux/ioport.h> +#include <asm/amd/fch.h> #include "pmc.h" @@ -20,7 +21,7 @@ struct quirk_entry { }; static struct quirk_entry quirk_s2idle_bug = { - .s2idle_bug_mmio = 0xfed80380, + .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH, }; static struct quirk_entry quirk_spurious_8042 = { @@ -217,6 +218,13 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BIOS_VERSION, "03.05"), } }, + { + .ident = "MECHREVO Wujie 14X (GX4HRXL)", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), + } + }, {} }; diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index d789d6cab794..37c7a57afee5 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -28,7 +28,7 @@ #include <linux/seq_file.h> #include <linux/uaccess.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "pmc.h" @@ -644,10 +644,9 @@ static void amd_pmc_s2idle_check(void) struct smu_metrics table; int rc; - /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */ - if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) && - table.s0i3_last_entry_status) - usleep_range(10000, 20000); + /* Avoid triggering OVP */ + if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status) + msleep(2500); /* Dump the IdleMask before we add to the STB */ amd_pmc_idlemask_read(pdev, pdev->dev, NULL); diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c index 02ff68be10d0..a184922bba8d 100644 --- a/drivers/platform/x86/amd/pmf/auto-mode.c +++ b/drivers/platform/x86/amd/pmf/auto-mode.c @@ -120,9 +120,9 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx, amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL); amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, - pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL); + fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, - pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL); + fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL); if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual, diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c index bc8899e15c91..207a0b33d8d3 100644 --- a/drivers/platform/x86/amd/pmf/cnqf.c +++ b/drivers/platform/x86/amd/pmf/cnqf.c @@ -81,10 
+81,10 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx, amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL); amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL); amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU], - NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2], - NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL); if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) apmf_update_fan_idx(dev, diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index a2cb2d5544f5..76910601cac8 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -14,7 +14,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/power_supply.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "pmf.h" /* PMF-SMU communication registers */ @@ -176,6 +176,20 @@ static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev) dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value); } +/** + * fixp_q88_fromint: Convert integer to Q8.8 + * @val: input value + * + * Converts an integer into binary fixed point format where 8 bits + * are used for integer and 8 bits are used for the decimal. + * + * Return: unsigned integer converted to Q8.8 format + */ +u32 fixp_q88_fromint(u32 val) +{ + return val << 8; +} + int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data) { int rc; diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h index e6bdee68ccf3..45b60238d527 100644 --- a/drivers/platform/x86/amd/pmf/pmf.h +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -777,6 +777,7 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev); int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag); int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer); int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag); +u32 fixp_q88_fromint(u32 val); /* SPS Layer */ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf); diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c index d3083383f11f..49e14ca94a9e 100644 --- a/drivers/platform/x86/amd/pmf/sps.c +++ b/drivers/platform/x86/amd/pmf/sps.c @@ -198,9 +198,11 @@ static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx) amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, apts_config_store.val[idx].stt_min_limit, NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, - apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL); + fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu), + NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, - apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL); + fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2), + NULL); } void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, @@ -217,9 +219,11 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, config_store.prop[src][idx].stt_min, NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, - config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL); + 
fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]), + NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, - config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL); + fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]), + NULL); } else if (op == SLIDER_OP_GET) { amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl); amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt); diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index a1e43873a07b..d3bd12ad036a 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -123,7 +123,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_SKINTEMP_APU: if (dev->prev_data->stt_skintemp_apu != val) { - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, val, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + fixp_q88_fromint(val), NULL); dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val); dev->prev_data->stt_skintemp_apu = val; } @@ -131,7 +132,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_SKINTEMP_HS2: if (dev->prev_data->stt_skintemp_hs2 != val) { - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, val, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + fixp_q88_fromint(val), NULL); dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val); dev->prev_data->stt_skintemp_hs2 = val; } @@ -332,6 +334,11 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev) return 0; } +static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev) +{ + return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz); +} + #ifdef CONFIG_AMD_PMF_DEBUG static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) { @@ -359,12 +366,22 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf, dev->policy_buf = new_policy_buf; dev->policy_sz = length; + if (!amd_pmf_pb_valid(dev)) { + ret = -EINVAL; + goto cleanup; + } + amd_pmf_hex_dump_pb(dev); ret = amd_pmf_start_policy_engine(dev); if (ret < 0) - return ret; + goto cleanup; return length; + +cleanup: + kfree(dev->policy_buf); + dev->policy_buf = NULL; + return ret; } static const struct file_operations pb_fops = { @@ -526,6 +543,12 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz); + if (!amd_pmf_pb_valid(dev)) { + dev_info(dev->dev, "No Smart PC policy present\n"); + ret = -EINVAL; + goto err_free_policy; + } + amd_pmf_hex_dump_pb(dev); dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL); diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index d460dd194f19..a0a411b4f2d6 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -426,11 +426,14 @@ static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable) static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method) { + unsigned long long val = (unsigned long long)curr; + acpi_status status; int i, delta; - unsigned long long val; - for (i = 0; i < PEGA_ACC_RETRIES; i++) { - acpi_evaluate_integer(asus->handle, method, NULL, &val); + for (i = 0; i < PEGA_ACC_RETRIES; i++) { + status = acpi_evaluate_integer(asus->handle, method, NULL, &val); + if (ACPI_FAILURE(status)) + continue; /* The output is noisy. 
From reading the ASL * dissassembly, timeout errors are returned with 1's * in the high word, and the lack of locking around diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 38ef778e8c19..47cc766624d7 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -304,6 +304,7 @@ struct asus_wmi { u32 kbd_rgb_dev; bool kbd_rgb_state_available; + bool oobe_state_available; u8 throttle_thermal_policy_mode; u32 throttle_thermal_policy_dev; @@ -1826,7 +1827,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus) goto error; } - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) { + if (asus->oobe_state_available) { /* * Disable OOBE state, so that e.g. the keyboard backlight * works. @@ -4723,6 +4724,7 @@ static int asus_wmi_add(struct platform_device *pdev) asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); + asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE); asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) && dmi_check_system(asus_ally_mcu_quirk); @@ -4777,7 +4779,8 @@ static int asus_wmi_add(struct platform_device *pdev) goto fail_leds; asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); - if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) == + (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) asus->driver->wlan_ctrl_by_user = 1; if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) { @@ -4970,6 +4973,13 @@ static int asus_hotk_restore(struct device *device) } if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) kbd_led_update(asus); + if (asus->oobe_state_available) { + /* + * Disable OOBE state, so that e.g. the keyboard backlight + * works. 
+ */ + asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL); + } if (asus_wmi_has_fnlock_key(asus)) asus_wmi_fnlock_update(asus); diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c index 3d3014b5adf0..08b82c151e07 100644 --- a/drivers/platform/x86/dell/alienware-wmi-wmax.c +++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c @@ -62,11 +62,43 @@ static struct awcc_quirks empty_quirks; static const struct dmi_system_id awcc_dmi_table[] __initconst = { { + .ident = "Alienware Area-51m R2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m R2"), + }, + .driver_data = &generic_quirks, + }, + { + .ident = "Alienware m15 R7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R7"), + }, + .driver_data = &generic_quirks, + }, + { + .ident = "Alienware m16 R1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1"), + }, + .driver_data = &g_series_quirks, + }, + { .ident = "Alienware m16 R1 AMD", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"), }, + .driver_data = &g_series_quirks, + }, + { + .ident = "Alienware m16 R2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R2"), + }, .driver_data = &generic_quirks, }, { @@ -94,6 +126,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { .driver_data = &generic_quirks, }, { + .ident = "Alienware x15 R2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R2"), + }, + .driver_data = &generic_quirks, + }, + { .ident = "Alienware x17 R2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), @@ -126,6 +166,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { .driver_data = &g_series_quirks, }, { + .ident = "Dell Inc. G16 7630", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell G16 7630"), + }, + .driver_data = &g_series_quirks, + }, + { .ident = "Dell Inc. G3 3500", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -149,6 +197,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { }, .driver_data = &g_series_quirks, }, + { + .ident = "Dell Inc. 
G5 5505", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "G5 5505"), + }, + .driver_data = &g_series_quirks, + }, }; enum WMAX_THERMAL_INFORMATION_OPERATIONS { @@ -607,12 +663,10 @@ static int thermal_profile_probe(void *drvdata, unsigned long *choices) for (u32 i = 0; i < sys_desc[3]; i++) { ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS, i + first_mode, &out_data); - - if (ret == -EIO) - return ret; - if (ret == -EBADRQC) break; + if (ret) + return ret; if (!is_wmax_thermal_code(out_data)) continue; diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 230e6ee96636..d8f1bf5e58a0 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj, int length; length = strlen(buf); - if (buf[length-1] == '\n') + if (length && buf[length - 1] == '\n') length--; /* firmware does verifiation of min/max password length, diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index a0eae24ca9e6..162809140f68 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -17,13 +17,13 @@ /* * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional * features made available on a range of Fujitsu laptops including the - * P2xxx/P5xxx/S6xxx/S7xxx series. + * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series. * * This driver implements a vendor-specific backlight control interface for * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu * laptops. * - * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and + * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and * P8010. It should work on most P-series and S-series Lifebooks, but * YMMV. 
* @@ -107,7 +107,11 @@ #define KEY2_CODE 0x411 #define KEY3_CODE 0x412 #define KEY4_CODE 0x413 -#define KEY5_CODE 0x420 +#define KEY5_CODE 0x414 +#define KEY6_CODE 0x415 +#define KEY7_CODE 0x416 +#define KEY8_CODE 0x417 +#define KEY9_CODE 0x420 /* Hotkey ringbuffer limits */ #define MAX_HOTKEY_RINGBUFFER_SIZE 100 @@ -560,7 +564,7 @@ static const struct key_entry keymap_default[] = { { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, { KE_KEY, KEY3_CODE, { KEY_PROG3 } }, { KE_KEY, KEY4_CODE, { KEY_PROG4 } }, - { KE_KEY, KEY5_CODE, { KEY_RFKILL } }, + { KE_KEY, KEY9_CODE, { KEY_RFKILL } }, /* Soft keys read from status flags */ { KE_KEY, FLAG_RFKILL, { KEY_RFKILL } }, { KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } }, @@ -584,6 +588,18 @@ static const struct key_entry keymap_p8010[] = { { KE_END, 0 } }; +static const struct key_entry keymap_s2110[] = { + { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */ + { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */ + { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */ + { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */ + { KE_KEY, KEY5_CODE, { KEY_STOPCD } }, + { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } }, + { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } }, + { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } }, + { KE_END, 0 } +}; + static const struct key_entry *keymap = keymap_default; static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id) @@ -621,6 +637,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = { }, .driver_data = (void *)keymap_p8010 }, + { + .callback = fujitsu_laptop_dmi_keymap_override, + .ident = "Fujitsu LifeBook S2110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"), + }, + .driver_data = (void *)keymap_s2110 + }, {} }; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17a09b7784ed..ede483573fe0 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1294,6 +1294,16 @@ static const struct key_entry ideapad_keymap[] = { /* Specific to some newer models */ { KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } }, { KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } }, + /* Star- (User Assignable Key) */ + { KE_KEY, 0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, + /* Eye */ + { KE_KEY, 0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } }, + /* Performance toggle also Fn+Q, handled inside ideapad_wmi_notify() */ + { KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } }, + /* shift + prtsc */ + { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } }, + { KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } }, + { KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } }, { KE_END }, }; @@ -2080,6 +2090,12 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data) dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n", data->integer.value); + /* performance button triggered by 0x3d */ + if (data->integer.value == 0x3d && priv->dytc) { + platform_profile_cycle(); + break; + } + /* 0x02 FnLock, 0x03 Esc */ if (data->integer.value == 0x02 || data->integer.value == 0x03) ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02); diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 88a1a9ff2f34..0b5e43444ed6 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -44,16 +44,17 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung"); static const struct acpi_device_id intel_hid_ids[] = { - {"INT33D5", 0}, - {"INTC1051", 0}, - {"INTC1054", 
0}, - {"INTC1070", 0}, - {"INTC1076", 0}, - {"INTC1077", 0}, - {"INTC1078", 0}, - {"INTC107B", 0}, - {"INTC10CB", 0}, - {"", 0}, + { "INT33D5" }, + { "INTC1051" }, + { "INTC1054" }, + { "INTC1070" }, + { "INTC1076" }, + { "INTC1077" }, + { "INTC1078" }, + { "INTC107B" }, + { "INTC10CB" }, + { "INTC10CC" }, + { } }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids); diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 1ae50702bdb7..b73e582128c9 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "ifs.h" @@ -115,13 +116,13 @@ static int __init ifs_init(void) if (!m) return -ENODEV; - if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval)) + if (rdmsrq_safe(MSR_IA32_CORE_CAPS, &msrval)) return -ENODEV; if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS)) return -ENODEV; - if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval)) + if (rdmsrq_safe(MSR_INTEGRITY_CAPS, &msrval)) return -ENODEV; ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index de54bd1a5970..50f1fdf7dfed 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -5,6 +5,7 @@ #include <linux/sizes.h> #include <asm/cpu.h> #include <asm/microcode.h> +#include <asm/msr.h> #include "ifs.h" @@ -127,8 +128,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) ifsd = ifs_get_data(dev); msrs = ifs_get_test_msrs(dev); /* run scan hash copy */ - wrmsrl(msrs->copy_hashes, ifs_hash_ptr); - rdmsrl(msrs->copy_hashes_status, hashes_status.data); + wrmsrq(msrs->copy_hashes, ifs_hash_ptr); + rdmsrq(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ num_chunks = hashes_status.num_chunks; @@ -149,8 +150,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) linear_addr = base + i * chunk_size; linear_addr |= i; - wrmsrl(msrs->copy_chunks, linear_addr); - rdmsrl(msrs->copy_chunks_status, chunk_status.data); + wrmsrq(msrs->copy_chunks, linear_addr); + rdmsrq(msrs->copy_chunks_status, chunk_status.data); ifsd->valid_chunks = chunk_status.valid_chunks; err_code = chunk_status.error_code; @@ -195,8 +196,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) msrs = ifs_get_test_msrs(dev); if (need_copy_scan_hashes(ifsd)) { - wrmsrl(msrs->copy_hashes, ifs_hash_ptr); - rdmsrl(msrs->copy_hashes_status, hashes_status.data); + wrmsrq(msrs->copy_hashes, ifs_hash_ptr); + rdmsrq(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ chunk_size = hashes_status.chunk_size * SZ_1K; @@ -216,8 +217,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) } if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { - wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); - rdmsrl(msrs->copy_chunks_status, chunk_status.data); + wrmsrq(msrs->test_ctrl, INVALIDATE_STRIDE); + rdmsrq(msrs->copy_chunks_status, chunk_status.data); if (chunk_status.valid_chunks != 0) { dev_err(dev, "Couldn't invalidate installed stride - %d\n", chunk_status.valid_chunks); @@ -238,9 +239,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) chunk_table[1] = linear_addr; do { local_irq_disable(); - wrmsrl(msrs->copy_chunks, (u64)chunk_table); + wrmsrq(msrs->copy_chunks, (u64)chunk_table); local_irq_enable(); - 
rdmsrl(msrs->copy_chunks_status, chunk_status.data); + rdmsrq(msrs->copy_chunks_status, chunk_status.data); err_code = chunk_status.error_code; } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index f978dd05d4d8..dfc119d7354d 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -7,6 +7,7 @@ #include <linux/nmi.h> #include <linux/slab.h> #include <linux/stop_machine.h> +#include <asm/msr.h> #include "ifs.h" @@ -209,8 +210,8 @@ static int doscan(void *data) * take up to 200 milliseconds (in the case where all chunks * are processed in a single pass) before it retires. */ - wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); - rdmsrl(MSR_SCAN_STATUS, status.data); + wrmsrq(MSR_ACTIVATE_SCAN, params->activate->data); + rdmsrq(MSR_SCAN_STATUS, status.data); trace_ifs_status(ifsd->cur_batch, start, stop, status.data); @@ -321,9 +322,9 @@ static int do_array_test(void *data) first = cpumask_first(cpu_smt_mask(cpu)); if (cpu == first) { - wrmsrl(MSR_ARRAY_BIST, command->data); + wrmsrq(MSR_ARRAY_BIST, command->data); /* Pass back the result of the test */ - rdmsrl(MSR_ARRAY_BIST, command->data); + rdmsrq(MSR_ARRAY_BIST, command->data); } return 0; @@ -374,8 +375,8 @@ static int do_array_test_gen1(void *status) first = cpumask_first(cpu_smt_mask(cpu)); if (cpu == first) { - wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); - rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); + wrmsrq(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status)); } return 0; @@ -526,8 +527,8 @@ static int dosbaf(void *data) * starts scan of each requested bundle. The core test happens * during the "execution" of the WRMSR. 
*/ - wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); - rdmsrl(MSR_SBAF_STATUS, status.data); + wrmsrq(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrq(MSR_SBAF_STATUS, status.data); trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); /* Pass back the result of the test */ diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c index 320993bd6d31..f9c48738b853 100644 --- a/drivers/platform/x86/intel/pmc/arl.c +++ b/drivers/platform/x86/intel/pmc/arl.c @@ -681,6 +681,7 @@ static struct pmc_info arl_pmc_info_list[] = { #define ARL_NPU_PCI_DEV 0xad1d #define ARL_GNA_PCI_DEV 0xae4c +#define ARL_H_NPU_PCI_DEV 0x7d1d #define ARL_H_GNA_PCI_DEV 0x774c /* * Set power state of select devices that do not have drivers to D3 @@ -694,7 +695,7 @@ static void arl_d3_fixup(void) static void arl_h_d3_fixup(void) { - pmc_core_set_device_d3(ARL_NPU_PCI_DEV); + pmc_core_set_device_d3(ARL_H_NPU_PCI_DEV); pmc_core_set_device_d3(ARL_H_GNA_PCI_DEV); } diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index 2c5af158bbe2..efea4e1ba52b 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ b/drivers/platform/x86/intel/pmc/cnp.c @@ -10,6 +10,7 @@ #include <linux/smp.h> #include <linux/suspend.h> +#include <asm/msr.h> #include "core.h" /* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */ @@ -227,10 +228,10 @@ static void disable_c1_auto_demote(void *unused) int cpunum = smp_processor_id(); u64 val; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, val); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); per_cpu(pkg_cst_config, cpunum) = val; val &= ~NHM_C1_AUTO_DEMOTE; - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val); + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, val); } @@ -239,7 +240,7 @@ static void restore_c1_auto_demote(void *unused) { int cpunum = smp_processor_id(); - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, per_cpu(pkg_cst_config, cpunum)); diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 7a1d11f2914f..9f678c753dfa 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -22,7 +22,7 @@ #include <linux/suspend.h> #include <linux/units.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/msr.h> @@ -1082,7 +1082,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused) unsigned int index; for (index = 0; map[index].name ; index++) { - if (rdmsrl_safe(map[index].bit_mask, &pcstate_count)) + if (rdmsrq_safe(map[index].bit_mask, &pcstate_count)) continue; pcstate_count *= 1000; @@ -1587,7 +1587,7 @@ static __maybe_unused int pmc_core_suspend(struct device *dev) /* Save PKGC residency for checking later */ for (i = 0; i < pmcdev->num_of_pkgc; i++) { - if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i])) + if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i])) return -EIO; } @@ -1603,7 +1603,7 @@ static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev) u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask; u64 deepest_pkgc_residency; - if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency)) + if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency)) return false; if (deepest_pkgc_residency == 
pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]) @@ -1655,7 +1655,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev) for (i = 0; i < pmcdev->num_of_pkgc; i++) { u64 pc_cnt; - if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) { + if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) { dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n", msr_map[i].name, pmcdev->pkgc_res_cnt[i], msr_map[i].name, pc_cnt); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 31239a93dd71..8a5713593811 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -21,6 +21,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "isst_if_common.h" @@ -191,7 +192,7 @@ void isst_resume_common(void) if (cb->registered) isst_mbox_resume_command(cb, sst_cmd); } else { - wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, + wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, sst_cmd->data); } } @@ -211,7 +212,7 @@ static void isst_restore_msr_local(int cpu) hash_for_each_possible(isst_hash, sst_cmd, hnode, punit_msr_white_list[i]) { if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu) - wrmsrl_safe(sst_cmd->cmd, sst_cmd->data); + wrmsrq_safe(sst_cmd->cmd, sst_cmd->data); } } mutex_unlock(&isst_hash_lock); @@ -406,7 +407,7 @@ static int isst_if_cpu_online(unsigned int cpu) isst_cpu_info[cpu].numa_node = cpu_to_node(cpu); - ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data); + ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data); if (ret) { /* This is not a fatal error on MSR mailbox only I/F */ isst_cpu_info[cpu].bus_info[0] = -1; @@ -420,12 +421,12 @@ static int isst_if_cpu_online(unsigned int cpu) if (isst_hpm_support) { - ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); if (!ret) goto set_punit_id; } - ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data); + ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data); if (ret) { isst_cpu_info[cpu].punit_cpu_id = -1; return ret; @@ -524,7 +525,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu, + ret = wrmsrq_safe_on_cpu(msr_cmd->logical_cpu, msr_cmd->msr, msr_cmd->data); *write_only = 1; @@ -535,7 +536,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) } else { u64 data; - ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu, + ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu, msr_cmd->msr, &data); if (!ret) { msr_cmd->data = data; @@ -831,8 +832,8 @@ static int __init isst_if_common_init(void) u64 data; /* Can fail only on some Skylake-X generations */ - if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) || - rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data)) + if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) || + rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data)) return -ENODEV; } diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c index c4b7af00352b..22745b217c6f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c @@ -18,6 +18,7 @@ #include <uapi/linux/isst_if.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "isst_if_common.h" @@ -39,7 +40,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, /* 
Poll for rb bit == 0 */ retries = OS_MAILBOX_RETRY_COUNT; do { - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { ret = -EBUSY; continue; @@ -52,19 +53,19 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, return ret; /* Write DATA register */ - wrmsrl(MSR_OS_MAILBOX_DATA, command_data); + wrmsrq(MSR_OS_MAILBOX_DATA, command_data); /* Write command register */ data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) | (parameter & GENMASK_ULL(13, 0)) << 16 | (sub_command << 8) | command; - wrmsrl(MSR_OS_MAILBOX_INTERFACE, data); + wrmsrq(MSR_OS_MAILBOX_INTERFACE, data); /* Poll for rb bit == 0 */ retries = OS_MAILBOX_RETRY_COUNT; do { - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { ret = -EBUSY; continue; @@ -74,7 +75,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, return -ENXIO; if (response_data) { - rdmsrl(MSR_OS_MAILBOX_DATA, data); + rdmsrq(MSR_OS_MAILBOX_DATA, data); *response_data = data; } ret = 0; @@ -176,11 +177,11 @@ static int __init isst_if_mbox_init(void) return -ENODEV; /* Check presence of mailbox MSRs */ - ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data); + ret = rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data); if (ret) return ret; - ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data); + ret = rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data); if (ret) return ret; diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 9978cdd19851..4d30d5360c8f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/minmax.h> #include <linux/module.h> +#include <asm/msr.h> #include <uapi/linux/isst_if.h> #include "isst_tpmi_core.h" @@ -556,7 +557,7 @@ static bool disable_dynamic_sst_features(void) { u64 value; - rdmsrl(MSR_PM_ENABLE, value); + rdmsrq(MSR_PM_ENABLE, value); return !(value & 0x1); } diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c index 2f01cd22a6ee..c21b3cb99b7c 100644 --- a/drivers/platform/x86/intel/tpmi_power_domains.c +++ b/drivers/platform/x86/intel/tpmi_power_domains.c @@ -157,7 +157,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info) u64 data; int ret; - ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); if (ret) return ret; @@ -203,7 +203,7 @@ static int __init tpmi_init(void) return -ENODEV; /* Check for MSR 0x54 presence */ - ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); if (ret) return ret; diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c index 79a0bcdeffb8..b5af3e91ba04 100644 --- a/drivers/platform/x86/intel/turbo_max_3.c +++ b/drivers/platform/x86/intel/turbo_max_3.c @@ -17,6 +17,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #define MSR_OC_MAILBOX 0x150 #define MSR_OC_MAILBOX_CMD_OFFSET 32 @@ -41,14 +42,14 @@ static int get_oc_core_priority(unsigned int cpu) value = cmd << MSR_OC_MAILBOX_CMD_OFFSET; /* Set the busy bit to indicate OS is trying to issue command */ value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT); - ret = wrmsrl_safe(MSR_OC_MAILBOX, value); + ret = wrmsrq_safe(MSR_OC_MAILBOX, 
value); if (ret) { pr_debug("cpu %d OC mailbox write failed\n", cpu); return ret; } for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) { - ret = rdmsrl_safe(MSR_OC_MAILBOX, &value); + ret = rdmsrq_safe(MSR_OC_MAILBOX, &value); if (ret) { pr_debug("cpu %d OC mailbox read failed\n", cpu); break; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index 40bbf8e45fa4..2a6897035150 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -21,6 +21,7 @@ #include <linux/suspend.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "uncore-frequency-common.h" @@ -51,7 +52,7 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); if (ret) return ret; @@ -76,7 +77,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); if (ret) return ret; @@ -88,7 +89,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } - ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + ret = wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); if (ret) return ret; @@ -105,7 +106,7 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio); if (ret) return ret; @@ -146,15 +147,13 @@ static int uncore_event_cpu_online(unsigned int cpu) { struct uncore_data *data; int target; + int ret; /* Check if there is an online cpu in the package for uncore MSR */ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); if (target < nr_cpu_ids) return 0; - /* Use this CPU on this die as a control CPU */ - cpumask_set_cpu(cpu, &uncore_cpu_mask); - data = uncore_get_instance(cpu); if (!data) return 0; @@ -163,7 +162,14 @@ static int uncore_event_cpu_online(unsigned int cpu) data->die_id = topology_die_id(cpu); data->domain_id = UNCORE_DOMAIN_ID_INVALID; - return uncore_freq_add_entry(data, cpu); + ret = uncore_freq_add_entry(data, cpu); + if (ret) + return ret; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + return 0; } static int uncore_event_cpu_offline(unsigned int cpu) @@ -207,7 +213,7 @@ static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, if (!data || !data->valid || !data->stored_uncore_data) return 0; - wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, + wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, data->stored_uncore_data); } break; diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 5d717b1c23cf..9506f28fb7d8 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -370,7 +370,7 @@ static void ips_cpu_raise(struct ips_driver *ips) if (!ips->cpu_turbo_enabled) return; - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); cur_tdp_limit = turbo_override & TURBO_TDP_MASK; new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */ @@ -382,12 +382,12 @@ static void ips_cpu_raise(struct ips_driver *ips) thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; turbo_override |= new_tdp_limit; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); } /** @@ -405,7 +405,7 @@ static void ips_cpu_lower(struct ips_driver *ips) u64 turbo_override; u16 cur_limit, new_limit; - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); cur_limit = turbo_override & TURBO_TDP_MASK; new_limit = cur_limit - 8; /* 1W decrease */ @@ -417,12 +417,12 @@ static void ips_cpu_lower(struct ips_driver *ips) thm_writew(THM_MPCPC, (new_limit * 10) / 8); turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; turbo_override |= new_limit; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); } /** @@ -437,10 +437,10 @@ static void do_enable_cpu_turbo(void *data) { u64 perf_ctl; - rdmsrl(IA32_PERF_CTL, perf_ctl); + rdmsrq(IA32_PERF_CTL, perf_ctl); if (perf_ctl & IA32_PERF_TURBO_DIS) { perf_ctl &= ~IA32_PERF_TURBO_DIS; - wrmsrl(IA32_PERF_CTL, perf_ctl); + wrmsrq(IA32_PERF_CTL, perf_ctl); } } @@ -475,10 +475,10 @@ static void do_disable_cpu_turbo(void *data) { u64 perf_ctl; - rdmsrl(IA32_PERF_CTL, perf_ctl); + rdmsrq(IA32_PERF_CTL, perf_ctl); if (!(perf_ctl & IA32_PERF_TURBO_DIS)) { perf_ctl |= IA32_PERF_TURBO_DIS; - wrmsrl(IA32_PERF_CTL, perf_ctl); + wrmsrq(IA32_PERF_CTL, perf_ctl); } } @@ -1215,7 +1215,7 @@ static int cpu_clamp_show(struct seq_file *m, void *data) u64 turbo_override; int tdp, tdc; - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); tdp = (int)(turbo_override & TURBO_TDP_MASK); tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT); @@ -1290,7 +1290,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) return NULL; } - rdmsrl(IA32_MISC_ENABLE, misc_en); + rdmsrq(IA32_MISC_ENABLE, misc_en); /* * If the turbo enable bit isn't set, we shouldn't try to enable/disable * turbo manually or we'll get an illegal MSR access, even though @@ -1312,7 +1312,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) return NULL; } - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_power); tdp = turbo_power & TURBO_TDP_MASK; /* Sanity check TDP against CPU */ @@ -1496,7 +1496,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) * Check PLATFORM_INFO MSR to make sure this chip is * turbo capable. 
*/ - rdmsrl(PLATFORM_INFO, platform_info); + rdmsrq(PLATFORM_INFO, platform_info); if (!(platform_info & PLATFORM_TDP)) { dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n"); return -ENODEV; @@ -1529,7 +1529,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) ips->mgta_val = thm_readw(THM_MGTA); /* Save turbo limits & ratios */ - rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); ips_disable_cpu_turbo(ips); ips->cpu_turbo_enabled = false; @@ -1596,10 +1596,10 @@ static void ips_remove(struct pci_dev *dev) if (ips->gpu_turbo_disable) symbol_put(i915_gpu_turbo_disable); - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN); - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); - wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); free_irq(ips->irq, ips); pci_free_irq_vectors(dev); diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c index 9b5c7f8c79b0..dc5e9878cb68 100644 --- a/drivers/platform/x86/msi-wmi-platform.c +++ b/drivers/platform/x86/msi-wmi-platform.c @@ -10,6 +10,7 @@ #include <linux/acpi.h> #include <linux/bits.h> #include <linux/bitfield.h> +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/device/driver.h> @@ -17,6 +18,7 @@ #include <linux/hwmon.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/printk.h> #include <linux/rwsem.h> #include <linux/types.h> @@ -76,8 +78,13 @@ enum msi_wmi_platform_method { MSI_PLATFORM_GET_WMI = 0x1d, }; -struct msi_wmi_platform_debugfs_data { +struct msi_wmi_platform_data { struct wmi_device *wdev; + struct mutex wmi_lock; /* Necessary when calling WMI methods */ +}; + +struct msi_wmi_platform_debugfs_data { + struct msi_wmi_platform_data *data; enum msi_wmi_platform_method method; struct rw_semaphore buffer_lock; /* Protects debugfs buffer */ size_t length; @@ -132,8 +139,9 @@ static int msi_wmi_platform_parse_buffer(union acpi_object *obj, u8 *output, siz return 0; } -static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform_method method, - u8 *input, size_t input_length, u8 *output, size_t output_length) +static int msi_wmi_platform_query(struct msi_wmi_platform_data *data, + enum msi_wmi_platform_method method, u8 *input, + size_t input_length, u8 *output, size_t output_length) { struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer in = { @@ -147,9 +155,15 @@ static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform if (!input_length || !output_length) return -EINVAL; - status = wmidev_evaluate_method(wdev, 0x0, method, &in, &out); - if (ACPI_FAILURE(status)) - return -EIO; + /* + * The ACPI control method responsible for handling the WMI method calls + * is not thread-safe. Because of this we have to do the locking ourself. 
+ */ + scoped_guard(mutex, &data->wmi_lock) { + status = wmidev_evaluate_method(data->wdev, 0x0, method, &in, &out); + if (ACPI_FAILURE(status)) + return -EIO; + } obj = out.pointer; if (!obj) @@ -170,22 +184,22 @@ static umode_t msi_wmi_platform_is_visible(const void *drvdata, enum hwmon_senso static int msi_wmi_platform_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { - struct wmi_device *wdev = dev_get_drvdata(dev); + struct msi_wmi_platform_data *data = dev_get_drvdata(dev); u8 input[32] = { 0 }; u8 output[32]; - u16 data; + u16 value; int ret; - ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_FAN, input, sizeof(input), output, + ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_FAN, input, sizeof(input), output, sizeof(output)); if (ret < 0) return ret; - data = get_unaligned_be16(&output[channel * 2 + 1]); - if (!data) + value = get_unaligned_be16(&output[channel * 2 + 1]); + if (!value) *val = 0; else - *val = 480000 / data; + *val = 480000 / value; return 0; } @@ -231,7 +245,7 @@ static ssize_t msi_wmi_platform_write(struct file *fp, const char __user *input, return ret; down_write(&data->buffer_lock); - ret = msi_wmi_platform_query(data->wdev, data->method, payload, data->length, data->buffer, + ret = msi_wmi_platform_query(data->data, data->method, payload, data->length, data->buffer, data->length); up_write(&data->buffer_lock); @@ -277,17 +291,17 @@ static void msi_wmi_platform_debugfs_remove(void *data) debugfs_remove_recursive(dir); } -static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry *dir, +static void msi_wmi_platform_debugfs_add(struct msi_wmi_platform_data *drvdata, struct dentry *dir, const char *name, enum msi_wmi_platform_method method) { struct msi_wmi_platform_debugfs_data *data; struct dentry *entry; - data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(&drvdata->wdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return; - data->wdev = wdev; + data->data = drvdata; data->method = method; init_rwsem(&data->buffer_lock); @@ -298,82 +312,82 @@ static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry entry = debugfs_create_file(name, 0600, dir, data, &msi_wmi_platform_debugfs_fops); if (IS_ERR(entry)) - devm_kfree(&wdev->dev, data); + devm_kfree(&drvdata->wdev->dev, data); } -static void msi_wmi_platform_debugfs_init(struct wmi_device *wdev) +static void msi_wmi_platform_debugfs_init(struct msi_wmi_platform_data *data) { struct dentry *dir; char dir_name[64]; int ret, method; - scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&wdev->dev)); + scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&data->wdev->dev)); dir = debugfs_create_dir(dir_name, NULL); if (IS_ERR(dir)) return; - ret = devm_add_action_or_reset(&wdev->dev, msi_wmi_platform_debugfs_remove, dir); + ret = devm_add_action_or_reset(&data->wdev->dev, msi_wmi_platform_debugfs_remove, dir); if (ret < 0) return; for (method = MSI_PLATFORM_GET_PACKAGE; method <= MSI_PLATFORM_GET_WMI; method++) - msi_wmi_platform_debugfs_add(wdev, dir, msi_wmi_platform_debugfs_names[method - 1], + msi_wmi_platform_debugfs_add(data, dir, msi_wmi_platform_debugfs_names[method - 1], method); } -static int msi_wmi_platform_hwmon_init(struct wmi_device *wdev) +static int msi_wmi_platform_hwmon_init(struct msi_wmi_platform_data *data) { struct device *hdev; - hdev = devm_hwmon_device_register_with_info(&wdev->dev, "msi_wmi_platform", wdev, + hdev = 
devm_hwmon_device_register_with_info(&data->wdev->dev, "msi_wmi_platform", data, &msi_wmi_platform_chip_info, NULL); return PTR_ERR_OR_ZERO(hdev); } -static int msi_wmi_platform_ec_init(struct wmi_device *wdev) +static int msi_wmi_platform_ec_init(struct msi_wmi_platform_data *data) { u8 input[32] = { 0 }; u8 output[32]; u8 flags; int ret; - ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_EC, input, sizeof(input), output, + ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_EC, input, sizeof(input), output, sizeof(output)); if (ret < 0) return ret; flags = output[MSI_PLATFORM_EC_FLAGS_OFFSET]; - dev_dbg(&wdev->dev, "EC RAM version %lu.%lu\n", + dev_dbg(&data->wdev->dev, "EC RAM version %lu.%lu\n", FIELD_GET(MSI_PLATFORM_EC_MAJOR_MASK, flags), FIELD_GET(MSI_PLATFORM_EC_MINOR_MASK, flags)); - dev_dbg(&wdev->dev, "EC firmware version %.28s\n", + dev_dbg(&data->wdev->dev, "EC firmware version %.28s\n", &output[MSI_PLATFORM_EC_VERSION_OFFSET]); if (!(flags & MSI_PLATFORM_EC_IS_TIGERLAKE)) { if (!force) return -ENODEV; - dev_warn(&wdev->dev, "Loading on a non-Tigerlake platform\n"); + dev_warn(&data->wdev->dev, "Loading on a non-Tigerlake platform\n"); } return 0; } -static int msi_wmi_platform_init(struct wmi_device *wdev) +static int msi_wmi_platform_init(struct msi_wmi_platform_data *data) { u8 input[32] = { 0 }; u8 output[32]; int ret; - ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_WMI, input, sizeof(input), output, + ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_WMI, input, sizeof(input), output, sizeof(output)); if (ret < 0) return ret; - dev_dbg(&wdev->dev, "WMI interface version %u.%u\n", + dev_dbg(&data->wdev->dev, "WMI interface version %u.%u\n", output[MSI_PLATFORM_WMI_MAJOR_OFFSET], output[MSI_PLATFORM_WMI_MINOR_OFFSET]); @@ -381,7 +395,8 @@ static int msi_wmi_platform_init(struct wmi_device *wdev) if (!force) return -ENODEV; - dev_warn(&wdev->dev, "Loading despite unsupported WMI interface version (%u.%u)\n", + dev_warn(&data->wdev->dev, + "Loading despite unsupported WMI interface version (%u.%u)\n", output[MSI_PLATFORM_WMI_MAJOR_OFFSET], output[MSI_PLATFORM_WMI_MINOR_OFFSET]); } @@ -391,19 +406,31 @@ static int msi_wmi_platform_init(struct wmi_device *wdev) static int msi_wmi_platform_probe(struct wmi_device *wdev, const void *context) { + struct msi_wmi_platform_data *data; int ret; - ret = msi_wmi_platform_init(wdev); + data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->wdev = wdev; + dev_set_drvdata(&wdev->dev, data); + + ret = devm_mutex_init(&wdev->dev, &data->wmi_lock); + if (ret < 0) + return ret; + + ret = msi_wmi_platform_init(data); if (ret < 0) return ret; - ret = msi_wmi_platform_ec_init(wdev); + ret = msi_wmi_platform_ec_init(data); if (ret < 0) return ret; - msi_wmi_platform_debugfs_init(wdev); + msi_wmi_platform_debugfs_init(data); - return msi_wmi_platform_hwmon_init(wdev); + return msi_wmi_platform_hwmon_init(data); } static const struct wmi_device_id msi_wmi_platform_id_table[] = { diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 0fc275e461be..00b1e7c79a3d 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -1061,8 +1061,8 @@ static ssize_t current_value_store(struct kobject *kobj, ret = -EINVAL; goto out; } - set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name, - new_setting, tlmi_priv.pwd_admin->signature); + set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name, + new_setting, 
tlmi_priv.pwd_admin->signature); if (!set_str) { ret = -ENOMEM; goto out; @@ -1092,7 +1092,7 @@ static ssize_t current_value_store(struct kobject *kobj, goto out; } - set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name, new_setting); if (!set_str) { ret = -ENOMEM; @@ -1120,11 +1120,11 @@ static ssize_t current_value_store(struct kobject *kobj, } if (auth_str) - set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name, - new_setting, auth_str); + set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name, + new_setting, auth_str); else - set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, - new_setting); + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name, + new_setting); if (!set_str) { ret = -ENOMEM; goto out; @@ -1629,9 +1629,6 @@ static int tlmi_analyze(struct wmi_device *wdev) continue; } - /* It is not allowed to have '/' for file name. Convert it into '\'. */ - strreplace(item, '/', '\\'); - /* Remove the value part */ strreplace(item, ',', '\0'); @@ -1644,11 +1641,16 @@ static int tlmi_analyze(struct wmi_device *wdev) } setting->wdev = wdev; setting->index = i; + + strscpy(setting->name, item); + /* It is not allowed to have '/' for file name. Convert it into '\'. */ + strreplace(item, '/', '\\'); strscpy(setting->display_name, item); + /* If BIOS selections supported, load those */ if (tlmi_priv.can_get_bios_selections) { - ret = tlmi_get_bios_selections(setting->display_name, - &setting->possible_values); + ret = tlmi_get_bios_selections(setting->name, + &setting->possible_values); if (ret || !setting->possible_values) pr_info("Error retrieving possible values for %d : %s\n", i, setting->display_name); diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index a80452482227..9b014644d316 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -90,6 +90,7 @@ struct tlmi_attr_setting { struct kobject kobj; struct wmi_device *wdev; int index; + char name[TLMI_SETTINGS_MAXLEN]; char display_name[TLMI_SETTINGS_MAXLEN]; char *possible_values; }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 5790095c175e..657625dd60a0 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -231,6 +231,7 @@ enum tpacpi_hkey_event_t { /* Thermal events */ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */ + TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed*/ TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */ TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */ TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */ @@ -3777,6 +3778,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev) pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n"); /* recommended action: immediate sleep/hibernate */ break; + case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE: + pr_debug("Battery Info: battery charge threshold changed\n"); + /* User changed charging threshold. 
No action needed */ + return true; case TP_HKEY_EV_ALARM_SENSOR_HOT: pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n"); /* recommended action: warn user through gui, that */ @@ -11478,6 +11483,8 @@ static int __must_check __init get_thinkpad_model_data( tp->vendor = PCI_VENDOR_ID_IBM; else if (dmi_name_in_vendors("LENOVO")) tp->vendor = PCI_VENDOR_ID_LENOVO; + else if (dmi_name_in_vendors("NEC")) + tp->vendor = PCI_VENDOR_ID_LENOVO; else return 0; diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c index 3e5fa3b6e2fd..278c6d151dc4 100644 --- a/drivers/platform/x86/x86-android-tablets/dmi.c +++ b/drivers/platform/x86/x86-android-tablets/dmi.c @@ -180,6 +180,18 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = { .driver_data = (void *)&peaq_c1010_info, }, { + /* Vexia Edu Atla 10 tablet 5V version */ + .matches = { + /* Having all 3 of these not set is somewhat unique */ + DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"), + }, + .driver_data = (void *)&vexia_edu_atla10_5v_info, + }, + { /* Vexia Edu Atla 10 tablet 9V version */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), @@ -187,7 +199,7 @@ /* Above strings are too generic, also match on BIOS date */ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"), }, - .driver_data = (void *)&vexia_edu_atla10_info, + .driver_data = (void *)&vexia_edu_atla10_9v_info, }, { /* Whitelabel (sold as various brands) TM800A550L */ diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c index 1d93d9edb23f..f7bd9f863c85 100644 --- a/drivers/platform/x86/x86-android-tablets/other.c +++ b/drivers/platform/x86/x86-android-tablets/other.c @@ -599,62 +599,122 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = { }; /* - * Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet + * Vexia EDU ATLA 10 tablet 5V, Android 4.4 + Guadalinex Ubuntu tablet + * distributed to schools in the Spanish Andalucía region. 
+ */ +static const struct property_entry vexia_edu_atla10_5v_touchscreen_props[] = { + PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000), + PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120), + { } +}; + +static const struct software_node vexia_edu_atla10_5v_touchscreen_node = { + .properties = vexia_edu_atla10_5v_touchscreen_props, +}; + +static const struct x86_i2c_client_info vexia_edu_atla10_5v_i2c_clients[] __initconst = { + { + /* kxcjk1013 accelerometer */ + .board_info = { + .type = "kxcjk1013", + .addr = 0x0f, + .dev_name = "kxcjk1013", + }, + .adapter_path = "\\_SB_.I2C3", + }, { + /* touchscreen controller */ + .board_info = { + .type = "hid-over-i2c", + .addr = 0x38, + .dev_name = "FTSC1000", + .swnode = &vexia_edu_atla10_5v_touchscreen_node, + }, + .adapter_path = "\\_SB_.I2C4", + .irq_data = { + .type = X86_ACPI_IRQ_TYPE_APIC, + .index = 0x44, + .trigger = ACPI_LEVEL_SENSITIVE, + .polarity = ACPI_ACTIVE_HIGH, + }, + } +}; + +static struct gpiod_lookup_table vexia_edu_atla10_5v_ft5416_gpios = { + .dev_id = "i2c-FTSC1000", + .table = { + GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW), + { } + }, +}; + +static struct gpiod_lookup_table * const vexia_edu_atla10_5v_gpios[] = { + &vexia_edu_atla10_5v_ft5416_gpios, + NULL +}; + +const struct x86_dev_info vexia_edu_atla10_5v_info __initconst = { + .i2c_client_info = vexia_edu_atla10_5v_i2c_clients, + .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_5v_i2c_clients), + .gpiod_lookup_tables = vexia_edu_atla10_5v_gpios, +}; + +/* + * Vexia EDU ATLA 10 tablet 9V, Android 4.2 + Guadalinex Ubuntu tablet * distributed to schools in the Spanish AndalucÃa region. */ static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" }; -static const struct property_entry vexia_edu_atla10_ulpmc_props[] = { +static const struct property_entry vexia_edu_atla10_9v_ulpmc_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy), { } }; -static const struct software_node vexia_edu_atla10_ulpmc_node = { - .properties = vexia_edu_atla10_ulpmc_props, +static const struct software_node vexia_edu_atla10_9v_ulpmc_node = { + .properties = vexia_edu_atla10_9v_ulpmc_props, }; -static const char * const vexia_edu_atla10_accel_mount_matrix[] = { +static const char * const vexia_edu_atla10_9v_accel_mount_matrix[] = { "0", "-1", "0", "1", "0", "0", "0", "0", "1" }; -static const struct property_entry vexia_edu_atla10_accel_props[] = { - PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_accel_mount_matrix), +static const struct property_entry vexia_edu_atla10_9v_accel_props[] = { + PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_9v_accel_mount_matrix), { } }; -static const struct software_node vexia_edu_atla10_accel_node = { - .properties = vexia_edu_atla10_accel_props, +static const struct software_node vexia_edu_atla10_9v_accel_node = { + .properties = vexia_edu_atla10_9v_accel_props, }; -static const struct property_entry vexia_edu_atla10_touchscreen_props[] = { +static const struct property_entry vexia_edu_atla10_9v_touchscreen_props[] = { PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000), PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120), { } }; -static const struct software_node vexia_edu_atla10_touchscreen_node = { - .properties = vexia_edu_atla10_touchscreen_props, +static const struct software_node vexia_edu_atla10_9v_touchscreen_node = { + .properties = vexia_edu_atla10_9v_touchscreen_props, }; -static const struct property_entry vexia_edu_atla10_pmic_props[] = { +static const struct 
property_entry vexia_edu_atla10_9v_pmic_props[] = { PROPERTY_ENTRY_BOOL("linux,register-pwrsrc-power_supply"), { } }; -static const struct software_node vexia_edu_atla10_pmic_node = { - .properties = vexia_edu_atla10_pmic_props, +static const struct software_node vexia_edu_atla10_9v_pmic_node = { + .properties = vexia_edu_atla10_9v_pmic_props, }; -static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initconst = { +static const struct x86_i2c_client_info vexia_edu_atla10_9v_i2c_clients[] __initconst = { { /* I2C attached embedded controller, used to access fuel-gauge */ .board_info = { .type = "vexia_atla10_ec", .addr = 0x76, .dev_name = "ulpmc", - .swnode = &vexia_edu_atla10_ulpmc_node, + .swnode = &vexia_edu_atla10_9v_ulpmc_node, }, .adapter_path = "0000:00:18.1", }, { @@ -679,7 +739,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon .type = "kxtj21009", .addr = 0x0f, .dev_name = "kxtj21009", - .swnode = &vexia_edu_atla10_accel_node, + .swnode = &vexia_edu_atla10_9v_accel_node, }, .adapter_path = "0000:00:18.5", }, { @@ -688,7 +748,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon .type = "hid-over-i2c", .addr = 0x38, .dev_name = "FTSC1000", - .swnode = &vexia_edu_atla10_touchscreen_node, + .swnode = &vexia_edu_atla10_9v_touchscreen_node, }, .adapter_path = "0000:00:18.6", .irq_data = { @@ -703,7 +763,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon .type = "intel_soc_pmic_crc", .addr = 0x6e, .dev_name = "intel_soc_pmic_crc", - .swnode = &vexia_edu_atla10_pmic_node, + .swnode = &vexia_edu_atla10_9v_pmic_node, }, .adapter_path = "0000:00:18.7", .irq_data = { @@ -715,7 +775,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon } }; -static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = { +static const struct x86_serdev_info vexia_edu_atla10_9v_serdevs[] __initconst = { { .ctrl.pci.devfn = PCI_DEVFN(0x1e, 3), .ctrl_devname = "serial0", @@ -723,7 +783,7 @@ static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = { }, }; -static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = { +static struct gpiod_lookup_table vexia_edu_atla10_9v_ft5416_gpios = { .dev_id = "i2c-FTSC1000", .table = { GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_LOW), @@ -731,12 +791,12 @@ static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = { }, }; -static struct gpiod_lookup_table * const vexia_edu_atla10_gpios[] = { - &vexia_edu_atla10_ft5416_gpios, +static struct gpiod_lookup_table * const vexia_edu_atla10_9v_gpios[] = { + &vexia_edu_atla10_9v_ft5416_gpios, NULL }; -static int __init vexia_edu_atla10_init(struct device *dev) +static int __init vexia_edu_atla10_9v_init(struct device *dev) { struct pci_dev *pdev; int ret; @@ -760,13 +820,13 @@ static int __init vexia_edu_atla10_init(struct device *dev) return 0; } -const struct x86_dev_info vexia_edu_atla10_info __initconst = { - .i2c_client_info = vexia_edu_atla10_i2c_clients, - .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients), - .serdev_info = vexia_edu_atla10_serdevs, - .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs), - .gpiod_lookup_tables = vexia_edu_atla10_gpios, - .init = vexia_edu_atla10_init, +const struct x86_dev_info vexia_edu_atla10_9v_info __initconst = { + .i2c_client_info = vexia_edu_atla10_9v_i2c_clients, + .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_9v_i2c_clients), + .serdev_info = vexia_edu_atla10_9v_serdevs, 
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_9v_serdevs), + .gpiod_lookup_tables = vexia_edu_atla10_9v_gpios, + .init = vexia_edu_atla10_9v_init, .use_pci = true, }; diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h index 63a38a0069ba..dcf8d49e3b5f 100644 --- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h +++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h @@ -127,7 +127,8 @@ extern const struct x86_dev_info nextbook_ares8_info; extern const struct x86_dev_info nextbook_ares8a_info; extern const struct x86_dev_info peaq_c1010_info; extern const struct x86_dev_info whitelabel_tm800a550l_info; -extern const struct x86_dev_info vexia_edu_atla10_info; +extern const struct x86_dev_info vexia_edu_atla10_5v_info; +extern const struct x86_dev_info vexia_edu_atla10_9v_info; extern const struct x86_dev_info xiaomi_mipad2_info; extern const struct dmi_system_id x86_android_tablet_ids[]; diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 9b2f28b34bb5..d6c1ddb807b2 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -3126,7 +3126,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, /* Verify that the index is within a valid range. */ num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", "#power-domain-cells"); - if (index >= num_domains) + if (num_domains < 0 || index >= num_domains) return NULL; /* Allocate and register device on the genpd bus. */ diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c index 66409cff2083..e001b5c25bed 100644 --- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c +++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c @@ -338,11 +338,6 @@ static int __init rcar_gen4_sysc_pd_init(void) struct rcar_gen4_sysc_pd *pd; size_t n; - if (!area->name) { - /* Skip NULLified area */ - continue; - } - n = strlen(area->name) + 1; pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c index dce1a6d37e80..047495f54e8a 100644 --- a/drivers/pmdomain/renesas/rcar-sysc.c +++ b/drivers/pmdomain/renesas/rcar-sysc.c @@ -396,11 +396,6 @@ static int __init rcar_sysc_pd_init(void) struct rcar_sysc_pd *pd; size_t n; - if (!area->name) { - /* Skip NULLified area */ - continue; - } - n = strlen(area->name) + 1; pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 6085a1471de2..6e1d4bfd28ac 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -290,7 +290,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) #ifdef CONFIG_AMD_NB -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> static void quirk_amd_mmconfig_area(struct pnp_dev *dev) { diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index edb058c19c9c..439dd0bf8644 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -33,7 +33,7 @@ struct power_supply_attr { [POWER_SUPPLY_PROP_ ## _name] = \ { \ .prop_name = #_name, \ - .attr_name = #_name "\0", \ + .attr_name = #_name, \ .text_values = _text, \ .text_values_len = _len, \ } diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 5ab3feb29686..e3be40adc0d7 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -28,6 +28,7 @@ 
#include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/iosf_mbi.h> +#include <asm/msr.h> /* bitmasks for RAPL MSRs, used by primitive access functions */ #define ENERGY_STATUS_MASK 0xffffffff diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 2b81aabdb0db..8ad2115d65f6 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -24,6 +24,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> /* Local defines */ #define MSR_PLATFORM_POWER_LIMIT 0x0000065C @@ -103,7 +104,7 @@ static int rapl_cpu_down_prep(unsigned int cpu) static int rapl_msr_read_raw(int cpu, struct reg_action *ra) { - if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) { + if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) { pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu); return -EIO; } @@ -116,14 +117,14 @@ static void rapl_msr_update_func(void *info) struct reg_action *ra = info; u64 val; - ra->err = rdmsrl_safe(ra->reg.msr, &val); + ra->err = rdmsrq_safe(ra->reg.msr, &val); if (ra->err) return; val &= ~ra->mask; val |= ra->value; - ra->err = wrmsrl_safe(ra->reg.msr, val); + ra->err = wrmsrq_safe(ra->reg.msr, val); } static int rapl_msr_write_raw(int cpu, struct reg_action *ra) diff --git a/drivers/pps/generators/pps_gen_tio.c b/drivers/pps/generators/pps_gen_tio.c index 1d5ffe055463..de00a85bfafa 100644 --- a/drivers/pps/generators/pps_gen_tio.c +++ b/drivers/pps/generators/pps_gen_tio.c @@ -230,7 +230,7 @@ static int pps_gen_tio_probe(struct platform_device *pdev) hrtimer_setup(&tio->timer, hrtimer_callback, CLOCK_REALTIME, HRTIMER_MODE_ABS); spin_lock_init(&tio->lock); - platform_set_drvdata(pdev, &tio); + platform_set_drvdata(pdev, tio); return 0; } diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 7945c6be1f7c..e63481f24238 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -315,6 +315,8 @@ struct ptp_ocp_serial_port { #define OCP_BOARD_ID_LEN 13 #define OCP_SERIAL_LEN 6 #define OCP_SMA_NUM 4 +#define OCP_SIGNAL_NUM 4 +#define OCP_FREQ_NUM 4 enum { PORT_GNSS, @@ -342,8 +344,8 @@ struct ptp_ocp { struct dcf_master_reg __iomem *dcf_out; struct dcf_slave_reg __iomem *dcf_in; struct tod_reg __iomem *nmea_out; - struct frequency_reg __iomem *freq_in[4]; - struct ptp_ocp_ext_src *signal_out[4]; + struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM]; + struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM]; struct ptp_ocp_ext_src *pps; struct ptp_ocp_ext_src *ts0; struct ptp_ocp_ext_src *ts1; @@ -378,10 +380,12 @@ struct ptp_ocp { u32 utc_tai_offset; u32 ts_window_adjust; u64 fw_cap; - struct ptp_ocp_signal signal[4]; + struct ptp_ocp_signal signal[OCP_SIGNAL_NUM]; struct ptp_ocp_sma_connector sma[OCP_SMA_NUM]; const struct ocp_sma_op *sma_op; struct dpll_device *dpll; + int signals_nr; + int freq_in_nr; }; #define OCP_REQ_TIMESTAMP BIT(0) @@ -2067,6 +2071,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s) if (!s->start) { /* roundup() does not work on 32-bit systems */ s->start = DIV64_U64_ROUND_UP(start_ns, s->period); + s->start *= s->period; s->start = ktime_add(s->start, s->phase); } @@ -2577,12 +2582,60 @@ static const struct ocp_sma_op ocp_fb_sma_op = { .set_output = ptp_ocp_sma_fb_set_output, }; +static int +ptp_ocp_sma_adva_set_output(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + u32 reg, mask, shift; + unsigned long flags; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? 
&bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; + shift = sma_nr & 1 ? 0 : 16; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); + + iowrite32(reg, gpio); + + spin_unlock_irqrestore(&bp->lock, flags); + + return 0; +} + +static int +ptp_ocp_sma_adva_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + u32 reg, mask, shift; + unsigned long flags; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; + shift = sma_nr & 1 ? 0 : 16; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); + + iowrite32(reg, gpio); + + spin_unlock_irqrestore(&bp->lock, flags); + + return 0; +} + static const struct ocp_sma_op ocp_adva_sma_op = { .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out }, .init = ptp_ocp_sma_fb_init, .get = ptp_ocp_sma_fb_get, - .set_inputs = ptp_ocp_sma_fb_set_inputs, - .set_output = ptp_ocp_sma_fb_set_output, + .set_inputs = ptp_ocp_sma_adva_set_inputs, + .set_output = ptp_ocp_sma_adva_set_output, }; static int @@ -2648,6 +2701,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->eeprom_map = fb_eeprom_map; bp->fw_version = ioread32(&bp->image->version); bp->sma_op = &ocp_fb_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4; ptp_ocp_fb_set_version(bp); @@ -2813,6 +2868,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->fw_version = ioread32(&bp->reg->version); bp->fw_tag = 2; bp->sma_op = &ocp_art_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4; /* Enable MAC serial port during initialisation */ iowrite32(1, &bp->board_config->mro50_serial_activate); @@ -2839,6 +2896,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->flash_start = 0xA00000; bp->eeprom_map = fb_eeprom_map; bp->sma_op = &ocp_adva_sma_op; + bp->signals_nr = 2; + bp->freq_in_nr = 2; version = ioread32(&bp->image->version); /* if lower 16 bits are empty, this is the fw loader. 
*/ @@ -3959,7 +4018,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) { struct signal_reg __iomem *reg = bp->signal_out[nr]->mem; struct ptp_ocp_signal *signal = &bp->signal[nr]; - char label[8]; + char label[16]; bool on; u32 val; @@ -3982,7 +4041,7 @@ static void _frequency_summary_show(struct seq_file *s, int nr, struct frequency_reg __iomem *reg) { - char label[8]; + char label[16]; bool on; u32 val; @@ -4126,11 +4185,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) } if (bp->fw_cap & OCP_CAP_SIGNAL) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->signals_nr; i++) _signal_summary_show(s, bp, i); if (bp->fw_cap & OCP_CAP_FREQ) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->freq_in_nr; i++) _frequency_summary_show(s, i, bp->freq_in[i]); if (bp->irig_out) { diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index a40c511e0096..0387bd838487 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -322,7 +322,7 @@ static int __pwm_set_waveform(struct pwm_device *pwm, const struct pwm_ops *ops = chip->ops; char wfhw[WFHWSIZE]; struct pwm_waveform wf_rounded; - int err; + int err, ret_tohw; BUG_ON(WFHWSIZE < ops->sizeof_wfhw); @@ -332,16 +332,16 @@ static int __pwm_set_waveform(struct pwm_device *pwm, if (!pwm_wf_valid(wf)) return -EINVAL; - err = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw); - if (err) - return err; + ret_tohw = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw); + if (ret_tohw < 0) + return ret_tohw; if ((IS_ENABLED(CONFIG_PWM_DEBUG) || exact) && wf->period_length_ns) { err = __pwm_round_waveform_fromhw(chip, pwm, &wfhw, &wf_rounded); if (err) return err; - if (IS_ENABLED(CONFIG_PWM_DEBUG) && !pwm_check_rounding(wf, &wf_rounded)) + if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_tohw == 0 && !pwm_check_rounding(wf, &wf_rounded)) dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n", wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns, wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns); @@ -382,7 +382,8 @@ static int __pwm_set_waveform(struct pwm_device *pwm, wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns, wf_set.duty_length_ns, wf_set.period_length_ns, wf_set.duty_offset_ns); } - return 0; + + return ret_tohw; } /** diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c index 4259a0db9ff4..4337c8f5acf0 100644 --- a/drivers/pwm/pwm-axi-pwmgen.c +++ b/drivers/pwm/pwm-axi-pwmgen.c @@ -75,6 +75,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip, { struct axi_pwmgen_waveform *wfhw = _wfhw; struct axi_pwmgen_ddata *ddata = axi_pwmgen_ddata_from_chip(chip); + int ret = 0; if (wf->period_length_ns == 0) { *wfhw = (struct axi_pwmgen_waveform){ @@ -91,12 +92,15 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip, if (wfhw->period_cnt == 0) { /* * The specified period is too short for the hardware. - * Let's round .duty_cycle down to 0 to get a (somewhat) - * valid result. + * So round up .period_cnt to 1 (i.e. the smallest + * possible period). With .duty_cycle and .duty_offset + * being less than or equal to .period, their rounded + * value must be 0. 
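The PWM changes above (pwm/core.c, pwm-axi-pwmgen.c, pwm-stm32.c) establish a convention for .round_waveform_tohw: a negative errno on failure, 0 when the request could be represented by rounding down, and 1 when the hardware had to round the period up; __pwm_set_waveform() then suppresses the PWM_DEBUG "Wrong rounding" check and propagates that 1 to its caller. A hedged sketch of a driver callback following this convention, assuming the callback signature used by the drivers above; struct example_wfhw, the EXAMPLE_* constants and example_ns_to_ticks() are placeholders, not a real driver:

struct example_wfhw {
	u32 period;	/* in hardware ticks */
	u32 duty;
};

static int example_round_waveform_tohw(struct pwm_chip *chip,
					struct pwm_device *pwm,
					const struct pwm_waveform *wf,
					void *_wfhw)
{
	struct example_wfhw *wfhw = _wfhw;

	if (wf->period_length_ns < EXAMPLE_MIN_PERIOD_NS) {
		/* Cannot round down any further: use the smallest period. */
		wfhw->period = EXAMPLE_MIN_PERIOD_TICKS;
		wfhw->duty = 0;
		return 1;	/* tell __pwm_set_waveform() we rounded up */
	}

	wfhw->period = example_ns_to_ticks(wf->period_length_ns);
	wfhw->duty = example_ns_to_ticks(wf->duty_length_ns);
	return 0;		/* rounded down (or exact) */
}
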
*/ wfhw->period_cnt = 1; wfhw->duty_cycle_cnt = 0; wfhw->duty_offset_cnt = 0; + ret = 1; } else { wfhw->duty_cycle_cnt = min_t(u64, mul_u64_u32_div(wf->duty_length_ns, ddata->clk_rate_hz, NSEC_PER_SEC), @@ -111,7 +115,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip, pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns, ddata->clk_rate_hz, wfhw->period_cnt, wfhw->duty_cycle_cnt, wfhw->duty_offset_cnt); - return 0; + return ret; } static int axi_pwmgen_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm, diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index 2510c10ca473..c45a5fca4cbb 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -118,6 +118,9 @@ static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc, unsigned long long exval; rate = clk_get_rate(fpc->clk[fpc->period.clk_select]); + if (rate >> fpc->period.clk_ps == 0) + return 0; + exval = ticks; exval *= 1000000000UL; do_div(exval, rate >> fpc->period.clk_ps); @@ -190,6 +193,9 @@ static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc, unsigned int period = fpc->period.mod_period + 1; unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period); + if (!period_ns) + return 0; + duty = (unsigned long long)duty_ns * period; do_div(duty, period_ns); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 01dfa0fab80a..7eaab5831499 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -121,21 +121,25 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH, reg_thres = PWMTHRES; + unsigned long clk_rate; u64 resolution; int ret; ret = pwm_mediatek_clk_enable(chip, pwm); - if (ret < 0) return ret; + clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); + if (!clk_rate) + return -EINVAL; + /* Make sure we use the bus clock and not the 26MHz clock */ if (pc->soc->has_ck_26m_sel) writel(0, pc->regs + PWM_CK_26M_SEL); /* Using resolution in picosecond gets accuracy higher */ resolution = (u64)NSEC_PER_SEC * 1000; - do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm])); + do_div(resolution, clk_rate); cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution); while (cnt_period > 8191) { diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 2261789cc27d..578dbdd2d5a7 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -8,6 +8,7 @@ * - The hardware cannot generate a 0% duty cycle. */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> @@ -102,23 +103,24 @@ static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp, rcar_pwm_write(rp, value, RCAR_PWMCR); } -static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns, - int period_ns) +static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, u64 duty_ns, + u64 period_ns) { - unsigned long long one_cycle, tmp; /* 0.01 nanoseconds */ + unsigned long long tmp; unsigned long clk_rate = clk_get_rate(rp->clk); u32 cyc, ph; - one_cycle = NSEC_PER_SEC * 100ULL << div; - do_div(one_cycle, clk_rate); + /* div <= 24 == RCAR_PWM_MAX_DIVISION, so the shift doesn't overflow. 
*/ + tmp = mul_u64_u64_div_u64(period_ns, clk_rate, (u64)NSEC_PER_SEC << div); + if (tmp > FIELD_MAX(RCAR_PWMCNT_CYC0_MASK)) + tmp = FIELD_MAX(RCAR_PWMCNT_CYC0_MASK); - tmp = period_ns * 100ULL; - do_div(tmp, one_cycle); - cyc = (tmp << RCAR_PWMCNT_CYC0_SHIFT) & RCAR_PWMCNT_CYC0_MASK; + cyc = FIELD_PREP(RCAR_PWMCNT_CYC0_MASK, tmp); - tmp = duty_ns * 100ULL; - do_div(tmp, one_cycle); - ph = tmp & RCAR_PWMCNT_PH0_MASK; + tmp = mul_u64_u64_div_u64(duty_ns, clk_rate, (u64)NSEC_PER_SEC << div); + if (tmp > FIELD_MAX(RCAR_PWMCNT_PH0_MASK)) + tmp = FIELD_MAX(RCAR_PWMCNT_PH0_MASK); + ph = FIELD_PREP(RCAR_PWMCNT_PH0_MASK, tmp); /* Avoid prohibited setting */ if (cyc == 0 || ph == 0) diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index a59de4de18b6..ec2c05c9ee7a 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -103,22 +103,16 @@ static int stm32_pwm_round_waveform_tohw(struct pwm_chip *chip, if (ret) goto out; - /* - * calculate the best value for ARR for the given PSC, refuse if - * the resulting period gets bigger than the requested one. - */ arr = mul_u64_u64_div_u64(wf->period_length_ns, rate, (u64)NSEC_PER_SEC * (wfhw->psc + 1)); if (arr <= wfhw->arr) { /* - * requested period is small than the currently + * requested period is smaller than the currently * configured and unchangable period, report back the smallest - * possible period, i.e. the current state; Initialize - * ccr to anything valid. + * possible period, i.e. the current state and return 1 + * to indicate the wrong rounding direction. */ - wfhw->ccr = 0; ret = 1; - goto out; } } else { diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index f9be26d25348..2b6279d32774 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -17,8 +17,8 @@ #include <linux/bitops.h> #include <linux/ras.h> -#include <asm/amd_nb.h> -#include <asm/amd_node.h> +#include <asm/amd/nb.h> +#include <asm/amd/node.h> #include "reg_fields.h" @@ -362,4 +362,7 @@ static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx) atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode); } +#define MI300_UMC_MCA_COL GENMASK(5, 1) +#define MI300_UMC_MCA_ROW13 BIT(23) + #endif /* __AMD_ATL_INTERNAL_H__ */ diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index dc8aa12f63c8..6e072b7667e9 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -229,7 +229,6 @@ int get_umc_info_mi300(void) * Additionally, the PC and Bank bits may be hashed. This must be accounted for before * reconstructing the normalized address. */ -#define MI300_UMC_MCA_COL GENMASK(5, 1) #define MI300_UMC_MCA_BANK GENMASK(9, 6) #define MI300_UMC_MCA_ROW GENMASK(24, 10) #define MI300_UMC_MCA_PC BIT(25) @@ -320,7 +319,7 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) * See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats. */ #define MI300_NUM_COL BIT(HWEIGHT(MI300_UMC_MCA_COL)) -static void retire_row_mi300(struct atl_err *a_err) +static void _retire_row_mi300(struct atl_err *a_err) { unsigned long addr; struct page *p; @@ -351,6 +350,22 @@ static void retire_row_mi300(struct atl_err *a_err) } } +/* + * In addition to the column bits, the row[13] bit should also be included when + * calculating addresses affected by a physical row. + * + * Instead of running through another loop over a single bit, just run through + * the column bits twice and flip the row[13] bit in-between. 
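To make the double pass over the row concrete: the affected physical row covers every address that differs only in the column bits and in row[13]. A hypothetical helper, not the kernel's implementation, that enumerates those addresses using the MI300_UMC_MCA_COL and MI300_UMC_MCA_ROW13 masks defined in this patch (the column mask is contiguous, so shifting a counter by its lowest set bit walks all column patterns):

#include <linux/bits.h>
#include <linux/bitops.h>

/* Visit every address of the physical row containing @mca_addr. */
static void example_visit_row(unsigned long mca_addr,
			      void (*visit)(unsigned long addr))
{
	unsigned long base, col;

	/* Clear the don't-care bits, then iterate all column patterns. */
	base = mca_addr & ~(MI300_UMC_MCA_COL | MI300_UMC_MCA_ROW13);

	for (col = 0; col < BIT(hweight_long(MI300_UMC_MCA_COL)); col++) {
		unsigned long addr = base | (col << __ffs(MI300_UMC_MCA_COL));

		visit(addr);				/* row[13] = 0 */
		visit(addr ^ MI300_UMC_MCA_ROW13);	/* row[13] = 1 */
	}
}
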
+ * + * See MI300_UMC_MCA_ROW for the row bits in MCA_ADDR_UMC value. + */ +static void retire_row_mi300(struct atl_err *a_err) +{ + _retire_row_mi300(a_err); + a_err->addr ^= MI300_UMC_MCA_ROW13; + _retire_row_mi300(a_err); +} + void amd_retire_dram_row(struct atl_err *a_err) { if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c index 90de737fbc90..8877c6ff64c4 100644 --- a/drivers/ras/amd/fmpm.c +++ b/drivers/ras/amd/fmpm.c @@ -250,6 +250,13 @@ static bool rec_has_valid_entries(struct fru_rec *rec) return true; } +/* + * Row retirement is done on MI300 systems, and some bits are 'don't + * care' for comparing addresses with unique physical rows. This + * includes all column bits and the row[13] bit. + */ +#define MASK_ADDR(addr) ((addr) & ~(MI300_UMC_MCA_ROW13 | MI300_UMC_MCA_COL)) + static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new) { /* @@ -258,7 +265,7 @@ static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_ * * Also, order the checks from most->least likely to fail to shortcut the code. */ - if (old->addr != new->addr) + if (MASK_ADDR(old->addr) != MASK_ADDR(new->addr)) return false; if (old->hw_id != new->hw_id) diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index 59eb23d467ec..198d45f8e884 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -132,7 +132,7 @@ static int max20086_regulators_register(struct max20086 *chip) static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) { - struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { }; + struct of_regulator_match *matches; struct device_node *node; unsigned int i; int ret; @@ -143,6 +143,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) return -ENODEV; } + matches = devm_kcalloc(chip->dev, chip->info->num_outputs, + sizeof(*matches), GFP_KERNEL); + if (!matches) + return -ENOMEM; + for (i = 0; i < chip->info->num_outputs; ++i) matches[i].name = max20086_output_names[i]; diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 775b056d795a..2c7e519a2254 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -456,7 +456,8 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss, if (wcnss->num_pds) { info += wcnss->num_pds; /* Handle single power domain case */ - num_vregs += num_pd_vregs - wcnss->num_pds; + if (wcnss->num_pds < num_pd_vregs) + num_vregs += num_pd_vregs - wcnss->num_pds; } else { num_vregs += num_pd_vregs; } diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 4bfe469c04aa..8c1c908d2c6e 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -5,7 +5,7 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m prompt "DCSSBLK support" - depends on S390 && BLOCK + depends on S390 && BLOCK && (DAX || DAX=n) help Support for dcss block device @@ -14,7 +14,6 @@ config DCSSBLK_DAX depends on DCSSBLK # requires S390 ZONE_DEVICE support depends on BROKEN - select DAX prompt "DCSSBLK DAX support" help Enable DAX operation for the dcss block device diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 7248e547fefb..cdc7b2f16b88 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -314,7 +314,7 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info) if (*seg_info == 
NULL) return -ENOMEM; - strcpy((*seg_info)->segment_name, name); + strscpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, @@ -612,7 +612,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = -ENOMEM; goto out; } - strcpy(dev_info->segment_name, local_buf); + strscpy(dev_info->segment_name, local_buf); dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 34f3820d7f74..8402a0042c0d 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -102,6 +102,7 @@ struct tty3270 { /* Input stuff. */ char *prompt; /* Output string for input area. */ + size_t prompt_sz; /* Size of output string. */ char *input; /* Input string for read request. */ struct raw3270_request *read; /* Single read request. */ struct raw3270_request *kreset; /* Single keyboard reset request. */ @@ -206,7 +207,7 @@ static int tty3270_input_size(int cols) static void tty3270_update_prompt(struct tty3270 *tp, char *input) { - strcpy(tp->prompt, input); + strscpy(tp->prompt, input, tp->prompt_sz); tp->update_flags |= TTY_UPDATE_INPUT; tty3270_set_timer(tp, 1); } @@ -971,6 +972,7 @@ static void tty3270_resize(struct raw3270_view *view, char *old_input, *new_input; struct tty_struct *tty; struct winsize ws; + size_t prompt_sz; int new_allocated, old_allocated = tp->allocated_lines; if (old_model == new_model && @@ -982,10 +984,11 @@ static void tty3270_resize(struct raw3270_view *view, return; } - new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(new_cols); + new_input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!new_input) return; - new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL); + new_prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!new_prompt) goto out_input; screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated); @@ -1010,6 +1013,7 @@ static void tty3270_resize(struct raw3270_view *view, old_rcl_lines = tp->rcl_lines; tp->input = new_input; tp->prompt = new_prompt; + tp->prompt_sz = prompt_sz; tp->rcl_lines = new_rcl_lines; tp->rcl_read_index = 0; tp->rcl_write_index = 0; @@ -1096,6 +1100,7 @@ static int tty3270_create_view(int index, struct tty3270 **newtp) { struct tty3270 *tp; + size_t prompt_sz; int rc; if (tty3270_max_index < index + 1) @@ -1125,17 +1130,19 @@ tty3270_create_view(int index, struct tty3270 **newtp) goto out_free_screen; } - tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(tp->view.cols); + tp->input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!tp->input) { rc = -ENOMEM; goto out_free_converted_line; } - tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL); + tp->prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!tp->prompt) { rc = -ENOMEM; goto out_free_input; } + tp->prompt_sz = prompt_sz; tp->rcl_lines = tty3270_alloc_recall(tp->view.cols); if (!tp->rcl_lines) { diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c index 711f6982438e..f41b39c9d267 100644 --- a/drivers/s390/char/diag_ftp.c +++ b/drivers/s390/char/diag_ftp.c @@ -159,7 +159,7 @@ ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize) goto out; } - len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident)); + len = strscpy(ldfpl->fident, ftp->fname); if (len < 0) { len = -EINVAL; goto out_free; diff --git 
a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 1564cd7e3f59..288734cd8f4b 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -41,6 +41,7 @@ #include <linux/module.h> #include <asm/uv.h> #include <asm/chsc.h> +#include <linux/mempool.h> #include "ap_bus.h" #include "ap_debug.h" @@ -103,6 +104,27 @@ static struct ap_config_info *const ap_qci_info_old = &qci[1]; debug_info_t *ap_dbf_info; /* + * There is a need for a do-not-allocate-memory path through the AP bus + * layer. The pkey layer may be triggered via the in-kernel interface from + * a protected key crypto algorithm (namely PAES) to convert a secure key + * into a protected key. This happens in a workqueue context, so sleeping + * is allowed but memory allocations causing IO operations are not permitted. + * To accomplish this, an AP message memory pool with pre-allocated space + * is established. When ap_init_apmsg() with use_mempool set to true is + * called, instead of kmalloc() the ap message buffer is allocated from + * the ap_msg_pool. This pool only holds a limited amount of buffers: + * ap_msg_pool_min_items with the item size AP_DEFAULT_MAX_MSG_SIZE and + * exactly one of these items (if available) is returned if ap_init_apmsg() + * with the use_mempool arg set to true is called. When this pool is exhausted + * and use_mempool is set true, ap_init_apmsg() returns -ENOMEM without + * any attempt to allocate memory and the caller has to deal with that. + */ +static mempool_t *ap_msg_pool; +static unsigned int ap_msg_pool_min_items = 8; +module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440); +MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items"); + +/* * AP bus rescan related things. */ static bool ap_scan_bus(void); @@ -547,6 +569,48 @@ static void ap_poll_thread_stop(void) #define is_card_dev(x) ((x)->parent == ap_root_device) #define is_queue_dev(x) ((x)->parent != ap_root_device) +/* + * ap_init_apmsg() - Initialize ap_message. + */ +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags) +{ + unsigned int maxmsgsize; + + memset(ap_msg, 0, sizeof(*ap_msg)); + ap_msg->flags = flags; + + if (flags & AP_MSG_FLAG_MEMPOOL) { + ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; + return 0; + } + + maxmsgsize = atomic_read(&ap_max_msg_size); + ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = maxmsgsize; + + return 0; +} +EXPORT_SYMBOL(ap_init_apmsg); + +/* + * ap_release_apmsg() - Release ap_message. 
+ */ +void ap_release_apmsg(struct ap_message *ap_msg) +{ + if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) { + memzero_explicit(ap_msg->msg, ap_msg->bufsize); + mempool_free(ap_msg->msg, ap_msg_pool); + } else { + kfree_sensitive(ap_msg->msg); + } +} +EXPORT_SYMBOL(ap_release_apmsg); + /** * ap_bus_match() * @dev: Pointer to device @@ -2431,6 +2495,14 @@ static int __init ap_module_init(void) /* init ap_queue hashtable */ hash_init(ap_queues); + /* create ap msg buffer memory pool */ + ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items, + AP_DEFAULT_MAX_MSG_SIZE); + if (!ap_msg_pool) { + rc = -ENOMEM; + goto out; + } + /* set up the AP permissions (ioctls, ap and aq masks) */ ap_perms_init(); @@ -2477,6 +2549,7 @@ out_device: out_bus: bus_unregister(&ap_bus_type); out: + mempool_destroy(ap_msg_pool); ap_debug_exit(); return rc; } @@ -2487,6 +2560,7 @@ static void __exit ap_module_exit(void) ap_irq_exit(); root_device_unregister(ap_root_device); bus_unregister(&ap_bus_type); + mempool_destroy(ap_msg_pool); ap_debug_exit(); } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index f4622ee4d894..88b625ba1978 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -214,6 +214,11 @@ struct ap_queue { typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); +struct ap_response_type { + struct completion work; + int type; +}; + struct ap_message { struct list_head list; /* Request queueing. */ unsigned long psmid; /* Message id. */ @@ -222,7 +227,7 @@ struct ap_message { size_t bufsize; /* allocated msg buffer size */ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ int rc; /* Return code for this message */ - void *private; /* ap driver private pointer. */ + struct ap_response_type response; /* receive is called from tasklet context */ void (*receive)(struct ap_queue *, struct ap_message *, struct ap_message *); @@ -231,27 +236,10 @@ struct ap_message { #define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */ #define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */ #define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */ +#define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */ -/** - * ap_init_message() - Initialize ap_message. - * Initialize a message before using. Otherwise this might result in - * unexpected behaviour. - */ -static inline void ap_init_message(struct ap_message *ap_msg) -{ - memset(ap_msg, 0, sizeof(*ap_msg)); -} - -/** - * ap_release_message() - Release ap_message. - * Releases all memory used internal within the ap_message struct - * Currently this is the message and private field. 
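The ap_msg_pool described above gives in-kernel users such as the pkey/PAES conversion path a fixed number of preallocated AP_DEFAULT_MAX_MSG_SIZE buffers. A hedged caller-side sketch, assuming only the ap_init_apmsg()/ap_release_apmsg() interface and the AP_MSG_FLAG_MEMPOOL flag added by this patch; build_request() is a placeholder for whatever fills the message:

#include "ap_bus.h"

/* Obtain a request buffer without allocating memory, then release it. */
static int example_send_without_alloc(void)
{
	struct ap_message ap_msg;
	int rc;

	/* AP_MSG_FLAG_MEMPOOL: take one preallocated pool buffer. */
	rc = ap_init_apmsg(&ap_msg, AP_MSG_FLAG_MEMPOOL);
	if (rc)
		return rc;	/* -ENOMEM when the pool is exhausted */

	/* Fill at most ap_msg.bufsize bytes of ap_msg.msg (placeholder). */
	rc = build_request(ap_msg.msg, ap_msg.bufsize);

	/* Wipes the buffer and returns it to ap_msg_pool. */
	ap_release_apmsg(&ap_msg);
	return rc;
}
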
- */ -static inline void ap_release_message(struct ap_message *ap_msg) -{ - kfree_sensitive(ap_msg->msg); - kfree_sensitive(ap_msg->private); -} +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags); +void ap_release_apmsg(struct ap_message *ap_msg); enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3a39e167bdbf..cef60770f68b 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -24,7 +24,8 @@ */ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; @@ -32,14 +33,14 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, rc = pkey_handler_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); /* if this did not work, try the slowpath way */ if (rc == -ENODEV) { rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); if (rc) rc = -ENODEV; } @@ -52,16 +53,16 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, * In-Kernel function: Transform a key blob (of any type) into a protected key */ int pkey_key2protkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; rc = key2protkey(NULL, 0, key, keylen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, xflags); if (rc == -ENODEV) { pkey_handler_request_modules(); rc = key2protkey(NULL, 0, key, keylen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, xflags); } return rc; @@ -103,7 +104,7 @@ static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) keybuflen = sizeof(kgs.seckey.seckey); rc = pkey_handler_gen_key(&apqn, 1, kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, - kgs.seckey.seckey, &keybuflen, NULL); + kgs.seckey.seckey, &keybuflen, NULL, 0); pr_debug("gen_key()=%d\n", rc); if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) rc = -EFAULT; @@ -129,7 +130,7 @@ static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs) kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, kcs.clrkey.clrkey, pkey_keytype_aes_to_size(kcs.keytype), - kcs.seckey.seckey, &keybuflen, NULL); + kcs.seckey.seckey, &keybuflen, NULL, 0); pr_debug("clr_to_key()=%d\n", rc); if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) rc = -EFAULT; @@ -154,7 +155,8 @@ static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp) ksp.seckey.seckey, sizeof(ksp.seckey.seckey), ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); + &ksp.protkey.len, &ksp.protkey.type, + 0); pr_debug("key_to_protkey()=%d\n", rc); if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) rc = -EFAULT; @@ -198,7 +200,7 @@ static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp) rc = key2protkey(NULL, 0, tmpbuf, sizeof(*t) + keylen, kcp.protkey.protkey, - &kcp.protkey.len, &kcp.protkey.type); + &kcp.protkey.len, &kcp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree_sensitive(tmpbuf); @@ -228,12 +230,12 @@ static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc) rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, sizeof(kfc.seckey.seckey), PKEY_FLAGS_MATCH_CUR_MKVP, - apqns, &nr_apqns); + apqns, &nr_apqns, 
0); if (rc == -ENODEV) rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, sizeof(kfc.seckey.seckey), PKEY_FLAGS_MATCH_ALT_MKVP, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); pr_debug("apqns_for_key()=%d\n", rc); if (rc) { kfree(apqns); @@ -262,7 +264,7 @@ static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp) sizeof(ksp.seckey.seckey), ksp.protkey.protkey, &ksp.protkey.len, - &ksp.protkey.type); + &ksp.protkey.type, 0); pr_debug("key_to_protkey()=%d\n", rc); if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) rc = -EFAULT; @@ -285,7 +287,7 @@ static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk) rc = pkey_handler_verify_key(kvk.seckey.seckey, sizeof(kvk.seckey.seckey), &kvk.cardnr, &kvk.domain, - &keytype, &keybitsize, &flags); + &keytype, &keybitsize, &flags, 0); pr_debug("verify_key()=%d\n", rc); if (!rc && keytype != PKEY_TYPE_CCA_DATA) rc = -EINVAL; @@ -312,7 +314,7 @@ static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp) rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, PKEY_TYPE_PROTKEY, 0, 0, kgp.protkey.protkey, &kgp.protkey.len, - &kgp.protkey.type); + &kgp.protkey.type, 0); pr_debug("gen_key()=%d\n", rc); if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) rc = -EFAULT; @@ -354,7 +356,7 @@ static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp) memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), - NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL, 0); pr_debug("verify_key()=%d\n", rc); kfree_sensitive(tmpbuf); @@ -377,7 +379,7 @@ static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp) ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = key2protkey(NULL, 0, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); + &ktp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree_sensitive(kkey); if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) @@ -414,7 +416,7 @@ static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs) } rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, u, kgs.type, kgs.size, kgs.keygenflags, - kkey, &klen, NULL); + kkey, &klen, NULL, 0); pr_debug("gen_key()=%d\n", rc); kfree(apqns); if (rc) { @@ -471,7 +473,7 @@ static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs) rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, u, kcs.type, kcs.size, kcs.keygenflags, kcs.clrkey.clrkey, kcs.size / 8, - kkey, &klen, NULL); + kkey, &klen, NULL, 0); pr_debug("clr_to_key()=%d\n", rc); kfree(apqns); if (rc) { @@ -514,7 +516,7 @@ static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk) rc = pkey_handler_verify_key(kkey, kvk.keylen, &kvk.cardnr, &kvk.domain, - &kvk.type, &kvk.size, &kvk.flags); + &kvk.type, &kvk.size, &kvk.flags, 0); pr_debug("verify_key()=%d\n", rc); kfree_sensitive(kkey); @@ -544,7 +546,7 @@ static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp) ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); + &ktp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree(apqns); kfree_sensitive(kkey); @@ -579,7 +581,7 @@ static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak) return PTR_ERR(kkey); } rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); pr_debug("apqns_for_key()=%d\n", rc); kfree_sensitive(kkey); if (rc && rc != -ENOSPC) { @@ -626,7 +628,7 @@ static int pkey_ioctl_apqns4kt(struct 
pkey_apqns4keytype __user *uat) } rc = pkey_handler_apqns_for_keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, - kat.flags, apqns, &nr_apqns); + kat.flags, apqns, &nr_apqns, 0); pr_debug("apqns_for_keytype()=%d\n", rc); if (rc && rc != -ENOSPC) { kfree(apqns); @@ -678,7 +680,7 @@ static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp) return -ENOMEM; } rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, - protkey, &protkeylen, &ktp.pkeytype); + protkey, &protkeylen, &ktp.pkeytype, 0); pr_debug("key2protkey()=%d\n", rc); kfree(apqns); kfree_sensitive(kkey); diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c index 64a376501d26..9e6f319acc63 100644 --- a/drivers/s390/crypto/pkey_base.c +++ b/drivers/s390/crypto/pkey_base.c @@ -150,7 +150,8 @@ EXPORT_SYMBOL(pkey_handler_put); int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -159,7 +160,7 @@ int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->key_to_protkey) { rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); } pkey_handler_put(h); @@ -177,7 +178,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { const struct pkey_handler *h, *htmp[10]; int i, n = 0, rc = -ENODEV; @@ -199,7 +200,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, rc = h->slowpath_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); module_put(h->module); } @@ -210,7 +211,7 @@ EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey); int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -219,7 +220,7 @@ int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->gen_key) { rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, xflags); } pkey_handler_put(h); @@ -231,7 +232,8 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -240,7 +242,7 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->clr_to_key) { rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype, keybitsize, flags, clrkey, clrkeylen, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, xflags); } pkey_handler_put(h); @@ -250,7 +252,8 @@ EXPORT_SYMBOL(pkey_handler_clr_to_key); int pkey_handler_verify_key(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -258,7 +261,7 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen, h = 
pkey_handler_get_keybased(key, keylen); if (h && h->verify_key) { rc = h->verify_key(key, keylen, card, dom, - keytype, keybitsize, flags); + keytype, keybitsize, flags, xflags); } pkey_handler_put(h); @@ -267,14 +270,16 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen, EXPORT_SYMBOL(pkey_handler_verify_key); int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; h = pkey_handler_get_keybased(key, keylen); if (h && h->apqns_for_key) - rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns); + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns, + xflags); pkey_handler_put(h); return rc; @@ -283,7 +288,8 @@ EXPORT_SYMBOL(pkey_handler_apqns_for_key); int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -292,7 +298,7 @@ int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, if (h && h->apqns_for_keytype) { rc = h->apqns_for_keytype(keysubtype, cur_mkvp, alt_mkvp, flags, - apqns, nr_apqns); + apqns, nr_apqns, xflags); } pkey_handler_put(h); diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h index 7347647dfaa7..9cdb3e74477f 100644 --- a/drivers/s390/crypto/pkey_base.h +++ b/drivers/s390/crypto/pkey_base.h @@ -159,29 +159,33 @@ struct pkey_handler { bool (*is_supported_keytype)(enum pkey_key_type); int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int (*verify_key)(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags); + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); int (*apqns_for_keytype)(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); /* used internal by pkey base */ struct list_head list; }; @@ -199,29 +203,34 @@ void pkey_handler_put(const struct pkey_handler *handler); int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 
*protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags); int pkey_handler_verify_key(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags); + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); /* * Unconditional try to load all handler modules diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c index cda22db31f6c..6c7897a93f27 100644 --- a/drivers/s390/crypto/pkey_cca.c +++ b/drivers/s390/crypto/pkey_cca.c @@ -70,12 +70,15 @@ static bool is_cca_keytype(enum pkey_key_type key_type) } static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (!flags) flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; @@ -107,9 +110,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, /* unknown CCA internal token type */ return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -126,9 +129,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, /* unknown CCA internal 2 token type */ return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -147,18 +150,21 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_apqns4type(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 pflags) { - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + zcrypt_wait_api_operational(); if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { @@ -171,9 +177,9 @@ static int cca_apqns4type(enum pkey_key_type ktype, old_mkvp = *((u64 *)alt_mkvp); if (ktype == PKEY_TYPE_CCA_CIPHER) minhwtype = ZCRYPT_CEX6; - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -184,9 +190,9 @@ static int cca_apqns4type(enum pkey_key_type ktype, cur_mkvp = *((u64 *)cur_mkvp); if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) old_mkvp = *((u64 *)alt_mkvp); - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -205,19 +211,22 @@ static int cca_apqns4type(enum pkey_key_type ktype, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; int i, rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -253,14 +262,10 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; - rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { @@ -268,16 +273,16 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, hdr->version == TOKVER_CCA_AES) { rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_CCA_INTERNAL && hdr->version == TOKVER_CCA_VLSC) { rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else { rc = -EINVAL; break; @@ -285,7 +290,6 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -302,10 +306,13 @@ out: static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, keybitsize */ switch (keytype) { @@ -340,32 +347,27 @@ static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = cca_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { if (subtype == PKEY_TYPE_CCA_CIPHER) { rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, keybitsize, flags, - keybuf, keybuflen); + keybuf, keybuflen, xflags); } else { /* PKEY_TYPE_CCA_DATA */ rc = cca_genseckey(apqns[i].card, apqns[i].domain, - keybitsize, keybuf); + keybitsize, keybuf, xflags); *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); } } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -383,10 +385,13 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, clrkeylen, keybitsize */ switch (keytype) { @@ -426,44 +431,42 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = cca_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { if (subtype == PKEY_TYPE_CCA_CIPHER) { rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, keybitsize, flags, clrkey, - keybuf, keybuflen); + keybuf, keybuflen, xflags); } else { /* PKEY_TYPE_CCA_DATA */ rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, - keybitsize, clrkey, keybuf); + keybitsize, clrkey, keybuf, xflags); *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); } } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_verifykey(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 nr_apqns, *apqns = NULL; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -478,15 +481,16 @@ static int cca_verifykey(const u8 *key, u32 keylen, goto out; *keytype = PKEY_TYPE_CCA_DATA; *keybitsize = t->bitsize; - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX3C, AES_MK_SET, - t->mkvp, 0, 1); + t->mkvp, 0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX3C, AES_MK_SET, - 0, t->mkvp, 1); + 0, t->mkvp, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -511,15 +515,16 @@ static int cca_verifykey(const u8 *key, u32 keylen, *keybitsize = PKEY_SIZE_AES_192; else if (!t->plfver && t->wpllen == 640) *keybitsize = PKEY_SIZE_AES_256; - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX6, AES_MK_SET, - t->mkvp0, 0, 1); + t->mkvp0, 0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX6, AES_MK_SET, - 0, t->mkvp0, 1); + 0, t->mkvp0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -535,7 +540,6 @@ static int cca_verifykey(const u8 *key, u32 keylen, } out: - kfree(apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -551,12 +555,12 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 pflags) { const struct keytoken_header *hdr = (const struct keytoken_header *)key; const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */ u32 tmplen, keysize = 0; - u8 *tmpbuf; int i, rc; if (keylen < sizeof(*hdr)) @@ -568,26 +572,20 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, if (!keysize || t->len != keysize) return -EINVAL; - /* alloc tmp key buffer */ - tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - /* try two times in case of failure */ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { tmplen = SECKEYBLOBSIZE; rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, 8 * keysize, 0, t->clearkey, t->len, - tmpbuf, &tmplen, NULL); + tmpbuf, &tmplen, NULL, pflags); pr_debug("cca_clr2key()=%d\n", rc); if (rc) continue; rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, pflags); pr_debug("cca_key2protkey()=%d\n", rc); } - kfree(tmpbuf); pr_debug("rc=%d\n", rc); return rc; } diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c index 5b033ca3e828..6b23adc560c8 100644 --- a/drivers/s390/crypto/pkey_ep11.c +++ b/drivers/s390/crypto/pkey_ep11.c @@ -70,12 +70,15 @@ static bool is_ep11_keytype(enum pkey_key_type key_type) } static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (!flags) flags = PKEY_FLAGS_MATCH_CUR_MKVP; @@ -98,8 +101,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, minhwtype = ZCRYPT_CEX7; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); if (rc) goto out; @@ -115,8 +118,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, minhwtype = ZCRYPT_CEX7; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); if (rc) goto out; @@ -135,18 +138,20 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_apqns4type(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + zcrypt_wait_api_operational(); if (ktype == PKEY_TYPE_EP11 || @@ -158,8 +163,8 @@ static int ep11_apqns4type(enum pkey_key_type ktype, if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) wkvp = cur_mkvp; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, api, wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, api, wkvp, xflags); if (rc) goto out; @@ -178,19 +183,22 @@ static int ep11_apqns4type(enum pkey_key_type ktype, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; int i, rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -225,14 +233,10 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; - rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { @@ -241,19 +245,19 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_ECC_WITH_HEADER && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES && is_ep11_keyblob(key)) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else { rc = -EINVAL; break; @@ -261,7 +265,6 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -278,10 +281,13 @@ out: static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, keybitsize */ switch (keytype) { @@ -316,25 +322,20 @@ static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = ep11_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, keybitsize, flags, - keybuf, keybuflen, subtype); + keybuf, keybuflen, subtype, xflags); } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -352,10 +353,13 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, clrkeylen, keybitsize */ switch (keytype) { @@ -395,37 +399,35 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = ep11_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, keybitsize, flags, clrkey, - keybuf, keybuflen, subtype); + keybuf, keybuflen, subtype, xflags); } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_verifykey(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 nr_apqns, *apqns = NULL; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -443,9 +445,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen, *keybitsize = kb->head.bitlen; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); + ep11_kb_wkvp(key, keylen), xflags); if (rc) goto out; @@ -467,9 +469,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen, *keybitsize = kh->bitlen; api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); + ep11_kb_wkvp(key, keylen), xflags); if (rc) goto out; @@ -484,7 +486,6 @@ static int ep11_verifykey(const u8 *key, u32 keylen, } out: - kfree(apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -500,12 +501,12 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 pflags) { const struct keytoken_header *hdr = (const struct keytoken_header *)key; const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */ u32 tmplen, keysize = 0; - u8 *tmpbuf; int i, rc; if (keylen < sizeof(*hdr)) @@ -517,26 +518,20 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, if (!keysize || t->len != keysize) return -EINVAL; - /* alloc tmp key buffer */ - tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - /* try two times in case of failure */ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { tmplen = MAXEP11AESKEYBLOBSIZE; rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, 8 * keysize, 0, t->clearkey, t->len, - tmpbuf, &tmplen, NULL); + tmpbuf, &tmplen, NULL, pflags); pr_debug("ep11_clr2key()=%d\n", rc); if (rc) continue; rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, pflags); pr_debug("ep11_key2protkey()=%d\n", rc); } - kfree(tmpbuf); pr_debug("rc=%d\n", rc); return rc; } diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c index 835d59f4fbc5..7eca9f1340bd 100644 --- a/drivers/s390/crypto/pkey_pckmo.c +++ b/drivers/s390/crypto/pkey_pckmo.c @@ -406,7 +406,8 @@ out: static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, size_t _nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *keyinfo) + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) { return pckmo_key2protkey(key, keylen, protkey, protkeylen, keyinfo); @@ -415,7 +416,8 @@ static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, u32 keytype, u32 keysubtype, u32 _keybitsize, u32 _flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 _xflags __always_unused) { return pckmo_gen_protkey(keytype, keysubtype, keybuf, keybuflen, keyinfo); @@ -423,7 +425,8 @@ static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, u16 *_card, u16 *_dom, - u32 *_keytype, u32 *_keybitsize, u32 *_flags) + u32 *_keytype, u32 *_keybitsize, + u32 *_flags, u32 _xflags __always_unused) { return pckmo_verify_key(key, keylen); } diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c index 57edc97bafd2..cea772973649 100644 --- a/drivers/s390/crypto/pkey_sysfs.c +++ b/drivers/s390/crypto/pkey_sysfs.c @@ -29,13 +29,13 @@ static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype, rc = pkey_handler_gen_key(NULL, 0, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, 0); if (rc == -ENODEV) { pkey_handler_request_modules(); rc = pkey_handler_gen_key(NULL, 0, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + 
keybuf, keybuflen, keyinfo, 0); } return rc; diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c index 805817b14354..e5c6e01acaf3 100644 --- a/drivers/s390/crypto/pkey_uv.c +++ b/drivers/s390/crypto/pkey_uv.c @@ -21,6 +21,12 @@ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 protected key UV handler"); /* + * One pre-allocated uv_secret_list for use with uv_find_secret() + */ +static struct uv_secret_list *uv_list; +static DEFINE_MUTEX(uv_list_mutex); + +/* * UV secret token struct and defines. */ @@ -85,13 +91,26 @@ static bool is_uv_keytype(enum pkey_key_type keytype) } } +static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list_item_hdr *secret) +{ + int rc; + + mutex_lock(&uv_list_mutex); + memset(uv_list, 0, sizeof(*uv_list)); + rc = uv_find_secret(secret_id, uv_list, secret); + mutex_unlock(&uv_list_mutex); + + return rc; +} + static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN], u16 *secret_type, u8 *buf, u32 *buflen) { struct uv_secret_list_item_hdr secret_meta_data; int rc; - rc = uv_get_secret_metadata(secret_id, &secret_meta_data); + rc = get_secret_metadata(secret_id, &secret_meta_data); if (rc) return rc; @@ -172,7 +191,8 @@ static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype) static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused, size_t _nr_apqns __always_unused, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *keyinfo) + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) { struct uvsecrettoken *t = (struct uvsecrettoken *)key; u32 pkeysize, pkeytype; @@ -214,7 +234,8 @@ out: static int uv_verifykey(const u8 *key, u32 keylen, u16 *_card __always_unused, u16 *_dom __always_unused, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags __always_unused) { struct uvsecrettoken *t = (struct uvsecrettoken *)key; struct uv_secret_list_item_hdr secret_meta_data; @@ -225,7 +246,7 @@ static int uv_verifykey(const u8 *key, u32 keylen, if (rc) goto out; - rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data); + rc = get_secret_metadata(t->secret_id, &secret_meta_data); if (rc) goto out; @@ -263,13 +284,23 @@ static struct pkey_handler uv_handler = { */ static int __init pkey_uv_init(void) { + int rc; + if (!is_prot_virt_guest()) return -ENODEV; if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list)) return -ENODEV; - return pkey_handler_register(&uv_handler); + uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL); + if (!uv_list) + return -ENOMEM; + + rc = pkey_handler_register(&uv_handler); + if (rc) + kfree(uv_list); + + return rc; } /* @@ -278,6 +309,9 @@ static int __init pkey_uv_init(void) static void __exit pkey_uv_exit(void) { pkey_handler_unregister(&uv_handler); + mutex_lock(&uv_list_mutex); + kvfree(uv_list); + mutex_unlock(&uv_list_mutex); } module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 5020696f1379..89baa87a13fc 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -50,6 +50,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 
2001, 2012"); MODULE_LICENSE("GPL"); +unsigned int zcrypt_mempool_threshold = 5; +module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440); +MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)"); + /* * zcrypt tracepoint functions */ @@ -642,16 +646,17 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (mex->outputdatalength < mex->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -728,7 +733,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -746,16 +751,17 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(crt, TP_ICARSACRT); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (crt->outputdatalength < crt->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -832,7 +838,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -842,23 +848,28 @@ out: return rc; } -static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ica_xcRB *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; unsigned short *domain, tdom; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); xcrb->status = 0; - ap_init_message(&ap_msg); + + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
+ AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) @@ -962,7 +973,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -972,7 +983,7 @@ out: return rc; } -long zcrypt_send_cprb(struct ica_xcRB *xcrb) +long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags) { struct zcrypt_track tr; int rc; @@ -980,13 +991,13 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb) memset(&tr, 0, sizeof(tr)); do { - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1024,50 +1035,50 @@ static bool is_desired_ep11_queue(unsigned int dev_qid, return false; } -static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ep11_urb *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - struct ep11_target_dev *targets; + struct ep11_target_dev *targets = NULL; unsigned short target_num; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code, domain; + unsigned int func_code = 0, domain; struct ap_message ap_msg; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
+ AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; target_num = (unsigned short)xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ - targets = NULL; + rc = -ENOMEM; if (target_num != 0) { - struct ep11_target_dev __user *uptr; - - targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); - if (!targets) { - func_code = 0; - rc = -ENOMEM; - goto out; - } - - uptr = (struct ep11_target_dev __force __user *)xcrb->targets; - if (z_copy_from_user(userspace, targets, uptr, - target_num * sizeof(*targets))) { - func_code = 0; - rc = -EFAULT; - goto out_free; + if (userspace) { + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); + if (!targets) + goto out; + if (copy_from_user(targets, xcrb->targets, + target_num * sizeof(*targets))) { + rc = -EFAULT; + goto out; + } + } else { + targets = (struct ep11_target_dev __force __kernel *)xcrb->targets; } } rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) - goto out_free; + goto out; print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1, ap_msg.msg, ap_msg.len, false); @@ -1075,11 +1086,11 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { if (!test_bit_inv(domain, perms->adm)) { rc = -ENODEV; - goto out_free; + goto out; } } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { rc = -EOPNOTSUPP; - goto out_free; + goto out; } } @@ -1147,7 +1158,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, pr_debug("no match for address ff.ffff => ENODEV\n"); } rc = -ENODEV; - goto out_free; + goto out; } qid = pref_zq->queue->qid; @@ -1161,10 +1172,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); -out_free: - kfree(targets); out: - ap_release_message(&ap_msg); + if (userspace) + kfree(targets); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -1174,7 +1185,7 @@ out: return rc; } -long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags) { struct zcrypt_track tr; int rc; @@ -1182,13 +1193,13 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) memset(&tr, 0, sizeof(tr)); do { - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1204,7 +1215,7 @@ static long zcrypt_rng(char *buffer) struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; struct ap_message ap_msg; unsigned int domain; int qid = 0, rc = -ENODEV; @@ -1212,7 +1223,9 @@ static long zcrypt_rng(char *buffer) trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); if (rc) goto out; @@ -1258,7 +1271,7 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + 
ap_release_apmsg(&ap_msg); trace_s390_zcrypt_rep(buffer, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; @@ -1291,19 +1304,25 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) spin_unlock(&zcrypt_list_lock); } -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue) { struct zcrypt_card *zc; struct zcrypt_queue *zq; struct zcrypt_device_status_ext *stat; int card, queue; + maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT); + maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT); + spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); queue = AP_QID_QUEUE(zq->queue->qid); - stat = &devstatus[card * AP_DOMAINS + queue]; + if (card >= maxcard || queue >= maxqueue) + continue; + stat = &devstatus[card * maxqueue + queue]; stat->hwtype = zc->card->ap_dev.device_type; stat->functions = zc->card->hwinfo.fac >> 26; stat->qid = zq->queue->qid; @@ -1523,6 +1542,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ica_xcRB xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ica_xcRB __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1530,13 +1550,13 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1553,6 +1573,7 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ep11_urb xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ep11_urb __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1560,13 +1581,13 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1607,7 +1628,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, GFP_KERNEL); if (!device_status) return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + zcrypt_device_status_mask_ext(device_status, + MAX_ZDEV_CARDIDS_EXT, + MAX_ZDEV_DOMAINS_EXT); if (copy_to_user((char __user *)arg, device_status, total_size)) rc = -EFAULT; @@ -1827,6 +1850,7 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct 
compat_ica_xcrb xcrb32; struct zcrypt_track tr; struct ica_xcRB xcrb64; @@ -1856,13 +1880,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, xcrb64.priority_window = xcrb32.priority_window; xcrb64.status = xcrb32.status; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -2132,13 +2156,27 @@ int __init zcrypt_api_init(void) { int rc; + /* make sure the mempool threshold is >= 1 */ + if (zcrypt_mempool_threshold < 1) { + rc = -EINVAL; + goto out; + } + rc = zcrypt_debug_init(); if (rc) goto out; rc = zcdn_init(); if (rc) - goto out; + goto out_zcdn_init_failed; + + rc = zcrypt_ccamisc_init(); + if (rc) + goto out_ccamisc_init_failed; + + rc = zcrypt_ep11misc_init(); + if (rc) + goto out_ep11misc_init_failed; /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); @@ -2151,7 +2189,12 @@ int __init zcrypt_api_init(void) return 0; out_misc_register_failed: + zcrypt_ep11misc_exit(); +out_ep11misc_init_failed: + zcrypt_ccamisc_exit(); +out_ccamisc_init_failed: zcdn_exit(); +out_zcdn_init_failed: zcrypt_debug_exit(); out: return rc; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 4ed481df57ca..6ef8850a42df 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -76,6 +76,13 @@ struct zcrypt_track { #define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000 #define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000 +/* + * xflags - to be used with zcrypt_send_cprb() and + * zcrypt_send_ep11_cprb() for the xflags parameter. 
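The two xflags defined just below are the glue between the pkey handlers and the zcrypt layer: the ioctl paths set ZCRYPT_XFLAG_USERSPACE because their data pointers address user memory, and each pkey handler translates its caller's PKEY_XFLAG_NOMEMALLOC request into ZCRYPT_XFLAG_NOMEMALLOC before calling the CCA/EP11 helpers. A minimal userspace sketch of that translation follows; the PKEY_XFLAG_NOMEMALLOC value used here is assumed purely for illustration, only the two ZCRYPT_XFLAG_* values mirror the defines below.

#include <stdio.h>

#define PKEY_XFLAG_NOMEMALLOC_SKETCH   0x0001u  /* assumed value, sketch only */
#define ZCRYPT_XFLAG_USERSPACE_SKETCH  0x0001u  /* mirrors the define below */
#define ZCRYPT_XFLAG_NOMEMALLOC_SKETCH 0x0002u  /* mirrors the define below */

/* same pattern as the pkey handlers: forward only the no-alloc request */
static unsigned int pkey_to_zcrypt_xflags(unsigned int pflags)
{
	return (pflags & PKEY_XFLAG_NOMEMALLOC_SKETCH) ?
		ZCRYPT_XFLAG_NOMEMALLOC_SKETCH : 0;
}

int main(void)
{
	printf("0x%x\n", pkey_to_zcrypt_xflags(PKEY_XFLAG_NOMEMALLOC_SKETCH));
	printf("0x%x\n", pkey_to_zcrypt_xflags(0));
	return 0;
}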
+ */ +#define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */ +#define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */ + struct zcrypt_ops { long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *, struct ap_message *); @@ -132,6 +139,8 @@ extern atomic_t zcrypt_rescan_req; extern spinlock_t zcrypt_list_lock; extern struct list_head zcrypt_card_list; +extern unsigned int zcrypt_mempool_threshold; + #define for_each_zcrypt_card(_zc) \ list_for_each_entry(_zc, &zcrypt_card_list, list) @@ -161,9 +170,10 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *); struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); -long zcrypt_send_cprb(struct ica_xcRB *xcRB); -long zcrypt_send_ep11_cprb(struct ep11_urb *urb); -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); +long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags); +long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags); +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue); int zcrypt_device_status_ext(int card, int queue, struct zcrypt_device_status_ext *devstatus); diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 43a27cb3db84..b975a3728c23 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/random.h> @@ -29,16 +30,31 @@ /* Size of vardata block used for some of the cca requests/replies */ #define VARDATASIZE 4096 -struct cca_info_list_entry { - struct list_head list; - u16 cardnr; - u16 domain; - struct cca_info info; -}; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used + * when alloc_and_prep_cprbmem() is called with the xflag + * ZCRYPT_XFLAG_NOMEMALLOC. The cprb memory needs to hold + * space for request AND reply! + */ +#define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024) +static mempool_t *cprb_mempool; -/* a list with cca_info_list_entry entries */ -static LIST_HEAD(cca_info_list); -static DEFINE_SPINLOCK(cca_info_list_lock); +/* + * This is a pre-allocated memory for the device status array + * used within the findcard() functions. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaption if more + * than 128 cards or domains are to be supported.
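The cprb mempool and the dev_status_mem buffer serve the same purpose: memory that is reserved once at init so that requests flagged ZCRYPT_XFLAG_NOMEMALLOC never have to call the allocator on the I/O path. The sketch below is a userspace analogue of that idea (a single statically reserved item handed out under a flag), not the kernel mempool API; the real pool holds zcrypt_mempool_threshold items of CPRB_MEMPOOL_ITEM_SIZE bytes each.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ITEM_SIZE (16 * 1024)

static unsigned char emergency_item[ITEM_SIZE];
static int emergency_in_use;

/* hand out a buffer; with no_alloc set, only the reserved item may be used */
static void *get_cprb_mem(size_t len, int no_alloc)
{
	if (no_alloc) {
		if (len > ITEM_SIZE || emergency_in_use)
			return NULL;
		emergency_in_use = 1;
		memset(emergency_item, 0, len);
		return emergency_item;
	}
	return calloc(1, len);
}

static void put_cprb_mem(void *p, int no_alloc)
{
	if (no_alloc)
		emergency_in_use = 0;
	else
		free(p);
}

int main(void)
{
	void *p = get_cprb_mem(4096, 1);

	printf("reserved item: %s\n", p ? "ok" : "busy");
	put_cprb_mem(p, 1);
	return 0;
}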
+ */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); /* * Simple check if the token is a valid CCA secure AES data key @@ -219,19 +235,27 @@ EXPORT_SYMBOL(cca_check_sececckeytoken); static int alloc_and_prep_cprbmem(size_t paramblen, u8 **p_cprb_mem, struct CPRBX **p_req_cprb, - struct CPRBX **p_rep_cprb) + struct CPRBX **p_rep_cprb, + u32 xflags) { - u8 *cprbmem; + u8 *cprbmem = NULL; size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + size_t len = 2 * cprbplusparamblen; struct CPRBX *preqcblk, *prepcblk; /* * allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block */ - cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprbmem = mempool_alloc_preallocated(cprb_mempool); + } else { + cprbmem = kmalloc(len, GFP_KERNEL); + } if (!cprbmem) return -ENOMEM; + memset(cprbmem, 0, len); preqcblk = (struct CPRBX *)cprbmem; prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); @@ -261,11 +285,15 @@ static int alloc_and_prep_cprbmem(size_t paramblen, * with zeros before freeing (useful if there was some * clear key material in there). */ -static void free_cprbmem(void *mem, size_t paramblen, int scrub) +static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags) { - if (scrub) + if (mem && scrub) memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); - kfree(mem); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); } /* @@ -290,7 +318,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, - u32 keybitsize, u8 *seckey) + u32 keybitsize, u8 *seckey, u32 xflags) { int i, rc, keysize; int seckeysize; @@ -332,7 +360,8 @@ int cca_genseckey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -379,7 +408,7 @@ int cca_genseckey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", __func__, (int)cardnr, (int)domain, rc); @@ -424,7 +453,7 @@ int cca_genseckey(u16 cardnr, u16 domain, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_genseckey); @@ -433,7 +462,7 @@ EXPORT_SYMBOL(cca_genseckey); * Generate an CCA AES DATA secure key with given key value. 
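free_cprbmem() now takes a bool scrub argument and zeroes the request/reply area with memzero_explicit() before handing it back to the pool or to kfree() whenever clear key material may have passed through it. The same idea in portable C, using a hand-rolled zeroing loop through a volatile pointer so the compiler cannot drop the stores; this is an illustrative stand-in, not the kernel helper.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* zero a buffer in a way the compiler is not allowed to optimize away */
static void scrub(void *p, size_t len)
{
	volatile unsigned char *vp = p;

	while (len--)
		*vp++ = 0;
}

static void free_keymem(void *mem, size_t len, int held_clear_key)
{
	if (mem && held_clear_key)
		scrub(mem, len);
	free(mem);
}

int main(void)
{
	char *buf = malloc(64);

	if (!buf)
		return 1;
	strcpy(buf, "clear key material");
	free_keymem(buf, 64, 1);	/* scrub: buffer held sensitive data */
	puts("done");
	return 0;
}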
*/ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey) + const u8 *clrkey, u8 *seckey, u32 xflags) { int rc, keysize, seckeysize; u8 *mem, *ptr; @@ -473,7 +502,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -517,7 +547,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -563,7 +593,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 1); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_clr2seckey); @@ -573,7 +603,7 @@ EXPORT_SYMBOL(cca_clr2seckey); */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -619,7 +649,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -644,7 +675,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -712,7 +743,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, *protkeylen = prepparm->lv3.ckb.len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_sec2protkey); @@ -737,7 +768,7 @@ static const u8 aes_cipher_key_skeleton[] = { * Generate (random) CCA AES CIPHER secure key. 
*/ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize) + u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; u8 *mem, *ptr; @@ -813,7 +844,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, struct cipherkeytoken *t; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -872,7 +904,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -923,7 +955,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, *keybufsize = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_gencipherkey); @@ -938,7 +970,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, const u8 *clr_key_value, int clr_key_bit_size, u8 *key_token, - int *key_token_size) + int *key_token_size, + u32 xflags) { int rc, n; u8 *mem, *ptr; @@ -989,7 +1022,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1038,7 +1072,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1077,7 +1111,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, *key_token_size = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } @@ -1085,23 +1119,31 @@ out: * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, u32 *keybufsize) + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; - u8 *token; + void *mem; int tokensize; - u8 exorbuf[32]; + u8 *token, exorbuf[32]; struct cipherkeytoken *t; /* fill exorbuf with random data */ get_random_bytes(exorbuf, sizeof(exorbuf)); - /* allocate space for the key token to build */ - token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL); - if (!token) + /* + * Allocate space for the key token to build. + * Also we only need up to MAXCCAVLSCTOKENSIZE bytes for this + * we use the already existing cprb mempool to solve this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; /* prepare the token with the key skeleton */ + token = (u8 *)mem; tokensize = SIZEOF_SKELETON; memcpy(token, aes_cipher_key_skeleton, tokensize); @@ -1120,28 +1162,28 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, * 4/4 COMPLETE the secure cipher key import */ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - clrkey, keybitsize, token, &tokensize); + clrkey, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, - NULL, keybitsize, token, &tokensize); + NULL, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); @@ -1158,7 +1200,7 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, *keybufsize = tokensize; out: - kfree(token); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(cca_clr2cipherkey); @@ -1167,7 +1209,8 @@ EXPORT_SYMBOL(cca_clr2cipherkey); * Derive proteced key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; u8 *mem, *ptr; @@ -1219,7 +1262,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, int keytoklen = ((struct cipherkeytoken *)ckey)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1249,7 +1293,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1323,7 +1367,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, *protkeylen = prepparm->vud.ckb.keylen; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_cipher2protkey); @@ -1332,7 +1376,7 @@ EXPORT_SYMBOL(cca_cipher2protkey); * Derive protected key from CCA ECC secure private key. 
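cca_clr2cipherkey() imports the clear key in parts: the random exorbuf as the first part, the clear key as the second, and the same exorbuf again as the third, before the COMPLETE step. Assuming the CSNBKPI2 key-part import combines the parts by XOR (which the deliberate reuse of the identical exorbuf suggests), the two random parts cancel out and the effective key is exactly the clear key, while no single transfer carries the clear value on its own. A tiny standalone illustration of that cancellation:

#include <stdio.h>
#include <string.h>

#define KEYLEN 16

int main(void)
{
	unsigned char key[KEYLEN], mask[KEYLEN], acc[KEYLEN] = { 0 };
	int i;

	for (i = 0; i < KEYLEN; i++) {
		key[i] = (unsigned char)i;		/* the clear key value */
		mask[i] = (unsigned char)(0xa5 ^ i);	/* stands in for the random exorbuf */
	}

	/* part 1: mask, part 2: clear key, part 3: mask again */
	for (i = 0; i < KEYLEN; i++)
		acc[i] ^= mask[i];
	for (i = 0; i < KEYLEN; i++)
		acc[i] ^= key[i];
	for (i = 0; i < KEYLEN; i++)
		acc[i] ^= mask[i];

	/* the two mask parts cancel, leaving exactly the clear key */
	puts(memcmp(acc, key, KEYLEN) == 0 ? "key recovered" : "mismatch");
	return 0;
}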
*/ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -1382,7 +1426,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int keylen = ((struct eccprivkeytoken *)key)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1412,7 +1457,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1470,7 +1515,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, *protkeytype = PKEY_KEYTYPE_ECC; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_ecc2protkey); @@ -1481,7 +1526,8 @@ EXPORT_SYMBOL(cca_ecc2protkey); int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen) + u8 *varray, size_t *varraylen, + u32 xflags) { int rc; u16 len; @@ -1505,7 +1551,8 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(parmbsize, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1526,7 +1573,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1573,94 +1620,21 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } out: - free_cprbmem(mem, parmbsize, 0); + free_cprbmem(mem, parmbsize, false, xflags); return rc; } EXPORT_SYMBOL(cca_query_crypto_facility); -static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci) -{ - int rc = -ENOENT; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && ptr->domain == domain) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&cca_info_list_lock); - - return rc; -} - -static void cca_info_cache_update(u16 cardnr, u16 domain, - const struct cca_info *ci) -{ - int found = 0; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&cca_info_list_lock); - return; - } - ptr->cardnr = cardnr; - ptr->domain = domain; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &cca_info_list); - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void cca_info_cache_scrub(u16 cardnr, u16 domain) -{ - struct 
cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void __exit mkvp_cache_free(void) -{ - struct cca_info_list_entry *ptr, *pnext; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&cca_info_list_lock); -} - /* - * Fetch cca_info values via query_crypto_facility from adapter. + * Fetch cca_info values about a CCA queue via + * query_crypto_facility from adapter. */ -static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) +int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags) { + void *mem; int rc, found = 0; size_t rlen, vlen; - u8 *rarray, *varray, *pg; + u8 *rarray, *varray; struct zcrypt_device_status_ext devstat; memset(ci, 0, sizeof(*ci)); @@ -1671,17 +1645,22 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) return rc; ci->hwtype = devstat.hwtype; - /* prep page for rule array and var array use */ - pg = (u8 *)__get_free_page(GFP_KERNEL); - if (!pg) + /* + * Prep memory for rule array and var array use. + * Use the cprb mempool for this. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? + mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; - rarray = pg; - varray = pg + PAGE_SIZE / 2; + rarray = (u8 *)mem; + varray = (u8 *)mem + PAGE_SIZE / 2; rlen = vlen = PAGE_SIZE / 2; /* QF for this card/domain */ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); ci->new_asym_mk_state = (char)rarray[4 * 8]; @@ -1708,7 +1687,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) goto out; rlen = vlen = PAGE_SIZE / 2; rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { ci->new_apka_mk_state = (char)rarray[10 * 8]; ci->cur_apka_mk_state = (char)rarray[11 * 8]; @@ -1723,177 +1702,32 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) } out: - free_page((unsigned long)pg); + mempool_free(mem, cprb_mempool); return found == 2 ? 0 : -ENOENT; } - -/* - * Fetch cca information about a CCA queue. - */ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify) -{ - int rc; - - rc = cca_info_cache_fetch(card, dom, ci); - if (rc || verify) { - rc = fetch_cca_info(card, dom, ci); - if (rc == 0) - cca_info_cache_update(card, dom, ci); - } - - return rc; -} EXPORT_SYMBOL(cca_get_info); -/* - * Search for a matching crypto card based on the - * Master Key Verification Pattern given. 
- */ -static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, - int verify, int minhwtype) -{ - struct zcrypt_device_status_ext *device_status; - u16 card, dom; - struct cca_info ci; - int i, rc, oi = -1; - - /* mkvp must not be zero, minhwtype needs to be >= 0 */ - if (mkvp == 0 || minhwtype < 0) - return -EINVAL; - - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); - - /* walk through all crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - if (device_status[i].online && - device_status[i].functions & 0x04) { - /* enabled CCA card, check current mkvp from cache */ - if (cca_info_cache_fetch(card, dom, &ci) == 0 && - ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) { - if (!verify) - break; - /* verify: refresh card info */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - } - } - } else { - /* Card is offline and/or not a CCA card. */ - /* del mkvp entry from cache if it exists */ - cca_info_cache_scrub(card, dom); - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT) { - /* nothing found, so this time without cache */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - if (!(device_status[i].online && - device_status[i].functions & 0x04)) - continue; - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - /* fresh fetch mkvp from adapter */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - if (ci.hwtype >= minhwtype && - ci.old_aes_mk_state == '2' && - ci.old_aes_mkvp == mkvp && - oi < 0) - oi = i; - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { - /* old mkvp matched, use this card then */ - card = AP_QID_CARD(device_status[oi].qid); - dom = AP_QID_QUEUE(device_status[oi].qid); - } - } - if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { - if (pcardnr) - *pcardnr = card; - if (pdomain) - *pdomain = dom; - rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); - } else { - rc = -ENODEV; - } - - kvfree(device_status); - return rc; -} - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key token. 
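cca_findcard2() no longer allocates and returns an APQN array; the caller passes a fixed array (typically u32 apqns[MAXAPQNSINLIST]) and *nr_apqns carries the capacity in and the number of matches out. A small sketch of that calling convention, with a made-up matching rule standing in for the MKVP and hardware-type filtering:

#include <stdio.h>

/*
 * Fill a caller-provided array up to its capacity; *nr holds the capacity
 * on entry and the number of matches on exit, mirroring the reworked
 * cca_findcard2() convention. The matching rule here is invented.
 */
static int find_matching(unsigned int *out, unsigned int *nr,
			 const unsigned int *all, unsigned int nall,
			 unsigned int wanted_low_byte)
{
	unsigned int i, n = 0;

	for (i = 0; i < nall && n < *nr; i++)
		if ((all[i] & 0xff) == wanted_low_byte)
			out[n++] = all[i];
	*nr = n;

	return n ? 0 : -1;	/* the driver returns -ENODEV here */
}

int main(void)
{
	unsigned int pool[] = { 0x10001, 0x20002, 0x30001, 0x40001 };
	unsigned int found[2], nr = sizeof(found) / sizeof(found[0]);
	int rc = find_matching(found, &nr, pool, 4, 0x01);

	printf("rc=%d matches=%u\n", rc, nr);
	return 0;
}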
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) -{ - u64 mkvp; - int minhwtype = 0; - const struct keytoken_header *hdr = (struct keytoken_header *)key; - - if (hdr->type != TOKTYPE_CCA_INTERNAL) - return -EINVAL; - - switch (hdr->version) { - case TOKVER_CCA_AES: - mkvp = ((struct secaeskeytoken *)key)->mkvp; - break; - case TOKVER_CCA_VLSC: - mkvp = ((struct cipherkeytoken *)key)->mkvp0; - minhwtype = AP_DEVICE_TYPE_CEX6; - break; - default: - return -EINVAL; - } - - return findcard(mkvp, pcardnr, pdomain, verify, minhwtype); -} -EXPORT_SYMBOL(cca_findcard); - -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify) + u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, curmatch, oldmatch, rc = 0; + int i, card, dom, curmatch, oldmatch; struct cca_info ci; + u32 _nr_apqns = 0; - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqnss */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1909,7 +1743,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, if (domain != 0xFFFF && dom != domain) continue; /* get cca info on this apqn */ - if (cca_get_info(card, dom, &ci, verify)) + if (cca_get_info(card, dom, &ci, xflags)) continue; /* current master key needs to be valid */ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2') @@ -1939,27 +1773,41 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criterons, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + /* release the device status memory */ + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(cca_findcard2); -void __exit zcrypt_ccamisc_exit(void) +int __init zcrypt_ccamisc_init(void) +{ + /* Pre-allocate a small memory pool for cca cprbs. 
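+	 * The pool holds zcrypt_mempool_threshold items of CPRB_MEMPOOL_ITEM_SIZE +	 * bytes each. Users such as cca_get_info() draw from it via +	 * mempool_alloc_preallocated() instead of a GFP_KERNEL allocation when +	 * the caller passes ZCRYPT_XFLAG_NOMEMALLOC.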
*/ + cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in findcard() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ccamisc_exit(void) { - mkvp_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 26bdca702523..1ecc4e37e9ad 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -160,44 +160,47 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, /* * Generate (random) CCA AES DATA secure key. */ -int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); +int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey, + u32 xflags); /* * Generate CCA AES DATA secure key with given clear key value. */ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey); + const u8 *clrkey, u8 *seckey, u32 xflags); /* * Derive proteced key from an CCA AES DATA secure key. */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); /* * Generate (random) CCA AES CIPHER secure key. */ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize); + u8 *keybuf, u32 *keybufsize, u32 xflags); /* * Derive proteced key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); /* * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, u32 *keybufsize); + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, + u32 xflags); /* * Derive proteced key from CCA ECC secure private key. */ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags); /* * Query cryptographic facility from CCA adapter @@ -205,16 +208,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen); - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key. - * Works with CCA AES data and cipher keys. - * Returns < 0 on failure, 0 if CURRENT MKVP matches and - * 1 if OLD MKVP matches. 
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); + u8 *varray, size_t *varraylen, + u32 xflags); /* * Build a list of cca apqns meeting the following constrains: @@ -224,21 +219,16 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); * - if minhwtype > 0 only apqns with hwtype >= minhwtype * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp * - if old_mkvp != 0 only apqns where old_mkvp == mkvp - * - if verify is enabled and a cur_mkvp and/or old_mkvp - * value is given, then refetch the cca_info and make sure the current - * cur_mkvp or old_mkvp values of the apqn are used. * The mktype determines which set of master keys to use: * 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify); + u32 xflags); #define AES_MK_SET 0 #define APKA_MK_SET 1 @@ -270,8 +260,9 @@ struct cca_info { /* * Fetch cca information about an CCA queue. 
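 * If xflags contains ZCRYPT_XFLAG_NOMEMALLOC, only preallocated (mempool)  * memory is used for the internal request buffers.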
*/ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify); +int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags); +int zcrypt_ccamisc_init(void); void zcrypt_ccamisc_exit(void); #endif /* _ZCRYPT_CCAMISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 64df7d2f6266..6ba7fbddd3f7 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -79,14 +79,13 @@ static ssize_t cca_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct cca_info ci; struct ap_card *ac = to_ap_card(dev); + struct cca_info ci; memset(&ci, 0, sizeof(ci)); if (ap_domain_index >= 0) - cca_get_info(ac->id, ap_domain_index, &ci, zc->online); + cca_get_info(ac->id, ap_domain_index, &ci, 0); return sysfs_emit(buf, "%s\n", ci.serial); } @@ -110,17 +109,17 @@ static ssize_t cca_mkvps_show(struct device *dev, struct device_attribute *attr, char *buf) { + static const char * const new_state[] = { "empty", "partial", "full" }; + static const char * const cao_state[] = { "invalid", "valid" }; struct zcrypt_queue *zq = dev_get_drvdata(dev); - int n = 0; struct cca_info ci; - static const char * const cao_state[] = { "invalid", "valid" }; - static const char * const new_state[] = { "empty", "partial", "full" }; + int n = 0; memset(&ci, 0, sizeof(ci)); cca_get_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &ci, zq->online); + &ci, 0); if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n", @@ -210,13 +209,12 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.API_ord_nr > 0) return sysfs_emit(buf, "%u\n", ci.API_ord_nr); @@ -231,13 +229,12 @@ static ssize_t ep11_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.FW_version > 0) return sysfs_emit(buf, "%d.%d\n", @@ -254,13 +251,12 @@ static ssize_t ep11_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.serial[0]) return sysfs_emit(buf, "%16.16s\n", ci.serial); @@ -291,14 +287,13 @@ static ssize_t ep11_card_op_modes_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - int i, n = 0; - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; + int i, n = 0; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { @@ -348,7 +343,7 @@ static ssize_t ep11_mkvps_show(struct device *dev, if 
(zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); if (di.cur_wk_state == '0') { n = sysfs_emit(buf, "WK CUR: %s -\n", @@ -395,7 +390,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev, if (zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index cb7e6da43602..2f50fc7b8f61 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -10,9 +10,10 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/random.h> +#include <linux/slab.h> #include <asm/zcrypt.h> #include <asm/pkey.h> #include <crypto/aes.h> @@ -30,85 +31,29 @@ static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; -/* ep11 card info cache */ -struct card_list_entry { - struct list_head list; - u16 cardnr; - struct ep11_card_info info; -}; -static LIST_HEAD(card_list); -static DEFINE_SPINLOCK(card_list_lock); - -static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci) -{ - int rc = -ENOENT; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&card_list_lock); - - return rc; -} - -static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci) -{ - int found = 0; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&card_list_lock); - return; - } - ptr->cardnr = cardnr; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &card_list); - } - spin_unlock_bh(&card_list_lock); -} - -static void card_cache_scrub(u16 cardnr) -{ - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&card_list_lock); -} - -static void __exit card_cache_free(void) -{ - struct card_list_entry *ptr, *pnext; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used when + * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC. + */ +#define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024) +static mempool_t *cprb_mempool; - spin_lock_bh(&card_list_lock); - list_for_each_entry_safe(ptr, pnext, &card_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&card_list_lock); -} +/* + * This is a pre-allocated memory for the device status array + * used within the ep11_findcard2() function. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaption if more + * than 128 cards or domains to be are supported. 
+ */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver, struct ep11kblob_header **kbhdr, size_t *kbhdrsize, @@ -411,14 +356,20 @@ EXPORT_SYMBOL(ep11_check_aes_key); /* * Allocate and prepare ep11 cprb plus additional payload. */ -static inline struct ep11_cprb *alloc_cprb(size_t payload_len) +static void *alloc_cprbmem(size_t payload_len, u32 xflags) { size_t len = sizeof(struct ep11_cprb) + payload_len; - struct ep11_cprb *cprb; + struct ep11_cprb *cprb = NULL; - cprb = kzalloc(len, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprb = mempool_alloc_preallocated(cprb_mempool); + } else { + cprb = kmalloc(len, GFP_KERNEL); + } if (!cprb) return NULL; + memset(cprb, 0, len); cprb->cprb_len = sizeof(struct ep11_cprb); cprb->cprb_ver_id = 0x04; @@ -430,6 +381,20 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len) } /* + * Free ep11 cprb buffer space. + */ +static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags) +{ + if (mem && scrub) + memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); +} + +/* * Some helper functions related to ASN1 encoding. * Limited to length info <= 2 byte. */ @@ -489,6 +454,7 @@ static inline void prep_urb(struct ep11_urb *u, struct ep11_cprb *req, size_t req_len, struct ep11_cprb *rep, size_t rep_len) { + memset(u, 0, sizeof(*u)); u->targets = (u8 __user *)t; u->targets_num = nt; u->req = (u8 __user *)req; @@ -583,7 +549,7 @@ static int check_reply_cprb(const struct ep11_cprb *rep, const char *func) * Helper function which does an ep11 query with given query type. 
*/ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, - size_t buflen, u8 *buf) + size_t buflen, u8 *buf, u32 xflags) { struct ep11_info_req_pl { struct pl_head head; @@ -605,11 +571,11 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api = EP11_API_V1, rc = -ENOMEM; /* request cprb and payload */ - req = alloc_cprb(sizeof(struct ep11_info_req_pl)); + req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags); if (!req) goto out; req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -621,22 +587,19 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, req_pl->query_subtype_len = sizeof(u32); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); + rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags); if (!rep) goto out; rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = cardnr; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + sizeof(*req_pl), rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -667,16 +630,15 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, 0, false, xflags); return rc; } /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags) { int rc; struct ep11_module_query_info { @@ -706,30 +668,26 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) u32 max_CP_index; } __packed * pmqi = NULL; - rc = card_cache_fetch(card, info); - if (rc || verify) { - pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL); - if (!pmqi) - return -ENOMEM; - rc = ep11_query_info(card, AUTOSEL_DOM, - 0x01 /* module info query */, - sizeof(*pmqi), (u8 *)pmqi); - if (rc) { - if (rc == -ENODEV) - card_cache_scrub(card); - goto out; - } - memset(info, 0, sizeof(*info)); - info->API_ord_nr = pmqi->API_ord_nr; - info->FW_version = - (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; - memcpy(info->serial, pmqi->serial, sizeof(info->serial)); - info->op_mode = pmqi->op_mode; - card_cache_update(card, info); - } + /* use the cprb mempool to satisfy this short term mem alloc */ + pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!pmqi) + return -ENOMEM; + rc = ep11_query_info(card, AUTOSEL_DOM, + 0x01 /* module info query */, + sizeof(*pmqi), (u8 *)pmqi, xflags); + if (rc) + goto out; + + memset(info, 0, sizeof(*info)); + info->API_ord_nr = pmqi->API_ord_nr; + info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; + memcpy(info->serial, pmqi->serial, sizeof(info->serial)); + info->op_mode = pmqi->op_mode; out: - kfree(pmqi); + mempool_free(pmqi, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_get_card_info); @@ -737,7 +695,8 @@ EXPORT_SYMBOL(ep11_get_card_info); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags) { int rc; struct ep11_domain_query_info { @@ -746,36 +705,32 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) u8 new_WK_VP[32]; u32 dom_flags; u64 op_mode; - } __packed * p_dom_info; - - p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL); - if (!p_dom_info) - return -ENOMEM; + } __packed dom_query_info; rc = ep11_query_info(card, domain, 0x03 /* domain info query */, - sizeof(*p_dom_info), (u8 *)p_dom_info); + sizeof(dom_query_info), (u8 *)&dom_query_info, + xflags); if (rc) goto out; memset(info, 0, sizeof(*info)); info->cur_wk_state = '0'; info->new_wk_state = '0'; - if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) { - if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) { + if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) { + if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) { info->cur_wk_state = '1'; - memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); + memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32); } - if (p_dom_info->dom_flags & 0x04 || /* new wk present */ - p_dom_info->dom_flags & 0x08 /* new wk committed */) { + if (dom_query_info.dom_flags & 0x04 || /* new wk present */ + dom_query_info.dom_flags & 0x08 /* new wk committed */) { info->new_wk_state = - p_dom_info->dom_flags & 0x08 ? '2' : '1'; - memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); + dom_query_info.dom_flags & 0x08 ? 
'2' : '1'; + memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32); } } - info->op_mode = p_dom_info->op_mode; + info->op_mode = dom_query_info.op_mode; out: - kfree(p_dom_info); return rc; } EXPORT_SYMBOL(ep11_get_domain_info); @@ -788,7 +743,7 @@ EXPORT_SYMBOL(ep11_get_domain_info); static int _ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct keygen_req_pl { struct pl_head head; @@ -823,7 +778,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -851,7 +806,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, pinblob_size = EP11_PINBLOB_V1_BYTES; } req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -877,22 +832,19 @@ static int _ep11_genaeskey(u16 card, u16 domain, *p++ = pinblob_size; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct keygen_rep_pl)); + rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -925,14 +877,13 @@ static int _ep11_genaeskey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags); return rc; } int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize, u32 keybufver) + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -953,7 +904,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return rc; rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -973,7 +924,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, u16 mode, u32 mech, const u8 *iv, const u8 *key, size_t keysize, const u8 *inbuf, size_t inbufsize, - u8 *outbuf, size_t *outbufsize) + u8 *outbuf, size_t *outbufsize, + u32 xflags) { struct crypt_req_pl { struct pl_head head; @@ -1000,8 +952,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; - size_t req_pl_size, rep_pl_size; + struct ep11_urb urb; + size_t req_pl_size, rep_pl_size = 0; int n, api = EP11_API_V1, rc = -ENOMEM; u8 *p; @@ -1012,7 +964,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1034,22 +986,19 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* reply cprb and payload, assume out data size <= in data size + 32 */ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32); - rep = alloc_cprb(rep_pl_size); + rep = alloc_cprbmem(rep_pl_size, xflags); if (!rep) goto out; rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + rep_pl_size); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1095,9 +1044,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, *outbufsize = n; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, rep_pl_size, true, xflags); return rc; } @@ -1106,7 +1054,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, const u8 *enckey, size_t enckeysize, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct uw_req_pl { struct pl_head head; @@ -1143,7 +1091,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -1161,7 +1109,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, req_pl_size = sizeof(struct uw_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keksize) + ASN1TAGLEN(0) + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1197,22 +1145,19 @@ static int _ep11_unwrapkey(u16 card, u16 domain, p += asn1tag_write(p, 0x04, enckey, enckeysize); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct uw_rep_pl)); + rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1245,9 +1190,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags); return rc; } @@ -1257,7 +1201,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, u8 *keybuf, u32 *keybufsize, - u8 keybufver) + u8 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -1271,7 +1215,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize, mech, iv, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -1290,7 +1234,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, static int _ep11_wrapkey(u16 card, u16 domain, const u8 *key, size_t keysize, u32 mech, const u8 *iv, - u8 *databuf, size_t *datasize) + u8 *databuf, size_t *datasize, u32 xflags) { struct wk_req_pl { struct pl_head head; @@ -1319,7 +1263,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; size_t req_pl_size; int api, rc = -ENOMEM; u8 *p; @@ -1327,7 +1271,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + 4; - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; if (!mech || mech == 0x80060001) @@ -1357,22 +1301,19 @@ static int _ep11_wrapkey(u16 card, u16 domain, *p++ = 0; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct wk_rep_pl)); + rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1405,18 +1346,18 @@ static int _ep11_wrapkey(u16 card, u16 domain, *datasize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags); return rc; } int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, u32 *keybufsize, - u32 keytype) + u32 keytype, u32 xflags) { int rc; - u8 encbuf[64], *kek = NULL; + void *mem; + u8 encbuf[64], *kek; size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { @@ -1427,18 +1368,24 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return -EINVAL; } - /* allocate memory for the temp kek */ + /* + * Allocate space for the temp kek. + * Also we only need up to MAXEP11AESKEYBLOBSIZE bytes for this + * we use the already existing cprb mempool to solve this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) + return -ENOMEM; + kek = (u8 *)mem; keklen = MAXEP11AESKEYBLOBSIZE; - kek = kmalloc(keklen, GFP_ATOMIC); - if (!kek) { - rc = -ENOMEM; - goto out; - } /* Step 1: generate AES 256 bit random kek key */ rc = _ep11_genaeskey(card, domain, 256, 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ - kek, &keklen); + kek, &keklen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n", __func__, rc); @@ -1447,7 +1394,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 2: encrypt clear key value with the kek key */ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, - clrkey, clrkeylen, encbuf, &encbuflen); + clrkey, clrkeylen, encbuf, &encbuflen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n", __func__, rc); @@ -1457,22 +1404,23 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 3: import the encrypted key value as a new key */ rc = ep11_unwrapkey(card, domain, kek, keklen, encbuf, encbuflen, 0, def_iv, - keybitsize, 0, keybuf, keybufsize, keytype); + keybitsize, 0, keybuf, keybufsize, keytype, xflags); if (rc) { - ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n", + ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n", __func__, rc); goto out; } out: - kfree(kek); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_clr2keyblob); int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, u32 keybloblen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { struct ep11kblob_header *hdr; struct ep11keyblob *key; @@ -1498,15 +1446,29 @@ int ep11_kblob2protkey(u16 card, u16 dom, } /* !!! hdr is no longer a valid header !!! */ - /* alloc temp working buffer */ + /* need a temp working buffer */ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); - wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); - if (!wkbuf) - return -ENOMEM; + if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) { + /* this should never happen */ + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n", + __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc); + return rc; + } + /* use the cprb mempool to satisfy this short term mem allocation */ + wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_ATOMIC); + if (!wkbuf) { + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n", + __func__, rc); + return rc; + } /* ep11 secure key -> protected key + info */ rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, - 0, def_iv, wkbuf, &wkbuflen); + 0, def_iv, wkbuf, &wkbuflen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n", __func__, rc); @@ -1573,37 +1535,32 @@ int ep11_kblob2protkey(u16 card, u16 dom, *protkeylen = wki->pkeysize; out: - kfree(wkbuf); + mempool_free(wkbuf, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_kblob2protkey); -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp) +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, rc = -ENOMEM; struct ep11_domain_info edi; struct ep11_card_info eci; + u32 _nr_apqns = 0; + int i, card, dom; - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqnss */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1623,14 +1580,14 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; /* check min api version if given */ if (minapi > 0) { - if (ep11_get_card_info(card, &eci, 0)) + if (ep11_get_card_info(card, &eci, xflags)) continue; if (minapi > eci.API_ord_nr) continue; } /* check wkvp if given */ if (wkvp) { - if (ep11_get_domain_info(card, dom, &edi)) + if (ep11_get_domain_info(card, dom, &edi, xflags)) continue; if (edi.cur_wk_state != '1') continue; @@ -1638,27 +1595,40 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criterons, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(ep11_findcard2); -void __exit zcrypt_ep11misc_exit(void) +int __init zcrypt_ep11misc_init(void) +{ + /* Pre-allocate a small memory pool for ep11 cprbs. 
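+	 * The pool holds 2 * zcrypt_mempool_threshold items of +	 * CPRB_MEMPOOL_ITEM_SIZE (8 KB) each; alloc_cprbmem() and the other +	 * short term buffers in this file are served from it when the caller +	 * passes ZCRYPT_XFLAG_NOMEMALLOC.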
*/ + cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in ep11_findcard2() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ep11misc_exit(void) { - card_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index 9f1bdffdec68..b5e6fd861815 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -104,25 +104,26 @@ struct ep11_domain_info { /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify); +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags); /* * Generate (random) EP11 AES secure key. */ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize, u32 keybufver); + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags); /* * Generate EP11 AES secure key with given clear key value. */ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, u32 *keybufsize, - u32 keytype); + u32 keytype, u32 xflags); /* * Build a list of ep11 apqns meeting the following constrains: @@ -136,22 +137,22 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, * key for this domain. When a wkvp is given there will always be a re-fetch * of the domain info for the potential apqn - so this triggers an request * reply to each apqn eligible. - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp); +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags); /* * Derive proteced key from EP11 key blob (AES and ECC keys). 
*/ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); +int zcrypt_ep11misc_init(void); void zcrypt_ep11misc_exit(void); #endif /* _ZCRYPT_EP11MISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index adc65eddaa1e..fc0a2a053dc2 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -438,7 +438,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, msg->len = sizeof(error_reply); } out: - complete((struct completion *)msg->private); + complete(&msg->response.work); } static atomic_t zcrypt_step = ATOMIC_INIT(0); @@ -449,30 +449,30 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @mex: pointer to the modexpo request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -485,7 +485,6 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", AP_QID_CARD(zq->queue->qid), @@ -499,30 +498,30 @@ out: * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @crt: pointer to the modexpoc_crt request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
*/ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -535,7 +534,6 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", AP_QID_CARD(zq->queue->qid), diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index b64c9d9fc613..9cefbb30960f 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -31,11 +31,6 @@ #define CEIL4(x) ((((x) + 3) / 4) * 4) -struct response_type { - struct completion work; - int type; -}; - #define CEXXC_RESPONSE_TYPE_ICA 0 #define CEXXC_RESPONSE_TYPE_XCRB 1 #define CEXXC_RESPONSE_TYPE_EP11 2 @@ -856,7 +851,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86x_reply *t86r; int len; @@ -920,7 +915,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86_ep11_reply *t86r; int len; @@ -967,9 +962,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, - }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -979,15 +972,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1001,7 +994,6 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1017,9 +1009,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, 
- }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -1029,15 +1019,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1051,7 +1041,6 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1061,28 +1050,21 @@ out_free: * Prepare a CCA AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned short **dom) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); } @@ -1097,7 +1079,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ica_xcRB *xcrb, struct ap_message *ap_msg) { - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -1128,11 +1110,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, msg->hdr.fromcardlen1 -= delta; } - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1158,28 +1140,21 @@ out: * Prepare an EP11 AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). 
Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_EP11, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive_ep11; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_EP11; return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code, domain); } @@ -1197,7 +1172,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * { int rc; unsigned int lfmt; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct ep11_cprb cprbx; @@ -1251,11 +1226,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * msg->hdr.fromcardlen1 = zq->reply.bufsize - sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1276,23 +1251,25 @@ out: return rc; } +/* + * Prepare a CEXXC get random request ap message. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_max_msg_size is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
+ */ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); @@ -1319,16 +1296,16 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, short int verb_length; short int key_length; } __packed * msg = ap_msg->msg; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; int rc; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 9e580ef69bda..aaa1eea6149b 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -179,7 +179,7 @@ void ctcmpc_dumpit(char *buf, int len) ctcm_pr_debug(" %s (+%s) : %s [%s]\n", addr, boff, bhex, basc); dup = 0; - strcpy(duphex, bhex); + strscpy(duphex, bhex); } else dup++; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 21fa7ac849e5..4904b831c0a7 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -302,11 +302,17 @@ static struct airq_info *new_airq_info(int index) static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, u64 *first, void **airq_info) { - int i, j; + int i, j, queue_idx, highest_queue_idx = -1; struct airq_info *info; unsigned long *indicator_addr = NULL; unsigned long bit, flags; + /* Array entries without an actual queue pointer must be ignored. */ + for (i = 0; i < nvqs; i++) { + if (vqs[i]) + highest_queue_idx++; + } + for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { mutex_lock(&airq_areas_lock); if (!airq_areas[i]) @@ -316,7 +322,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, if (!info) return NULL; write_lock_irqsave(&info->lock, flags); - bit = airq_iv_alloc(info->aiv, nvqs); + bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1); if (bit == -1UL) { /* Not enough vacancies. 
*/ write_unlock_irqrestore(&info->lock, flags); @@ -325,8 +331,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, *first = bit; *airq_info = info; indicator_addr = info->aiv->vector; - for (j = 0; j < nvqs; j++) { - airq_iv_set_ptr(info->aiv, bit + j, + for (j = 0, queue_idx = 0; j < nvqs; j++) { + if (!vqs[j]) + continue; + airq_iv_set_ptr(info->aiv, bit + queue_idx++, (unsigned long)vqs[j]); } write_unlock_irqrestore(&info->lock, flags); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 5a3c670aec27..5522310bab8d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -403,6 +403,7 @@ config SCSI_ACARD config SCSI_AHA152X tristate "Adaptec AHA152X/2825 support" depends on ISA && SCSI + depends on !HIGHMEM select SCSI_SPI_ATTRS select CHECK_SIGNATURE help @@ -795,6 +796,7 @@ config SCSI_PPA tristate "IOMEGA parallel port (ppa - older drives)" depends on SCSI && PARPORT_PC depends on HAS_IOPORT + depends on !HIGHMEM help This driver supports older versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -822,6 +824,7 @@ config SCSI_PPA config SCSI_IMM tristate "IOMEGA parallel port (imm - newer drives)" depends on SCSI && PARPORT_PC + depends on !HIGHMEM help This driver supports newer versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 4276f868cd91..e94c0a19c435 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -746,7 +746,6 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) /* need to have host registered before triggering any interrupt */ list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list); - shpnt->no_highmem = true; shpnt->io_port = setup->io_port; shpnt->n_io_port = IO_RANGE; shpnt->irq = setup->irq; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 5cb1d3db4907..944cf2fb0561 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -935,8 +935,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work, container_of(work, typeof(*phy), works[event]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct hisi_sas_port *port = phy->port; + struct device *dev = hisi_hba->dev; + struct domain_device *port_dev; int phy_no = sas_phy->id; + if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) && + sas_port && port && (port->id != phy->port_id)) { + dev_info(dev, "phy%d's hw port id changed from %d to %llu\n", + phy_no, port->id, phy->port_id); + port_dev = sas_port->port_dev; + if (port_dev && !dev_is_expander(port_dev->dev_type)) { + /* + * Set the device state to gone to block + * sending IO to the device. 
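+				 * Then trigger a link reset to handle the +				 * hw port id change.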
+ */ + set_bit(SAS_DEV_GONE, &port_dev->state); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + return; + } + } + phy->wait_phyup_cnt = 0; if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index a1fc400ab4c3..1e9830940f84 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = to_hisi_sas_port(sas_port); struct sas_ata_task *ata_task = &task->ata_task; struct sas_tmf_task *tmf = slot->tmf; + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw0, dw1 = 0, dw2 = 0; @@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, /* create header */ /* dw0 */ dw0 = port->id << CMD_HDR_PORT_OFF; - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { dw0 |= 3 << CMD_HDR_CMD_OFF; - else + } else { + phy_id = device->phy->identify.phy_identifier; + dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF; + dw0 |= CMD_HDR_FORCE_PHY_MSK; dw0 |= 4 << CMD_HDR_CMD_OFF; + } if (tmf && ata_task->force_phy) { dw0 |= CMD_HDR_FORCE_PHY_MSK; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 2684d6482067..08dac9ae2f10 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -359,6 +359,10 @@ #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 @@ -1429,15 +1433,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); - else + } else { + phy_id = device->phy->identify.phy_identifier; + hdr->dw0 |= cpu_to_le32((1U << phy_id) + << CMD_HDR_PHY_ID_OFF); + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); + } switch (task->data_dir) { case DMA_TO_DEVICE: diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 1d4c7310f1a6..0821cf994b98 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1224,7 +1224,6 @@ static int __imm_attach(struct parport *pb) host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); if (!host) goto out1; - host->no_highmem = true; host->io_port = pb->base; host->n_io_port = ports; host->dma_channel = -1; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 088cc40ae866..8ee2bfe47571 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -23,8 +23,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.727.03.00-rc1" -#define MEGASAS_RELDATE "Oct 
03, 2023" +#define MEGASAS_VERSION "07.734.00.00-rc1" +#define MEGASAS_RELDATE "Apr 03, 2025" #define MEGASAS_MSIX_NAME_LEN 32 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c20447b39cb9..5e33d411fa3d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2103,6 +2103,9 @@ static int megasas_sdev_configure(struct scsi_device *sdev, /* This sdev property may change post OCR */ megasas_set_dynamic_target_properties(sdev, lim, is_target_prop); + if (!MEGASAS_IS_LOGICAL(sdev)) + sdev->no_vpd_size = 1; + mutex_unlock(&instance->reset_mutex); return 0; @@ -3662,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - cmd->scmd->result = - (DID_ERROR << 16) | hdr->scsi_status; + if (hdr->scsi_status == 0xf0) + cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 721860cb1ef6..a6794f49e9fa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - scmd->result = (DID_ERROR << 16) | ext_status; + if (ext_status == 0xf0) + scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + scmd->result = (DID_ERROR << 16) | ext_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 3fcb1ad3b070..1d7901a8f0e4 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, char *desc = NULL; u16 event; + if (!(mrioc->logging_level & MPI3_DEBUG_EVENT)) + return; + event = event_reply->event; switch (event) { @@ -451,6 +454,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) return 0; } + atomic_set(&mrioc->admin_pend_isr, 0); reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + admin_reply_ci; @@ -565,7 +569,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci)); mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma, reply_qidx); - atomic_dec(&op_reply_q->pend_ios); + if (reply_dma) mpi3mr_repost_reply_buf(mrioc, reply_dma); num_op_reply++; @@ -2925,6 +2929,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) mrioc->admin_reply_ci = 0; mrioc->admin_reply_ephase = 1; atomic_set(&mrioc->admin_reply_q_in_use, 0); + atomic_set(&mrioc->admin_pend_isr, 0); if (!mrioc->admin_req_base) { mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, @@ -4653,6 +4658,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) if (mrioc->admin_reply_base) memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); atomic_set(&mrioc->admin_reply_q_in_use, 0); + atomic_set(&mrioc->admin_pend_isr, 0); if (mrioc->init_cmds.reply) { memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index dc4bd422b601..486db5b2f05d 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -891,7 +891,7 @@ static bool 
myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) status = mmio_init_fn(pdev, base, &mbox); if (status != MYRB_STATUS_SUCCESS) { dev_err(&pdev->dev, - "Failed to enable mailbox, statux %02X\n", + "Failed to enable mailbox, status %02X\n", status); return false; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 183ce00aa671..f7067878b34f 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -766,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; pm8001_free_dev(pm8001_dev); } else { pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index a06329b47851..1ed3171f1797 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -1104,7 +1104,6 @@ static int __ppa_attach(struct parport *pb) host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); if (!host) goto out1; - host->no_highmem = true; host->io_port = pb->base; host->n_io_port = ports; host->dma_channel = -1; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 53daf923ad8e..518a252eb6aa 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -707,26 +707,23 @@ void scsi_cdl_check(struct scsi_device *sdev) */ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) { - struct scsi_mode_data data; - struct scsi_sense_hdr sshdr; - struct scsi_vpd *vpd; - bool is_ata = false; char buf[64]; + bool is_ata; int ret; if (!sdev->cdl_supported) return -EOPNOTSUPP; rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (vpd) - is_ata = true; + is_ata = rcu_dereference(sdev->vpd_pg89); rcu_read_unlock(); /* * For ATA devices, CDL needs to be enabled with a SET FEATURES command. */ if (is_ata) { + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; char *buf_data; int len; @@ -735,16 +732,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) if (ret) return -EINVAL; - /* Enable CDL using the ATA feature page */ + /* Enable or disable CDL using the ATA feature page */ len = min_t(size_t, sizeof(buf), data.length - data.header_length - data.block_descriptor_length); buf_data = buf + data.header_length + data.block_descriptor_length; - if (enable) - buf_data[4] = 0x02; - else - buf_data[4] = 0; + + /* + * If we want to enable CDL and CDL is already enabled on the + * device, do nothing. This avoids needlessly resetting the CDL + * statistics on the device as that is implied by the CDL enable + * action. Similar to this, there is no need to do anything if + * we want to disable CDL and CDL is already disabled. 
+ */ + if (enable) { + if ((buf_data[4] & 0x03) == 0x02) + goto out; + buf_data[4] &= ~0x03; + buf_data[4] |= 0x02; + } else { + if ((buf_data[4] & 0x03) == 0x00) + goto out; + buf_data[4] &= ~0x03; + } ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, &data, &sshdr); @@ -756,6 +767,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) } } +out: sdev->cdl_enable = enable; return 0; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 2fa45556e1ea..0ddc95bafc71 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -601,7 +601,7 @@ static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write, } if (bytes) { - err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO); + err = blk_rq_map_kern(rq, buffer, bytes, GFP_NOIO); if (err) goto error; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0d29470e86b0..144c72f0737a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -313,8 +313,7 @@ retry: return PTR_ERR(req); if (bufflen) { - ret = blk_rq_map_kern(sdev->request_queue, req, - buffer, bufflen, GFP_NOIO); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO); if (ret) goto out; } @@ -1253,8 +1252,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request); */ static void scsi_cleanup_rq(struct request *rq) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->flags = 0; + if (rq->rq_flags & RQF_DONTPREP) { - scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + scsi_mq_uninit_cmd(cmd); rq->rq_flags &= ~RQF_DONTPREP; } } @@ -2000,9 +2003,6 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) lim->dma_alignment = max_t(unsigned int, shost->dma_alignment, dma_get_cache_alignment() - 1); - if (shost->no_highmem) - lim->features |= BLK_FEAT_BOUNCE_HIGH; - /* * Propagate the DMA formation properties to the dma-mapping layer as * a courtesy service to the LLDDs. 
This needs to check that the buses diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9c347c64c315..0b8c91bf793f 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3182,11 +3182,14 @@ iscsi_set_host_param(struct iscsi_transport *transport, } /* see similar check in iscsi_if_set_param() */ - if (strlen(data) > ev->u.set_host_param.len) - return -EINVAL; + if (strlen(data) > ev->u.set_host_param.len) { + err = -EINVAL; + goto out; + } err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); +out: scsi_host_put(shost); return err; } diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 64f6b22e8cc0..aeb58a9e6b7f 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -388,7 +388,7 @@ static void srp_reconnect_work(struct work_struct *work) "reconnect attempt %d failed (%d)\n", ++rport->failed_reconnects, res); delay = rport->reconnect_delay * - min(100, max(1, rport->failed_reconnects - 10)); + clamp(rport->failed_reconnects - 10, 1, 100); if (delay > 0) queue_delayed_work(system_long_wq, &rport->reconnect_work, delay * HZ); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 7a447ff600d2..a8db66428f80 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, unsigned int nr_zones, size_t *buflen) { struct request_queue *q = sdkp->disk->queue; + unsigned int max_segments; size_t bufsize; void *buf; @@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, * Furthermore, since the report zone command cannot be split, make * sure that the allocated buffer can always be mapped by limiting the * number of pages allocated to the HBA max segments limit. + * Since max segments can be larger than the max inline bio vectors, + * further limit the allocated buffer to BIO_MAX_INLINE_VECS. 
*/ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); - bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q)); + bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 88135fdb8bd1..8a26eca4fdc9 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -19,6 +19,7 @@ #include <linux/bcd.h> #include <linux/reboot.h> #include <linux/cciss_ioctl.h> +#include <linux/crash_dump.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -5246,7 +5247,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) ctrl_info->error_buffer_length = ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; - if (reset_devices) + if (is_kdump_kernel()) max_transfer_size = min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE_KDUMP); else @@ -5275,7 +5276,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) u16 num_elements_per_iq; u16 num_elements_per_oq; - if (reset_devices) { + if (is_kdump_kernel()) { num_queue_groups = 1; } else { int num_cpus; @@ -8288,12 +8289,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) u32 product_id; if (reset_devices) { - if (pqi_is_fw_triage_supported(ctrl_info)) { + if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) { rc = sis_wait_for_fw_triage_completion(ctrl_info); if (rc) return rc; } - if (sis_is_ctrl_logging_supported(ctrl_info)) { + if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) { sis_notify_kdump(ctrl_info); rc = sis_wait_for_ctrl_logging_completion(ctrl_info); if (rc) @@ -8344,7 +8345,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->product_id = (u8)product_id; ctrl_info->product_revision = (u8)(product_id >> 8); - if (reset_devices) { + if (is_kdump_kernel()) { if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) ctrl_info->max_outstanding_requests = @@ -8480,7 +8481,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; - if (ctrl_info->ctrl_logging_supported && !reset_devices) { + if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) { pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); } diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 35db061ae3ec..2e6b2412d2c9 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1819,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) return SCSI_MLQUEUE_DEVICE_BUSY; } + payload->rangecount = 1; payload->range.len = length; payload->range.offset = offset_in_hvpg; diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c index cfc0efab27d7..7bbd3f940e4d 100644 --- a/drivers/soc/dove/pmu.c +++ b/drivers/soc/dove/pmu.c @@ -257,10 +257,9 @@ static void pmu_irq_handler(struct irq_desc *desc) * So, let's structure the code so that the window is as small as * possible. 
*/ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); done &= readl_relaxed(base + PMC_IRQ_CAUSE); writel_relaxed(done, base + PMC_IRQ_CAUSE); - irq_gc_unlock(gc); } static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq) diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c index c5661ac19f7b..5f7bdf3bab05 100644 --- a/drivers/soc/samsung/exynos-usi.c +++ b/drivers/soc/samsung/exynos-usi.c @@ -233,7 +233,7 @@ static void exynos_usi_unconfigure(void *data) /* Make sure that we've stopped providing the clock to USI IP */ val = readl(usi->regs + USI_OPTION); val &= ~USI_OPTION_CLKREQ_ON; - val |= ~USI_OPTION_CLKSTOP_ON; + val |= USI_OPTION_CLKSTOP_ON; writel(val, usi->regs + USI_OPTION); /* Set USI block state to reset */ diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 6f8a20014e76..39aecd34c641 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -122,6 +122,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, set_bit(SDW_GROUP13_DEV_NUM, bus->assigned); set_bit(SDW_MASTER_DEV_NUM, bus->assigned); + ret = sdw_irq_create(bus, fwnode); + if (ret) + return ret; + /* * SDW is an enumerable bus, but devices can be powered off. So, * they won't be able to report as present. @@ -138,6 +142,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, if (ret < 0) { dev_err(bus->dev, "Finding slaves failed:%d\n", ret); + sdw_irq_delete(bus); return ret; } @@ -156,10 +161,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, bus->params.curr_bank = SDW_BANK0; bus->params.next_bank = SDW_BANK1; - ret = sdw_irq_create(bus, fwnode); - if (ret) - return ret; - return 0; } EXPORT_SYMBOL(sdw_bus_master_add); diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index 5ea6399e6c9b..10a602d4843a 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -353,9 +353,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev, /* use generic bandwidth allocation algorithm */ sdw->cdns.bus.compute_params = sdw_compute_params; - /* avoid resuming from pm_runtime suspend if it's not required */ - dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); - ret = sdw_bus_master_add(bus, dev, dev->fwnode); if (ret) { dev_err(dev, "sdw_bus_master_add fail: %d\n", ret); @@ -640,7 +637,10 @@ static int __maybe_unused intel_suspend(struct device *dev) return 0; } - if (pm_runtime_suspended(dev)) { + /* Prevent runtime PM from racing with the code below. 
*/ + pm_runtime_disable(dev); + + if (pm_runtime_status_suspended(dev)) { dev_dbg(dev, "pm_runtime status: suspended\n"); clock_stop_quirks = sdw->link_res->clock_stop_quirks; @@ -648,7 +648,7 @@ static int __maybe_unused intel_suspend(struct device *dev) if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || !clock_stop_quirks) { - if (pm_runtime_suspended(dev->parent)) { + if (pm_runtime_status_suspended(dev->parent)) { /* * paranoia check: this should not happen with the .prepare * resume to full power @@ -715,7 +715,6 @@ static int __maybe_unused intel_resume(struct device *dev) struct sdw_cdns *cdns = dev_get_drvdata(dev); struct sdw_intel *sdw = cdns_to_intel(cdns); struct sdw_bus *bus = &cdns->bus; - int link_flags; int ret; if (bus->prop.hw_disabled || !sdw->startup_done) { @@ -724,23 +723,6 @@ static int __maybe_unused intel_resume(struct device *dev) return 0; } - if (pm_runtime_suspended(dev)) { - dev_dbg(dev, "pm_runtime status was suspended, forcing active\n"); - - /* follow required sequence from runtime_pm.rst */ - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_mark_last_busy(dev); - pm_runtime_enable(dev); - - pm_runtime_resume(bus->dev); - - link_flags = md_flags >> (bus->link_id * 8); - - if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) - pm_runtime_idle(dev); - } - ret = sdw_intel_link_power_up(sdw); if (ret) { dev_err(dev, "%s failed: %d\n", __func__, ret); @@ -761,6 +743,14 @@ static int __maybe_unused intel_resume(struct device *dev) } /* + * Runtime PM has been disabled in intel_suspend(), so set the status + * to active because the device has just been resumed and re-enable + * runtime PM. + */ + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + /* * after system resume, the pm_runtime suspend() may kick in * during the enumeration, before any children device force the * master device to remain active. Using pm_runtime_get() diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 067c954cb6ea..863781ba6c16 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. -// Copyright 2020 NXP +// Copyright 2020-2025 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -62,6 +62,7 @@ #define SPI_SR_TFIWF BIT(18) #define SPI_SR_RFDF BIT(17) #define SPI_SR_CMDFFF BIT(16) +#define SPI_SR_TXRXS BIT(30) #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ SPI_SR_TFUF | SPI_SR_TFFF | \ SPI_SR_CMDTCF | SPI_SR_SPEF | \ @@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *transfer; bool cs = false; int status = 0; + u32 val = 0; + bool cs_change = false; message->actual_length = 0; + /* Put DSPI in running mode if halted. 
*/ + regmap_read(dspi->regmap, SPI_MCR, &val); + if (val & SPI_MCR_HALT) { + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + !(val & SPI_SR_TXRXS)) + ; + } + list_for_each_entry(transfer, &message->transfers, transfer_list) { dspi->cur_transfer = transfer; dspi->cur_msg = message; @@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; } + cs_change = transfer->cs_change; dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; dspi->len = transfer->len; @@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); + regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq); @@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi_deassert_cs(spi, &cs); } + if (status || !cs_change) { + /* Put DSPI in stop mode */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_HALT, SPI_MCR_HALT); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + val & SPI_SR_TXRXS) + ; + } + message->status = status; spi_finalize_current_message(ctlr); @@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); +static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), +}; + static const struct regmap_range dspi_volatile_ranges[] = { regmap_reg_range(SPI_MCR, SPI_TCR), regmap_reg_range(SPI_SR, SPI_SR), @@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = { .reg_stride = 4, .max_register = 0x88, .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }; static const struct regmap_range dspi_xspi_volatile_ranges[] = { @@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { .reg_stride = 4, .max_register = 0x13c, .volatile_table = &dspi_xspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }, { .name = "pushr", @@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi) if (!spi_controller_is_target(dspi->ctlr)) mcr |= SPI_MCR_HOST; + mcr |= SPI_MCR_HALT; + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 5c59fddb32c1..b5ecffcaf795 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -949,24 +949,20 @@ static int fsl_qspi_probe(struct platform_device *pdev) ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); if (ret) - goto err_destroy_mutex; + goto err_put_ctrl; ret = devm_spi_register_controller(dev, ctlr); if (ret) - goto err_destroy_mutex; + goto err_put_ctrl; return 0; -err_destroy_mutex: - mutex_destroy(&q->lock); - err_disable_clk: fsl_qspi_clk_disable_unprep(q); err_put_ctrl: spi_controller_put(ctlr); - dev_err(dev, "Freescale QuadSPI probe failed\n"); return ret; } diff --git a/drivers/spi/spi-imx.c 
b/drivers/spi/spi-imx.c index 832d6e9009eb..c93d80a4d734 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { + int ret; struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); - spi_imx_setupxfer(spi, transfer); + ret = spi_imx_setupxfer(spi, transfer); + if (ret < 0) + return ret; transfer->effective_speed_hz = spi_imx->spi_bus_clk; /* flush rxfifo before transfer */ diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 31a878d9458d..7740f94847a8 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -420,7 +420,7 @@ MODULE_LICENSE("GPL"); static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) { /* limit the hex_dump */ - if (len < 1024) { + if (len <= 1024) { print_hex_dump(KERN_INFO, pre, DUMP_PREFIX_OFFSET, 16, 1, ptr, len, 0); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index a31a1db07aa4..5db0639d3b01 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -596,7 +596,11 @@ u64 spi_mem_calc_op_duration(struct spi_mem_op *op) ns_per_cycles = 1000000000 / op->max_freq; ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1); ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1); - ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + + /* Dummy bytes are optional for some SPI flash memory operations */ + if (op->dummy.nbytes) + ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1); return ncycles * ns_per_cycles; diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index 17eb67e19132..94948c8781e8 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -142,7 +142,7 @@ static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, else if (reg == NAND_READ_LOCATION_1) snandc->regs->read_location1 = locreg_val; else if (reg == NAND_READ_LOCATION_2) - snandc->regs->read_location1 = locreg_val; + snandc->regs->read_location2 = locreg_val; else if (reg == NAND_READ_LOCATION_3) snandc->regs->read_location3 = locreg_val; } @@ -1307,8 +1307,7 @@ static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); snandc->qspi->cmd = cpu_to_le32(cmd); - qcom_spi_block_erase(snandc); - return 0; + return qcom_spi_block_erase(snandc); default: break; } diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c index 668022098b1e..9ec9823409cc 100644 --- a/drivers/spi/spi-stm32-ospi.c +++ b/drivers/spi/spi-stm32-ospi.c @@ -960,6 +960,10 @@ err_pm_resume: err_pm_enable: pm_runtime_force_suspend(ospi->dev); mutex_destroy(&ospi->lock); + if (ospi->dma_chtx) + dma_release_channel(ospi->dma_chtx); + if (ospi->dma_chrx) + dma_release_channel(ospi->dma_chrx); return ret; } diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index fcbe864c9b7d..aa92fd5a35a9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, else reg |= SUN4I_CTL_DHB; + /* Now that the settings are correct, enable the interface */ + reg |= SUN4I_CTL_ENABLE; + sun4i_spi_write(sspi, 
SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ @@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) } sun4i_spi_write(sspi, SUN4I_CTL_REG, - SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); + SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; @@ -462,6 +465,7 @@ static int sun4i_spi_probe(struct platform_device *pdev) sspi->host = host; host->max_speed_hz = 100 * 1000 * 1000; host->min_speed_hz = 3 * 1000; + host->use_gpio_descriptors = true; host->set_cs = sun4i_spi_set_cs; host->transfer_one = sun4i_spi_transfer_one; host->num_chipselect = 4; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 3822d7c8d8ed..795a8482c2c7 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) u32 inactive_cycles; u8 cs_state; - if (setup->unit != SPI_DELAY_UNIT_SCK || - hold->unit != SPI_DELAY_UNIT_SCK || - inactive->unit != SPI_DELAY_UNIT_SCK) { + if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) || + (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) || + (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) { dev_err(&spi->dev, "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", SPI_DELAY_UNIT_SCK); diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 08e49a876894..64e1b2f8a000 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -1117,9 +1117,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, (&tqspi->xfer_completion, QSPI_DMA_TIMEOUT); - if (WARN_ON(ret == 0)) { - dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n", - ret); + if (WARN_ON_ONCE(ret == 0)) { + dev_err_ratelimited(tqspi->dev, + "QSPI Transfer failed with timeout\n"); if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) dmaengine_terminate_all diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c index 7540c20090c7..351f983ef914 100644 --- a/drivers/staging/axis-fifo/axis-fifo.c +++ b/drivers/staging/axis-fifo/axis-fifo.c @@ -393,16 +393,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET); if (!bytes_available) { - dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n"); - reset_ip_core(fifo); + dev_err(fifo->dt_device, "received a packet of length 0\n"); ret = -EIO; goto end_unlock; } if (bytes_available > len) { - dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n", + dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n", bytes_available, len); - reset_ip_core(fifo); ret = -EINVAL; goto end_unlock; } @@ -411,8 +409,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, /* this probably can't happen unless IP * registers were previously mishandled */ - dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n"); - reset_ip_core(fifo); + dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n"); ret = -EIO; goto end_unlock; } @@ -433,7 +430,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, if (copy_to_user(buf + copied * sizeof(u32), tmp_buf, copy * sizeof(u32))) { - reset_ip_core(fifo); ret = -EFAULT; goto end_unlock; } @@ -542,7 +538,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user 
*buf, if (copy_from_user(tmp_buf, buf + copied * sizeof(u32), copy * sizeof(u32))) { - reset_ip_core(fifo); ret = -EFAULT; goto end_unlock; } @@ -775,9 +770,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo) goto end; } - /* IP sets TDFV to fifo depth - 4 so we will do the same */ - fifo->tx_fifo_depth -= 4; - ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo); if (ret) { dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n"); diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c index 6c14d7bcdd67..081b17f49863 100644 --- a/drivers/staging/iio/adc/ad7816.c +++ b/drivers/staging/iio/adc/ad7816.c @@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7816_chip_info *chip = iio_priv(indio_dev); - if (strcmp(buf, "full")) { + if (strcmp(buf, "full") == 0) { gpiod_set_value(chip->rdwr_pin, 1); chip->mode = AD7816_FULL; } else { diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c index b839b50ac26a..fa7ea4ca4c36 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c @@ -1900,6 +1900,7 @@ static int bcm2835_mmal_probe(struct vchiq_device *device) __func__, ret); goto free_dev; } + dev->v4l2_dev.dev = &device->dev; /* setup v4l controls */ ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1244ef3aa86c..620ba6e0ab07 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4263,8 +4263,8 @@ int iscsit_close_connection( spin_unlock(&iscsit_global->ts_bitmap_lock); iscsit_stop_timers_for_cmds(conn); - iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); + iscsit_stop_nopin_response_timer(conn); if (conn->conn_transport->iscsit_wait_conn) conn->conn_transport->iscsit_wait_conn(conn); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index c868d8b7bd1c..57cf46f69669 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/thermal.h> +#include <asm/msr.h> #include "int340x_thermal_zone.h" #include "processor_thermal_device.h" #include "../intel_soc_dts_iosf.h" @@ -153,7 +154,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev, u64 val; int err; - err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); + err = rdmsrq_safe(MSR_PLATFORM_INFO, &val); if (err) return err; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index a55aaa8cef42..2097aae39946 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -485,7 +485,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) }, { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT | - PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR | + 
PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) }, { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | @@ -495,8 +495,9 @@ static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) }, { PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL | - PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_MSI_SUPPORT | - PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) }, + PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS | + PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT | + PROC_THERMAL_FEATURE_POWER_FLOOR) }, { }, }; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c index dad63f2d5f90..3a028b78d9af 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c @@ -166,15 +166,18 @@ static const struct mmio_reg adl_dvfs_mmio_regs[] = { { 0, 0x5A40, 1, 0x1, 0}, /* rfi_disable */ }; +static const struct mapping_table *dlvr_mapping; +static const struct mmio_reg *dlvr_mmio_regs_table; + #define RFIM_SHOW(suffix, table)\ static ssize_t suffix##_show(struct device *dev,\ struct device_attribute *attr,\ char *buf)\ {\ - const struct mapping_table *mapping = NULL;\ + const struct mmio_reg *mmio_regs = dlvr_mmio_regs_table;\ + const struct mapping_table *mapping = dlvr_mapping;\ struct proc_thermal_device *proc_priv;\ struct pci_dev *pdev = to_pci_dev(dev);\ - const struct mmio_reg *mmio_regs;\ const char **match_strs;\ int ret, err;\ u32 reg_val;\ @@ -186,12 +189,6 @@ static ssize_t suffix##_show(struct device *dev,\ mmio_regs = adl_dvfs_mmio_regs;\ } else if (table == 2) { \ match_strs = (const char **)dlvr_strings;\ - if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\ - mmio_regs = lnl_dlvr_mmio_regs;\ - mapping = lnl_dlvr_mapping;\ - } else {\ - mmio_regs = dlvr_mmio_regs;\ - } \ } else {\ match_strs = (const char **)fivr_strings;\ mmio_regs = tgl_fivr_mmio_regs;\ @@ -214,12 +211,12 @@ static ssize_t suffix##_store(struct device *dev,\ struct device_attribute *attr,\ const char *buf, size_t count)\ {\ - const struct mapping_table *mapping = NULL;\ + const struct mmio_reg *mmio_regs = dlvr_mmio_regs_table;\ + const struct mapping_table *mapping = dlvr_mapping;\ struct proc_thermal_device *proc_priv;\ struct pci_dev *pdev = to_pci_dev(dev);\ unsigned int input;\ const char **match_strs;\ - const struct mmio_reg *mmio_regs;\ int ret, err;\ u32 reg_val;\ u32 mask;\ @@ -230,12 +227,6 @@ static ssize_t suffix##_store(struct device *dev,\ mmio_regs = adl_dvfs_mmio_regs;\ } else if (table == 2) { \ match_strs = (const char **)dlvr_strings;\ - if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\ - mmio_regs = lnl_dlvr_mmio_regs;\ - mapping = lnl_dlvr_mapping;\ - } else {\ - mmio_regs = dlvr_mmio_regs;\ - } \ } else {\ match_strs = (const char **)fivr_strings;\ mmio_regs = tgl_fivr_mmio_regs;\ @@ -448,6 +439,16 @@ int proc_thermal_rfim_add(struct pci_dev *pdev, struct proc_thermal_device *proc } if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR) { + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_LNLM_THERMAL: + case 
PCI_DEVICE_ID_INTEL_PTL_THERMAL: + dlvr_mmio_regs_table = lnl_dlvr_mmio_regs; + dlvr_mapping = lnl_dlvr_mapping; + break; + default: + dlvr_mmio_regs_table = dlvr_mmio_regs; + break; + } ret = sysfs_create_group(&pdev->dev.kobj, &dlvr_attribute_group); if (ret) return ret; diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c index 5b18a46a10b0..bd2fca7dc017 100644 --- a/drivers/thermal/intel/intel_hfi.c +++ b/drivers/thermal/intel/intel_hfi.c @@ -284,7 +284,7 @@ void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) if (!raw_spin_trylock(&hfi_instance->event_lock)) return; - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr); + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr); hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED; if (!hfi) { raw_spin_unlock(&hfi_instance->event_lock); @@ -356,9 +356,9 @@ static void hfi_enable(void) { u64 msr_val; - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); } static void hfi_set_hw_table(struct hfi_instance *hfi_instance) @@ -368,7 +368,7 @@ static void hfi_set_hw_table(struct hfi_instance *hfi_instance) hw_table_pa = virt_to_phys(hfi_instance->hw_table); msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); + wrmsrq(MSR_IA32_HW_FEEDBACK_PTR, msr_val); } /* Caller must hold hfi_instance_lock. */ @@ -377,9 +377,9 @@ static void hfi_disable(void) u64 msr_val; int i; - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); /* * Wait for hardware to acknowledge the disabling of HFI. Some @@ -388,7 +388,7 @@ static void hfi_disable(void) * memory. 
*/ for (i = 0; i < 2000; i++) { - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED) break; diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 96a24df79686..9a4cec000910 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -340,7 +340,7 @@ static bool has_pkg_state_counter(void) /* check if any one of the counter msrs exists */ while (info->msr_index) { - if (!rdmsrl_safe(info->msr_index, &val)) + if (!rdmsrq_safe(info->msr_index, &val)) return true; info++; } @@ -356,7 +356,7 @@ static u64 pkg_state_counter(void) while (info->msr_index) { if (!info->skip) { - if (!rdmsrl_safe(info->msr_index, &val)) + if (!rdmsrq_safe(info->msr_index, &val)) count += val; else info->skip = true; diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c index 9ff0ebdde0ef..f352ecafbedf 100644 --- a/drivers/thermal/intel/intel_tcc_cooling.c +++ b/drivers/thermal/intel/intel_tcc_cooling.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/thermal.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #define TCC_PROGRAMMABLE BIT(30) #define TCC_LOCKED BIT(31) @@ -81,14 +82,14 @@ static int __init tcc_cooling_init(void) if (!id) return -ENODEV; - err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); + err = rdmsrq_safe(MSR_PLATFORM_INFO, &val); if (err) return err; if (!(val & TCC_PROGRAMMABLE)) return -ENODEV; - err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val); + err = rdmsrq_safe(MSR_IA32_TEMPERATURE_TARGET, &val); if (err) return err; diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c index e69868e868eb..debc94e2dc16 100644 --- a/drivers/thermal/intel/therm_throt.c +++ b/drivers/thermal/intel/therm_throt.c @@ -273,7 +273,7 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask) } msr_val &= ~bit_mask; - wrmsrl(msr, msr_val); + wrmsrq(msr, msr_val); } EXPORT_SYMBOL_GPL(thermal_clear_package_intr_status); @@ -287,7 +287,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp) else msr = MSR_IA32_PACKAGE_THERM_STATUS; - rdmsrl(msr, msr_val); + rdmsrq(msr, msr_val); if (msr_val & THERM_STATUS_PROCHOT_LOG) *proc_hot = true; else @@ -643,7 +643,7 @@ static void notify_thresholds(__u64 msr_val) void __weak notify_hwp_interrupt(void) { - wrmsrl_safe(MSR_HWP_STATUS, 0); + wrmsrq_safe(MSR_HWP_STATUS, 0); } /* Thermal transition interrupt handler */ @@ -654,7 +654,7 @@ void intel_thermal_interrupt(void) if (static_cpu_has(X86_FEATURE_HWP)) notify_hwp_interrupt(); - rdmsrl(MSR_IA32_THERM_STATUS, msr_val); + rdmsrq(MSR_IA32_THERM_STATUS, msr_val); /* Check for violation of core thermal thresholds*/ notify_thresholds(msr_val); @@ -669,7 +669,7 @@ void intel_thermal_interrupt(void) CORE_LEVEL); if (this_cpu_has(X86_FEATURE_PTS)) { - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); /* check violations of package thermal thresholds */ notify_package_thresholds(msr_val); therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 496abf8e55e0..3fc679b6f11b 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -20,6 +20,7 @@ #include <linux/debugfs.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include 
"thermal_interrupt.h" @@ -329,6 +330,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) tj_max = intel_tcc_get_tjmax(cpu); if (tj_max < 0) return tj_max; + tj_max *= 1000; zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL); if (!zonedev) diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 1b137e068444..3449945493ce 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1746,6 +1746,12 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device, if (!device->port.membase) return -ENODEV; + /* Disable DM / single-character modes */ + msm_write(&device->port, 0, UARTDM_DMEN); + msm_write(&device->port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR); + msm_write(&device->port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR); + msm_write(&device->port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR); + device->con->write = msm_serial_early_write_dm; return 0; } diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 5904a2d4cefa..054a8e630ace 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -563,8 +563,11 @@ static void sifive_serial_break_ctl(struct uart_port *port, int break_state) static int sifive_serial_startup(struct uart_port *port) { struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + unsigned long flags; + uart_port_lock_irqsave(&ssp->port, &flags); __ssp_enable_rxwm(ssp); + uart_port_unlock_irqrestore(&ssp->port, flags); return 0; } @@ -572,9 +575,12 @@ static int sifive_serial_startup(struct uart_port *port) static void sifive_serial_shutdown(struct uart_port *port) { struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + unsigned long flags; + uart_port_lock_irqsave(&ssp->port, &flags); __ssp_disable_rxwm(ssp); __ssp_disable_txwm(ssp); + uart_port_unlock_irqrestore(&ssp->port, flags); } /** diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index 0bd6544e30a6..791e2f1f7c0b 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -193,13 +193,12 @@ int set_selection_user(const struct tiocl_selection __user *sel, return -EFAULT; /* - * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to - * use without CAP_SYS_ADMIN as they do not modify the selection. + * TIOCL_SELCLEAR and TIOCL_SELPOINTER are OK to use without + * CAP_SYS_ADMIN as they do not modify the selection. */ switch (v.sel_mode) { case TIOCL_SELCLEAR: case TIOCL_SELPOINTER: - case TIOCL_SELMOUSEREPORT: break; default: if (!capable(CAP_SYS_ADMIN)) diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 240ce135bbfb..f1294c29f484 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) unsigned long flags; int err; - if (!ufshcd_cmd_inflight(lrbp->cmd)) { - dev_err(hba->dev, - "%s: skip abort. cmd at tag %d already completed.\n", - __func__, tag); - return FAILED; - } - /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", @@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) } hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + if (!hwq) { + dev_err(hba->dev, "%s: skip abort. 
cmd at tag %d already completed.\n", + __func__, tag); + return FAILED; + } if (ufshcd_mcq_sqe_search(hba, hwq, tag)) { /* diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 90b5ab60f5ae..634cf163f4cb 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -466,6 +466,56 @@ static ssize_t critical_health_show(struct device *dev, return sysfs_emit(buf, "%d\n", hba->critical_health_count); } +static ssize_t device_lvl_exception_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (hba->dev_info.wspecversion < 0x410) + return -EOPNOTSUPP; + + return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count)); +} + +static ssize_t device_lvl_exception_count_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned int value; + + if (kstrtouint(buf, 0, &value)) + return -EINVAL; + + /* the only supported usecase is to reset the dev_lvl_exception_count */ + if (value) + return -EINVAL; + + atomic_set(&hba->dev_lvl_exception_count, 0); + + return count; +} + +static ssize_t device_lvl_exception_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u64 exception_id; + int err; + + ufshcd_rpm_get_sync(hba); + err = ufshcd_read_device_lvl_exception_id(hba, &exception_id); + ufshcd_rpm_put_sync(hba); + + if (err) + return err; + + hba->dev_lvl_exception_id = exception_id; + return sysfs_emit(buf, "%llu\n", exception_id); +} + static DEVICE_ATTR_RW(rpm_lvl); static DEVICE_ATTR_RO(rpm_target_dev_state); static DEVICE_ATTR_RO(rpm_target_link_state); @@ -479,6 +529,8 @@ static DEVICE_ATTR_RW(wb_flush_threshold); static DEVICE_ATTR_RW(rtc_update_ms); static DEVICE_ATTR_RW(pm_qos_enable); static DEVICE_ATTR_RO(critical_health); +static DEVICE_ATTR_RW(device_lvl_exception_count); +static DEVICE_ATTR_RO(device_lvl_exception_id); static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_rpm_lvl.attr, @@ -494,6 +546,8 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_rtc_update_ms.attr, &dev_attr_pm_qos_enable.attr, &dev_attr_critical_health.attr, + &dev_attr_device_lvl_exception_count.attr, + &dev_attr_device_lvl_exception_id.attr, NULL }; diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 10b4a19a70f1..d0a2c963a27d 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, enum query_opcode desc_op); int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); +int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id); /* Wrapper functions for safely calling variant operations */ static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 0534390c2a35..7735421e3991 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = { .model = UFS_ANY_MODEL, .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | + UFS_DEVICE_QUIRK_PA_HIBER8TIME | UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, { .wmanufacturerid = UFS_VENDOR_SKHYNIX, .model = UFS_ANY_MODEL, @@ -3176,16 +3177,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, int err; retry: - time_left = 
wait_for_completion_timeout(hba->dev_cmd.complete, + time_left = wait_for_completion_timeout(&hba->dev_cmd.complete, time_left); if (likely(time_left)) { - /* - * The completion handler called complete() and the caller of - * this function still owns the @lrbp tag so the code below does - * not trigger any race conditions. - */ - hba->dev_cmd.complete = NULL; err = ufshcd_get_tr_ocs(lrbp, NULL); if (!err) err = ufshcd_dev_cmd_completion(hba, lrbp); @@ -3199,7 +3194,6 @@ retry: /* successfully cleared the command, retry if needed */ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) err = -EAGAIN; - hba->dev_cmd.complete = NULL; return err; } @@ -3215,11 +3209,9 @@ retry: spin_lock_irqsave(&hba->outstanding_lock, flags); pending = test_bit(lrbp->task_tag, &hba->outstanding_reqs); - if (pending) { - hba->dev_cmd.complete = NULL; + if (pending) __clear_bit(lrbp->task_tag, &hba->outstanding_reqs); - } spin_unlock_irqrestore(&hba->outstanding_lock, flags); if (!pending) { @@ -3237,8 +3229,6 @@ retry: spin_lock_irqsave(&hba->outstanding_lock, flags); pending = test_bit(lrbp->task_tag, &hba->outstanding_reqs); - if (pending) - hba->dev_cmd.complete = NULL; spin_unlock_irqrestore(&hba->outstanding_lock, flags); if (!pending) { @@ -3272,13 +3262,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba) static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, const u32 tag, int timeout) { - DECLARE_COMPLETION_ONSTACK(wait); int err; - hba->dev_cmd.complete = &wait; - ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - ufshcd_send_command(hba, tag, hba->dev_cmd_queue); err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); @@ -5585,12 +5571,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, ufshcd_release_scsi_cmd(hba, lrbp); /* Do not touch lrbp after scsi done */ scsi_done(cmd); - } else if (hba->dev_cmd.complete) { + } else { if (cqe) { ocs = le32_to_cpu(cqe->status) & MASK_OCS; lrbp->utr_descriptor_ptr->header.ocs = ocs; } - complete(hba->dev_cmd.complete); + complete(&hba->dev_cmd.complete); } } @@ -5692,6 +5678,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, continue; hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + if (!hwq) + continue; if (force_compl) { ufshcd_mcq_compl_all_cqes_lock(hba, hwq); @@ -6013,6 +6001,42 @@ out: __func__, err); } +int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id) +{ + struct utp_upiu_query_v4_0 *upiu_resp; + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; + int err; + + if (hba->dev_info.wspecversion < 0x410) + return -EOPNOTSUPP; + + ufshcd_hold(hba); + mutex_lock(&hba->dev_cmd.lock); + + ufshcd_init_query(hba, &request, &response, + UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0); + + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, "%s: failed to read device level exception %d\n", + __func__, err); + goto out; + } + + upiu_resp = (struct utp_upiu_query_v4_0 *)response; + *exception_id = get_unaligned_be64(&upiu_resp->osf3); +out: + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + + return err; +} + static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) { u8 index; @@ -6083,7 +6107,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) return ret; } -static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, +static 
bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba, u32 avail_buf) { u32 cur_buf; @@ -6165,15 +6189,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba) } /* - * The ufs device needs the vcc to be ON to flush. * With user-space reduction enabled, it's enough to enable flush * by checking only the available buffer. The threshold * defined here is > 90% full. * With user-space preserved enabled, the current-buffer * should be checked too because the wb buffer size can reduce * when disk tends to be full. This info is provided by current - * buffer (dCurrentWriteBoosterBufferSize). There's no point in - * keeping vcc on when current buffer is empty. + * buffer (dCurrentWriteBoosterBufferSize). */ index = ufshcd_wb_get_query_index(hba); ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, @@ -6188,7 +6210,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba) if (!hba->dev_info.b_presrv_uspc_en) return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10); - return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); + return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf); } static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work) @@ -6240,6 +6262,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work) sysfs_notify(&hba->dev->kobj, NULL, "critical_health"); } + if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) { + atomic_inc(&hba->dev_lvl_exception_count); + sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count"); + } + ufs_debugfs_exception_event(hba, status); } @@ -7238,8 +7265,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, err = -EINVAL; } } - ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, - (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); return err; } @@ -8139,6 +8164,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) } } +static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf) +{ + u32 ext_ufs_feature; + + if (hba->dev_info.wspecversion < 0x410) + return; + + ext_ufs_feature = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP)) + return; + + atomic_set(&hba->dev_lvl_exception_count, 0); + ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION); +} + static void ufshcd_set_rtt(struct ufs_hba *hba) { struct ufs_dev_info *dev_info = &hba->dev_info; @@ -8339,6 +8380,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba) ufs_init_rtc(hba, desc_buf); + ufshcd_device_lvl_exception_probe(hba, desc_buf); + /* * ufshcd_read_string_desc returns size of the string * reset the error value @@ -8428,6 +8471,31 @@ out: return ret; } +/** + * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME. + * @hba: per-adapter instance + * + * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter + * to ensure proper hibernation timing. This function retrieves the current + * PA_HIBERN8TIME value and increments it by 100us. 
+ */ +static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba) +{ + u32 pa_h8time; + int ret; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time); + if (ret) { + dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret); + return; + } + + /* Increment by 1 to increase hibernation time by 100 µs */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1); + if (ret) + dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret); +} + static void ufshcd_tune_unipro_params(struct ufs_hba *hba) { ufshcd_vops_apply_dev_quirks(hba); @@ -8438,6 +8506,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) ufshcd_quirk_tune_host_pa_tactivate(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME) + ufshcd_quirk_override_pa_h8time(hba); } static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) @@ -10490,6 +10561,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE); + init_completion(&hba->dev_cmd.complete); + err = ufshcd_hba_init(hba); if (err) goto out_error; diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index d7539cda97da..3e545af536e5 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -34,7 +34,7 @@ * Exynos's Vendor specific registers for UFSHCI */ #define HCI_TXPRDT_ENTRY_SIZE 0x00 -#define PRDT_PREFECT_EN BIT(31) +#define PRDT_PREFETCH_EN BIT(31) #define HCI_RXPRDT_ENTRY_SIZE 0x04 #define HCI_1US_TO_CNT_VAL 0x0C #define CNT_VAL_1US_MASK 0x3FF @@ -92,11 +92,16 @@ UIC_TRANSPORT_NO_CONNECTION_RX |\ UIC_TRANSPORT_BAD_TC) -/* FSYS UFS Shareability */ -#define UFS_WR_SHARABLE BIT(2) -#define UFS_RD_SHARABLE BIT(1) -#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE) -#define UFS_SHAREABILITY_OFFSET 0x710 +/* UFS Shareability */ +#define UFS_EXYNOSAUTO_WR_SHARABLE BIT(2) +#define UFS_EXYNOSAUTO_RD_SHARABLE BIT(1) +#define UFS_EXYNOSAUTO_SHARABLE (UFS_EXYNOSAUTO_WR_SHARABLE | \ + UFS_EXYNOSAUTO_RD_SHARABLE) +#define UFS_GS101_WR_SHARABLE BIT(1) +#define UFS_GS101_RD_SHARABLE BIT(0) +#define UFS_GS101_SHARABLE (UFS_GS101_WR_SHARABLE | \ + UFS_GS101_RD_SHARABLE) +#define UFS_SHAREABILITY_OFFSET 0x710 /* Multi-host registers */ #define MHCTRL 0xC4 @@ -209,8 +214,8 @@ static int exynos_ufs_shareability(struct exynos_ufs *ufs) /* IO Coherency setting */ if (ufs->sysreg) { return regmap_update_bits(ufs->sysreg, - ufs->shareability_reg_offset, - UFS_SHARABLE, UFS_SHARABLE); + ufs->iocc_offset, + ufs->iocc_mask, ufs->iocc_val); } return 0; @@ -957,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs) } phy_set_bus_width(generic_phy, ufs->avail_ln_rx); + + if (generic_phy->power_count) { + phy_power_off(generic_phy); + phy_exit(generic_phy); + } + ret = phy_init(generic_phy); if (ret) { dev_err(hba->dev, "%s: phy init failed, ret = %d\n", @@ -1049,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4); exynos_ufs_set_unipro_pclk_div(ufs); + exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); + /* unipro */ exynos_ufs_config_unipro(ufs); + if (ufs->drv_data->pre_link) + ufs->drv_data->pre_link(ufs); + /* m-phy */ exynos_ufs_phy_init(ufs); if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { @@ -1059,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) exynos_ufs_config_phy_cap_attr(ufs); } - exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); - - if 
(ufs->drv_data->pre_link) - ufs->drv_data->pre_link(ufs); - return 0; } @@ -1087,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct phy *generic_phy = ufs->phy; struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + u32 val = ilog2(DATA_UNIT_SIZE); exynos_ufs_establish_connt(ufs); exynos_ufs_fit_aggr_timeout(ufs); hci_writel(ufs, 0xa, HCI_DATA_REORDER); - hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE); + + if (hba->caps & UFSHCD_CAP_CRYPTO) + val |= PRDT_PREFETCH_EN; + hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE); + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); @@ -1168,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) ufs->sysreg = NULL; else { if (of_property_read_u32_index(np, "samsung,sysreg", 1, - &ufs->shareability_reg_offset)) { + &ufs->iocc_offset)) { dev_warn(dev, "can't get an offset from sysreg. Set to default value\n"); - ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET; + ufs->iocc_offset = UFS_SHAREABILITY_OFFSET; } } + ufs->iocc_mask = ufs->drv_data->iocc_mask; + /* + * no 'dma-coherent' property means the descriptors are + * non-cacheable so iocc shareability should be disabled. + */ + if (of_dma_is_coherent(dev->of_node)) + ufs->iocc_val = ufs->iocc_mask; + else + ufs->iocc_val = 0; + ufs->pclk_avail_min = PCLK_AVAIL_MIN; ufs->pclk_avail_max = PCLK_AVAIL_MAX; @@ -1497,6 +1523,14 @@ out: return ret; } +static void exynos_ufs_exit(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + phy_power_off(ufs->phy); + phy_exit(ufs->phy); +} + static int exynos_ufs_host_reset(struct ufs_hba *hba) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); @@ -1667,6 +1701,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, } } +static int gs101_ufs_suspend(struct exynos_ufs *ufs) +{ + hci_writel(ufs, 0 << 0, HCI_GPIO_OUT); + return 0; +} + static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, enum ufs_notify_change_status status) { @@ -1675,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, if (status == PRE_CHANGE) return 0; + if (ufs->drv_data->suspend) + ufs->drv_data->suspend(ufs); + if (!ufshcd_is_link_active(hba)) phy_power_off(ufs->phy); @@ -1952,6 +1995,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs, static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .name = "exynos_ufs", .init = exynos_ufs_init, + .exit = exynos_ufs_exit, .hce_enable_notify = exynos_ufs_hce_enable_notify, .link_startup_notify = exynos_ufs_link_startup_notify, .pwr_change_notify = exynos_ufs_pwr_change_notify, @@ -1990,13 +2034,7 @@ static int exynos_ufs_probe(struct platform_device *pdev) static void exynos_ufs_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - struct exynos_ufs *ufs = ufshcd_get_variant(hba); - ufshcd_pltfrm_remove(pdev); - - phy_power_off(ufs->phy); - phy_exit(ufs->phy); } static struct exynos_ufs_uic_attr exynos7_uic_attr = { @@ -2035,6 +2073,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = { .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, + .iocc_mask = UFS_EXYNOSAUTO_SHARABLE, .drv_init = exynosauto_ufs_drv_init, .post_hce_enable = exynosauto_ufs_post_hce_enable, 
.pre_link = exynosauto_ufs_pre_link, @@ -2136,10 +2175,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = { .opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | EXYNOS_UFS_OPT_UFSPR_SECURE | EXYNOS_UFS_OPT_TIMER_TICK_SELECT, + .iocc_mask = UFS_GS101_SHARABLE, .drv_init = gs101_ufs_drv_init, .pre_link = gs101_ufs_pre_link, .post_link = gs101_ufs_post_link, .pre_pwr_change = gs101_ufs_pre_pwr_change, + .suspend = gs101_ufs_suspend, }; static const struct of_device_id exynos_ufs_of_match[] = { diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h index aac517276189..abe7e472759e 100644 --- a/drivers/ufs/host/ufs-exynos.h +++ b/drivers/ufs/host/ufs-exynos.h @@ -181,6 +181,7 @@ struct exynos_ufs_drv_data { struct exynos_ufs_uic_attr *uic_attr; unsigned int quirks; unsigned int opts; + u32 iocc_mask; /* SoC's specific operations */ int (*drv_init)(struct exynos_ufs *ufs); int (*pre_link)(struct exynos_ufs *ufs); @@ -191,6 +192,7 @@ struct exynos_ufs_drv_data { const struct ufs_pa_layer_attr *pwr); int (*pre_hce_enable)(struct exynos_ufs *ufs); int (*post_hce_enable)(struct exynos_ufs *ufs); + int (*suspend)(struct exynos_ufs *ufs); }; struct ufs_phy_time_cfg { @@ -230,7 +232,9 @@ struct exynos_ufs { ktime_t entry_hibern8_t; const struct exynos_ufs_drv_data *drv_data; struct regmap *sysreg; - u32 shareability_reg_offset; + u32 iocc_offset; + u32 iocc_mask; + u32 iocc_val; u32 opts; #define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 1b37449fbffc..c0761ccc1381 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -33,6 +33,10 @@ ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT) #define MCQ_QCFG_SIZE 0x40 +/* De-emphasis for gear-5 */ +#define DEEMPHASIS_3_5_dB 0x04 +#define NO_DEEMPHASIS 0x0 + enum { TSTBUS_UAWM, TSTBUS_UARM, @@ -795,6 +799,23 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host) return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw); } +static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes) +{ + u32 equalizer_val; + int ret, i; + + /* Determine the equalizer value based on the gear */ + equalizer_val = (gear == 5) ? 
DEEMPHASIS_3_5_dB : NO_DEEMPHASIS; + + for (i = 0; i < tx_lanes; i++) { + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i), + equalizer_val); + if (ret) + dev_err(hba->dev, "%s: failed equalizer lane %d\n", + __func__, i); + } +} + static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, const struct ufs_pa_layer_attr *dev_max_params, @@ -846,6 +867,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, dev_req_params->gear_tx, PA_INITIAL_ADAPT); } + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING) + ufs_qcom_set_tx_hs_equalizer(hba, + dev_req_params->gear_tx, dev_req_params->lane_tx); + break; case POST_CHANGE: if (ufs_qcom_cfg_timers(hba, false)) { @@ -893,6 +919,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) (pa_vs_config_reg1 | (1 << 12))); } +static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH), + PA_TX_HSG1_SYNC_LENGTH_VAL); + if (err) + dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err); +} + static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) { int err = 0; @@ -900,6 +936,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH) + ufs_qcom_override_pa_tx_hsg1_sync_len(hba); + return err; } @@ -914,6 +953,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = { { .wmanufacturerid = UFS_VENDOR_WDC, .model = UFS_ANY_MODEL, .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE }, + { .wmanufacturerid = UFS_VENDOR_SAMSUNG, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH | + UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING }, {} }; diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index d0e6ec9128e7..05d4cb569c50 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -122,8 +122,11 @@ enum { TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN) /* QUniPro Vendor specific attributes */ +#define PA_TX_HSG1_SYNC_LENGTH 0x1552 #define PA_VS_CONFIG_REG1 0x9000 #define DME_VS_CORE_CLK_CTRL 0xD002 +#define TX_HS_EQUALIZER 0x0037 + /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */ #define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16) #define CLK_1US_CYCLES_MASK GENMASK(7, 0) @@ -141,6 +144,21 @@ enum { #define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202 #define UNIPRO_CORE_CLK_FREQ_403_MHZ 403 +/* TX_HSG1_SYNC_LENGTH attr value */ +#define PA_TX_HSG1_SYNC_LENGTH_VAL 0x4A + +/* + * Some ufs device vendors need a different TSync length. + * Enable this quirk to give an additional TX_HS_SYNC_LENGTH. + */ +#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH BIT(16) + +/* + * Some ufs device vendors need a different Deemphasis setting. + * Enable this quirk to tune TX Deemphasis parameters. 
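+ * On this host the quirk is consumed by ufs_qcom_set_tx_hs_equalizer(),
+ * which programs TX_HS_EQUALIZER on every TX lane during a power mode
+ * change: roughly DEEMPHASIS_3_5_dB for HS gear 5, NO_DEEMPHASIS otherwise.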
+ */ +#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING BIT(17) + /* ICE allocator type to share AES engines among TX stream and RX stream */ #define ICE_ALLOCATOR_TYPE 2 diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 1b19b5647495..69c1df0f4ca5 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -131,15 +131,12 @@ static void hv_uio_rescind(struct vmbus_channel *channel) vmbus_device_unregister(channel->device_obj); } -/* Sysfs API to allow mmap of the ring buffers +/* Function used for mmap of ring buffer sysfs interface. * The ring buffer is allocated as contiguous memory by vmbus_open */ -static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, - const struct bin_attribute *attr, - struct vm_area_struct *vma) +static int +hv_uio_ring_mmap(struct vmbus_channel *channel, struct vm_area_struct *vma) { - struct vmbus_channel *channel - = container_of(kobj, struct vmbus_channel, kobj); void *ring_buffer = page_address(channel->ringbuffer_page); if (channel->state != CHANNEL_OPENED_STATE) @@ -149,15 +146,6 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, channel->ringbuffer_pagecount << PAGE_SHIFT); } -static const struct bin_attribute ring_buffer_bin_attr = { - .attr = { - .name = "ring", - .mode = 0600, - }, - .size = 2 * SZ_2M, - .mmap = hv_uio_ring_mmap, -}; - /* Callback from VMBUS subsystem when new channel created. */ static void hv_uio_new_channel(struct vmbus_channel *new_sc) @@ -178,8 +166,7 @@ hv_uio_new_channel(struct vmbus_channel *new_sc) /* Disable interrupts on sub channel */ new_sc->inbound.ring_buffer->interrupt_mask = 1; set_channel_read_mode(new_sc, HV_CALL_ISR); - - ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr); + ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap); if (ret) { dev_err(device, "sysfs create ring bin file failed; %d\n", ret); vmbus_close(new_sc); @@ -350,10 +337,18 @@ hv_uio_probe(struct hv_device *dev, goto fail_close; } - ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr); - if (ret) - dev_notice(&dev->device, - "sysfs create ring bin file failed; %d\n", ret); + /* + * This internally calls sysfs_update_group, which returns a non-zero value if it executes + * before sysfs_create_group. This is expected as the 'ring' will be created later in + * vmbus_device_register() -> vmbus_add_channel_kobj(). Thus, no need to check the return + * value and print warning. + * + * Creating/exposing sysfs in driver probe is not encouraged as it can lead to race + * conditions with userspace. For backward compatibility, "ring" sysfs could not be removed + * or decoupled from uio_hv_generic probe. Userspace programs can make use of inotify + * APIs to make sure that ring is created. 
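+ * A minimal userspace sketch (the sysfs path is illustrative and depends
+ * on the device GUID and channel relid):
+ *
+ *   int ifd = inotify_init1(IN_CLOEXEC);
+ *   inotify_add_watch(ifd, "/sys/bus/vmbus/devices/<guid>/channels/<relid>",
+ *                     IN_CREATE);
+ *
+ * then read() inotify events until one names "ring" before mmap()ing it.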
+ */ + hv_create_ring_sysfs(channel, hv_uio_ring_mmap); hv_set_drvdata(dev, pdata); @@ -375,7 +370,7 @@ hv_uio_remove(struct hv_device *dev) if (!pdata) return; - sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); + hv_remove_ring_sysfs(dev->channel); uio_unregister_device(&pdata->info); hv_uio_cleanup(dev, pdata); diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 694aa1457739..d9d8dc05b235 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -1963,6 +1963,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) unsigned int bit; unsigned long reg; + local_bh_disable(); spin_lock_irqsave(&priv_dev->lock, flags); reg = readl(&priv_dev->regs->usb_ists); @@ -2004,6 +2005,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) irqend: writel(~0, &priv_dev->regs->ep_ien); spin_unlock_irqrestore(&priv_dev->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index 87f310841735..4824a10df07e 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -139,6 +139,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, (portsc & PORT_CHANGE_BITS), port_regs); } +static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev) +{ + struct cdns *cdns = dev_get_drvdata(pdev->dev); + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + u32 val; + + if (!cdns->override_apb_timeout) + return; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_3_OFFSET; + + val = le32_to_cpu(readl(reg)); + val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout); + writel(cpu_to_le32(val), reg); +} + static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) { __le32 __iomem *reg; @@ -1773,6 +1793,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); pdev->rev_cap = reg; + pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision); + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", readl(&pdev->rev_cap->ctrl_revision), readl(&pdev->rev_cap->rtl_revision), @@ -1798,6 +1820,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev) pdev->hci_version = HC_VERSION(pdev->hcc_params); pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + /* + * Override the APB timeout value to give the controller more time for + * enabling UTMI clock and synchronizing APB and UTMI clock domains. + * This fix is platform specific and is required to fixes issue with + * reading incorrect value from PORTSC register after resuming + * from L1 state. + */ + cdnsp_set_apb_timeout_value(pdev); + cdnsp_get_rev_cap(pdev); /* Make sure the Device Controller is halted. */ diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h index 84887dfea763..12534be52f39 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.h +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -520,6 +520,9 @@ struct cdnsp_rev_cap { #define REG_CHICKEN_BITS_2_OFFSET 0x48 #define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) +#define REG_CHICKEN_BITS_3_OFFSET 0x4C +#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val)) + /* XBUF Extended Capability ID. */ #define XBUF_CAP_ID 0xCB #define XBUF_RX_TAG_MASK_0_OFFSET 0x1C @@ -1357,6 +1360,7 @@ struct cdnsp_port { * @rev_cap: Controller Capabilities Registers. 
* @hcs_params1: Cached register copies of read-only HCSPARAMS1 * @hcc_params: Cached register copies of read-only HCCPARAMS1 + * @rtl_revision: Cached controller rtl revision. * @setup: Temporary buffer for setup packet. * @ep0_preq: Internal allocated request used during enumeration. * @ep0_stage: ep0 stage during enumeration process. @@ -1411,6 +1415,8 @@ struct cdnsp_device { __u32 hcs_params1; __u32 hcs_params3; __u32 hcc_params; + #define RTL_REVISION_NEW_LPM 0x2700 + __u32 rtl_revision; /* Lock used in interrupt thread context. */ spinlock_t lock; struct usb_ctrlrequest setup; diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index a51144504ff3..8c361b8394e9 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -28,6 +28,8 @@ #define PCI_DRIVER_NAME "cdns-pci-usbssp" #define PLAT_DRIVER_NAME "cdns-usbssp" +#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20 + static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) { /* @@ -139,6 +141,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, cdnsp->otg_irq = pdev->irq; } + /* + * Cadence PCI based platform require some longer timeout for APB + * to fixes domain clock synchronization issue after resuming + * controller from L1 state. + */ + cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE; + pci_set_drvdata(pdev, cdnsp); + if (pci_is_enabled(func)) { cdnsp->dev = dev; cdnsp->gadget_init = cdnsp_gadget_init; @@ -148,8 +158,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, goto free_cdnsp; } - pci_set_drvdata(pdev, cdnsp); - device_wakeup_enable(&pdev->dev); if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 46852529499d..fd06cb85c4ea 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, writel(db_value, reg_addr); - cdnsp_force_l0_go(pdev); + if (pdev->rtl_revision < RTL_REVISION_NEW_LPM) + cdnsp_force_l0_go(pdev); /* Doorbell was set. */ return true; diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 921cccf1ca9d..801be9e61340 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -79,6 +79,8 @@ struct cdns3_platform_data { * @pdata: platform data from glue layer * @lock: spinlock structure * @xhci_plat_data: xhci private data structure pointer + * @override_apb_timeout: hold value of APB timeout. For value 0 the default + * value in CHICKEN_BITS_3 will be preserved. 
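+ * (cdnsp-pci.c sets this to CHICKEN_APB_TIMEOUT_VALUE and
+ * cdnsp_set_apb_timeout_value() then writes it into CHICKEN_BITS_3 while
+ * cdnsp_gen_setup() runs.)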
* @gadget_init: pointer to gadget initialization function */ struct cdns { @@ -117,6 +119,7 @@ struct cdns { struct cdns3_platform_data *pdata; spinlock_t lock; struct xhci_plat_priv *xhci_plat_data; + u32 override_apb_timeout; int (*gadget_init)(struct cdns *cdns); }; diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 1a7fc638213e..4f8bfd242b59 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -336,6 +336,13 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) return ret; } +static void ci_hdrc_imx_disable_regulator(void *arg) +{ + struct ci_hdrc_imx_data *data = arg; + + regulator_disable(data->hsic_pad_regulator); +} + static int ci_hdrc_imx_probe(struct platform_device *pdev) { struct ci_hdrc_imx_data *data; @@ -394,6 +401,13 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) "Failed to enable HSIC pad regulator\n"); goto err_put; } + ret = devm_add_action_or_reset(dev, + ci_hdrc_imx_disable_regulator, data); + if (ret) { + dev_err(dev, + "Failed to add regulator devm action\n"); + goto err_put; + } } } @@ -432,11 +446,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) ret = imx_get_clks(dev); if (ret) - goto disable_hsic_regulator; + goto qos_remove_request; ret = imx_prepare_enable_clks(dev); if (ret) - goto disable_hsic_regulator; + goto qos_remove_request; ret = clk_prepare_enable(data->clk_wakeup); if (ret) @@ -470,7 +484,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) { pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL; data->override_phy_control = true; - usb_phy_init(pdata.usb_phy); + ret = usb_phy_init(pdata.usb_phy); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_clk; + } } if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM) @@ -479,7 +497,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) ret = imx_usbmisc_init(data->usbmisc_data); if (ret) { dev_err(dev, "usbmisc init failed, ret=%d\n", ret); - goto err_clk; + goto phy_shutdown; } data->ci_pdev = ci_hdrc_add_device(dev, @@ -488,7 +506,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) if (IS_ERR(data->ci_pdev)) { ret = PTR_ERR(data->ci_pdev); dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n"); - goto err_clk; + goto phy_shutdown; } if (data->usbmisc_data) { @@ -522,19 +540,20 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) disable_device: ci_hdrc_remove_device(data->ci_pdev); +phy_shutdown: + if (data->override_phy_control) + usb_phy_shutdown(data->phy); err_clk: clk_disable_unprepare(data->clk_wakeup); err_wakeup_clk: imx_disable_unprepare_clks(dev); -disable_hsic_regulator: - if (data->hsic_pad_regulator) - /* don't overwrite original ret (cf. 
EPROBE_DEFER) */ - regulator_disable(data->hsic_pad_regulator); +qos_remove_request: if (pdata.flags & CI_HDRC_PMQOS) cpu_latency_qos_remove_request(&data->pm_qos_req); data->ci_pdev = NULL; err_put: - put_device(data->usbmisc_data->dev); + if (data->usbmisc_data) + put_device(data->usbmisc_data->dev); return ret; } @@ -556,10 +575,9 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev) clk_disable_unprepare(data->clk_wakeup); if (data->plat_data->flags & CI_HDRC_PMQOS) cpu_latency_qos_remove_request(&data->pm_qos_req); - if (data->hsic_pad_regulator) - regulator_disable(data->hsic_pad_regulator); } - put_device(data->usbmisc_data->dev); + if (data->usbmisc_data) + put_device(data->usbmisc_data->dev); } static void ci_hdrc_imx_shutdown(struct platform_device *pdev) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 86ee39db013f..16e7fa4d488d 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -726,7 +726,7 @@ static int wdm_open(struct inode *inode, struct file *file) rv = -EBUSY; goto out; } - + smp_rmb(); /* ordered against wdm_wwan_port_stop() */ rv = usb_autopm_get_interface(desc->intf); if (rv < 0) { dev_err(&desc->intf->dev, "Error autopm - %d\n", rv); @@ -829,6 +829,7 @@ static struct usb_class_driver wdm_class = { static int wdm_wwan_port_start(struct wwan_port *port) { struct wdm_device *desc = wwan_port_get_drvdata(port); + int rv; /* The interface is both exposed via the WWAN framework and as a * legacy usbmisc chardev. If chardev is already open, just fail @@ -848,7 +849,15 @@ static int wdm_wwan_port_start(struct wwan_port *port) wwan_port_txon(port); /* Start getting events */ - return usb_submit_urb(desc->validity, GFP_KERNEL); + rv = usb_submit_urb(desc->validity, GFP_KERNEL); + if (rv < 0) { + wwan_port_txoff(port); + desc->manage_power(desc->intf, 0); + /* this must be last lest we race with chardev open */ + clear_bit(WDM_WWAN_IN_USE, &desc->flags); + } + + return rv; } static void wdm_wwan_port_stop(struct wwan_port *port) @@ -859,8 +868,10 @@ static void wdm_wwan_port_stop(struct wwan_port *port) poison_urbs(desc); desc->manage_power(desc->intf, 0); clear_bit(WDM_READ, &desc->flags); - clear_bit(WDM_WWAN_IN_USE, &desc->flags); unpoison_urbs(desc); + smp_wmb(); /* ordered against wdm_open() */ + /* this must be last lest we open a poisoned device */ + clear_bit(WDM_WWAN_IN_USE, &desc->flags); } static void wdm_wwan_port_tx_complete(struct urb *urb) @@ -868,7 +879,7 @@ static void wdm_wwan_port_tx_complete(struct urb *urb) struct sk_buff *skb = urb->context; struct wdm_device *desc = skb_shinfo(skb)->destructor_arg; - usb_autopm_put_interface(desc->intf); + usb_autopm_put_interface_async(desc->intf); wwan_port_txon(desc->wwanp); kfree_skb(skb); } @@ -898,7 +909,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb) req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE); req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; req->wValue = 0; - req->wIndex = desc->inum; + req->wIndex = desc->inum; /* already converted */ req->wLength = cpu_to_le16(skb->len); skb_shinfo(skb)->destructor_arg = desc; diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 34e46ef308ab..740d2d2b19fb 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) u8 *buffer; u8 tag; int rv; + long wait_rv; dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", 
data->iin_ep_present); @@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) } if (data->iin_ep_present) { - rv = wait_event_interruptible_timeout( + wait_rv = wait_event_interruptible_timeout( data->waitq, atomic_read(&data->iin_data_valid) != 0, file_data->timeout); - if (rv < 0) { - dev_dbg(dev, "wait interrupted %d\n", rv); + if (wait_rv < 0) { + dev_dbg(dev, "wait interrupted %ld\n", wait_rv); + rv = wait_rv; goto exit; } - if (rv == 0) { + if (wait_rv == 0) { dev_dbg(dev, "wait timed out\n"); rv = -ETIMEDOUT; goto exit; @@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv); + rv = 0; + exit: /* bump interrupt bTag */ data->iin_bTag += 1; @@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, { struct usbtmc_device_data *data = file_data->data; struct device *dev = &data->intf->dev; - int rv; u32 timeout; unsigned long expire; + long wait_rv; if (!data->iin_ep_present) { dev_dbg(dev, "no interrupt endpoint present\n"); @@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, mutex_unlock(&data->io_mutex); - rv = wait_event_interruptible_timeout( - data->waitq, - atomic_read(&file_data->srq_asserted) != 0 || - atomic_read(&file_data->closing), - expire); + wait_rv = wait_event_interruptible_timeout( + data->waitq, + atomic_read(&file_data->srq_asserted) != 0 || + atomic_read(&file_data->closing), + expire); mutex_lock(&data->io_mutex); /* Note! disconnect or close could be called in the meantime */ if (atomic_read(&file_data->closing) || data->zombie) - rv = -ENODEV; + return -ENODEV; - if (rv < 0) { - /* dev can be invalid now! */ - pr_debug("%s - wait interrupted %d\n", __func__, rv); - return rv; + if (wait_rv < 0) { + dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv); + return wait_rv; } - if (rv == 0) { + if (wait_rv == 0) { dev_dbg(dev, "%s - wait timed out\n", __func__); return -ETIMEDOUT; } @@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, unsigned long expire; int bufcount = 1; int again = 0; + long wait_rv; /* mutex already locked */ @@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, if (!(flags & USBTMC_FLAG_ASYNC)) { dev_dbg(dev, "%s: before wait time %lu\n", __func__, expire); - retval = wait_event_interruptible_timeout( + wait_rv = wait_event_interruptible_timeout( file_data->wait_bulk_in, usbtmc_do_transfer(file_data), expire); - dev_dbg(dev, "%s: wait returned %d\n", - __func__, retval); + dev_dbg(dev, "%s: wait returned %ld\n", + __func__, wait_rv); + + if (wait_rv < 0) { + retval = wait_rv; + goto error; + } - if (retval <= 0) { - if (retval == 0) - retval = -ETIMEDOUT; + if (wait_rv == 0) { + retval = -ETIMEDOUT; goto error; } + } urb = usb_get_from_anchor(&file_data->in_anchor); @@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, if (!buffer) return -ENOMEM; - mutex_lock(&data->io_mutex); + retval = mutex_lock_interruptible(&data->io_mutex); + if (retval < 0) + goto exit_nolock; + if (data->zombie) { retval = -ENODEV; goto exit; @@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, exit: mutex_unlock(&data->io_mutex); +exit_nolock: kfree(buffer); return retval; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 8efbacc5bc34..36d3df7d040c 100644 --- 
a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -369,6 +369,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + /* SanDisk Corp. SanDisk 3.2Gen1 */ + { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Realforce 87U Keyboard */ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, @@ -383,6 +386,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0904, 0x6103), .driver_info = USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + /* Silicon Motion Flash Drive */ + { USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Sound Devices USBPre2 */ { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_IGNORE }, @@ -539,6 +545,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2040, 0x7200), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* VLI disk */ + { USB_DEVICE(0x2109, 0x0711), .driver_info = USB_QUIRK_NO_LPM }, + /* Raydium Touchscreen */ { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM }, diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index aaa39e663f60..27eae4cf223d 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1164,6 +1164,9 @@ struct dwc3_scratchpad_array { * @gsbuscfg0_reqinfo: store GSBUSCFG0.DATRDREQINFO, DESRDREQINFO, * DATWRREQINFO, and DESWRREQINFO value passed from * glue driver. + * @wakeup_pending_funcs: Indicates whether any interface has requested for + * function wakeup in bitmap format where bit position + * represents interface_id. */ struct dwc3 { struct work_struct drd_work; @@ -1394,6 +1397,7 @@ struct dwc3 { int num_ep_resized; struct dentry *debug_root; u32 gsbuscfg0_reqinfo; + u32 wakeup_pending_funcs; }; #define INCRX_BURST_MODE 0 diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index a33a42ba0249..4ca7f6240d07 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -207,15 +207,13 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) skip_usb3_phy: /* ulpi reset via gpio-modepin or gpio-framework driver */ - reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset_gpio)) { return dev_err_probe(dev, PTR_ERR(reset_gpio), "Failed to request reset GPIO\n"); } if (reset_gpio) { - /* Toggle ulpi to reset the phy. 
*/ - gpiod_set_value_cansleep(reset_gpio, 1); usleep_range(5000, 10000); gpiod_set_value_cansleep(reset_gpio, 0); usleep_range(5000, 10000); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 47e73c4ed62d..321361288935 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -276,8 +276,6 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd, return ret; } -static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async); - /** * dwc3_send_gadget_ep_cmd - issue an endpoint command * @dep: the endpoint to which the command is going to be issued @@ -2359,10 +2357,8 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g) return __dwc3_gadget_get_frame(dwc); } -static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async) +static int __dwc3_gadget_wakeup(struct dwc3 *dwc) { - int retries; - int ret; u32 reg; @@ -2390,8 +2386,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async) return -EINVAL; } - if (async) - dwc3_gadget_enable_linksts_evts(dwc, true); + dwc3_gadget_enable_linksts_evts(dwc, true); ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); if (ret < 0) { @@ -2410,27 +2405,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async) /* * Since link status change events are enabled we will receive - * an U0 event when wakeup is successful. So bail out. + * an U0 event when wakeup is successful. */ - if (async) - return 0; - - /* poll until Link State changes to ON */ - retries = 20000; - - while (retries--) { - reg = dwc3_readl(dwc->regs, DWC3_DSTS); - - /* in HS, means ON */ - if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) - break; - } - - if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { - dev_err(dwc->dev, "failed to send remote wakeup\n"); - return -EINVAL; - } - return 0; } @@ -2451,7 +2427,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g) spin_unlock_irqrestore(&dwc->lock, flags); return -EINVAL; } - ret = __dwc3_gadget_wakeup(dwc, true); + ret = __dwc3_gadget_wakeup(dwc); spin_unlock_irqrestore(&dwc->lock, flags); @@ -2479,14 +2455,10 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id) */ link_state = dwc3_gadget_get_link_state(dwc); if (link_state == DWC3_LINK_STATE_U3) { - ret = __dwc3_gadget_wakeup(dwc, false); - if (ret) { - spin_unlock_irqrestore(&dwc->lock, flags); - return -EINVAL; - } - dwc3_resume_gadget(dwc); - dwc->suspended = false; - dwc->link_state = DWC3_LINK_STATE_U0; + dwc->wakeup_pending_funcs |= BIT(intf_id); + ret = __dwc3_gadget_wakeup(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return ret; } ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION, @@ -4353,6 +4325,8 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, { enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; unsigned int pwropt; + int ret; + int intf_id; /* * WORKAROUND: DWC3 < 2.50a have an issue when configured without @@ -4428,7 +4402,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, switch (next) { case DWC3_LINK_STATE_U0: - if (dwc->gadget->wakeup_armed) { + if (dwc->gadget->wakeup_armed || dwc->wakeup_pending_funcs) { dwc3_gadget_enable_linksts_evts(dwc, false); dwc3_resume_gadget(dwc); dwc->suspended = false; @@ -4451,6 +4425,18 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, } dwc->link_state = next; + + /* Proceed with func wakeup if any interfaces that has requested */ + while (dwc->wakeup_pending_funcs && (next == DWC3_LINK_STATE_U0)) { + intf_id = 
ffs(dwc->wakeup_pending_funcs) - 1; + ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION, + DWC3_DGCMDPAR_DN_FUNC_WAKE | + DWC3_DGCMDPAR_INTF_SEL(intf_id)); + if (ret) + dev_err(dwc->dev, "Failed to send DN wake for intf %d\n", intf_id); + + dwc->wakeup_pending_funcs &= ~BIT(intf_id); + } } static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, @@ -4617,6 +4603,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) if (!count) return IRQ_NONE; + if (count > evt->length) { + dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n", + count, evt->length); + return IRQ_NONE; + } + evt->count = count; evt->flags |= DWC3_EVENT_PENDING; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 869ad99afb48..8dbc132a505e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2011,15 +2011,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) if (f->get_status) { status = f->get_status(f); + if (status < 0) break; - } else { - /* Set D0 and D1 bits based on func wakeup capability */ - if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) { - status |= USB_INTRF_STAT_FUNC_RW_CAP; - if (f->func_wakeup_armed) - status |= USB_INTRF_STAT_FUNC_RW; - } + + /* if D5 is not set, then device is not wakeup capable */ + if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP)) + status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW); } put_unaligned_le16(status & 0x0000ffff, req->buf); diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 80841de845b0..027226325039 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f) gether_resume(&ecm->port); } +static int ecm_get_status(struct usb_function *f) +{ + return (f->func_wakeup_armed ? 
USB_INTRF_STAT_FUNC_RW : 0) | + USB_INTRF_STAT_FUNC_RW_CAP; +} + static void ecm_free(struct usb_function *f) { struct f_ecm *ecm; @@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi) ecm->port.func.disable = ecm_disable; ecm->port.func.free_func = ecm_free; ecm->port.func.suspend = ecm_suspend; + ecm->port.func.get_status = ecm_get_status; ecm->port.func.resume = ecm_resume; return &ecm->port.func; diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c index 12e866fb311d..0a800ba53816 100644 --- a/drivers/usb/gadget/function/f_midi2.c +++ b/drivers/usb/gadget/function/f_midi2.c @@ -475,7 +475,7 @@ static void reply_ump_stream_ep_info(struct f_midi2_ep *ep) /* reply a UMP EP device info */ static void reply_ump_stream_ep_device(struct f_midi2_ep *ep) { - struct snd_ump_stream_msg_devince_info rep = { + struct snd_ump_stream_msg_device_info rep = { .type = UMP_MSG_TYPE_STREAM, .status = UMP_STREAM_MSG_STATUS_DEVICE_INFO, .manufacture_id = ep->info.manufacturer, diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index c7fdbc55fb0b..2957316fd3d0 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep) val = xudc_readl(xudc, CTRL); val &= ~CTRL_RUN; xudc_writel(xudc, val, CTRL); + + val = xudc_readl(xudc, ST); + if (val & ST_RC) + xudc_writel(xudc, ST_RC, ST); } dev_info(xudc->dev, "ep %u disabled\n", ep->index); diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 900ea0d368e0..9f0a6b27e47c 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } +static int ohci_quirk_loongson(struct usb_hcd *hcd) +{ + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* + * Loongson's LS7A OHCI controller (rev 0x02) has a + * flaw. MMIO register with offset 0x60/64 is treated + * as legacy PS2-compatible keyboard/mouse interface. + * Since OHCI only use 4KB BAR resource, LS7A OHCI's + * 32KB BAR is wrapped around (the 2nd 4KB BAR space + * is the same as the 1st 4KB internally). So add 4KB + * offset (0x1000) to the OHCI registers as a quirk. 
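+ * With the quirk in place every register access is simply shifted by
+ * 0x1000, e.g. HcRevision is read at BAR offset 0x1000 and HcControl at
+ * 0x1004, which the wrap-around maps back onto the real registers.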
+ */ + if (pdev->revision == 0x2) + hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */ + + return 0; +} + static int ohci_quirk_qemu(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); @@ -225,6 +244,10 @@ static const struct pci_device_id ohci_pci_quirks[] = { .driver_data = (unsigned long)ohci_quirk_amd700, }, { + PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24), + .driver_data = (unsigned long)ohci_quirk_loongson, + }, + { .vendor = PCI_VENDOR_ID_APPLE, .device = 0x003f, .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index a7c934404ebc..62318291f566 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) } /* Get and enable clock if any specified */ - uhci->clk = devm_clk_get(&pdev->dev, NULL); + uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(uhci->clk)) { ret = PTR_ERR(uhci->clk); goto err_rmr; diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index fd7895b24367..0d4ce5734165 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -823,6 +823,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) { dma_addr_t deq; union xhci_trb *evt; + enum evtreturn ret = EVT_DONE; u32 ctrl, portsc; bool update_erdp = false; @@ -909,6 +910,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) break; case TRB_TYPE(TRB_TRANSFER): dbc_handle_xfer_event(dbc, evt); + ret = EVT_XFER_DONE; break; default: break; @@ -927,7 +929,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) lo_hi_writeq(deq, &dbc->regs->erdp); } - return EVT_DONE; + return ret; } static void xhci_dbc_handle_events(struct work_struct *work) @@ -936,6 +938,7 @@ static void xhci_dbc_handle_events(struct work_struct *work) struct xhci_dbc *dbc; unsigned long flags; unsigned int poll_interval; + unsigned long busypoll_timelimit; dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); poll_interval = dbc->poll_interval; @@ -954,11 +957,21 @@ static void xhci_dbc_handle_events(struct work_struct *work) dbc->driver->disconnect(dbc); break; case EVT_DONE: - /* set fast poll rate if there are pending data transfers */ + /* + * Set fast poll rate if there are pending out transfers, or + * a transfer was recently processed + */ + busypoll_timelimit = dbc->xfer_timestamp + + msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT); + if (!list_empty(&dbc->eps[BULK_OUT].list_pending) || - !list_empty(&dbc->eps[BULK_IN].list_pending)) + time_is_after_jiffies(busypoll_timelimit)) poll_interval = 0; break; + case EVT_XFER_DONE: + dbc->xfer_timestamp = jiffies; + poll_interval = 0; + break; default: dev_info(dbc->dev, "stop handling dbc events\n"); return; diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index 9dc8f4d8077c..47ac72c2286d 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -96,6 +96,7 @@ struct dbc_ep { #define DBC_WRITE_BUF_SIZE 8192 #define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */ #define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */ +#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */ /* * Private structure for DbC hardware state: */ @@ -142,6 +143,7 @@ struct xhci_dbc { enum dbc_state state; struct delayed_work event_work; unsigned int poll_interval; /* ms */ + unsigned long xfer_timestamp; unsigned resume_required:1; struct dbc_ep 
eps[2]; @@ -187,6 +189,7 @@ struct dbc_request { enum evtreturn { EVT_ERR = -1, EVT_DONE, + EVT_XFER_DONE, EVT_GSER, EVT_DISC, }; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index c0f226584a40..486347776cb2 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1878,9 +1878,10 @@ int xhci_bus_resume(struct usb_hcd *hcd) int max_ports, port_index; int sret; u32 next_state; - u32 temp, portsc; + u32 portsc; struct xhci_hub *rhub; struct xhci_port **ports; + bool disabled_irq = false; rhub = xhci_get_rhub(hcd); ports = rhub->ports; @@ -1896,17 +1897,20 @@ int xhci_bus_resume(struct usb_hcd *hcd) return -ESHUTDOWN; } - /* delay the irqs */ - temp = readl(&xhci->op_regs->command); - temp &= ~CMD_EIE; - writel(temp, &xhci->op_regs->command); - /* bus specific resume for ports we suspended at bus_suspend */ - if (hcd->speed >= HCD_USB3) + if (hcd->speed >= HCD_USB3) { next_state = XDEV_U0; - else + } else { next_state = XDEV_RESUME; - + if (bus_state->bus_suspended) { + /* + * prevent port event interrupts from interfering + * with usb2 port resume process + */ + xhci_disable_interrupter(xhci->interrupters[0]); + disabled_irq = true; + } + } port_index = max_ports; while (port_index--) { portsc = readl(ports[port_index]->addr); @@ -1974,11 +1978,9 @@ int xhci_bus_resume(struct usb_hcd *hcd) (void) readl(&xhci->op_regs->command); bus_state->next_statechange = jiffies + msecs_to_jiffies(5); - /* re-enable irqs */ - temp = readl(&xhci->op_regs->command); - temp |= CMD_EIE; - writel(temp, &xhci->op_regs->command); - temp = readl(&xhci->op_regs->command); + /* re-enable interrupter */ + if (disabled_irq) + xhci_enable_interrupter(xhci->interrupters[0]); spin_unlock_irqrestore(&xhci->lock, flags); return 0; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5d64c297721c..423bf3649570 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -561,8 +561,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, * pointer command pending because the device can choose to start any * stream once the endpoint is on the HW schedule. */ - if (ep_state & (EP_STOP_CMD_PENDING | SET_DEQ_PENDING | EP_HALTED | - EP_CLEARING_TT | EP_STALLED)) + if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) || + (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT)) return; trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id)); @@ -699,7 +699,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, int new_cycle; dma_addr_t addr; u64 hw_dequeue; - bool cycle_found = false; + bool hw_dequeue_found = false; bool td_last_trb_found = false; u32 trb_sct = 0; int ret; @@ -715,25 +715,24 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); new_seg = ep_ring->deq_seg; new_deq = ep_ring->dequeue; - new_cycle = hw_dequeue & 0x1; + new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE; /* - * We want to find the pointer, segment and cycle state of the new trb - * (the one after current TD's end_trb). We know the cycle state at - * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are - * found. + * Walk the ring until both the next TRB and hw_dequeue are found (don't + * move hw_dequeue back if it went forward due to a HW bug). Cycle state + * is loaded from a known good TRB, track later toggles to maintain it. 
*/ do { - if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq) == (dma_addr_t)(hw_dequeue & ~0xf)) { - cycle_found = true; + hw_dequeue_found = true; if (td_last_trb_found) break; } if (new_deq == td->end_trb) td_last_trb_found = true; - if (cycle_found && trb_is_link(new_deq) && + if (td_last_trb_found && trb_is_link(new_deq) && link_trb_toggles_cycle(new_deq)) new_cycle ^= 0x1; @@ -745,7 +744,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, return -EINVAL; } - } while (!cycle_found || !td_last_trb_found); + } while (!hw_dequeue_found || !td_last_trb_found); /* Don't update the ring cycle state for the producer (us). */ addr = xhci_trb_virt_to_dma(new_seg, new_deq); @@ -2573,9 +2572,6 @@ static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET); return; - case COMP_STALL_ERROR: - ep->ep_state |= EP_STALLED; - break; default: /* do nothing */ break; @@ -2916,7 +2912,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (xhci_spurious_success_tx_event(xhci, ep_ring)) { xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n", &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code); - ep_ring->old_trb_comp_code = trb_comp_code; + ep_ring->old_trb_comp_code = 0; return 0; } @@ -3780,7 +3776,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, * enqueue a No Op TRB, this can prevent the Setup and Data Stage * TRB to be breaked by the Link TRB. */ - if (trb_is_link(ep_ring->enqueue + 1)) { + if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) { field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state; queue_trb(xhci, ep_ring, false, 0, 0, TRB_INTR_TARGET(0), field); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index b5c362c2051d..0c7af44d4dae 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1364,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, tegra->otg_usb2_port); + pm_runtime_get_sync(tegra->dev); if (tegra->host_mode) { /* switch to host mode */ if (tegra->otg_usb3_port >= 0) { @@ -1393,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work) } tegra_xhci_set_port_power(tegra, true, true); + pm_runtime_mark_last_busy(tegra->dev); } else { if (tegra->otg_usb3_port >= 0) @@ -1400,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra_xhci_set_port_power(tegra, true, false); } + pm_runtime_put_autosuspend(tegra->dev); } #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 0452b8d65832..90eb491267b5 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -322,7 +322,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) xhci_info(xhci, "Fault detected\n"); } -static int xhci_enable_interrupter(struct xhci_interrupter *ir) +int xhci_enable_interrupter(struct xhci_interrupter *ir) { u32 iman; @@ -335,7 +335,7 @@ static int xhci_enable_interrupter(struct xhci_interrupter *ir) return 0; } -static int xhci_disable_interrupter(struct xhci_interrupter *ir) +int xhci_disable_interrupter(struct xhci_interrupter *ir) { u32 iman; @@ -1605,11 +1605,6 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag goto free_priv; } - /* Class driver might not be aware ep halted due to async URB giveback */ - if 
(*ep_state & EP_STALLED) - dev_dbg(&urb->dev->dev, "URB %p queued before clearing halt\n", - urb); - switch (usb_endpoint_type(&urb->ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: @@ -1770,8 +1765,8 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) goto done; } - /* In these cases no commands are pending but the endpoint is stopped */ - if (ep->ep_state & (EP_CLEARING_TT | EP_STALLED)) { + /* In this case no commands are pending but the endpoint is stopped */ + if (ep->ep_state & EP_CLEARING_TT) { /* and cancelled TDs can be given back right away */ xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", urb->dev->slot_id, ep_index, ep->ep_state); @@ -3209,11 +3204,8 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, ep = &vdev->eps[ep_index]; - spin_lock_irqsave(&xhci->lock, flags); - - ep->ep_state &= ~EP_STALLED; - /* Bail out if toggle is already being cleared by a endpoint reset */ + spin_lock_irqsave(&xhci->lock, flags); if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; spin_unlock_irqrestore(&xhci->lock, flags); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 37860f1e3aba..242ab9fbc8ae 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -664,7 +664,7 @@ struct xhci_virt_ep { unsigned int err_count; unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) -#define EP_HALTED (1 << 1) /* Halted host ep handling */ +#define EP_HALTED (1 << 1) /* For stall handling */ #define EP_STOP_CMD_PENDING (1 << 2) /* For URB cancellation */ /* Transitioning the endpoint to using streams, don't enqueue URBs */ #define EP_GETTING_STREAMS (1 << 3) @@ -675,7 +675,6 @@ struct xhci_virt_ep { #define EP_SOFT_CLEAR_TOGGLE (1 << 7) /* usb_hub_clear_tt_buffer is in progress */ #define EP_CLEARING_TT (1 << 8) -#define EP_STALLED (1 << 9) /* For stall handling */ /* ---- Related to URB cancellation ---- */ struct list_head cancelled_td_list; struct xhci_hcd *xhci; @@ -1891,6 +1890,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct usb_tt *tt, gfp_t mem_flags); int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, u32 imod_interval); +int xhci_enable_interrupter(struct xhci_interrupter *ir); +int xhci_disable_interrupter(struct xhci_interrupter *ir); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c index 75ac3c6aa92d..f5372dfa241a 100644 --- a/drivers/usb/misc/onboard_usb_dev.c +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -569,8 +569,14 @@ static void onboard_dev_usbdev_disconnect(struct usb_device *udev) } static const struct usb_device_id onboard_dev_id_table[] = { - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 HUB */ - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6500) }, /* CYUSB330x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6502) }, /* CYUSB330x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6503) }, /* CYUSB33{0,1}x 2.0 HUB, Vendor Mode */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB331x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB331x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6507) }, /* CYUSB332x 2.0 HUB, Vendor Mode */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6508) }, /* CYUSB332x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x650a) }, /* CYUSB332x 2.0 HUB */ { 
USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 HUB */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 HUB */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 HUB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9b34e23b7091..6ac7a0a5cf07 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1093,6 +1093,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) }, + /* Abacus Electrics */ + { USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 52be47d684ea..9acb6f837327 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -443,6 +443,11 @@ #define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ /* + * Abacus Electrics + */ +#define ABACUS_OPTICAL_PROBE_PID 0xf458 /* ABACUS ELECTRICS Optical Probe */ + +/* * Oceanic product ids */ #define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5cd26dac2069..27879cc57536 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -611,6 +611,7 @@ static void option_instat_callback(struct urb *urb); /* Sierra Wireless products */ #define SIERRA_VENDOR_ID 0x1199 #define SIERRA_PRODUCT_EM9191 0x90d3 +#define SIERRA_PRODUCT_EM9291 0x90e3 /* UNISOC (Spreadtrum) products */ #define UNISOC_VENDOR_ID 0x1782 @@ -2432,6 +2433,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 2c12449ff60c..a0afaf254d12 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -100,6 +100,11 @@ DEVICE(nokia, NOKIA_IDS); { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ DEVICE_N(novatel_gps, NOVATEL_IDS, 3); +/* OWON electronic test and measurement equipment driver */ +#define OWON_IDS() \ + { USB_DEVICE(0x5345, 0x1234) } /* HDS200 oscilloscopes and others */ +DEVICE(owon, OWON_IDS); + /* Siemens USB/MPI adapter */ #define SIEMENS_IDS() \ { USB_DEVICE(0x908, 0x0004) } @@ -134,6 +139,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &motorola_tetra_device, &nokia_device, &novatel_gps_device, + &owon_device, &siemens_mpi_device, &suunto_device, &vivopay_device, @@ -153,6 +159,7 @@ static const struct usb_device_id id_table[] = { MOTOROLA_TETRA_IDS(), NOKIA_IDS(), NOVATEL_IDS(), + OWON_IDS(), SIEMENS_IDS(), SUUNTO_IDS(), 
VIVOPAY_IDS(), diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 1f8c9b16a0fb..d460d71b4257 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -83,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_LUNS), +/* Reported-by: Oliver Neukum <oneukum@suse.com> */ +UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160, + "ADATA", + "Portable HDD CH94", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, "Initio Corporation", diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index d36f3b6992bb..152ee3376550 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -1056,13 +1056,20 @@ int usb_stor_probe1(struct us_data **pus, goto BadDevice; /* - * Some USB host controllers can't do DMA; they have to use PIO. - * For such controllers we need to make sure the block layer sets - * up bounce buffers in addressable memory. + * Some USB host controllers can't do DMA: They have to use PIO, or they + * have to use a small dedicated local memory area, or they have other + * restrictions on addressable memory. + * + * We can't support these controllers on highmem systems as we don't + * kmap or bounce buffer. */ - if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || - bus_to_hcd(us->pusb_dev->bus)->localmem_pool) - host->no_highmem = true; + if (IS_ENABLED(CONFIG_HIGHMEM) && + (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || + bus_to_hcd(us->pusb_dev->bus)->localmem_pool)) { + dev_warn(&intf->dev, "USB Mass Storage not supported on this host controller\n"); + result = -EINVAL; + goto release; + } /* Get the unusual_devs entries and the descriptors */ result = get_device_info(us, id, unusual_dev); @@ -1081,6 +1088,7 @@ int usb_stor_probe1(struct us_data **pus, BadDevice: usb_stor_dbg(us, "storage_probe() failed\n"); +release: release_everything(us); return result; } diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 9c76c3d0c6cf..67a533e35150 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1052,9 +1052,11 @@ struct typec_partner *typec_register_partner(struct typec_port *port, partner->usb_mode = USB_MODE_USB3; } + mutex_lock(&port->partner_link_lock); ret = device_register(&partner->dev); if (ret) { dev_err(&port->dev, "failed to register partner (%d)\n", ret); + mutex_unlock(&port->partner_link_lock); put_device(&partner->dev); return ERR_PTR(ret); } @@ -1063,6 +1065,7 @@ struct typec_partner *typec_register_partner(struct typec_port *port, typec_partner_link_device(partner, port->usb2_dev); if (port->usb3_dev) typec_partner_link_device(partner, port->usb3_dev); + mutex_unlock(&port->partner_link_lock); return partner; } @@ -1083,12 +1086,18 @@ void typec_unregister_partner(struct typec_partner *partner) port = to_typec_port(partner->dev.parent); - if (port->usb2_dev) + mutex_lock(&port->partner_link_lock); + if (port->usb2_dev) { typec_partner_unlink_device(partner, port->usb2_dev); - if (port->usb3_dev) + port->usb2_dev = NULL; + } + if (port->usb3_dev) { typec_partner_unlink_device(partner, port->usb3_dev); + port->usb3_dev = NULL; + } device_unregister(&partner->dev); + mutex_unlock(&port->partner_link_lock); } EXPORT_SYMBOL_GPL(typec_unregister_partner); @@ -2041,10 +2050,11 @@ static struct typec_partner *typec_get_partner(struct typec_port *port) static void 
typec_partner_attach(struct typec_connector *con, struct device *dev) { struct typec_port *port = container_of(con, struct typec_port, con); - struct typec_partner *partner = typec_get_partner(port); + struct typec_partner *partner; struct usb_device *udev = to_usb_device(dev); enum usb_mode usb_mode; + mutex_lock(&port->partner_link_lock); if (udev->speed < USB_SPEED_SUPER) { usb_mode = USB_MODE_USB2; port->usb2_dev = dev; @@ -2053,18 +2063,22 @@ static void typec_partner_attach(struct typec_connector *con, struct device *dev port->usb3_dev = dev; } + partner = typec_get_partner(port); if (partner) { typec_partner_set_usb_mode(partner, usb_mode); typec_partner_link_device(partner, dev); put_device(&partner->dev); } + mutex_unlock(&port->partner_link_lock); } static void typec_partner_deattach(struct typec_connector *con, struct device *dev) { struct typec_port *port = container_of(con, struct typec_port, con); - struct typec_partner *partner = typec_get_partner(port); + struct typec_partner *partner; + mutex_lock(&port->partner_link_lock); + partner = typec_get_partner(port); if (partner) { typec_partner_unlink_device(partner, dev); put_device(&partner->dev); @@ -2074,6 +2088,7 @@ static void typec_partner_deattach(struct typec_connector *con, struct device *d port->usb2_dev = NULL; else if (port->usb3_dev == dev) port->usb3_dev = NULL; + mutex_unlock(&port->partner_link_lock); } /** @@ -2614,6 +2629,7 @@ struct typec_port *typec_register_port(struct device *parent, ida_init(&port->mode_ids); mutex_init(&port->port_type_lock); + mutex_init(&port->partner_link_lock); port->id = id; port->ops = cap->ops; diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h index b3076a24ad2e..db2fe96c48ff 100644 --- a/drivers/usb/typec/class.h +++ b/drivers/usb/typec/class.h @@ -59,6 +59,7 @@ struct typec_port { enum typec_port_type port_type; enum usb_mode usb_mode; struct mutex port_type_lock; + struct mutex partner_link_lock; enum typec_orientation orientation; struct typec_switch *sw; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index a99db4e025cd..8adf6f954633 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5965,7 +5965,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, case SNK_TRY_WAIT_DEBOUNCE: if (!tcpm_port_is_sink(port)) { port->max_wait = 0; - tcpm_set_state(port, SRC_TRYWAIT, 0); + tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE); } break; case SRC_TRY_WAIT: diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 420af5139c70..8aae80b457d7 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) u8 cur = 0; int ret; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override && dp->initialized) { const struct typec_altmode *p = typec_altmode_get_partner(alt); @@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) schedule_work(&dp->work); ret = 0; err_unlock: - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return ret; } @@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) u64 command; int ret = 0; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override) { const struct typec_altmode *p = typec_altmode_get_partner(alt); @@ -144,7 +146,7 @@ static int 
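
The typec class hunks above serialize partner registration and the port->usb2_dev/usb3_dev pointers behind the new partner_link_lock. As an illustration only (not part of the patch), a class-internal helper that publishes an attached USB device under that lock might look like this; the helper name is hypothetical, while the lock and fields are the ones added above.

#include <linux/mutex.h>
#include "class.h"	/* struct typec_port is private to the typec class */

/*
 * Sketch: publish an attached USB device on a Type-C port so that a
 * concurrently registering partner sees a consistent view. Only the
 * partner_link_lock and usb2_dev/usb3_dev fields come from the hunks
 * above; the helper itself is hypothetical.
 */
static void example_port_set_usb_device(struct typec_port *port,
					struct device *dev, bool usb2)
{
	mutex_lock(&port->partner_link_lock);
	if (usb2)
		port->usb2_dev = dev;
	else
		port->usb3_dev = dev;
	mutex_unlock(&port->partner_link_lock);
}
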
ucsi_displayport_exit(struct typec_altmode *alt) schedule_work(&dp->work); out_unlock: - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return ret; } @@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, int cmd = PD_VDO_CMD(header); int svdm_version; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override && dp->initialized) { const struct typec_altmode *p = typec_altmode_get_partner(alt); dev_warn(&p->dev, "firmware doesn't support alternate mode overriding\n"); - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return -EOPNOTSUPP; } svdm_version = typec_altmode_get_svdm_version(alt); if (svdm_version < 0) { - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return svdm_version; } @@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, break; } - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return 0; } @@ -296,6 +299,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) if (!dp) return; + cancel_work_sync(&dp->work); + dp->data.conf = 0; dp->data.status = 0; dp->initialized = false; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index e8c7e9dc4930..01ce858a1a2b 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -1923,6 +1923,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data) EXPORT_SYMBOL_GPL(ucsi_set_drvdata); /** + * ucsi_con_mutex_lock - Acquire the connector mutex + * @con: The connector interface to lock + * + * Returns true on success, false if the connector is disconnected + */ +bool ucsi_con_mutex_lock(struct ucsi_connector *con) +{ + bool mutex_locked = false; + bool connected = true; + + while (connected && !mutex_locked) { + mutex_locked = mutex_trylock(&con->lock) != 0; + connected = UCSI_CONSTAT(con, CONNECTED); + if (connected && !mutex_locked) + msleep(20); + } + + connected = connected && con->partner; + if (!connected && mutex_locked) + mutex_unlock(&con->lock); + + return connected; +} + +/** + * ucsi_con_mutex_unlock - Release the connector mutex + * @con: The connector interface to unlock + */ +void ucsi_con_mutex_unlock(struct ucsi_connector *con) +{ + mutex_unlock(&con->lock); +} + +/** * ucsi_create - Allocate UCSI instance * @dev: Device interface to the PPM (Platform Policy Manager) * @ops: I/O routines diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 3a2c1762bec1..9c5278a0c5d4 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -94,6 +94,8 @@ int ucsi_register(struct ucsi *ucsi); void ucsi_unregister(struct ucsi *ucsi); void *ucsi_get_drvdata(struct ucsi *ucsi); void ucsi_set_drvdata(struct ucsi *ucsi, void *data); +bool ucsi_con_mutex_lock(struct ucsi_connector *con); +void ucsi_con_mutex_unlock(struct ucsi_connector *con); void ucsi_connector_change(struct ucsi *ucsi, u8 num); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 14437396d721..8f02f236b5b4 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1815,7 +1815,7 @@ int vfio_config_init(struct vfio_pci_core_device *vdev) } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx || - vdev->pdev->irq == IRQ_NOTCONNECTED) + !vdev->pdev->irq || vdev->pdev->irq == IRQ_NOTCONNECTED) vconfig[PCI_INTERRUPT_PIN] = 0; ret = vfio_cap_init(vdev); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 
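
ucsi_con_mutex_lock(), added above, only succeeds while the connector still has a partner, so alternate-mode callbacks can use it to bail out cleanly on disconnect instead of blocking on a dead connector. A minimal usage sketch, assuming the exported helpers above and a hypothetical callback:

#include <linux/errno.h>
#include "ucsi.h"	/* ucsi_con_mutex_lock()/unlock() declared above */

/*
 * Sketch: an alternate-mode operation guarded by the connector lock.
 * The callback body is hypothetical; the locking helpers are the ones
 * introduced in the hunks above.
 */
static int example_altmode_op(struct ucsi_connector *con)
{
	int ret;

	if (!ucsi_con_mutex_lock(con))
		return -ENOTCONN;	/* partner went away */

	ret = 0;			/* ... talk to the PPM here ... */

	ucsi_con_mutex_unlock(con);
	return ret;
}
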
35f9046af315..6328c3a05bcd 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1646,14 +1646,14 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf, { struct vm_area_struct *vma = vmf->vma; struct vfio_pci_core_device *vdev = vma->vm_private_data; - unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff; + unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1); + unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; + unsigned long pfn = vma_to_pfn(vma) + pgoff; vm_fault_t ret = VM_FAULT_SIGBUS; - pfn = vma_to_pfn(vma) + pgoff; - - if (order && (pfn & ((1 << order) - 1) || - vmf->address & ((PAGE_SIZE << order) - 1) || - vmf->address + (PAGE_SIZE << order) > vma->vm_end)) { + if (order && (addr < vma->vm_start || + addr + (PAGE_SIZE << order) > vma->vm_end || + pfn & ((1 << order) - 1))) { ret = VM_FAULT_FALLBACK; goto out; } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index f6f5a7ac7894..26bcf3a7f70c 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -627,6 +627,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) int ret; llnode = llist_del_all(&svq->completion_list); + + mutex_lock(&svq->vq.mutex); + llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) { se_cmd = &cmd->tvc_se_cmd; @@ -660,6 +663,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) vhost_scsi_release_cmd_res(se_cmd); } + mutex_unlock(&svq->vq.mutex); + if (signal) vhost_signal(&svq->vs->dev, &svq->vq); } @@ -994,39 +999,66 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus, static void vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq, - int head, unsigned int out, u8 status) + struct vhost_scsi_ctx *vc, u8 status) { - struct virtio_scsi_cmd_resp __user *resp; struct virtio_scsi_cmd_resp rsp; + struct iov_iter iov_iter; int ret; memset(&rsp, 0, sizeof(rsp)); rsp.status = status; - resp = vq->iov[out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) - vhost_add_used_and_signal(&vs->dev, vq, head, 0); + + iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, + sizeof(rsp)); + + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); + + if (likely(ret == sizeof(rsp))) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); else pr_err("Faulted on virtio_scsi_cmd_resp\n"); } +#define TYPE_IO_CMD 0 +#define TYPE_CTRL_TMF 1 +#define TYPE_CTRL_AN 2 + static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, struct vhost_virtqueue *vq, - int head, unsigned out) + struct vhost_scsi_ctx *vc, int type) { - struct virtio_scsi_cmd_resp __user *resp; - struct virtio_scsi_cmd_resp rsp; + union { + struct virtio_scsi_cmd_resp cmd; + struct virtio_scsi_ctrl_tmf_resp tmf; + struct virtio_scsi_ctrl_an_resp an; + } rsp; + struct iov_iter iov_iter; + size_t rsp_size; int ret; memset(&rsp, 0, sizeof(rsp)); - rsp.response = VIRTIO_SCSI_S_BAD_TARGET; - resp = vq->iov[out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) - vhost_add_used_and_signal(&vs->dev, vq, head, 0); + + if (type == TYPE_IO_CMD) { + rsp_size = sizeof(struct virtio_scsi_cmd_resp); + rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + } else if (type == TYPE_CTRL_TMF) { + rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); + rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET; + } else { + rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); + rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET; + } + + iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, 
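
The vfio_pci_mmap_huge_fault() change above rounds the faulting address down to the mapping size before deriving the pfn, then falls back to small pages unless both the address range and the pfn are order-aligned. A hedged sketch of that alignment rule, with a hypothetical helper name:

#include <linux/mm.h>

/*
 * Sketch: an order-N huge mapping can only be inserted if the fault
 * address rounded down to the mapping size still lies inside the VMA
 * and the backing pfn is equally aligned. Mirrors the check above;
 * the helper itself is hypothetical.
 */
static bool example_huge_fault_fits(struct vm_area_struct *vma,
				    unsigned long fault_addr,
				    unsigned long base_pfn,
				    unsigned int order)
{
	unsigned long addr = fault_addr & ~((PAGE_SIZE << order) - 1);
	unsigned long pfn = base_pfn + ((addr - vma->vm_start) >> PAGE_SHIFT);

	return addr >= vma->vm_start &&
	       addr + (PAGE_SIZE << order) <= vma->vm_end &&
	       !(pfn & ((1UL << order) - 1));
}
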
+ rsp_size); + + ret = copy_to_iter(&rsp, rsp_size, &iov_iter); + + if (likely(ret == rsp_size)) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); else - pr_err("Faulted on virtio_scsi_cmd_resp\n"); + pr_err("Faulted on virtio scsi type=%d\n", type); } static int @@ -1390,9 +1422,9 @@ err: if (ret == -ENXIO) break; else if (ret == -EIO) - vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); + vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD); else if (ret == -ENOMEM) - vhost_scsi_send_status(vs, vq, vc.head, vc.out, + vhost_scsi_send_status(vs, vq, &vc, SAM_STAT_TASK_SET_FULL); } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); out: @@ -1432,8 +1464,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work) else resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED; + mutex_lock(&tmf->svq->vq.mutex); vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, tmf->vq_desc, &tmf->resp_iov, resp_code); + mutex_unlock(&tmf->svq->vq.mutex); + vhost_scsi_release_tmf_res(tmf); } @@ -1623,7 +1658,10 @@ err: if (ret == -ENXIO) break; else if (ret == -EIO) - vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); + vhost_scsi_send_bad_target(vs, vq, &vc, + v_req.type == VIRTIO_SCSI_T_TMF ? + TYPE_CTRL_TMF : + TYPE_CTRL_AN); } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); out: mutex_unlock(&vq->mutex); diff --git a/drivers/video/fbdev/geode/display_gx.c b/drivers/video/fbdev/geode/display_gx.c index b5f25dffd274..099322cefce0 100644 --- a/drivers/video/fbdev/geode/display_gx.c +++ b/drivers/video/fbdev/geode/display_gx.c @@ -13,6 +13,7 @@ #include <asm/io.h> #include <asm/div64.h> #include <asm/delay.h> +#include <asm/msr.h> #include <linux/cs5535.h> #include "gxfb.h" diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index af996634c1a9..8d69be7c9d31 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ -29,6 +29,7 @@ #include <linux/pci.h> #include <linux/cs5535.h> +#include <asm/msr.h> #include <asm/olpc.h> #include "gxfb.h" @@ -377,7 +378,7 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Figure out if this is a TFT or CRT part */ - rdmsrl(MSR_GX_GLD_MSR_CONFIG, val); + rdmsrq(MSR_GX_GLD_MSR_CONFIG, val); if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP) par->enable_crt = 0; diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c index 32baaf59fcf7..2e33da9849b0 100644 --- a/drivers/video/fbdev/geode/lxfb_ops.c +++ b/drivers/video/fbdev/geode/lxfb_ops.c @@ -11,6 +11,7 @@ #include <linux/delay.h> #include <linux/cs5535.h> +#include <asm/msr.h> #include "lxfb.h" /* TODO @@ -358,7 +359,7 @@ void lx_set_mode(struct fb_info *info) /* Set output mode */ - rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval); + rdmsrq(MSR_LX_GLD_MSR_CONFIG, msrval); msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT; if (par->output & OUTPUT_PANEL) { @@ -371,7 +372,7 @@ void lx_set_mode(struct fb_info *info) } else msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT; - wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval); + wrmsrq(MSR_LX_GLD_MSR_CONFIG, msrval); /* Clear the various buffers */ /* FIXME: Adjust for panning here */ @@ -419,7 +420,7 @@ void lx_set_mode(struct fb_info *info) /* Set default watermark values */ - rdmsrl(MSR_LX_SPARE_MSR, msrval); + rdmsrq(MSR_LX_SPARE_MSR, msrval); msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO | MSR_LX_SPARE_MSR_VFIFO_ARB_SEL @@ -427,7 +428,7 @@ void lx_set_mode(struct fb_info *info) | MSR_LX_SPARE_MSR_WM_LPEN_OVRD); msrval |= 
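
The vhost-scsi hunks above stop assuming the whole response fits in the first guest iovec and instead stream it through an iov_iter spanning all of the "in" descriptors. A minimal sketch of that pattern, with a hypothetical wrapper name:

#include <linux/uio.h>
#include "vhost.h"	/* struct vhost_virtqueue */

/*
 * Sketch: write a response into the guest-supplied "in" iovecs via an
 * iov_iter, as the hunks above now do, instead of __copy_to_user() on
 * a single iov_base. in_start is the index of the first "in" iovec,
 * in_count the number of such iovecs; the wrapper is hypothetical.
 */
static int example_copy_resp_to_guest(struct vhost_virtqueue *vq,
				      unsigned int in_start,
				      unsigned int in_count,
				      const void *rsp, size_t rsp_size)
{
	struct iov_iter iter;

	iov_iter_init(&iter, ITER_DEST, &vq->iov[in_start], in_count, rsp_size);
	if (copy_to_iter(rsp, rsp_size, &iter) != rsp_size)
		return -EFAULT;	/* guest buffers too small or faulted */
	return 0;
}
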
MSR_LX_SPARE_MSR_DIS_VIFO_WM | MSR_LX_SPARE_MSR_DIS_INIT_V_PRI; - wrmsrl(MSR_LX_SPARE_MSR, msrval); + wrmsrq(MSR_LX_SPARE_MSR, msrval); gcfg = DC_GENERAL_CFG_DFLE; /* Display fifo enable */ gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */ @@ -591,10 +592,10 @@ static void lx_save_regs(struct lxfb_par *par) } while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE)); /* save MSRs */ - rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel); - rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll); - rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); - rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare); + rdmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel); + rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll); + rdmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); + rdmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare); write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK); @@ -664,7 +665,7 @@ static void lx_restore_display_ctlr(struct lxfb_par *par) uint32_t filt; int i; - wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare); + wrmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare); for (i = 0; i < ARRAY_SIZE(par->dc); i++) { switch (i) { @@ -729,8 +730,8 @@ static void lx_restore_video_proc(struct lxfb_par *par) { int i; - wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); - wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel); + wrmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); + wrmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel); for (i = 0; i < ARRAY_SIZE(par->vp); i++) { switch (i) { diff --git a/drivers/video/fbdev/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c index 8c49d4e98772..73307f42e343 100644 --- a/drivers/video/fbdev/geode/suspend_gx.c +++ b/drivers/video/fbdev/geode/suspend_gx.c @@ -21,8 +21,8 @@ static void gx_save_regs(struct gxfb_par *par) } while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY)); /* save MSRs */ - rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel); - rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll); + rdmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel); + rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll); write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK); @@ -43,14 +43,14 @@ static void gx_set_dotpll(uint32_t dotpll_hi) uint32_t dotpll_lo; int i; - rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo); + rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo); dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET; dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS; wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi); /* wait for the PLL to lock */ for (i = 0; i < 200; i++) { - rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo); + rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo); if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK) break; udelay(1); @@ -133,7 +133,7 @@ static void gx_restore_video_proc(struct gxfb_par *par) { int i; - wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel); + wrmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel); for (i = 0; i < ARRAY_SIZE(par->vp); i++) { switch (i) { diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 91dac2aa247c..5717c3356949 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -142,8 +142,8 @@ void gx_set_dclk_frequency(struct fb_info *info) } } - rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); - rdmsrl(MSR_GLCP_DOTPLL, dotpll); + rdmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll); + rdmsrq(MSR_GLCP_DOTPLL, dotpll); /* Program new M, N and P. */ dotpll &= 0x00000000ffffffffull; @@ -151,7 +151,7 @@ void gx_set_dclk_frequency(struct fb_info *info) dotpll |= MSR_GLCP_DOTPLL_DOTRESET; dotpll &= ~MSR_GLCP_DOTPLL_BYPASS; - wrmsrl(MSR_GLCP_DOTPLL, dotpll); + wrmsrq(MSR_GLCP_DOTPLL, dotpll); /* Program dividers. 
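
The geode fbdev hunks above are a mechanical rename from the old rdmsrl()/wrmsrl() accessors to rdmsrq()/wrmsrq(), plus the now-required <asm/msr.h> include. A small sketch of the read-modify-write pattern they use, with a placeholder MSR:

#include <asm/msr.h>

/*
 * Sketch: the 64-bit MSR read-modify-write idiom used throughout the
 * geode hunks above, spelled with the renamed accessors. The MSR
 * number and bit are placeholders.
 */
static void example_msr_set_bit(unsigned int msr, u64 bit)
{
	u64 val;

	rdmsrq(msr, val);
	val |= bit;
	wrmsrq(msr, val);
}
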
*/ sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 @@ -159,15 +159,15 @@ void gx_set_dclk_frequency(struct fb_info *info) | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 ); sys_rstpll |= pll_table[best_i].sys_rstpll_bits; - wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); + wrmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll); /* Clear reset bit to start PLL. */ dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET); - wrmsrl(MSR_GLCP_DOTPLL, dotpll); + wrmsrq(MSR_GLCP_DOTPLL, dotpll); /* Wait for LOCK bit. */ do { - rdmsrl(MSR_GLCP_DOTPLL, dotpll); + rdmsrq(MSR_GLCP_DOTPLL, dotpll); } while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK)); } @@ -180,10 +180,10 @@ gx_configure_tft(struct fb_info *info) /* Set up the DF pad select MSR */ - rdmsrl(MSR_GX_MSR_PADSEL, val); + rdmsrq(MSR_GX_MSR_PADSEL, val); val &= ~MSR_GX_MSR_PADSEL_MASK; val |= MSR_GX_MSR_PADSEL_TFT; - wrmsrl(MSR_GX_MSR_PADSEL, val); + wrmsrq(MSR_GX_MSR_PADSEL, val); /* Turn off the panel */ diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 150753c3b578..95d5d7993e5b 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -407,6 +407,12 @@ static void virtio_dev_shutdown(struct device *_d) if (!drv) return; + /* If the driver has its own shutdown method, use that. */ + if (drv->shutdown) { + drv->shutdown(dev); + return; + } + /* * Some devices get wedged if you kick them after they are * reset. Mark all vqs as broken to make sure we don't. diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 5eaade757860..d50fe030d825 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -247,7 +247,7 @@ virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev) sg_init_one(&data_sg, get_data, sizeof(*get_data)); sg_init_one(&result_sg, result, sizeof(*result)); cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET); - cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF); cmd.data_sg = &data_sg; cmd.result_sg = &result_sg; ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); @@ -305,7 +305,7 @@ static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev) sg_init_one(&result_sg, data, sizeof(*data)); cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY); - cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF); cmd.result_sg = &result_sg; ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index fdd2d2b07b5a..b784aab66867 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2650,7 +2650,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) struct vring_virtqueue *vq = to_vvq(_vq); if (vq->event_triggered) - vq->event_triggered = false; + data_race(vq->event_triggered = false); return vq->packed_ring ? 
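
virtio_dev_shutdown() above now prefers a driver-provided ->shutdown() callback over the generic reset-and-mark-vqs-broken fallback. A hedged sketch of a driver wiring that up; only the existence of the hook is taken from the hunk above, the driver itself is hypothetical and a real one would also set .probe, .remove and .id_table:

#include <linux/virtio.h>

/* Sketch: quiesce the device on shutdown, e.g. stop queues and flush work. */
static void example_vdev_shutdown(struct virtio_device *vdev)
{
	/* device-specific quiesce goes here */
}

static struct virtio_driver example_virtio_driver = {
	.driver.name	= "example-virtio",
	.shutdown	= example_vdev_shutdown,
	/* .probe, .remove, .id_table omitted in this sketch */
};
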
virtqueue_enable_cb_delayed_packed(_vq) : virtqueue_enable_cb_delayed_split(_vq); diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c index 1cae9b243ff8..76026d615111 100644 --- a/drivers/w1/slaves/w1_ds2406.c +++ b/drivers/w1/slaves/w1_ds2406.c @@ -29,8 +29,6 @@ static ssize_t w1_f12_read_state( { u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0}; struct w1_slave *sl = kobj_to_w1_slave(kobj); - u16 crc = 0; - int i; ssize_t rtnval = 1; if (off != 0) @@ -47,9 +45,7 @@ static ssize_t w1_f12_read_state( w1_write_block(sl->master, w1_buf, 3); w1_read_block(sl->master, w1_buf+3, 3); - for (i = 0; i < 6; i++) - crc = crc16_byte(crc, w1_buf[i]); - if (crc == 0xb001) /* good read? */ + if (crc16(0, w1_buf, sizeof(w1_buf)) == 0xb001) /* good read? */ *buf = ((w1_buf[3]>>5)&3)|0x30; else rtnval = -EIO; @@ -66,8 +62,6 @@ static ssize_t w1_f12_write_output( { struct w1_slave *sl = kobj_to_w1_slave(kobj); u8 w1_buf[6] = {W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0}; - u16 crc = 0; - int i; ssize_t rtnval = 1; if (count != 1 || off != 0) @@ -83,9 +77,7 @@ static ssize_t w1_f12_write_output( w1_buf[3] = (((*buf)&3)<<5)|0x1F; w1_write_block(sl->master, w1_buf, 4); w1_read_block(sl->master, w1_buf+4, 2); - for (i = 0; i < 6; i++) - crc = crc16_byte(crc, w1_buf[i]); - if (crc == 0xb001) /* good read? */ + if (crc16(0, w1_buf, sizeof(w1_buf)) == 0xb001) /* good read? */ w1_write_8(sl->master, 0xFF); else rtnval = -EIO; diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index 76dffc89c641..887d5a6c155b 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -29,26 +29,13 @@ #include <linux/watchdog.h> #include <asm/machine.h> #include <asm/ebcdic.h> +#include <asm/diag288.h> #include <asm/diag.h> #include <linux/io.h> #define MAX_CMDLEN 240 #define DEFAULT_CMD "SYSTEM RESTART" -#define MIN_INTERVAL 15 /* Minimal time supported by diag88 */ -#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ - -#define WDT_DEFAULT_TIMEOUT 30 - -/* Function codes - init, change, cancel */ -#define WDT_FUNC_INIT 0 -#define WDT_FUNC_CHANGE 1 -#define WDT_FUNC_CANCEL 2 -#define WDT_FUNC_CONCEAL 0x80000000 - -/* Action codes for LPAR watchdog */ -#define LPARWDT_RESTART 0 - static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD; static bool conceal_on; static bool nowayout_info = WATCHDOG_NOWAYOUT; @@ -75,22 +62,8 @@ static char *cmd_buf; static int diag288(unsigned int func, unsigned int timeout, unsigned long action, unsigned int len) { - union register_pair r1 = { .even = func, .odd = timeout, }; - union register_pair r3 = { .even = action, .odd = len, }; - int err; - diag_stat_inc(DIAG_STAT_X288); - - err = -EINVAL; - asm volatile( - " diag %[r1],%[r3],0x288\n" - "0: la %[err],0\n" - "1:\n" - EX_TABLE(0b, 1b) - : [err] "+d" (err) - : [r1] "d" (r1.pair), [r3] "d" (r3.pair) - : "cc", "memory"); - return err; + return __diag288(func, timeout, action, len); } static int diag288_str(unsigned int func, unsigned int timeout, char *cmd) @@ -189,8 +162,6 @@ static struct watchdog_device wdt_dev = { static int __init diag288_init(void) { - int ret; - watchdog_set_nowayout(&wdt_dev, nowayout_info); if (machine_is_vm()) { @@ -199,24 +170,6 @@ static int __init diag288_init(void) pr_err("The watchdog cannot be initialized\n"); return -ENOMEM; } - - ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN"); - if (ret != 0) { - pr_err("The watchdog cannot be initialized\n"); - kfree(cmd_buf); - return -EINVAL; - } - } else { - if 
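
The DS2406 hunks above replace the open-coded crc16_byte() loop with a single crc16() call. Because the device appends the inverted CRC-16 of the preceding bytes, running crc16() over data plus CRC yields the fixed residue 0xb001 on a good transfer; a minimal sketch of that check:

#include <linux/crc16.h>
#include <linux/types.h>

/*
 * Sketch: validate a 1-Wire transfer whose last two bytes are the
 * inverted CRC-16 of the earlier bytes, as the hunks above now do.
 */
static bool example_w1_crc_ok(const u8 *buf, size_t len_with_crc)
{
	return crc16(0, buf, len_with_crc) == 0xb001;
}
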
(diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT, - LPARWDT_RESTART, 0)) { - pr_err("The watchdog cannot be initialized\n"); - return -EINVAL; - } - } - - if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) { - pr_err("The watchdog cannot be deactivated\n"); - return -EINVAL; } return watchdog_register_device(&wdt_dev); @@ -228,5 +181,5 @@ static void __exit diag288_exit(void) kfree(cmd_buf); } -module_init(diag288_init); +module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init); module_exit(diag288_exit); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index f7d6f47971fd..24f485827e03 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -278,7 +278,7 @@ config XEN_PRIVCMD_EVENTFD config XEN_ACPI_PROCESSOR tristate "Xen ACPI processor" - depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ + depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ default m help This ACPI processor uploads Power Management information to the Xen diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 65d4e7fa1eb8..8c852807ba1c 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -679,7 +679,7 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages) } EXPORT_SYMBOL(xen_free_ballooned_pages); -static void __init balloon_add_regions(void) +static int __init balloon_add_regions(void) { unsigned long start_pfn, pages; unsigned long pfn, extra_pfn_end; @@ -702,26 +702,38 @@ static void __init balloon_add_regions(void) for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) balloon_append(pfn_to_page(pfn)); - balloon_stats.total_pages += extra_pfn_end - start_pfn; + /* + * Extra regions are accounted for in the physmap, but need + * decreasing from current_pages to balloon down the initial + * allocation, because they are already accounted for in + * total_pages. + */ + if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) { + WARN(1, "Extra pages underflow current target"); + return -ERANGE; + } + balloon_stats.current_pages -= extra_pfn_end - start_pfn; } + + return 0; } static int __init balloon_init(void) { struct task_struct *task; + int rc; if (!xen_domain()) return -ENODEV; pr_info("Initialising balloon driver\n"); -#ifdef CONFIG_XEN_PV - balloon_stats.current_pages = xen_pv_domain() - ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn) - : get_num_physpages(); -#else - balloon_stats.current_pages = get_num_physpages(); -#endif + if (xen_released_pages >= get_num_physpages()) { + WARN(1, "Released pages underflow current target"); + return -ERANGE; + } + + balloon_stats.current_pages = get_num_physpages() - xen_released_pages; balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; @@ -738,7 +750,9 @@ static int __init balloon_init(void) register_sysctl_init("xen/balloon", balloon_table); #endif - balloon_add_regions(); + rc = balloon_add_regions(); + if (rc) + return rc; task = kthread_run(balloon_thread, NULL, "xen-balloon"); if (IS_ERR(task)) { diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 1f65795cf5d7..ef56a2500ed6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -217,6 +217,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, * buffering it. 
*/ if (dma_capable(dev, dev_addr, size, true) && + !dma_kmalloc_needs_bounce(dev, size, dir) && !range_straddles_page_boundary(phys, size) && !xen_arch_need_swiotlb(dev, phys, dev_addr) && !is_swiotlb_force_bounce(dev)) diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 13821e7e825e..9ac0427724a3 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -77,6 +77,7 @@ enum xb_req_state { struct xb_req_data { struct list_head list; wait_queue_head_t wq; + struct kref kref; struct xsd_sockmsg msg; uint32_t caller_req_id; enum xsd_sockmsg_type type; @@ -103,6 +104,7 @@ int xb_init_comms(void); void xb_deinit_comms(void); int xs_watch_msg(struct xs_watch_event *event); void xs_request_exit(struct xb_req_data *req); +void xs_free_req(struct kref *kref); int xenbus_match(struct device *_dev, const struct device_driver *_drv); int xenbus_dev_probe(struct device *_dev); diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index e5fda0256feb..82df2da1b880 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -309,8 +309,8 @@ static int process_msg(void) virt_wmb(); req->state = xb_req_state_got_reply; req->cb(req); - } else - kfree(req); + } + kref_put(&req->kref, xs_free_req); } mutex_unlock(&xs_response_mutex); @@ -386,14 +386,13 @@ static int process_writes(void) state.req->msg.type = XS_ERROR; state.req->err = err; list_del(&state.req->list); - if (state.req->state == xb_req_state_aborted) - kfree(state.req); - else { + if (state.req->state != xb_req_state_aborted) { /* write err, then update state */ virt_wmb(); state.req->state = xb_req_state_got_reply; wake_up(&state.req->wq); } + kref_put(&state.req->kref, xs_free_req); mutex_unlock(&xb_write_mutex); diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 46f8916597e5..f5c21ba64df5 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req) mutex_unlock(&u->reply_mutex); kfree(req->body); - kfree(req); + kref_put(&req->kref, xs_free_req); kref_put(&u->kref, xenbus_file_free); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 6d32ffb01136..86fe6e779056 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -966,9 +966,15 @@ static int __init xenbus_init(void) if (xen_pv_domain()) xen_store_domain_type = XS_PV; if (xen_hvm_domain()) + { xen_store_domain_type = XS_HVM; - if (xen_hvm_domain() && xen_initial_domain()) - xen_store_domain_type = XS_LOCAL; + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (err) + goto out_error; + xen_store_evtchn = (int)v; + if (!v && xen_initial_domain()) + xen_store_domain_type = XS_LOCAL; + } if (xen_pv_domain() && !xen_start_info->store_evtchn) xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) @@ -987,10 +993,6 @@ static int __init xenbus_init(void) xen_store_interface = gfn_to_virt(xen_store_gfn); break; case XS_HVM: - err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); - if (err) - goto out_error; - xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index fcb335bb7b18..6d1819269cbe 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ 
b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -513,4 +513,5 @@ static int __init boot_wait_for_devices(void) late_initcall(boot_wait_for_devices); #endif +MODULE_DESCRIPTION("Xen PV-device frontend support"); MODULE_LICENSE("GPL"); diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index d32c726f7a12..dcf9182c8451 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -112,6 +112,12 @@ static void xs_suspend_exit(void) wake_up_all(&xs_state_enter_wq); } +void xs_free_req(struct kref *kref) +{ + struct xb_req_data *req = container_of(kref, struct xb_req_data, kref); + kfree(req); +} + static uint32_t xs_request_enter(struct xb_req_data *req) { uint32_t rq_id; @@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) req->caller_req_id = req->msg.req_id; req->msg.req_id = xs_request_enter(req); + /* + * Take 2nd ref. One for this thread, and the second for the + * xenbus_thread. + */ + kref_get(&req->kref); + mutex_lock(&xb_write_mutex); list_add_tail(&req->list, &xb_write_list); notify = list_is_singular(&xb_write_list); @@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg) if (req->state == xb_req_state_queued || req->state == xb_req_state_wait_reply) req->state = xb_req_state_aborted; - else - kfree(req); + + kref_put(&req->kref, xs_free_req); mutex_unlock(&xb_write_mutex); return ret; @@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) req->cb = xenbus_dev_queue_reply; req->par = par; req->user_req = true; + kref_init(&req->kref); xs_send(req, msg); @@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t, req->num_vecs = num_vecs; req->cb = xs_wake_up; req->user_req = false; + kref_init(&req->kref); msg.req_id = 0; msg.tx_id = t.id; |
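
The xenbus hunks above convert xb_req_data to kref-based lifetime management: the submitter holds one reference, a second is taken for the xenbus thread, and whichever side finishes last frees the request through xs_free_req(). A generic sketch of that two-owner pattern, using placeholder names rather than the xenbus symbols:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_req {
	struct kref kref;
	/* payload ... */
};

static void example_req_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_req, kref));
}

static struct example_req *example_req_submit(void)
{
	struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return NULL;
	kref_init(&req->kref);	/* submitter's reference */
	kref_get(&req->kref);	/* worker thread's reference */
	/* ... queue req for the worker here ... */
	return req;
}

/* Both the submitter and the worker eventually drop their reference: */
static void example_req_done(struct example_req *req)
{
	kref_put(&req->kref, example_req_release);
}
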