Diffstat (limited to 'drivers')
220 files changed, 2000 insertions, 1521 deletions
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c index a38b88baadf2..5722e4128d3c 100644 --- a/drivers/accel/habanalabs/gaudi2/gaudi2.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c @@ -10437,7 +10437,7 @@ end: (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64); WREG32(sob_addr, 0); - kfree(lin_dma_pkts_arr); + kvfree(lin_dma_pkts_arr); return rc; } diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index bf8dc92a373a..2561b045acc7 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -315,7 +315,7 @@ static void __iomem *einj_get_parameter_address(void) memcpy_fromio(&v5param, p, v5param_size); acpi5 = 1; check_vendor_extension(pa_v5, &v5param); - if (available_error_type & ACPI65_EINJV2_SUPP) { + if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) { len = v5param.einjv2_struct.length; offset = offsetof(struct einjv2_extension_struct, component_arr); max_nr_components = (len - offset) / @@ -540,6 +540,9 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, struct set_error_type_with_address *v5param; v5param = kmalloc(v5param_size, GFP_KERNEL); + if (!v5param) + return -ENOMEM; + memcpy_fromio(v5param, einj_param, v5param_size); v5param->type = type; if (type & ACPI5_VENDOR_BIT) { @@ -1091,7 +1094,7 @@ err_put_table: return rc; } -static void __exit einj_remove(struct faux_device *fdev) +static void einj_remove(struct faux_device *fdev) { struct apei_exec_context ctx; @@ -1114,15 +1117,9 @@ static void __exit einj_remove(struct faux_device *fdev) } static struct faux_device *einj_dev; -/* - * einj_remove() lives in .exit.text. For drivers registered via - * platform_driver_probe() this is ok because they cannot get unbound at - * runtime. So mark the driver struct with __refdata to prevent modpost - * triggering a section mismatch warning. 
- */ -static struct faux_device_ops einj_device_ops __refdata = { +static struct faux_device_ops einj_device_ops = { .probe = einj_probe, - .remove = __exit_p(einj_remove), + .remove = einj_remove, }; static int __init einj_init(void) diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c index 318683744ed1..11b1c2828005 100644 --- a/drivers/acpi/pfr_update.c +++ b/drivers/acpi/pfr_update.c @@ -329,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, if (type == PFRU_CODE_INJECT_TYPE) return payload_hdr->rt_ver >= cap->code_rt_version; - return payload_hdr->rt_ver >= cap->drv_rt_version; + return payload_hdr->svn_ver >= cap->drv_svn; } static void print_update_debug_info(struct pfru_updated_result *result, diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index eeae160c898d..fa3c76a2b49d 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci) return NULL; } +static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb) +{ + struct atmtcp_hdr *hdr; + + if (skb->len < sizeof(struct atmtcp_hdr)) + return -EINVAL; + + hdr = (struct atmtcp_hdr *)skb->data; + if (hdr->length == ATMTCP_HDR_MAGIC) + return -EINVAL; + + return 0; +} static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) { @@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) struct sk_buff *new_skb; int result = 0; - if (skb->len < sizeof(struct atmtcp_hdr)) - goto done; - dev = vcc->dev_data; hdr = (struct atmtcp_hdr *) skb->data; if (hdr->length == ATMTCP_HDR_MAGIC) { @@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = { static const struct atmdev_ops atmtcp_c_dev_ops = { .close = atmtcp_c_close, + .pre_send = atmtcp_c_pre_send, .send = atmtcp_c_send }; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index dbf5456cd891..2ea6e05e6ec9 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -675,7 +675,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func) idx = device_links_read_lock(); /* Start processing the device's "async" consumers. */ - list_for_each_entry_rcu(link, &dev->links.consumers, s_node) + list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_async_with_cleanup(link->consumer, func); @@ -1330,7 +1330,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func) idx = device_links_read_lock(); /* Start processing the device's "async" suppliers. */ - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_async_with_cleanup(link->supplier, func); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1b6ee91f8eb9..57263c273f0f 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -137,20 +137,29 @@ static void loop_global_unlock(struct loop_device *lo, bool global) static int max_part; static int part_shift; -static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) +static loff_t lo_calculate_size(struct loop_device *lo, struct file *file) { + struct kstat stat; loff_t loopsize; + int ret; - /* Compute loopsize in bytes */ - loopsize = i_size_read(file->f_mapping->host); - if (offset > 0) - loopsize -= offset; + /* + * Get the accurate file size. 
This provides better results than + * cached inode data, particularly for network filesystems where + * metadata may be stale. + */ + ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0); + if (ret) + return 0; + + loopsize = stat.size; + if (lo->lo_offset > 0) + loopsize -= lo->lo_offset; /* offset is beyond i_size, weird but possible */ if (loopsize < 0) return 0; - - if (sizelimit > 0 && sizelimit < loopsize) - loopsize = sizelimit; + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) + loopsize = lo->lo_sizelimit; /* * Unfortunately, if we want to do I/O on the device, * the number of 512-byte sectors has to fit into a sector_t. @@ -158,11 +167,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) return loopsize >> 9; } -static loff_t get_loop_size(struct loop_device *lo, struct file *file) -{ - return get_size(lo->lo_offset, lo->lo_sizelimit, file); -} - /* * We support direct I/O only if lo_offset is aligned with the logical I/O size * of backing device, and the logical block size of loop is bigger than that of @@ -569,7 +573,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, error = -EINVAL; /* size of the new backing store needs to be the same */ - if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) + if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file)) goto out_err; /* @@ -1063,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, loop_update_dio(lo); loop_sysfs_init(lo); - size = get_loop_size(lo, file); + size = lo_calculate_size(lo, file); loop_set_size(lo, size); /* Order wrt reading lo_state in loop_validate_file(). */ @@ -1255,8 +1259,7 @@ out_unfreeze: if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); if (!err && size_changed) { - loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, - lo->lo_backing_file); + loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file); loop_set_size(lo, new_size); } out_unlock: @@ -1399,7 +1402,7 @@ static int loop_set_capacity(struct loop_device *lo) if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; - size = get_loop_size(lo, lo->lo_backing_file); + size = lo_calculate_size(lo, lo->lo_backing_file); loop_set_size(lo, size); return 0; diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c index 04b578a0be17..61f1a290ff08 100644 --- a/drivers/cdx/controller/cdx_rpmsg.c +++ b/drivers/cdx/controller/cdx_rpmsg.c @@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev) chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = rpdev->dst; - strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, - strlen(cdx_rpmsg_id_table[0].name)); + strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name)); cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo); if (!cdx_mcdi->ept) { diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index 23b7178522ae..7e2f2b1a1c36 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -1587,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev, memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(unsigned int)); } + } else { + memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) * + sizeof(unsigned int)); } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) @@ -1670,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev, memset(&data[insn->n], 0, (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); } + } else { + memset(data, 0, n_data * sizeof(unsigned int)); 
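+ /*
+  * Presumably mirrors the insnlist path above: zero the whole
+  * buffer so an instruction that fills fewer than n_data samples
+  * cannot hand stale kernel memory back to user space.
+  */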
} ret = parse_insn(dev, insn, data, file); if (ret < 0) diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index f1dc854928c1..c9ebaadc5e82 100644 --- a/drivers/comedi/drivers.c +++ b/drivers/comedi/drivers.c @@ -620,11 +620,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, unsigned int chan = CR_CHAN(insn->chanspec); unsigned int base_chan = (chan < 32) ? 0 : chan; unsigned int _data[2]; + unsigned int i; int ret; - if (insn->n == 0) - return 0; - memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; @@ -635,18 +633,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; - _data[0] = 1U << (chan - base_chan); /* mask */ - _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */ + _data[0] = 1U << (chan - base_chan); /* mask */ } + for (i = 0; i < insn->n; i++) { + if (insn->insn == INSN_WRITE) + _data[1] = data[i] ? _data[0] : 0; /* bits */ - ret = s->insn_bits(dev, s, &_insn, _data); - if (ret < 0) - return ret; + ret = s->insn_bits(dev, s, &_insn, _data); + if (ret < 0) + return ret; - if (insn->insn == INSN_READ) - data[0] = (_data[1] >> (chan - base_chan)) & 1; + if (insn->insn == INSN_READ) + data[i] = (_data[1] >> (chan - base_chan)) & 1; + } - return 1; + return insn->n; } static int __comedi_device_postconfig_async(struct comedi_device *dev, diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c index 0430630e6ebb..b542896fa0e4 100644 --- a/drivers/comedi/drivers/pcl726.c +++ b/drivers/comedi/drivers/pcl726.c @@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev, * Hook up the external trigger source interrupt only if the * user config option is valid and the board supports interrupts. */ - if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) { + if (it->options[1] > 0 && it->options[1] < 16 && + (board->irq_mask & (1U << it->options[1]))) { ret = request_irq(it->options[1], pcl726_interrupt, 0, dev->board_name, dev); if (ret == 0) { diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 81306612a5c6..b2e3d0b0a116 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -287,20 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, return 0; } - if (tick_nohz_tick_stopped()) { - /* - * If the tick is already stopped, the cost of possible short - * idle duration misprediction is much higher, because the CPU - * may be stuck in a shallow idle state for a long time as a - * result of it. In that case say we might mispredict and use - * the known time till the closest timer event for the idle - * state selection. - */ - if (predicted_ns < TICK_NSEC) - predicted_ns = data->next_timer_ns; - } else if (latency_req > predicted_ns) { - latency_req = predicted_ns; - } + /* + * If the tick is already stopped, the cost of possible short idle + * duration misprediction is much higher, because the CPU may be stuck + * in a shallow idle state for a long time as a result of it. In that + * case, say we might mispredict and use the known time till the closest + * timer event for the idle state selection. 
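+ *
+ * Illustrative numbers (not from this change): with HZ=1000,
+ * TICK_NSEC is ~1,000,000 ns. A predicted_ns of 200,000 ns would
+ * normally pick a shallow state, but with the tick stopped nothing
+ * bounds the overstay, so the firmer data->next_timer_ns is used.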
+ */ + if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC) + predicted_ns = data->next_timer_ns; /* * Find the idle state with the lowest power while satisfying @@ -316,13 +311,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx == -1) idx = i; /* first enabled state */ + if (s->exit_latency_ns > latency_req) + break; + if (s->target_residency_ns > predicted_ns) { /* * Use a physical idle state, not busy polling, unless * a timer is going to trigger soon enough. */ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && - s->exit_latency_ns <= latency_req && s->target_residency_ns <= data->next_timer_ns) { predicted_ns = s->target_residency_ns; idx = i; @@ -354,8 +351,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, return idx; } - if (s->exit_latency_ns > latency_req) - break; idx = i; } diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 0be0d569589d..b7629a0e4813 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) } } - priv->dma_nelms = - dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); - if (priv->dma_nelms == 0) { + err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); + if (err) { dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n"); - return -ENOMEM; + return err; } + priv->dma_nelms = sgt->nents; /* enable clock */ err = clk_enable(priv->clk); diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c index c13545dce349..bfb04e67c4bc 100644 --- a/drivers/gpio/gpiolib-acpi-quirks.c +++ b/drivers/gpio/gpiolib-acpi-quirks.c @@ -344,6 +344,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { .ignore_interrupt = "AMDI0030:00@8", }, }, + { + /* + * Spurious wakeups from TP_ATTN# pin + * Found in BIOS 5.35 + * https://gitlab.freedesktop.org/drm/amd/-/issues/4482 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "ASCP1A00:00@8", + }, + }, {} /* Terminating entry */ }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ff98c87b2e0b..5743ebb2f1b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -514,7 +514,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, return false; if (drm_gem_is_imported(obj)) { - struct dma_buf *dma_buf = obj->dma_buf; + struct dma_buf *dma_buf = obj->import_attach->dmabuf; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* No XGMI with non AMD GPUs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 6626a6e64ff5..d1ccbfcf21fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, */ if (!vm->is_compute_context || !vm->process_info) return 0; - if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf)) + if (!drm_gem_is_imported(obj) || + !dma_buf_is_dynamic(obj->import_attach->dmabuf)) return 0; mutex_lock_nested(&vm->process_info->lock, 1); if (!WARN_ON(!vm->process_info->eviction_fence)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0b87798daebd..c39bb06ebda1 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1283,7 +1283,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct drm_gem_object *obj = &bo->tbo.base; if (drm_gem_is_imported(obj) && bo_va->is_xgmi) { - struct dma_buf *dma_buf = obj->dma_buf; + struct dma_buf *dma_buf = obj->import_attach->dmabuf; struct drm_gem_object *gobj = dma_buf->priv; struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index cd0e2976e268..a0ca3b2c6bd8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7792,6 +7792,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; + if (WARN_ON(unlikely(!old_con_state || !new_con_state))) + return -EINVAL; + trace_amdgpu_dm_connector_atomic_check(new_con_state); if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 010172f930ae..45feb404b097 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); if (enable) { + struct dc *dc = adev->dm.dc; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) || + pr->config.replay_supported; + + /* + * IPS & self-refresh feature can cause vblank counter resets between + * vblank disable and enable. + * It may cause system stuck due to waiting for the vblank counter. + * Call this function to estimate missed vblanks by using timestamps and + * update the vblank counter in DRM. 
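+ *
+ * drm_crtc_vblank_restore() presumably derives the missed count from
+ * the last vblank timestamp and the nominal frame duration, so a
+ * waiter blocked on a target counter value makes progress again.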
+ */ + if (dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && + sr_supported && vblank->config.disable_immediate) + drm_crtc_vblank_restore(crtc); + /* vblank irq on -> Only need vupdate irq in vrr mode */ if (amdgpu_dm_crtc_vrr_active(acrtc_state)) rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 67f08495b7e6..154fd2c18e88 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id( return object_id; } - if (tbl->ucNumberOfObjects <= i) { - dm_error("Can't find connector id %d in connector table of size %d.\n", - i, tbl->ucNumberOfObjects); + if (tbl->ucNumberOfObjects <= i) return object_id; - } id = le16_to_cpu(tbl->asObjects[i].usObjectID); object_id = object_id_from_bios_object_id(id); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 2bcae0643e61..58e88778da7f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3( allocation.sPCLKInput.usFbDiv = cpu_to_le16((uint16_t)bp_params->feedback_divider); allocation.sPCLKInput.ucFracFbDiv = - (uint8_t)bp_params->fractional_feedback_divider; + (uint8_t)(bp_params->fractional_feedback_divider / 100000); allocation.sPCLKInput.ucPostDiv = (uint8_t)bp_params->pixel_clock_post_divider; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index f5ad0a177038..dbd6ef1b60a0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = { /* ClocksStateLow */ { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, /* ClocksStateNominal */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 }, /* ClocksStatePerformance */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } }; int dentist_get_divider_from_did(int did) { @@ -391,8 +391,6 @@ static void dce_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -405,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state 
from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index f8409453434c..13cf415e38e5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -120,9 +120,15 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) { + struct dc *dc = context->clk_mgr->ctx->dc; int j; int num_cfgs = 0; + pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; + pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator; + for (j = 0; j < context->stream_count; j++) { int k; @@ -164,6 +170,23 @@ void dce110_fill_display_configs( cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; + + /* Find first CRTC index and calculate its line time. + * This is necessary for DPM on SI GPUs. + */ + if (cfg->pipe_idx < pp_display_cfg->crtc_index) { + const struct dc_crtc_timing *timing = + &context->streams[0]->timing; + + pp_display_cfg->crtc_index = cfg->pipe_idx; + pp_display_cfg->line_time_in_us = + timing->h_total * 10000 / timing->pix_clk_100hz; + } + } + + if (!num_cfgs) { + pp_display_cfg->crtc_index = 0; + pp_display_cfg->line_time_in_us = 0; } pp_display_cfg->display_count = num_cfgs; @@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; - pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /* TODO: dce11.2*/ - pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - - pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; - dce110_fill_display_configs(context, pp_display_cfg); - /* TODO: is this still applicable?*/ - if (pp_display_cfg->display_count == 1) { - const struct dc_crtc_timing *timing = - &context->streams[0]->timing; - - pp_display_cfg->crtc_index = - pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; - } - if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c index 0267644717b2..a39641a0ff09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - int dprefclk_wdivider; - int dp_ref_clk_khz; - int target_div; + struct dc_context *ctx = clk_mgr_base->ctx; + int dp_ref_clk_khz = 0; - /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ - - /* Read the mmDENTIST_DISPCLK_CNTL to get the currently - * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ - REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); - - /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ - target_div = 
dentist_get_divider_from_did(dprefclk_wdivider); - - /* Calculate the current DFS clock, in kHz.*/ - dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; + if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev)) + dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency; + else + dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9ab0ee20ca6f..dcc48b5238e5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -217,11 +217,24 @@ static bool create_links( connectors_num, num_virtual_links); - // condition loop on link_count to allow skipping invalid indices + /* When getting the number of connectors, the VBIOS reports the number of valid indices, + * but it doesn't say which indices are valid, and not every index has an actual connector. + * So, if we don't find a connector on an index, that is not an error. + * + * - There is no guarantee that the first N indices will be valid + * - VBIOS may report a higher amount of valid indices than there are actual connectors + * - Some VBIOS have valid configurations for more connectors than there actually are + * on the card. This may be because the manufacturer used the same VBIOS for different + * variants of the same card. 
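+ *
+ * Hypothetical example: one VBIOS shared by three variants of a board
+ * may describe five connector objects while this variant only wires
+ * up three, so two indices yield CONNECTOR_ID_UNKNOWN and are skipped.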
+ */ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { + struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i); struct link_init_data link_init_params = {0}; struct dc_link *link; + if (connector_id.id == CONNECTOR_ID_UNKNOWN) + continue; + DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); link_init_params.ctx = dc->ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 4a9d07c31bc5..0c50fe266c8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -896,13 +896,13 @@ void dce110_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); @@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index e7a318e26d38..fcd3d86ad517 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -4,7 +4,6 @@ #include "dc.h" #include "dc_dmub_srv.h" -#include "dc_dp_types.h" #include "dmub/dmub_srv.h" #include "core_types.h" #include "dmub_replay.h" @@ -44,45 +43,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s /* * Enable/Disable Replay. 
*/ -static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst, - struct dc_link *link) +static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; uint32_t retry_count; enum replay_state state = REPLAY_STATE_0; - struct pipe_ctx *pipe_ctx = NULL; - struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; - uint8_t i; memset(&cmd, 0, sizeof(cmd)); cmd.replay_enable.header.type = DMUB_CMD__REPLAY; cmd.replay_enable.data.panel_inst = panel_inst; cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; - if (enable) { + if (enable) cmd.replay_enable.data.enable = REPLAY_ENABLE; - // hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay - if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { - for (i = 0; i < MAX_PIPES; i++) { - if (res_ctx && - res_ctx->pipe_ctx[i].stream && - res_ctx->pipe_ctx[i].stream->link && - res_ctx->pipe_ctx[i].stream->link == link && - res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { - pipe_ctx = &res_ctx->pipe_ctx[i]; - //TODO: refactor for multi edp support - break; - } - } - - if (!pipe_ctx) - return; - - cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; - } - } else + else cmd.replay_enable.data.enable = REPLAY_DISABLE; cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); @@ -174,17 +149,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, copy_settings_data->digbe_inst = replay_context->digbe_inst; copy_settings_data->digfe_inst = replay_context->digfe_inst; - if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { - if (pipe_ctx->stream_res.hpo_dp_stream_enc) - copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - else - copy_settings_data->hpo_stream_enc_inst = 0; - if (pipe_ctx->link_res.hpo_dp_link_enc) - copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; - else - copy_settings_data->hpo_link_enc_inst = 0; - } - if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else @@ -247,7 +211,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, pCmd->header.type = DMUB_CMD__REPLAY; pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); - pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index ccbe385e132c..e6346c0ffc0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -19,7 +19,7 @@ struct dmub_replay_funcs { void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst); void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait, - uint8_t panel_inst, struct dc_link *link); + uint8_t panel_inst); bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link, struct replay_context *replay_context, 
uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index e7927b8f5ba3..98ec9b5a559c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -944,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, // TODO: Handle mux change case if force_static is set // If force_static is set, just change the replay_allow_active state directly if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link); + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); link->replay_settings.replay_allow_active = *allow_active; } diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index c587b3441e07..6a69a788abe8 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -4048,14 +4048,6 @@ struct dmub_cmd_replay_copy_settings_data { */ uint8_t digbe_inst; /** - * @hpo_stream_enc_inst: HPO stream encoder instance - */ - uint8_t hpo_stream_enc_inst; - /** - * @hpo_link_enc_inst: HPO link encoder instance - */ - uint8_t hpo_link_enc_inst; - /** * AUX HW instance. */ uint8_t aux_inst; @@ -4159,18 +4151,6 @@ struct dmub_rb_cmd_replay_enable_data { * This does not support HDMI/DP2 for now. */ uint8_t phy_rate; - /** - * @hpo_stream_enc_inst: HPO stream encoder instance - */ - uint8_t hpo_stream_enc_inst; - /** - * @hpo_link_enc_inst: HPO link encoder instance - */ - uint8_t hpo_link_enc_inst; - /** - * @pad: Align structure to 4 byte boundary. 
- */ - uint8_t pad[2]; }; /** diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index e58e7b93810b..6b7db8ec9a53 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_FAILURE; } + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; mutex_lock(&psp->hdcp_context.mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 3aea32baea3d..f32474af90b3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1697,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *min_power_limit) { struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; PPTable_t *pptable = table_context->driver_pptable; CustomSkuTable_t *skutable = &pptable->CustomSkuTable; - uint32_t power_limit; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; if (smu_v14_0_get_current_power_limit(smu, &power_limit)) @@ -1712,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (max_power_limit) - *max_power_limit = msg_limit; + if (powerplay_table) { + if (smu->od_enabled && + smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); + + if (max_power_limit) { + *max_power_limit = msg_limit * (100 + od_percent_upper); + *max_power_limit /= 100; + } - if (min_power_limit) - *min_power_limit = 0; + if (min_power_limit) { + *min_power_limit = power_limit * (100 + od_percent_lower); + *min_power_limit /= 100; + } return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index ed35e567d117..efe534977d12 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1474,8 +1474,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge, &analogix_dp_bridge_funcs); - if (!dp) - return ERR_PTR(-ENOMEM); + if (IS_ERR(dp)) + return ERR_CAST(dp); dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index bbc7fecb6f4a..74d949995a72 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -2432,6 +2432,8 @@ static const struct drm_gpuvm_ops lock_ops = { * * The expected usage is: * + * .. 
code-block:: c + * * vm_bind { * struct drm_exec exec; * diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 09a9b452e8b7..50c286c5cee8 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -381,6 +381,26 @@ struct DecFifo { len: usize, } +// On arm32 architecture, dividing an `u64` by a constant will generate a call +// to `__aeabi_uldivmod` which is not present in the kernel. +// So use the multiply by inverse method for this architecture. +fn div10(val: u64) -> u64 { + if cfg!(target_arch = "arm") { + let val_h = val >> 32; + let val_l = val & 0xFFFFFFFF; + let b_h: u64 = 0x66666666; + let b_l: u64 = 0x66666667; + + let tmp1 = val_h * b_l + ((val_l * b_l) >> 32); + let tmp2 = val_l * b_h + (tmp1 & 0xffffffff); + let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32); + + tmp3 >> 2 + } else { + val / 10 + } +} + impl DecFifo { fn push(&mut self, data: u64, len: usize) { let mut chunk = data; @@ -389,7 +409,7 @@ impl DecFifo { } for i in 0..len { self.decimals[i] = (chunk % 10) as u8; - chunk /= 10; + chunk = div10(chunk); } self.len += len; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c index 74f7832ea53e..0726cb5b736e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c @@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp) return hibmc_dp_link_reduce_rate(dp); } +static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp) +{ + dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; + if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate) + dp->link.cap.link_rate = DP_LINK_BW_8_1; + + dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX) + dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX; +} + int hibmc_dp_link_training(struct hibmc_dp_dev *dp) { struct hibmc_dp_link *link = &dp->link; @@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp) if (ret) drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret); - dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; - dp->link.cap.lanes = 0x2; + hibmc_dp_update_caps(dp); ret = hibmc_dp_get_serdes_rate_cfg(dp); if (ret < 0) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 768b97f9e74a..289304500ab0 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -32,7 +32,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops); -static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" }; +static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" }; static irqreturn_t hibmc_interrupt(int irq, void *arg) { @@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = { static int hibmc_kms_init(struct hibmc_drm_private *priv) { struct drm_device *dev = &priv->dev; + struct drm_encoder *encoder; + u32 clone_mask = 0; int ret; ret = drmm_mode_config_init(dev); @@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) return ret; } + drm_for_each_encoder(encoder, dev) + clone_mask |= drm_encoder_mask(encoder); + + drm_for_each_encoder(encoder, dev) + encoder->possible_clones = clone_mask; + return 0; } @@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev) static int hibmc_msi_init(struct drm_device *dev) { struct pci_dev *pdev = 
to_pci_dev(dev->dev); - char name[32] = {0}; int valid_irq_num; int irq; int ret; @@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev) valid_irq_num = ret; for (int i = 0; i < valid_irq_num; i++) { - snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s", - dev->driver->name, pci_name(pdev), g_irqs_names_map[i]); - irq = pci_irq_vector(pdev, i); if (i) @@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev) ret = devm_request_threaded_irq(&pdev->dev, irq, hibmc_dp_interrupt, hibmc_dp_hpd_isr, - IRQF_SHARED, name, dev); + IRQF_SHARED, g_irqs_names_map[i], dev); else ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt, - IRQF_SHARED, name, dev); + IRQF_SHARED, g_irqs_names_map[i], dev); if (ret) { drm_err(dev, "install irq failed: %d\n", ret); return ret; @@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev) ret = hibmc_hw_init(priv); if (ret) - goto err; + return ret; ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (ret) { drm_err(dev, "Error initializing VRAM MM; %d\n", ret); - goto err; + return ret; } ret = hibmc_kms_init(priv); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 274feabe7df0..ca8502e2760c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector); +void hibmc_ddc_del(struct hibmc_vdac *vdac); int hibmc_dp_init(struct hibmc_drm_private *priv); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c index 99b3b77b5445..44860011855e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac) return i2c_bit_add_bus(&vdac->adapter); } + +void hibmc_ddc_del(struct hibmc_vdac *vdac) +{ + i2c_del_adapter(&vdac->adapter); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index e8a527ede854..841e81f47b68 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector) { struct hibmc_vdac *vdac = to_hibmc_vdac(connector); - i2c_del_adapter(&vdac->adapter); + hibmc_ddc_del(vdac); drm_connector_cleanup(connector); } @@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL); if (ret) { drm_err(dev, "failed to init encoder: %d\n", ret); - return ret; + goto err; } drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); @@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) &vdac->adapter); if (ret) { drm_err(dev, "failed to init connector: %d\n", ret); - return ret; + goto err; } drm_connector_helper_add(connector, &hibmc_connector_helper_funcs); @@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; + +err: + hibmc_ddc_del(vdac); + + return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 
fb25ec8adae3..68157f177b6a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl) if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; + intel_display_rpm_assert_block(display); + iir = intel_de_read(display, GEN11_GU_MISC_IIR); if (likely(iir)) intel_de_write(display, GEN11_GU_MISC_IIR, iir); + intel_display_rpm_assert_unblock(display); + return iir; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 3bc57579fe53..668ef139391b 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,6 +23,7 @@ #include "intel_modeset_lock.h" #include "intel_tc.h" +#define DP_PIN_ASSIGNMENT_NONE 0x0 #define DP_PIN_ASSIGNMENT_C 0x3 #define DP_PIN_ASSIGNMENT_D 0x4 #define DP_PIN_ASSIGNMENT_E 0x5 @@ -66,6 +67,7 @@ struct intel_tc_port { enum tc_port_mode init_mode; enum phy_fia phy_fia; u8 phy_fia_idx; + u8 max_lane_count; }; static enum intel_display_power_domain @@ -307,6 +309,8 @@ static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val); switch (pin_assignment) { + case DP_PIN_ASSIGNMENT_NONE: + return 0; default: MISSING_CASE(pin_assignment); fallthrough; @@ -365,12 +369,12 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) } } -int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) +static int get_max_lane_count(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - struct intel_tc_port *tc = to_tc_port(dig_port); + struct intel_display *display = to_intel_display(tc->dig_port); + struct intel_digital_port *dig_port = tc->dig_port; - if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) + if (tc->mode != TC_PORT_DP_ALT) return 4; assert_tc_cold_blocked(tc); @@ -384,6 +388,25 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) return intel_tc_port_get_max_lane_count(dig_port); } +static void read_pin_configuration(struct intel_tc_port *tc) +{ + tc->max_lane_count = get_max_lane_count(tc); +} + +int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) +{ + struct intel_display *display = to_intel_display(dig_port); + struct intel_tc_port *tc = to_tc_port(dig_port); + + if (!intel_encoder_is_tc(&dig_port->base)) + return 4; + + if (DISPLAY_VER(display) < 20) + return get_max_lane_count(tc); + + return tc->max_lane_count; +} + void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { @@ -596,9 +619,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + __tc_cold_unblock(tc, domain, tc_cold_wref); } @@ -656,8 +682,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if ((!tc_phy_is_ready(tc) || !icl_tc_phy_take_ownership(tc, true)) && @@ -668,6 +697,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, goto out_unblock_tc_cold; } + read_pin_configuration(tc); if 
(!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -858,9 +888,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) port_wakeref = intel_display_power_get(display, port_power_domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + intel_display_power_put(display, port_power_domain, port_wakeref); } @@ -873,6 +906,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) if (tc->mode == TC_PORT_TBT_ALT) { tc->lock_wakeref = tc_cold_block(tc); + + read_pin_configuration(tc); + return true; } @@ -894,6 +930,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_unblock_tc_cold; @@ -1124,9 +1162,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + /* + * Set a valid lane count value for a DP-alt sink which got + * disconnected. The driver can only disable the output on this PHY. + */ + if (tc->max_lane_count == 0) + tc->max_lane_count = 4; + } + drm_WARN_ON(display->drm, (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && !xelpdp_tc_phy_tcss_power_is_enabled(tc)); @@ -1138,14 +1185,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) goto out_unblock_tccold; xelpdp_tc_phy_take_ownership(tc, true); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -1226,14 +1278,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc) tc->phy_ops->get_hw_state(tc); } -static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, - bool phy_is_ready, bool phy_is_owned) +/* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? 
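+ * On display version 20+ the PHY-ready flag is not required for this,
+ * so ownership alone decides (see the DISPLAY_VER(display) < 20 split
+ * in the body below).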
*/ +static bool tc_phy_owned_by_display(struct intel_tc_port *tc, + bool phy_is_ready, bool phy_is_owned) { struct intel_display *display = to_intel_display(tc->dig_port); - drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); + if (DISPLAY_VER(display) < 20) { + drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); - return phy_is_ready && phy_is_owned; + return phy_is_ready && phy_is_owned; + } else { + return phy_is_owned; + } } static bool tc_phy_is_connected(struct intel_tc_port *tc, @@ -1244,7 +1301,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, bool phy_is_owned = tc_phy_is_owned(tc); bool is_connected; - if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) + if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; else is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; @@ -1352,7 +1409,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc) phy_is_ready = tc_phy_is_ready(tc); phy_is_owned = tc_phy_is_owned(tc); - if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { + if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) { mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); } else { drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT); @@ -1441,11 +1498,11 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc, intel_display_power_flush_work(display); if (!intel_tc_cold_requires_aux_pw(dig_port)) { enum intel_display_power_domain aux_domain; - bool aux_powered; aux_domain = intel_aux_power_domain(dig_port); - aux_powered = intel_display_power_is_enabled(display, aux_domain); - drm_WARN_ON(display->drm, aux_powered); + if (intel_display_power_is_enabled(display, aux_domain)) + drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n", + tc->port_name); } tc_phy_disconnect(tc); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index b37e400f74e5..5a95f06900b5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -634,6 +634,8 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + struct drm_i915_private *i915 = engine->i915; + /* Wa_1406697149 (WaDisableBankHangMode:icl) */ wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL); @@ -669,6 +671,15 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_1406306137:icl,ehl */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); + + if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { + /* + * Disable Repacking for Compression (masked R/W access) + * before rendering compressed surfaces for display. + */ + wa_masked_en(wal, CACHE_MODE_0_GEN7, + DISABLE_REPACKING_FOR_COMPRESSION); + } } /* @@ -2306,15 +2317,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_RC_SEMA_IDLE_MSG_DISABLE); } - if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { - /* - * "Disable Repacking for Compression (masked R/W access) - * before rendering compressed surfaces for display." - */ - wa_masked_en(wal, CACHE_MODE_0_GEN7, - DISABLE_REPACKING_FOR_COMPRESSION); - } - if (GRAPHICS_VER(i915) == 11) { /* This is not an Wa. 
Enable for better image quality */ wa_masked_en(wal, diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index edbbda78bac9..c4949e815eb3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -60,14 +60,14 @@ * virtual address in the GPU's VA space there is no guarantee that the actual * mappings are created in the GPU's MMU. If the given memory is swapped out * at the time the bind operation is executed the kernel will stash the mapping - * details into it's internal alloctor and create the actual MMU mappings once + * details into it's internal allocator and create the actual MMU mappings once * the memory is swapped back in. While this is transparent for userspace, it is * guaranteed that all the backing memory is swapped back in and all the memory * mappings, as requested by userspace previously, are actually mapped once the * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job. * * A VM_BIND job can be executed either synchronously or asynchronously. If - * exectued asynchronously, userspace may provide a list of syncobjs this job + * executed asynchronously, userspace may provide a list of syncobjs this job * will wait for and/or a list of syncobj the kernel will signal once the * VM_BIND job finished execution. If executed synchronously the ioctl will * block until the bind job is finished. For synchronous jobs the kernel will @@ -82,7 +82,7 @@ * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have * an up to date view of the VA space. However, the actual mappings might still * be pending. Hence, EXEC jobs require to have the particular fences - of - * the corresponding VM_BIND jobs they depent on - attached to them. + * the corresponding VM_BIND jobs they depend on - attached to them. 
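+ *
+ * Assumed usage sketch (not part of this fix): submit an asynchronous
+ * DRM_NOUVEAU_VM_BIND with an out-syncobj, then pass that syncobj as
+ * an in-fence to the following DRM_NOUVEAU_EXEC, so the mapping is
+ * guaranteed to exist before the push buffers that rely on it run.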
*/ static int diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c index 99296f03371a..07c1ebc2a941 100644 --- a/drivers/gpu/drm/nouveau/nvif/vmm.c +++ b/drivers/gpu/drm/nouveau/nvif/vmm.c @@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break; default: WARN_ON(1); - return -EINVAL; + ret = -EINVAL; + goto done; } memcpy(args->data, argv, argc); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c index 9d06ff722fea..0dc4782df8c0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries); if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); + kvfree(buf); return rpc; } @@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) rpc = r535_gsp_msgq_recv_one_elem(gsp, &info); if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); + kvfree(buf); return rpc; } diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs index 7e59a34b830d..4fe62cf98a23 100644 --- a/drivers/gpu/drm/nova/file.rs +++ b/drivers/gpu/drm/nova/file.rs @@ -39,7 +39,8 @@ impl File { _ => return Err(EINVAL), }; - getparam.set_value(value); + #[allow(clippy::useless_conversion)] + getparam.set_value(value.into()); Ok(0) } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index ab525668939a..faf50d872be3 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -53,6 +53,7 @@ config ROCKCHIP_CDN_DP bool "Rockchip cdn DP" depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) select DRM_DISPLAY_HELPER + select DRM_BRIDGE_CONNECTOR select DRM_DISPLAY_DP_HELPER help This selects support for Rockchip SoC specific extensions diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 186f6452a7d3..b50927a824b4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2) } /* - * The window registers are only updated when config done is written. - * Until that they read back the old value. As we read-modify-write - * these registers mark them as non-volatile. This makes sure we read - * the new values from the regmap register cache. + * The window and video port registers are only updated when config + * done is written. Until that they read back the old value. As we + * read-modify-write these registers mark them as non-volatile. This + * makes sure we read the new values from the regmap register cache. 
*/ static const struct regmap_range vop2_nonvolatile_range[] = { + regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255), regmap_reg_range(0x1000, 0x23ff), }; diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 7299fa8971ce..981dada8f3a8 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) NULL : &result->dst_pitch; drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); - buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); buf = dst.vaddr; /* restore original value of buf */ memset(buf, 0, dst_size); drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 7d20ac4bb633..84f412fd3c5d 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) /* Special layout, prepared below.. */ vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION | - XE_VM_FLAG_SET_TILE_ID(tile)); + XE_VM_FLAG_SET_TILE_ID(tile), NULL); if (IS_ERR(vm)) return ERR_CAST(vm); diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c index d92ec0f515b0..ca95f2a4d4ef 100644 --- a/drivers/gpu/drm/xe/xe_pxp_submit.c +++ b/drivers/gpu/drm/xe/xe_pxp_submit.c @@ -101,7 +101,7 @@ static int allocate_gsc_client_resources(struct xe_gt *gt, xe_assert(xe, hwe); /* PXP instructions must be issued from PPGTT */ - vm = xe_vm_create(xe, XE_VM_FLAG_GSC); + vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL); if (IS_ERR(vm)) return PTR_ERR(vm); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 2035604121e6..ec04bef8ae40 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1640,7 +1640,7 @@ static void xe_vm_free_scratch(struct xe_vm *vm) } } -struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) +struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) { struct drm_gem_object *vm_resv_obj; struct xe_vm *vm; @@ -1661,9 +1661,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) vm->xe = xe; vm->size = 1ull << xe->info.va_bits; - vm->flags = flags; + if (xef) + vm->xef = xe_file_get(xef); /** * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be * manipulated under the PXP mutex. 
However, the PXP mutex can be taken @@ -1794,6 +1795,20 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) if (number_tiles > 1) vm->composite_fence_ctx = dma_fence_context_alloc(1); + if (xef && xe->info.has_asid) { + u32 asid; + + down_write(&xe->usm.lock); + err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, + XA_LIMIT(1, XE_MAX_ASID - 1), + &xe->usm.next_asid, GFP_KERNEL); + up_write(&xe->usm.lock); + if (err < 0) + goto err_unlock_close; + + vm->usm.asid = asid; + } + trace_xe_vm_create(vm); return vm; @@ -1814,6 +1829,8 @@ err_no_resv: for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); + if (vm->xef) + xe_file_put(vm->xef); kfree(vm); if (flags & XE_VM_FLAG_LR_MODE) xe_pm_runtime_put(xe); @@ -2059,9 +2076,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, struct xe_device *xe = to_xe_device(dev); struct xe_file *xef = to_xe_file(file); struct drm_xe_vm_create *args = data; - struct xe_tile *tile; struct xe_vm *vm; - u32 id, asid; + u32 id; int err; u32 flags = 0; @@ -2097,29 +2113,10 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) flags |= XE_VM_FLAG_FAULT_MODE; - vm = xe_vm_create(xe, flags); + vm = xe_vm_create(xe, flags, xef); if (IS_ERR(vm)) return PTR_ERR(vm); - if (xe->info.has_asid) { - down_write(&xe->usm.lock); - err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, - XA_LIMIT(1, XE_MAX_ASID - 1), - &xe->usm.next_asid, GFP_KERNEL); - up_write(&xe->usm.lock); - if (err < 0) - goto err_close_and_put; - - vm->usm.asid = asid; - } - - vm->xef = xe_file_get(xef); - - /* Record BO memory for VM pagetable created against client */ - for_each_tile(tile, xe, id) - if (vm->pt_root[id]) - xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); - #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM) /* Warning: Security issue - never enable by default */ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); @@ -3421,6 +3418,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, free_bind_ops: if (args->num_binds > 1) kvfree(*bind_ops); + *bind_ops = NULL; return err; } @@ -3527,7 +3525,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct xe_exec_queue *q = NULL; u32 num_syncs, num_ufence = 0; struct xe_sync_entry *syncs = NULL; - struct drm_xe_vm_bind_op *bind_ops; + struct drm_xe_vm_bind_op *bind_ops = NULL; struct xe_vma_ops vops; struct dma_fence *fence; int err; diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 3475a118f666..2f213737c7e5 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -26,7 +26,7 @@ struct xe_sync_entry; struct xe_svm_range; struct drm_exec; -struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags); +struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef); struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id); int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node); diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c index e064e8a4a1f0..cfafe089102a 100644 --- a/drivers/i2c/busses/i2c-rtl9300.c +++ b/drivers/i2c/busses/i2c-rtl9300.c @@ -143,10 +143,10 @@ static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len) return -EIO; for (i = 0; i < len; i++) { - if (i % 4 == 0) - vals[i/4] = 0; - vals[i/4] <<= 8; - vals[i/4] |= buf[i]; + unsigned int shift = (i % 4) * 8; + unsigned int reg = i / 4; + + 
vals[reg] |= buf[i] << shift; } return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, @@ -175,7 +175,7 @@ static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write, return ret; ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, - val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 2000); + val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 100000); if (ret) return ret; @@ -281,15 +281,19 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); if (ret) goto out_unlock; - ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0]); + if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) { + ret = -EINVAL; + goto out_unlock; + } + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0] + 1); if (ret) goto out_unlock; if (read_write == I2C_SMBUS_WRITE) { - ret = rtl9300_i2c_write(i2c, &data->block[1], data->block[0]); + ret = rtl9300_i2c_write(i2c, &data->block[0], data->block[0] + 1); if (ret) goto out_unlock; } - len = data->block[0]; + len = data->block[0] + 1; break; default: diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c index bda370c0f660..8380b237831c 100644 --- a/drivers/iio/accel/sca3300.c +++ b/drivers/iio/accel/sca3300.c @@ -477,7 +477,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct sca3300_data *data = iio_priv(indio_dev); int bit, ret, val, i = 0; - IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX); + IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX) = { }; iio_for_each_active_channel(indio_dev, bit) { ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val); diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 6de2abad0197..24f2572c487e 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -1300,7 +1300,7 @@ config RN5T618_ADC config ROHM_BD79124 tristate "Rohm BD79124 ADC driver" - depends on I2C + depends on I2C && GPIOLIB select REGMAP_I2C select IIO_ADC_HELPER help diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 9808df2e9242..4d8c6bafd1c3 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -849,7 +849,7 @@ enum { static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan_spec *chan) { struct device *dev = &st->sd.spi->dev; - struct ad7124_channel *ch = &st->channels[chan->channel]; + struct ad7124_channel *ch = &st->channels[chan->address]; int ret; if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) { @@ -865,8 +865,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan if (ret < 0) return ret; - dev_dbg(dev, "offset for channel %d after zero-scale calibration: 0x%x\n", - chan->channel, ch->cfg.calibration_offset); + dev_dbg(dev, "offset for channel %lu after zero-scale calibration: 0x%x\n", + chan->address, ch->cfg.calibration_offset); } else { ch->cfg.calibration_gain = st->gain_default; @@ -880,8 +880,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan if (ret < 0) return ret; - dev_dbg(dev, "gain for channel %d after full-scale calibration: 0x%x\n", - chan->channel, ch->cfg.calibration_gain); + dev_dbg(dev, "gain for channel %lu after full-scale calibration: 0x%x\n", + chan->address, ch->cfg.calibration_gain); } return 0; @@ -924,7 +924,7 @@ static int ad7124_set_syscalib_mode(struct iio_dev *indio_dev, { struct 
ad7124_state *st = iio_priv(indio_dev); - st->channels[chan->channel].syscalib_mode = mode; + st->channels[chan->address].syscalib_mode = mode; return 0; } @@ -934,7 +934,7 @@ static int ad7124_get_syscalib_mode(struct iio_dev *indio_dev, { struct ad7124_state *st = iio_priv(indio_dev); - return st->channels[chan->channel].syscalib_mode; + return st->channels[chan->address].syscalib_mode; } static const struct iio_enum ad7124_syscalib_mode_enum = { diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 4413207be28f..683146e83ab2 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -200,7 +200,7 @@ struct ad7173_channel_config { /* * Following fields are used to compare equality. If you * make adaptations in it, you most likely also have to adapt - * ad7173_find_live_config(), too. + * ad7173_is_setup_equal(), too. */ struct_group(config_props, bool bipolar; @@ -561,12 +561,19 @@ static void ad7173_reset_usage_cnts(struct ad7173_state *st) st->config_usage_counter = 0; } -static struct ad7173_channel_config * -ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg) +/** + * ad7173_is_setup_equal - Compare two channel setups + * @cfg1: First channel configuration + * @cfg2: Second channel configuration + * + * Compares all configuration options that affect the registers connected to + * SETUP_SEL, namely CONFIGx, FILTERx, GAINx and OFFSETx. + * + * Returns: true if the setups are identical, false otherwise + */ +static bool ad7173_is_setup_equal(const struct ad7173_channel_config *cfg1, + const struct ad7173_channel_config *cfg2) { - struct ad7173_channel_config *cfg_aux; - int i; - /* * This is just to make sure that the comparison is adapted after * struct ad7173_channel_config was changed. @@ -579,14 +586,22 @@ ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *c u8 ref_sel; })); + return cfg1->bipolar == cfg2->bipolar && + cfg1->input_buf == cfg2->input_buf && + cfg1->odr == cfg2->odr && + cfg1->ref_sel == cfg2->ref_sel; +} + +static struct ad7173_channel_config * +ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg) +{ + struct ad7173_channel_config *cfg_aux; + int i; + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg; - if (cfg_aux->live && - cfg->bipolar == cfg_aux->bipolar && - cfg->input_buf == cfg_aux->input_buf && - cfg->odr == cfg_aux->odr && - cfg->ref_sel == cfg_aux->ref_sel) + if (cfg_aux->live && ad7173_is_setup_equal(cfg, cfg_aux)) return cfg_aux; } return NULL; } @@ -1228,7 +1243,7 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct ad7173_state *st = iio_priv(indio_dev); - int i, ret; + int i, j, k, ret; for (i = 0; i < indio_dev->num_channels; i++) { if (test_bit(i, scan_mask)) @@ -1239,6 +1254,54 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev, return ret; } + /* + * On some chips, there are more channels than setups, so if there were + * more unique setups requested than the number of available slots, + * ad7173_set_channel() will have written over some of the slots. We + * can detect this by making sure each assigned cfg_slot matches the + * requested configuration. If it doesn't, we know that the slot was + * overwritten by a different channel.
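+ *
+ * Worked example: on a hypothetical part with 4 setups, a scan of 5
+ * channels carrying 5 distinct configurations wraps the slot
+ * assignment, so the 5th channel lands in the slot the 1st channel
+ * still points at; the pairwise check below reports exactly that
+ * collision.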
+ */ + for_each_set_bit(i, scan_mask, indio_dev->num_channels) { + const struct ad7173_channel_config *cfg1, *cfg2; + + cfg1 = &st->channels[i].cfg; + + for_each_set_bit(j, scan_mask, indio_dev->num_channels) { + cfg2 = &st->channels[j].cfg; + + /* + * Only compare configs that are assigned to the same + * SETUP_SEL slot and don't compare channel to itself. + */ + if (i == j || cfg1->cfg_slot != cfg2->cfg_slot) + continue; + + /* + * If we find two different configs trying to use the + * same SETUP_SEL slot, then we know that we + * have too many unique configurations requested for + * the available slots and at least one was overwritten. + */ + if (!ad7173_is_setup_equal(cfg1, cfg2)) { + /* + * At this point, there isn't a way to tell + * which setups are actually programmed in the + * ADC anymore, so we could read them back to + * see, but it is simpler to just turn off all + * of the live flags so that everything gets + * reprogrammed on the next attempt to read a sample. + */ + for (k = 0; k < st->num_channels; k++) + st->channels[k].cfg.live = false; + + dev_err(&st->sd.spi->dev, + "Too many unique channel configurations requested for scan\n"); + return -EINVAL; + } + } + } + return 0; } diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index 6f7034b6c266..fa251dc1aae6 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -873,6 +873,7 @@ static const struct ad7380_chip_info adaq4381_4_chip_info = { .has_hardware_gain = true, .available_scan_masks = ad7380_4_channel_scan_masks, .timing_specs = &ad7380_4_timing, + .max_conversion_rate_hz = 4 * MEGA, }; static const struct spi_offload_config ad7380_offload_config = { diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c index 9674d48074c9..cadb0446bc29 100644 --- a/drivers/iio/adc/rzg2l_adc.c +++ b/drivers/iio/adc/rzg2l_adc.c @@ -89,7 +89,6 @@ struct rzg2l_adc { struct completion completion; struct mutex lock; u16 last_val[RZG2L_ADC_MAX_CHANNELS]; - bool was_rpm_active; }; /** @@ -428,6 +427,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; + platform_set_drvdata(pdev, indio_dev); + adc = iio_priv(indio_dev); adc->hw_params = device_get_match_data(dev); @@ -460,8 +461,6 @@ static int rzg2l_adc_probe(struct platform_device *pdev) if (ret) return ret; - platform_set_drvdata(pdev, indio_dev); - ret = rzg2l_adc_hw_init(dev, adc); if (ret) return dev_err_probe(&pdev->dev, ret, @@ -541,14 +540,9 @@ static int rzg2l_adc_suspend(struct device *dev) }; int ret; - if (pm_runtime_suspended(dev)) { - adc->was_rpm_active = false; - } else { - ret = pm_runtime_force_suspend(dev); - if (ret) - return ret; - adc->was_rpm_active = true; - } + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets); if (ret) goto rpm_restore; return 0; rpm_restore: - if (adc->was_rpm_active) - pm_runtime_force_resume(dev); - + pm_runtime_force_resume(dev); return ret; } @@ -577,11 +569,9 @@ static int rzg2l_adc_resume(struct device *dev) if (ret) return ret; - if (adc->was_rpm_active) { - ret = pm_runtime_force_resume(dev); - if (ret) - goto resets_restore; - } + ret = pm_runtime_force_resume(dev); + if (ret) + goto resets_restore; ret = rzg2l_adc_hw_init(dev, adc); if (ret) goto rpm_restore; return 0; rpm_restore: - if (adc->was_rpm_active) { - pm_runtime_mark_last_busy(dev); - 
pm_runtime_put_autosuspend(dev); - } + pm_runtime_force_suspend(dev); resets_restore: reset_control_bulk_assert(ARRAY_SIZE(resets), resets); return ret; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c index 8b15afca498c..271a4788604a 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c @@ -32,8 +32,12 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp) goto exit; *temp = (s16)be16_to_cpup(raw); + /* + * Temperature data is invalid if both accel and gyro are off. + * Return -EBUSY in this case. + */ if (*temp == INV_ICM42600_DATA_INVALID) - ret = -EINVAL; + ret = -EBUSY; exit: mutex_unlock(&st->lock); diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c index 68f60dc3c79d..32719f584c47 100644 --- a/drivers/iio/light/as73211.c +++ b/drivers/iio/light/as73211.c @@ -639,7 +639,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p) struct { __le16 chan[4]; aligned_s64 ts; - } scan; + } scan = { }; int data_result, ret; mutex_lock(&data->mutex); diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 74505c9ec1a0..6cdc8ed53520 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -3213,11 +3213,12 @@ int bmp280_common_probe(struct device *dev, /* Bring chip out of reset if there is an assigned GPIO line */ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpiod)) + return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n"); + /* Deassert the signal */ - if (gpiod) { - dev_info(dev, "release reset\n"); - gpiod_set_value(gpiod, 0); - } + dev_info(dev, "release reset\n"); + gpiod_set_value(gpiod, 0); data->regmap = regmap; diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c index d1510fe24050..f69db6f2f380 100644 --- a/drivers/iio/proximity/isl29501.c +++ b/drivers/iio/proximity/isl29501.c @@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct isl29501_private *isl29501 = iio_priv(indio_dev); const unsigned long *active_mask = indio_dev->active_scan_mask; - u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */ - - if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) - isl29501_register_read(isl29501, REG_DISTANCE, buffer); + u32 value; + struct { + u16 data; + aligned_s64 ts; + } scan = { }; + + if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) { + isl29501_register_read(isl29501, REG_DISTANCE, &value); + scan.data = value; + } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index cae8e84821d7..205939680fd4 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/err.h> #include <linux/spi/spi.h> +#include <linux/types.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/trigger.h> @@ -121,8 +122,15 @@ struct maxim_thermocouple_data { struct spi_device *spi; const struct maxim_thermocouple_chip *chip; char tc_type; - - u8 buffer[16] __aligned(IIO_DMA_MINALIGN); + /* 
Buffer for reading up to 2 hardware channels. */ + struct { + union { + __be16 raw16; + __be32 raw32; + __be16 raw[2]; + }; + aligned_s64 timestamp; + } buffer __aligned(IIO_DMA_MINALIGN); }; static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, @@ -130,18 +138,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, { unsigned int storage_bytes = data->chip->read_size; unsigned int shift = chan->scan_type.shift + (chan->address * 8); - __be16 buf16; - __be32 buf32; int ret; switch (storage_bytes) { case 2: - ret = spi_read(data->spi, (void *)&buf16, storage_bytes); - *val = be16_to_cpu(buf16); + ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes); + *val = be16_to_cpu(data->buffer.raw16); break; case 4: - ret = spi_read(data->spi, (void *)&buf32, storage_bytes); - *val = be32_to_cpu(buf32); + ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes); + *val = be32_to_cpu(data->buffer.raw32); break; default: ret = -EINVAL; @@ -166,9 +172,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private) struct maxim_thermocouple_data *data = iio_priv(indio_dev); int ret; - ret = spi_read(data->spi, data->buffer, data->chip->read_size); + ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size); if (!ret) { - iio_push_to_buffers_with_ts(indio_dev, data->buffer, + iio_push_to_buffers_with_ts(indio_dev, &data->buffer, sizeof(data->buffer), iio_get_time_ns(indio_dev)); } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index b1c44ec1a3f3..572a91a62a7b 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp, out_free_map: if (ib_uses_virt_dma(dev)) - kfree(map->pfn_list); + kvfree(map->pfn_list); else hmm_dma_map_free(dev->dma_device, map); return ret; @@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp) mutex_unlock(&umem_odp->umem_mutex); mmu_interval_notifier_remove(&umem_odp->notifier); if (ib_uses_virt_dma(dev)) - kfree(umem_odp->map.pfn_list); + kvfree(umem_odp->map.pfn_list); else hmm_dma_map_free(dev->dma_device, &umem_odp->map); } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 37c2bc3bdba5..260dc67b8b87 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1921,7 +1921,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_re_dev *rdev = srq->rdev; - int rc; switch (srq_attr_mask) { case IB_SRQ_MAX_WR: @@ -1933,11 +1932,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, return -EINVAL; srq->qplib_srq.threshold = srq_attr->srq_limit; - rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); - if (rc) { - ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); - return rc; - } + bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold); + /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 293b0a96c8e3..df7cf8d68e27 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -2017,6 +2017,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev) rdev->nqr = NULL; } 
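Several hunks in this section (the GSP RPC message queue, umem_odp, the iommufd hw-queue pages, the qplib PBL arrays) share one theme: buffers that may be vmalloc()-backed must be released with kvfree() rather than kfree(). A minimal sketch of the rule those fixes enforce; the helper names are illustrative and belong to none of the drivers above:

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * kvmalloc_array() transparently falls back to vmalloc() for large
 * allocations, so the release path must use kvfree(), which dispatches
 * to kfree() or vfree() based on the address. Passing vmalloc()-backed
 * memory to plain kfree() corrupts the slab allocator.
 */
static u64 *pfn_list_alloc(size_t npfns)
{
	return kvmalloc_array(npfns, sizeof(u64), GFP_KERNEL);
}

static void pfn_list_free(u64 *pfns)
{
	kvfree(pfns);	/* safe for both kmalloc() and vmalloc() backing */
}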
+/* When DEL_GID fails, driver is not freeing GID ctx memory. + * To avoid the memory leak, free the memory during unload + */ +static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_re_gid_ctx *ctx, **ctx_tbl; + int i; + + if (!sgid_tbl->active) + return; + + ctx_tbl = sgid_tbl->ctx; + for (i = 0; i < sgid_tbl->max; i++) { + if (sgid_tbl->hw_id[i] == 0xFFFF) + continue; + + ctx = ctx_tbl[i]; + kfree(ctx); + } +} + static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) { u8 type; @@ -2030,6 +2052,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) cancel_delayed_work_sync(&rdev->worker); + bnxt_re_free_gid_ctx(rdev); if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags)) bnxt_re_cleanup_res(rdev); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index dfe3177123e5..ee36b3d82cc0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -705,9 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, srq->dbinfo.db = srq->dpi->dbr; srq->dbinfo.max_slot = 1; srq->dbinfo.priv_db = res->dpi_tbl.priv_db; - if (srq->threshold) - bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); - srq->arm_req = false; + bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); return 0; fail: @@ -717,24 +715,6 @@ fail: return rc; } -int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, - struct bnxt_qplib_srq *srq) -{ - struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; - u32 count; - - count = __bnxt_qplib_get_avail(srq_hwq); - if (count > srq->threshold) { - srq->arm_req = false; - bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); - } else { - /* Deferred arming */ - srq->arm_req = true; - } - - return 0; -} - int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) { @@ -776,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; struct rq_wqe *srqe; struct sq_sge *hw_sge; - u32 count = 0; int i, next; spin_lock(&srq_hwq->lock); @@ -808,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot); - spin_lock(&srq_hwq->lock); - count = __bnxt_qplib_get_avail(srq_hwq); - spin_unlock(&srq_hwq->lock); /* Ring DB */ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); - if (srq->arm_req == true && count > srq->threshold) { - srq->arm_req = false; - bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); - } return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index ab125f1d949e..4921a214c34c 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -546,8 +546,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, srqn_handler_t srq_handler); int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); -int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, - struct bnxt_qplib_srq *srq); int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 6cd05207ffed..cc5c82d96839 100644 --- 
a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, pbl->pg_arr = vmalloc_array(pages, sizeof(void *)); if (!pbl->pg_arr) return -ENOMEM; + memset(pbl->pg_arr, 0, pages * sizeof(void *)); pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t)); if (!pbl->pg_map_arr) { @@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, pbl->pg_arr = NULL; return -ENOMEM; } + memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t)); pbl->pg_count = 0; pbl->pg_size = sginfo->pgsize; diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index 94c211df09d8..fdeec33c71da 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -994,6 +994,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL); if (xa_is_err(old_entry)) ret = xa_err(old_entry); + else + qp->ibqp.qp_num = 1; } else { ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp, XA_LIMIT(1, dev->attrs.max_qp - 1), @@ -1031,7 +1033,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (ret) goto err_out_cmd; } else { - init_kernel_qp(dev, qp, attrs); + ret = init_kernel_qp(dev, qp, attrs); + if (ret) + goto err_out_xa; } qp->attrs.max_send_sge = attrs->cap.max_send_sge; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 64bca08f3f1a..f82bdd46a917 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) if (!hr_dev->is_vf) hns_roce_free_link_table(hr_dev); - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09) + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) free_dip_entry(hr_dev); } @@ -5476,7 +5476,7 @@ out: return ret; } -static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn, +static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn, void *buffer) { struct hns_roce_v2_scc_context *context; @@ -5488,7 +5488,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn, return PTR_ERR(mailbox); ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC, - qpn); + sccn); if (ret) goto out; diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index f637b73b946e..230187dda6a0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp) struct hns_roce_v2_qp_context qpc; struct hns_roce_v2_scc_context sccc; } context = {}; + u32 sccn = hr_qp->qpn; int ret; if (!hr_dev->hw->query_qpc) @@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp) !hr_dev->hw->query_sccc) goto out; - ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc); + if (hr_qp->cong_type == CONG_TYPE_DIP) { + if (!hr_qp->dip) + goto out; + sccn = hr_qp->dip->dip_idx; + } + + ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc); if (ret) ibdev_warn_ratelimited(&hr_dev->ib_dev, "failed to query SCCC, ret = %d.\n", diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 132a87e52d5c..ac0183a2ff7a 100644 --- 
a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, static void rxe_skb_tx_dtor(struct sk_buff *skb) { - struct net_device *ndev = skb->dev; - struct rxe_dev *rxe; - unsigned int qp_index; - struct rxe_qp *qp; + struct rxe_qp *qp = skb->sk->sk_user_data; int skb_out; - rxe = rxe_get_dev_from_net(ndev); - if (!rxe && is_vlan_dev(ndev)) - rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev)); - if (WARN_ON(!rxe)) - return; - - qp_index = (int)(uintptr_t)skb->sk->sk_user_data; - if (!qp_index) - return; - - qp = rxe_pool_get_index(&rxe->qp_pool, qp_index); - if (!qp) - goto put_dev; - skb_out = atomic_dec_return(&qp->skb_out); - if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW) + if (unlikely(qp->need_req_skb && + skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) rxe_sched_task(&qp->send_task); rxe_put(qp); -put_dev: - ib_device_put(&rxe->ib_dev); sock_put(skb->sk); } @@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; + rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) @@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; + rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) @@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, goto out; } + /* Add time stamp to skb. */ + skb->tstamp = ktime_get(); + skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev)); /* FIXME: hold reference to this netdev until life of this skb. */ diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index f2af3e0aef35..95f1c1c2949d 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk); if (err < 0) return err; - qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index; + qp->sk->sk->sk_user_data = qp; /* pick a source UDP port number for this QP based on * the source QPN. 
this spreads traffic for different QPs diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 7b5af6176de9..8de689b2c5ed 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3638,7 +3638,7 @@ static int __init parse_ivrs_acpihid(char *str) { u32 seg = 0, bus, dev, fn; char *hid, *uid, *p, *addr; - char acpiid[ACPIID_LEN] = {0}; + char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */ int i; addr = strchr(str, '@'); @@ -3664,7 +3664,7 @@ static int __init parse_ivrs_acpihid(char *str) /* We have the '@', make it the terminator to get just the acpiid */ *addr++ = 0; - if (strlen(str) > ACPIID_LEN + 1) + if (strlen(str) > ACPIID_LEN) goto not_found; if (sscanf(str, "=%s", acpiid) != 1) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 5968043ac802..2a8b46b948f0 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2997,9 +2997,9 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state) /* ATS is being switched off, invalidate the entire ATC */ arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); } - master->ats_enabled = state->ats_enabled; arm_smmu_remove_master_domain(master, state->old_domain, state->ssid); + master->ats_enabled = state->ats_enabled; } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index be1aaaf8cd17..378104cd395e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -301,9 +301,11 @@ static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf) struct iommu_vevent_tegra241_cmdqv vevent_data; int i; - for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) - vevent_data.lvcmdq_err_map[i] = - readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i))); + for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) { + u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i))); + + vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err); + } iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV, &vevent_data, sizeof(vevent_data)); diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c index 2ca5809b238b..462b457ffd0c 100644 --- a/drivers/iommu/iommufd/viommu.c +++ b/drivers/iommu/iommufd/viommu.c @@ -339,7 +339,7 @@ iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd, } *base_pa = (page_to_pfn(pages[0]) << PAGE_SHIFT) + offset; - kfree(pages); + kvfree(pages); return access; out_unpin: @@ -349,7 +349,7 @@ out_detach: out_destroy: iommufd_access_destroy_internal(viommu->ictx, access); out_free: - kfree(pages); + kvfree(pages); return ERR_PTR(rc); } diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c index 2d0d31ba2886..0eae2f4bdc5e 100644 --- a/drivers/iommu/riscv/iommu.c +++ b/drivers/iommu/riscv/iommu.c @@ -1283,7 +1283,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain, unsigned long *ptr; ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); - if (_io_pte_none(*ptr) || !_io_pte_present(*ptr)) + if (!ptr) return 0; return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1)); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 532db1de201b..b39d6f134ab2 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -998,8 +998,7 @@ static void viommu_get_resv_regions(struct device 
*dev, struct list_head *head) iommu_dma_get_resv_regions(dev, head); } -static const struct iommu_ops viommu_ops; -static struct virtio_driver virtio_iommu_drv; +static const struct bus_type *virtio_bus_type; static int viommu_match_node(struct device *dev, const void *data) { @@ -1008,8 +1007,9 @@ static int viommu_match_node(struct device *dev, const void *data) static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode) { - struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL, - fwnode, viommu_match_node); + struct device *dev = bus_find_device(virtio_bus_type, NULL, fwnode, + viommu_match_node); + put_device(dev); return dev ? dev_to_virtio(dev)->priv : NULL; @@ -1160,6 +1160,9 @@ static int viommu_probe(struct virtio_device *vdev) if (!viommu) return -ENOMEM; + /* Borrow this for easy lookups later */ + virtio_bus_type = dev->bus; + spin_lock_init(&viommu->request_lock); ida_init(&viommu->domain_ids); viommu->dev = dev; @@ -1229,10 +1232,10 @@ static int viommu_probe(struct virtio_device *vdev) if (ret) goto err_free_vqs; - iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); - vdev->priv = viommu; + iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); + dev_info(dev, "input address: %u bits\n", order_base_2(viommu->geometry.aperture_end)); dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 2b05722d4dbe..ea8a0ab47afd 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -39,12 +39,13 @@ #include "hfc_pci.h" +static void hfcpci_softirq(struct timer_list *unused); static const char *hfcpci_revision = "2.0"; static int HFC_cnt; static uint debug; static uint poll, tics; -static struct timer_list hfc_tl; +static DEFINE_TIMER(hfc_tl, hfcpci_softirq); static unsigned long hfc_jiffies; MODULE_AUTHOR("Karsten Keil"); @@ -2305,8 +2306,7 @@ hfcpci_softirq(struct timer_list *unused) hfc_jiffies = jiffies + 1; else hfc_jiffies += tics; - hfc_tl.expires = hfc_jiffies; - add_timer(&hfc_tl); + mod_timer(&hfc_tl, hfc_jiffies); } static int __init @@ -2332,10 +2332,8 @@ HFC_init(void) if (poll != HFCPCI_BTRANS_THRESHOLD) { printk(KERN_INFO "%s: Using alternative poll value of %d\n", __func__, poll); - timer_setup(&hfc_tl, hfcpci_softirq, 0); - hfc_tl.expires = jiffies + tics; - hfc_jiffies = hfc_tl.expires; - add_timer(&hfc_tl); + hfc_jiffies = jiffies + tics; + mod_timer(&hfc_tl, hfc_jiffies); } else tics = 0; /* indicate the use of controller's timer */ diff --git a/drivers/md/md.c b/drivers/md/md.c index ac85ec73a409..1baaf52c603c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -339,6 +339,7 @@ static int start_readonly; * so all the races disappear. */ static bool create_on_open = true; +static bool legacy_async_del_gendisk = true; /* * We have a system wide 'event count' that is incremented @@ -877,15 +878,18 @@ void mddev_unlock(struct mddev *mddev) export_rdev(rdev, mddev); } - /* Call del_gendisk after release reconfig_mutex to avoid - * deadlock (e.g. call del_gendisk under the lock and an - * access to sysfs files waits the lock) - * And MD_DELETED is only used for md raid which is set in - * do_md_stop. dm raid only uses md_stop to stop. 
So dm raid - * doesn't need to check MD_DELETED when getting reconfig lock - */ - if (test_bit(MD_DELETED, &mddev->flags)) - del_gendisk(mddev->gendisk); + if (!legacy_async_del_gendisk) { + /* + * Call del_gendisk after release reconfig_mutex to avoid + * deadlock (e.g. call del_gendisk under the lock and an + * access to sysfs files waits the lock) + * And MD_DELETED is only used for md raid which is set in + * do_md_stop. dm raid only uses md_stop to stop. So dm raid + * doesn't need to check MD_DELETED when getting reconfig lock + */ + if (test_bit(MD_DELETED, &mddev->flags)) + del_gendisk(mddev->gendisk); + } } EXPORT_SYMBOL_GPL(mddev_unlock); @@ -1419,7 +1423,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru else { if (sb->events_hi == sb->cp_events_hi && sb->events_lo == sb->cp_events_lo) { - mddev->resync_offset = sb->resync_offset; + mddev->resync_offset = sb->recovery_cp; } else mddev->resync_offset = 0; } @@ -1547,13 +1551,13 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) mddev->minor_version = sb->minor_version; if (mddev->in_sync) { - sb->resync_offset = mddev->resync_offset; + sb->recovery_cp = mddev->resync_offset; sb->cp_events_hi = (mddev->events>>32); sb->cp_events_lo = (u32)mddev->events; if (mddev->resync_offset == MaxSector) sb->state = (1<< MD_SB_CLEAN); } else - sb->resync_offset = 0; + sb->recovery_cp = 0; sb->layout = mddev->layout; sb->chunk_size = mddev->chunk_sectors << 9; @@ -4835,9 +4839,42 @@ out_unlock: static struct md_sysfs_entry md_metadata = __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); +static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors) +{ + return rdev->raid_disk >= 0 && + !test_bit(Journal, &rdev->flags) && + !test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset < sectors; +} + +static enum sync_action md_get_active_sync_action(struct mddev *mddev) +{ + struct md_rdev *rdev; + bool is_recover = false; + + if (mddev->resync_offset < MaxSector) + return ACTION_RESYNC; + + if (mddev->reshape_position != MaxSector) + return ACTION_RESHAPE; + + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) { + if (rdev_needs_recovery(rdev, MaxSector)) { + is_recover = true; + break; + } + } + rcu_read_unlock(); + + return is_recover ? ACTION_RECOVER : ACTION_IDLE; +} + enum sync_action md_sync_action(struct mddev *mddev) { unsigned long recovery = mddev->recovery; + enum sync_action active_action; /* * frozen has the highest priority, means running sync_thread will be @@ -4861,8 +4898,17 @@ enum sync_action md_sync_action(struct mddev *mddev) !test_bit(MD_RECOVERY_NEEDED, &recovery)) return ACTION_IDLE; - if (test_bit(MD_RECOVERY_RESHAPE, &recovery) || - mddev->reshape_position != MaxSector) + /* + * Check if any sync operation (resync/recover/reshape) is + * currently active. This ensures that only one sync operation + * can run at a time. Returns the type of active operation, or + * ACTION_IDLE if none are active. 
+ */ + active_action = md_get_active_sync_action(mddev); + if (active_action != ACTION_IDLE) + return active_action; + + if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) return ACTION_RESHAPE; if (test_bit(MD_RECOVERY_RECOVER, &recovery)) @@ -5818,6 +5864,13 @@ static void md_kobj_release(struct kobject *ko) { struct mddev *mddev = container_of(ko, struct mddev, kobj); + if (legacy_async_del_gendisk) { + if (mddev->sysfs_state) + sysfs_put(mddev->sysfs_state); + if (mddev->sysfs_level) + sysfs_put(mddev->sysfs_level); + del_gendisk(mddev->gendisk); + } put_disk(mddev->gendisk); } @@ -6021,6 +6074,9 @@ static int md_alloc_and_put(dev_t dev, char *name) { struct mddev *mddev = md_alloc(dev, name); + if (legacy_async_del_gendisk) + pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n"); + if (IS_ERR(mddev)) return PTR_ERR(mddev); mddev_put(mddev); @@ -6431,10 +6487,22 @@ static void md_clean(struct mddev *mddev) mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; - /* if UNTIL_STOP is set, it's cleared here */ - mddev->hold_active = 0; - /* Don't clear MD_CLOSING, or mddev can be opened again. */ - mddev->flags &= BIT_ULL_MASK(MD_CLOSING); + + /* + * For legacy_async_del_gendisk mode, it can stop the array in the + * middle of assembling it, then it still can access the array. So + * it needs to clear MD_CLOSING. If not legacy_async_del_gendisk, + * it can't open the array again after stopping it. So it doesn't + * clear MD_CLOSING. + */ + if (legacy_async_del_gendisk && mddev->hold_active) { + clear_bit(MD_CLOSING, &mddev->flags); + } else { + /* if UNTIL_STOP is set, it's cleared here */ + mddev->hold_active = 0; + /* Don't clear MD_CLOSING, or mddev can be opened again. */ + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); + } mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; @@ -6658,7 +6726,8 @@ static int do_md_stop(struct mddev *mddev, int mode) export_array(mddev); md_clean(mddev); - set_bit(MD_DELETED, &mddev->flags); + if (!legacy_async_del_gendisk) + set_bit(MD_DELETED, &mddev->flags); } md_new_event(); sysfs_notify_dirent_safe(mddev->sysfs_state); @@ -8968,11 +9037,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action) start = MaxSector; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) - if (rdev->raid_disk >= 0 && - !test_bit(Journal, &rdev->flags) && - !test_bit(Faulty, &rdev->flags) && - !test_bit(In_sync, &rdev->flags) && - rdev->recovery_offset < start) + if (rdev_needs_recovery(rdev, start)) start = rdev->recovery_offset; rcu_read_unlock(); @@ -9331,12 +9396,8 @@ void md_do_sync(struct md_thread *thread) test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) - if (rdev->raid_disk >= 0 && - mddev->delta_disks >= 0 && - !test_bit(Journal, &rdev->flags) && - !test_bit(Faulty, &rdev->flags) && - !test_bit(In_sync, &rdev->flags) && - rdev->recovery_offset < mddev->curr_resync) + if (mddev->delta_disks >= 0 && + rdev_needs_recovery(rdev, mddev->curr_resync)) rdev->recovery_offset = mddev->curr_resync; rcu_read_unlock(); } @@ -10392,6 +10453,7 @@ module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); module_param(create_on_open, bool, S_IRUSR|S_IWUSR); +module_param(legacy_async_del_gendisk, bool, 0600); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD RAID framework"); diff --git 
a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c index 05b708bd0a64..1f088acecf36 100644 --- a/drivers/media/i2c/alvium-csi2.c +++ b/drivers/media/i2c/alvium-csi2.c @@ -1841,7 +1841,6 @@ static int alvium_s_stream(struct v4l2_subdev *sd, int enable) } else { alvium_set_stream_mipi(alvium, enable); - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); } diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c index 487bcabb4a19..1c889c878abd 100644 --- a/drivers/media/i2c/ccs/ccs-core.c +++ b/drivers/media/i2c/ccs/ccs-core.c @@ -787,10 +787,8 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl) rval = -EINVAL; } - if (pm_status > 0) { - pm_runtime_mark_last_busy(&client->dev); + if (pm_status > 0) pm_runtime_put_autosuspend(&client->dev); - } return rval; } @@ -1914,7 +1912,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable) if (!enable) { ccs_stop_streaming(sensor); sensor->streaming = false; - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return 0; @@ -1929,7 +1926,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable) rval = ccs_start_streaming(sensor); if (rval < 0) { sensor->streaming = false; - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); } @@ -2677,7 +2673,6 @@ nvm_show(struct device *dev, struct device_attribute *attr, char *buf) return -ENODEV; } - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); /* diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c index 3a4d100b9199..d434721ba8ed 100644 --- a/drivers/media/i2c/dw9768.c +++ b/drivers/media/i2c/dw9768.c @@ -374,7 +374,6 @@ static int dw9768_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) static int dw9768_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { - pm_runtime_mark_last_busy(sd->dev); pm_runtime_put_autosuspend(sd->dev); return 0; diff --git a/drivers/media/i2c/gc0308.c b/drivers/media/i2c/gc0308.c index 069f42785b3c..cbcda0e18ff1 100644 --- a/drivers/media/i2c/gc0308.c +++ b/drivers/media/i2c/gc0308.c @@ -974,7 +974,6 @@ static int gc0308_s_ctrl(struct v4l2_ctrl *ctrl) if (ret) dev_err(gc0308->dev, "failed to set control: %d\n", ret); - pm_runtime_mark_last_busy(gc0308->dev); pm_runtime_put_autosuspend(gc0308->dev); return ret; @@ -1157,14 +1156,12 @@ static int gc0308_start_stream(struct gc0308 *gc0308) return 0; disable_pm: - pm_runtime_mark_last_busy(gc0308->dev); pm_runtime_put_autosuspend(gc0308->dev); return ret; } static int gc0308_stop_stream(struct gc0308 *gc0308) { - pm_runtime_mark_last_busy(gc0308->dev); pm_runtime_put_autosuspend(gc0308->dev); return 0; } diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c index ba02161d46e7..559a851669aa 100644 --- a/drivers/media/i2c/gc2145.c +++ b/drivers/media/i2c/gc2145.c @@ -963,7 +963,6 @@ static int gc2145_enable_streams(struct v4l2_subdev *sd, return 0; err_rpm_put: - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return ret; } @@ -985,7 +984,6 @@ static int gc2145_disable_streams(struct v4l2_subdev *sd, if (ret) dev_err(&client->dev, "%s failed to write regs\n", __func__); - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return ret; @@ -1193,7 +1191,6 @@ static int gc2145_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return ret; diff --git 
a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index 3b4f68543342..3faf48f34af4 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -771,7 +771,6 @@ static int imx219_enable_streams(struct v4l2_subdev *sd, return 0; err_rpm_put: - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return ret; } @@ -793,7 +792,6 @@ static int imx219_disable_streams(struct v4l2_subdev *sd, __v4l2_ctrl_grab(imx219->vflip, false); __v4l2_ctrl_grab(imx219->hflip, false); - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return ret; diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c index da618c8cbadc..67e8bb432d10 100644 --- a/drivers/media/i2c/imx283.c +++ b/drivers/media/i2c/imx283.c @@ -1143,7 +1143,6 @@ static int imx283_enable_streams(struct v4l2_subdev *sd, return 0; err_rpm_put: - pm_runtime_mark_last_busy(imx283->dev); pm_runtime_put_autosuspend(imx283->dev); return ret; @@ -1163,7 +1162,6 @@ static int imx283_disable_streams(struct v4l2_subdev *sd, if (ret) dev_err(imx283->dev, "Failed to stop stream\n"); - pm_runtime_mark_last_busy(imx283->dev); pm_runtime_put_autosuspend(imx283->dev); return ret; @@ -1558,7 +1556,6 @@ static int imx283_probe(struct i2c_client *client) * Decrease the PM usage count. The device will get suspended after the * autosuspend delay, turning the power off. */ - pm_runtime_mark_last_busy(imx283->dev); pm_runtime_put_autosuspend(imx283->dev); return 0; diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c index 4f3f386c5353..ec172556612e 100644 --- a/drivers/media/i2c/imx290.c +++ b/drivers/media/i2c/imx290.c @@ -869,7 +869,6 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(imx290->dev); pm_runtime_put_autosuspend(imx290->dev); return ret; @@ -1099,7 +1098,6 @@ static int imx290_set_stream(struct v4l2_subdev *sd, int enable) } } else { imx290_stop_streaming(imx290); - pm_runtime_mark_last_busy(imx290->dev); pm_runtime_put_autosuspend(imx290->dev); } @@ -1294,7 +1292,6 @@ static int imx290_subdev_init(struct imx290 *imx290) * will already be prevented even before the delay. 
*/ v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops); - pm_runtime_mark_last_busy(imx290->dev); pm_runtime_put_autosuspend(imx290->dev); imx290->sd.internal_ops = &imx290_internal_ops; diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c index f3bec16b527c..61116f4e3f76 100644 --- a/drivers/media/i2c/imx296.c +++ b/drivers/media/i2c/imx296.c @@ -604,7 +604,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable) if (!enable) { ret = imx296_stream_off(sensor); - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); goto unlock; diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c index 278e743646ea..276bf4d6f39d 100644 --- a/drivers/media/i2c/imx415.c +++ b/drivers/media/i2c/imx415.c @@ -952,7 +952,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable) if (!enable) { ret = imx415_stream_off(sensor); - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); goto unlock; diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c index 3f540ca40f3c..aa3fd6c6c76c 100644 --- a/drivers/media/i2c/mt9m114.c +++ b/drivers/media/i2c/mt9m114.c @@ -974,7 +974,6 @@ static int mt9m114_start_streaming(struct mt9m114 *sensor, return 0; error: - pm_runtime_mark_last_busy(&sensor->client->dev); pm_runtime_put_autosuspend(&sensor->client->dev); return ret; @@ -988,7 +987,6 @@ static int mt9m114_stop_streaming(struct mt9m114 *sensor) ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND); - pm_runtime_mark_last_busy(&sensor->client->dev); pm_runtime_put_autosuspend(&sensor->client->dev); return ret; @@ -1046,7 +1044,6 @@ static int mt9m114_pa_g_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(&sensor->client->dev); pm_runtime_put_autosuspend(&sensor->client->dev); return ret; @@ -1113,7 +1110,6 @@ static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(&sensor->client->dev); pm_runtime_put_autosuspend(&sensor->client->dev); return ret; @@ -1565,7 +1561,6 @@ static int mt9m114_ifp_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(&sensor->client->dev); pm_runtime_put_autosuspend(&sensor->client->dev); return ret; @@ -2472,7 +2467,6 @@ static int mt9m114_probe(struct i2c_client *client) * Decrease the PM usage count. The device will get suspended after the * autosuspend delay, turning the power off. 
*/ - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c index 1c3a449f9354..7d740ad3926f 100644 --- a/drivers/media/i2c/ov4689.c +++ b/drivers/media/i2c/ov4689.c @@ -497,7 +497,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on) } else { cci_write(ov4689->regmap, OV4689_REG_CTRL_MODE, OV4689_MODE_SW_STANDBY, NULL); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -702,7 +701,6 @@ static int ov4689_set_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -999,7 +997,6 @@ static int ov4689_probe(struct i2c_client *client) goto err_clean_subdev_pm; } - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 0dae0438aa80..84198613381d 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -3341,7 +3341,6 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(&sensor->i2c_client->dev); pm_runtime_put_autosuspend(&sensor->i2c_client->dev); return 0; @@ -3417,7 +3416,6 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(&sensor->i2c_client->dev); pm_runtime_put_autosuspend(&sensor->i2c_client->dev); return ret; @@ -3754,7 +3752,6 @@ out: mutex_unlock(&sensor->lock); if (!enable || ret) { - pm_runtime_mark_last_busy(&sensor->i2c_client->dev); pm_runtime_put_autosuspend(&sensor->i2c_client->dev); } @@ -3965,7 +3962,6 @@ static int ov5640_probe(struct i2c_client *client) pm_runtime_set_autosuspend_delay(dev, 1000); pm_runtime_use_autosuspend(dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index 004d0ee5c3f5..58c846a44376 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -808,7 +808,6 @@ static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(ov5645->dev); pm_runtime_put_autosuspend(ov5645->dev); return ret; @@ -979,7 +978,6 @@ static int ov5645_disable_streams(struct v4l2_subdev *sd, OV5645_SYSTEM_CTRL0_STOP); rpm_put: - pm_runtime_mark_last_busy(ov5645->dev); pm_runtime_put_autosuspend(ov5645->dev); return ret; @@ -1196,7 +1194,6 @@ static int ov5645_probe(struct i2c_client *client) pm_runtime_set_autosuspend_delay(dev, 1000); pm_runtime_use_autosuspend(dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c index a5da4fe47e0b..2031cbd05c26 100644 --- a/drivers/media/i2c/ov64a40.c +++ b/drivers/media/i2c/ov64a40.c @@ -2990,7 +2990,6 @@ static int ov64a40_start_streaming(struct ov64a40 *ov64a40, return 0; error_power_off: - pm_runtime_mark_last_busy(ov64a40->dev); pm_runtime_put_autosuspend(ov64a40->dev); return ret; @@ -3000,7 +2999,6 @@ static int ov64a40_stop_streaming(struct ov64a40 *ov64a40, struct v4l2_subdev_state *state) { cci_update_bits(ov64a40->cci, OV64A40_REG_SMIA, BIT(0), 0, NULL); - pm_runtime_mark_last_busy(ov64a40->dev); pm_runtime_put_autosuspend(ov64a40->dev); __v4l2_ctrl_grab(ov64a40->link_freq, false); @@ -3329,10 +3327,8 @@ static int ov64a40_set_ctrl(struct v4l2_ctrl *ctrl) break; } - if (pm_status > 0) { - pm_runtime_mark_last_busy(ov64a40->dev); + if (pm_status > 0) pm_runtime_put_autosuspend(ov64a40->dev); - } return ret; } @@ -3622,7 
+3618,6 @@ static int ov64a40_probe(struct i2c_client *client) goto error_subdev_cleanup; } - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return 0; diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c index 95f9ae794846..6b7193eaea1f 100644 --- a/drivers/media/i2c/ov8858.c +++ b/drivers/media/i2c/ov8858.c @@ -1391,7 +1391,6 @@ static int ov8858_s_stream(struct v4l2_subdev *sd, int on) } } else { ov8858_stop_stream(ov8858); - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); } @@ -1945,7 +1944,6 @@ static int ov8858_probe(struct i2c_client *client) goto err_power_off; } - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c index f4568e87f018..41ae25b0911f 100644 --- a/drivers/media/i2c/st-mipid02.c +++ b/drivers/media/i2c/st-mipid02.c @@ -465,7 +465,6 @@ static int mipid02_disable_streams(struct v4l2_subdev *sd, if (ret) goto error; - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); error: @@ -542,7 +541,6 @@ error: cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG1, 0, &ret); cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG1, 0, &ret); - pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return ret; } diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c index 143aa1359aba..bcfc274cf891 100644 --- a/drivers/media/i2c/tc358746.c +++ b/drivers/media/i2c/tc358746.c @@ -816,7 +816,6 @@ static int tc358746_s_stream(struct v4l2_subdev *sd, int enable) return 0; err_out: - pm_runtime_mark_last_busy(sd->dev); pm_runtime_put_sync_autosuspend(sd->dev); return err; @@ -838,7 +837,6 @@ err_out: if (err) return err; - pm_runtime_mark_last_busy(sd->dev); pm_runtime_put_sync_autosuspend(sd->dev); return v4l2_subdev_call(src, video, s_stream, 0); @@ -1016,7 +1014,6 @@ tc358746_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) err = tc358746_read(tc358746, reg->reg, &val); reg->val = val; - pm_runtime_mark_last_busy(sd->dev); pm_runtime_put_sync_autosuspend(sd->dev); return err; @@ -1032,7 +1029,6 @@ tc358746_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) tc358746_write(tc358746, (u32)reg->reg, (u32)reg->val); - pm_runtime_mark_last_busy(sd->dev); pm_runtime_put_sync_autosuspend(sd->dev); return 0; @@ -1395,7 +1391,6 @@ static int tc358746_init_hw(struct tc358746 *tc358746) } err = tc358746_read(tc358746, CHIPID_REG, &val); - pm_runtime_mark_last_busy(dev); pm_runtime_put_sync_autosuspend(dev); if (err) return -ENODEV; diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c index 8852c56431fe..775cfba188d8 100644 --- a/drivers/media/i2c/thp7312.c +++ b/drivers/media/i2c/thp7312.c @@ -808,7 +808,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable) if (!enable) { thp7312_stream_enable(thp7312, false); - pm_runtime_mark_last_busy(thp7312->dev); pm_runtime_put_autosuspend(thp7312->dev); v4l2_subdev_unlock_state(sd_state); @@ -839,7 +838,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable) goto finish_unlock; finish_pm: - pm_runtime_mark_last_busy(thp7312->dev); pm_runtime_put_autosuspend(thp7312->dev); finish_unlock: v4l2_subdev_unlock_state(sd_state); @@ -1147,7 +1145,6 @@ static int thp7312_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(thp7312->dev); pm_runtime_put_autosuspend(thp7312->dev); return ret; @@ -2183,7 +2180,6 @@ static 
int thp7312_probe(struct i2c_client *client) * Decrease the PM usage count. The device will get suspended after the * autosuspend delay, turning the power off. */ - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); dev_info(dev, "THP7312 firmware version %02u.%02u\n", diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c index c0754fd03b1d..7c39183dd44b 100644 --- a/drivers/media/i2c/vd55g1.c +++ b/drivers/media/i2c/vd55g1.c @@ -1104,7 +1104,6 @@ static int vd55g1_disable_streams(struct v4l2_subdev *sd, vd55g1_grab_ctrls(sensor, false); - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); return ret; @@ -1338,7 +1337,6 @@ static int vd55g1_g_volatile_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); return ret; @@ -1433,7 +1431,6 @@ static int vd55g1_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); return ret; @@ -1895,7 +1892,6 @@ static int vd55g1_probe(struct i2c_client *client) pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, 4000); pm_runtime_use_autosuspend(dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); ret = vd55g1_subdev_init(sensor); diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c index 5d951ad0b478..d66e21ba4498 100644 --- a/drivers/media/i2c/vd56g3.c +++ b/drivers/media/i2c/vd56g3.c @@ -493,7 +493,6 @@ static int vd56g3_g_volatile_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); return ret; @@ -577,7 +576,6 @@ static int vd56g3_s_ctrl(struct v4l2_ctrl *ctrl) break; } - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); return ret; @@ -1021,7 +1019,6 @@ static int vd56g3_disable_streams(struct v4l2_subdev *sd, __v4l2_ctrl_grab(sensor->vflip_ctrl, false); __v4l2_ctrl_grab(sensor->patgen_ctrl, false); - pm_runtime_mark_last_busy(sensor->dev); pm_runtime_put_autosuspend(sensor->dev); return ret; @@ -1527,7 +1524,6 @@ static int vd56g3_probe(struct i2c_client *client) } /* Sensor could now be powered off (after the autosuspend delay) */ - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); dev_dbg(dev, "Successfully probe %s sensor\n", diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c index 0dd991d70d53..1eee2d4f5b40 100644 --- a/drivers/media/i2c/video-i2c.c +++ b/drivers/media/i2c/video-i2c.c @@ -288,7 +288,6 @@ static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type, return tmp; tmp = regmap_bulk_read(data->regmap, AMG88XX_REG_TTHL, &buf, 2); - pm_runtime_mark_last_busy(regmap_get_device(data->regmap)); pm_runtime_put_autosuspend(regmap_get_device(data->regmap)); if (tmp) return tmp; @@ -527,7 +526,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count) return 0; error_rpm_put: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); error_del_list: video_i2c_del_list(vq, VB2_BUF_STATE_QUEUED); @@ -544,7 +542,6 @@ static void stop_streaming(struct vb2_queue *vq) kthread_stop(data->kthread_vid_cap); data->kthread_vid_cap = NULL; - pm_runtime_mark_last_busy(regmap_get_device(data->regmap)); pm_runtime_put_autosuspend(regmap_get_device(data->regmap)); video_i2c_del_list(vq, VB2_BUF_STATE_ERROR); @@ -853,7 +850,6 @@ static int video_i2c_probe(struct i2c_client *client) if (ret < 0) goto error_pm_disable; - 
pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_autosuspend(&client->dev); return 0; diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c index fd71f0c43ac3..a9ce032cc5a2 100644 --- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c +++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c @@ -451,7 +451,6 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst) if (q_status.report_queue_count == 0 && (q_status.instance_queue_count == 0 || dec_info.sequence_changed)) { dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx); } @@ -1364,7 +1363,6 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count } } - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); return ret; @@ -1498,7 +1496,6 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q) else streamoff_capture(q); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); } @@ -1662,7 +1659,6 @@ static void wave5_vpu_dec_device_run(void *priv) finish_job_and_return: dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx); } diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c index 1e5fc5f8b856..35913a7de834 100644 --- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c +++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c @@ -1391,12 +1391,10 @@ static int wave5_vpu_enc_start_streaming(struct vb2_queue *q, unsigned int count if (ret) goto return_buffers; - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); return 0; return_buffers: wave5_return_bufs(q, VB2_BUF_STATE_QUEUED); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); return ret; } @@ -1465,7 +1463,6 @@ static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q) else streamoff_capture(inst, q); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); } @@ -1520,7 +1517,6 @@ static void wave5_vpu_enc_device_run(void *priv) break; } dev_dbg(inst->dev->dev, "%s: leave with active job", __func__); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); return; default: @@ -1529,7 +1525,6 @@ static void wave5_vpu_enc_device_run(void *priv) break; } dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__); - pm_runtime_mark_last_busy(inst->dev->dev); pm_runtime_put_autosuspend(inst->dev->dev); v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx); } diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c index 0e56a4331b0d..45f8f6904867 100644 --- a/drivers/media/platform/nvidia/tegra-vde/h264.c +++ b/drivers/media/platform/nvidia/tegra-vde/h264.c @@ -585,7 +585,6 @@ static int tegra_vde_decode_begin(struct tegra_vde *vde, return 0; put_runtime_pm: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); unlock: @@ -612,7 +611,6 @@ static void tegra_vde_decode_abort(struct tegra_vde *vde) if (err) dev_err(dev, "DEC end: Failed to assert HW reset: %d\n", err); - pm_runtime_mark_last_busy(dev); 
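
/*
 * A minimal sketch, separate from the patch, of the runtime-PM idiom
 * these media hunks all converge on. It assumes, as the removals
 * imply, that pm_runtime_put_autosuspend() now updates the device's
 * last-busy timestamp itself, making a preceding
 * pm_runtime_mark_last_busy() redundant. The function below is
 * hypothetical, not taken from any of these drivers.
 */
#include <linux/pm_runtime.h>

static int example_sensor_access(struct device *dev)
{
	int ret;

	/* Take a usage reference, powering the device up if needed. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... program the hardware ... */

	/*
	 * Drop the reference; the autosuspend timer restarts from "now"
	 * without an explicit pm_runtime_mark_last_busy() call.
	 */
	pm_runtime_put_autosuspend(dev);

	return 0;
}
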
pm_runtime_put_autosuspend(dev); mutex_unlock(&vde->lock); diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c index 221dcd09e1e1..b3ed06297953 100644 --- a/drivers/media/platform/qcom/iris/iris_hfi_queue.c +++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c @@ -142,7 +142,6 @@ int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size) } mutex_unlock(&core->lock); - pm_runtime_mark_last_busy(core->dev); pm_runtime_put_autosuspend(core->dev); return 0; diff --git a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c index b30891718d8d..d60d92d2ffa1 100644 --- a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c +++ b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c @@ -950,7 +950,6 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q) kfree(job); } - pm_runtime_mark_last_busy(pispbe->dev); pm_runtime_put_autosuspend(pispbe->dev); dev_dbg(pispbe->dev, "Nodes streaming now 0x%x\n", @@ -1742,7 +1741,6 @@ static int pispbe_probe(struct platform_device *pdev) if (ret) goto disable_devs_err; - pm_runtime_mark_last_busy(pispbe->dev); pm_runtime_put_autosuspend(pispbe->dev); return 0; diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.c b/drivers/media/platform/rockchip/rkvdec/rkvdec.c index d707088ec0dc..d3b31f461194 100644 --- a/drivers/media/platform/rockchip/rkvdec/rkvdec.c +++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.c @@ -765,7 +765,6 @@ static void rkvdec_job_finish(struct rkvdec_ctx *ctx, { struct rkvdec_dev *rkvdec = ctx->dev; - pm_runtime_mark_last_busy(rkvdec->dev); pm_runtime_put_autosuspend(rkvdec->dev); rkvdec_job_finish_no_pm(ctx, result); } @@ -1159,13 +1158,6 @@ static int rkvdec_probe(struct platform_device *pdev) return ret; } - if (iommu_get_domain_for_dev(&pdev->dev)) { - rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev); - - if (!rkvdec->empty_domain) - dev_warn(rkvdec->dev, "cannot alloc new empty domain\n"); - } - vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); irq = platform_get_irq(pdev, 0); @@ -1188,6 +1180,15 @@ static int rkvdec_probe(struct platform_device *pdev) if (ret) goto err_disable_runtime_pm; + if (iommu_get_domain_for_dev(&pdev->dev)) { + rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev); + + if (IS_ERR(rkvdec->empty_domain)) { + rkvdec->empty_domain = NULL; + dev_warn(rkvdec->dev, "cannot alloc new empty domain\n"); + } + } + return 0; err_disable_runtime_pm: diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c index 8542238e0fb1..fa972effd4a2 100644 --- a/drivers/media/platform/verisilicon/hantro_drv.c +++ b/drivers/media/platform/verisilicon/hantro_drv.c @@ -89,7 +89,6 @@ static void hantro_job_finish(struct hantro_dev *vpu, struct hantro_ctx *ctx, enum vb2_buffer_state result) { - pm_runtime_mark_last_busy(vpu->dev); pm_runtime_put_autosuspend(vpu->dev); clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks); diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index bf6d8fa983bf..a6418ef782bc 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -48,10 +48,8 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id) if (val >= 0) ir_raw_event_store_edge(gpio_dev->rcdev, val == 1); - if (pmdev) { - pm_runtime_mark_last_busy(pmdev); + if (pmdev) pm_runtime_put_autosuspend(pmdev); - } return IRQ_HANDLED; } diff --git 
a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 7f3f47db4c98..e4275f8ee5db 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -555,7 +555,6 @@ EXPORT_SYMBOL(memstick_add_host); */ void memstick_remove_host(struct memstick_host *host) { - host->removing = 1; flush_workqueue(workqueue); mutex_lock(&host->lock); if (host->card) diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 3878136227e4..5b5e9354fb2e 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -812,6 +812,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev) int err; host->eject = true; + msh->removing = true; cancel_work_sync(&host->handle_req); cancel_delayed_work_sync(&host->poll_card); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 42878474e56e..60dbc815e501 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -99,6 +99,9 @@ #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16)) +#define CD_STABLE_TIMEOUT_US 1000000 +#define CD_STABLE_MAX_SLEEP_US 10 + /** * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map * @@ -206,12 +209,15 @@ struct sdhci_arasan_data { * 19MHz instead */ #define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2) +/* Enable CD stable check before power-up */ +#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE BIT(3) }; struct sdhci_arasan_of_data { const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; const struct sdhci_pltfm_data *pdata; const struct sdhci_arasan_clk_ops *clk_ops; + u32 quirks; }; static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = { @@ -514,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, return -EINVAL; } +static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + /* + * Ensure that the card detect logic has stabilized before powering up, this is + * necessary after a host controller reset. 
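
/*
 * Rough open-coded equivalent of the read_poll_timeout() call added
 * just below, assuming the usual <linux/iopoll.h> semantics: re-read
 * SDHCI_PRESENT_STATE until the card-detect-stable bit is set,
 * sleeping briefly between reads and giving up after the 1s timeout.
 * Illustration only; the helper name here is hypothetical.
 */
static int sdhci_cd_stable_poll_sketch(struct sdhci_host *host)
{
	unsigned long timeout = jiffies + HZ;	/* ~CD_STABLE_TIMEOUT_US */

	for (;;) {
		u32 reg = sdhci_readl(host, SDHCI_PRESENT_STATE);

		if (reg & SDHCI_CD_STABLE)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(5, CD_STABLE_MAX_SLEEP_US);
	}
}
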
+ */ + if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE) + read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US, + CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE); + + sdhci_set_power_and_bus_voltage(host, mode, vdd); +} + static const struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, @@ -521,7 +545,7 @@ static const struct sdhci_ops sdhci_arasan_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_arasan_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .set_power = sdhci_set_power_and_bus_voltage, + .set_power = sdhci_arasan_set_power_and_bus_voltage, .hw_reset = sdhci_arasan_hw_reset, }; @@ -570,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_arasan_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .set_power = sdhci_set_power_and_bus_voltage, + .set_power = sdhci_arasan_set_power_and_bus_voltage, .irq = sdhci_arasan_cqhci_irq, }; @@ -1447,6 +1471,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = { static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = { .pdata = &sdhci_arasan_zynqmp_pdata, .clk_ops = &zynqmp_clk_ops, + .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE, }; static const struct sdhci_arasan_clk_ops versal_clk_ops = { @@ -1457,6 +1482,7 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = { static struct sdhci_arasan_of_data sdhci_arasan_versal_data = { .pdata = &sdhci_arasan_zynqmp_pdata, .clk_ops = &versal_clk_ops, + .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE, }; static const struct sdhci_arasan_clk_ops versal_net_clk_ops = { @@ -1467,6 +1493,7 @@ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = { static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = { .pdata = &sdhci_arasan_versal_net_pdata, .clk_ops = &versal_net_clk_ops, + .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE, }; static struct sdhci_arasan_of_data intel_keembay_emmc_data = { @@ -1937,6 +1964,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1")) sdhci_arasan_update_clockmultiplier(host, 0x0); + sdhci_arasan->quirks |= data->quirks; + if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") || of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") || of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) { diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 4c2ae71770f7..3a1de477e9af 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -287,6 +287,20 @@ #define GLI_MAX_TUNING_LOOP 40 /* Genesys Logic chipset */ +static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev) +{ + int aer; + u32 value; + + /* mask the replay timer timeout of AER */ + aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (aer) { + pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); + value |= PCI_ERR_COR_REP_TIMER; + pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); + } +} + static inline void gl9750_wt_on(struct sdhci_host *host) { u32 wt_value; @@ -607,7 +621,6 @@ static void gl9750_hw_setting(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); struct pci_dev *pdev; - int aer; u32 value; pdev = slot->chip->pdev; @@ -626,12 +639,7 @@ static void gl9750_hw_setting(struct sdhci_host *host) pci_set_power_state(pdev, PCI_D0); /* mask 
the replay timer timeout of AER */ - aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (aer) { - pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); - value |= PCI_ERR_COR_REP_TIMER; - pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); - } + sdhci_gli_mask_replay_timer_timeout(pdev); gl9750_wt_off(host); } @@ -806,7 +814,6 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock) static void gl9755_hw_setting(struct sdhci_pci_slot *slot) { struct pci_dev *pdev = slot->chip->pdev; - int aer; u32 value; gl9755_wt_on(pdev); @@ -841,12 +848,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot) pci_set_power_state(pdev, PCI_D0); /* mask the replay timer timeout of AER */ - aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (aer) { - pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); - value |= PCI_ERR_COR_REP_TIMER; - pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); - } + sdhci_gli_mask_replay_timer_timeout(pdev); gl9755_wt_off(pdev); } @@ -1751,7 +1753,7 @@ cleanup: return ret; } -static void gli_set_gl9763e(struct sdhci_pci_slot *slot) +static void gl9763e_hw_setting(struct sdhci_pci_slot *slot) { struct pci_dev *pdev = slot->chip->pdev; u32 value; @@ -1780,6 +1782,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot) value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5); pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value); + /* mask the replay timer timeout of AER */ + sdhci_gli_mask_replay_timer_timeout(pdev); + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); value &= ~GLI_9763E_VHS_REV; value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); @@ -1923,7 +1928,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot) gli_pcie_enable_msi(slot); host->mmc_host_ops.hs400_enhanced_strobe = gl9763e_hs400_enhanced_strobe; - gli_set_gl9763e(slot); + gl9763e_hw_setting(slot); sdhci_enable_v4_mode(host); return 0; diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index e4fc345be7e5..17e62c61b6e6 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -156,6 +156,7 @@ struct sdhci_am654_data { #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) #define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1) +#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2) }; struct window { @@ -765,6 +766,7 @@ static int sdhci_am654_init(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + struct device *dev = mmc_dev(host->mmc); u32 ctl_cfg_2 = 0; u32 mask; u32 val; @@ -820,6 +822,12 @@ static int sdhci_am654_init(struct sdhci_host *host) if (ret) goto err_cleanup_host; + if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 && + host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) { + dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n"); + host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); + } + ret = __sdhci_add_host(host); if (ret) goto err_cleanup_host; @@ -883,6 +891,12 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev, return 0; } +static const struct soc_device_attribute sdhci_am654_descope_hs400[] = { + { .family = "AM62PX", .revision = "SR1.0" }, + { .family = "AM62PX", .revision = "SR1.1" }, + { /* sentinel */ } +}; + static const struct of_device_id sdhci_am654_of_match[] = { { .compatible = "ti,am654-sdhci-5.1", @@ -970,6 +984,10 @@ static int 
sdhci_am654_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "parsing dt failed\n"); + soc = soc_device_match(sdhci_am654_descope_hs400); + if (soc) + sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400; + host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch; host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; diff --git a/drivers/most/core.c b/drivers/most/core.c index a635d5082ebb..da319d108ea1 100644 --- a/drivers/most/core.c +++ b/drivers/most/core.c @@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch) dev = bus_find_device_by_name(&mostbus, NULL, mdev); if (!dev) return NULL; - put_device(dev); iface = dev_get_drvdata(dev); + put_device(dev); list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { if (!strcmp(dev_name(&c->dev), mdev_ch)) return c; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c8ce9a326138..f6655ad9dcb0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -8022,7 +8022,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp) } rx_rings = min_t(int, rx_rings, hwr.grp); hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); - if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) + if (bnxt_ulp_registered(bp->edev) && + hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); hwr.cp = min_t(int, hwr.cp, hwr.stat); rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); @@ -8030,6 +8031,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp) hwr.rx = rx_rings << 1; tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; + if (hwr.tx != bp->tx_nr_rings) { + netdev_warn(bp->dev, + "Able to reserve only %d out of %d requested TX rings\n", + hwr.tx, bp->tx_nr_rings); + } bp->tx_nr_rings = hwr.tx; /* If we cannot reserve all the RX rings, reset the RSS map only @@ -12857,6 +12863,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp) return rc; } +static int bnxt_tx_nr_rings(struct bnxt *bp) +{ + return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc : + bp->tx_nr_rings_per_tc; +} + +static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp) +{ + return bp->num_tc ? 
bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings; +} + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -12874,6 +12891,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (rc) return rc; + /* Make adjustments if reserved TX rings are less than requested */ + bp->tx_nr_rings -= bp->tx_nr_rings_xdp; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); + if (bp->tx_nr_rings_xdp) { + bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings += bp->tx_nr_rings_xdp; + } rc = bnxt_alloc_mem(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); @@ -16329,7 +16353,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); bp->rx_nr_rings = bp->cp_nr_rings; bp->tx_nr_rings_per_tc = bp->cp_nr_rings; - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings = bnxt_tx_nr_rings(bp); } static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) @@ -16361,7 +16385,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bnxt_trim_dflt_sh_rings(bp); else bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings = bnxt_tx_nr_rings(bp); avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { @@ -16374,7 +16398,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) rc = __bnxt_reserve_rings(bp); if (rc && rc != -ENODEV) netdev_warn(bp->dev, "Unable to reserve tx rings\n"); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); if (sh) bnxt_trim_dflt_sh_rings(bp); @@ -16383,7 +16407,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) rc = __bnxt_reserve_rings(bp); if (rc && rc != -ENODEV) netdev_warn(bp->dev, "2nd rings reservation failed.\n"); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bp->rx_nr_rings++; @@ -16417,7 +16441,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) if (rc) goto init_dflt_ring_err; - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); bnxt_set_dflt_rfs(bp); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 71d70bd6b256..290d67da704d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3091,7 +3091,7 @@ static void gem_update_stats(struct macb *bp) /* Add GEM_OCTTXH, GEM_OCTRXH */ val = bp->macb_reg_readl(bp, offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; - *(p++) += ((u64)val) << 32; + *p += ((u64)val) << 32; } } @@ -5636,19 +5636,16 @@ static void macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); + unregister_netdev(dev); phy_exit(bp->sgmii_phy); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); - unregister_netdev(dev); + device_set_wakeup_enable(&bp->pdev->dev, 0); cancel_work_sync(&bp->hresp_err_bh_work); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); - if (!pm_runtime_suspended(&pdev->dev)) { - macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, - bp->rx_clk, bp->tsu_clk); - pm_runtime_set_suspended(&pdev->dev); - } + pm_runtime_set_suspended(&pdev->dev); phylink_destroy(bp->phylink); free_netdev(dev); } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 
cc60ee454bf9..6bbf6e5584e5 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1099,7 +1099,7 @@ get_stats (struct net_device *dev) dev->stats.rx_bytes += dr32(OctetRcvOk); dev->stats.tx_bytes += dr32(OctetXmtOk); - dev->stats.multicast = dr32(McstFramesRcvdOk); + dev->stats.multicast += dr32(McstFramesRcvdOk); dev->stats.collisions += dr32(SingleColFrames) + dr32(MultiColFrames); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a83fdd22cbe4..e952d67388bf 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -511,6 +511,7 @@ enum ice_pf_flags { ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_FLAG_PLUG_AUX_DEV, ICE_FLAG_UNPLUG_AUX_DEV, + ICE_FLAG_AUX_DEV_CREATED, ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 9e4adc43e474..b53561c34708 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -13,16 +13,45 @@ static DEFINE_XARRAY(ice_adapters); static DEFINE_MUTEX(ice_adapters_mutex); -static unsigned long ice_adapter_index(u64 dsn) +#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63) + +#define ICE_ADAPTER_INDEX_E825C \ + (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX) + +static u64 ice_adapter_index(struct pci_dev *pdev) { + switch (pdev->device) { + case ICE_DEV_ID_E825C_BACKPLANE: + case ICE_DEV_ID_E825C_QSFP: + case ICE_DEV_ID_E825C_SFP: + case ICE_DEV_ID_E825C_SGMII: + /* E825C devices have multiple NACs which are connected to the + * same clock source, and which must share the same + * ice_adapter structure. We can't use the serial number since + * each NAC has its own NVM generated with its own unique + * Device Serial Number. Instead, rely on the embedded nature + * of the E825C devices, and use a fixed index. This relies on + * the fact that all E825C physical functions in a given + * system are part of the same overall device. 
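
/*
 * Illustration of the index scheme described above, with a made-up
 * DSN. Regular devices derive their xarray index from the DSN with
 * bit 63 forced clear; all E825C functions share one fixed index
 * with bit 63 set, so the two namespaces cannot collide:
 *
 *   regular: pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX
 *              -> e.g. 0x0000123456789abc
 *   E825C:   ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX
 *              -> bit 63 set, one shared ice_adapter per system
 */
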
+ */ + return ICE_ADAPTER_INDEX_E825C; + default: + return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX; + } +} + +static unsigned long ice_adapter_xa_index(struct pci_dev *pdev) +{ + u64 index = ice_adapter_index(pdev); + #if BITS_PER_LONG == 64 - return dsn; + return index; #else - return (u32)dsn ^ (u32)(dsn >> 32); + return (u32)index ^ (u32)(index >> 32); #endif } -static struct ice_adapter *ice_adapter_new(u64 dsn) +static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev) { struct ice_adapter *adapter; @@ -30,7 +59,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn) if (!adapter) return NULL; - adapter->device_serial_number = dsn; + adapter->index = ice_adapter_index(pdev); spin_lock_init(&adapter->ptp_gltsyn_time_lock); spin_lock_init(&adapter->txq_ctx_lock); refcount_set(&adapter->refcount, 1); @@ -64,24 +93,23 @@ static void ice_adapter_free(struct ice_adapter *adapter) */ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) { - u64 dsn = pci_get_dsn(pdev); struct ice_adapter *adapter; unsigned long index; int err; - index = ice_adapter_index(dsn); + index = ice_adapter_xa_index(pdev); scoped_guard(mutex, &ice_adapters_mutex) { err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); if (err == -EBUSY) { adapter = xa_load(&ice_adapters, index); refcount_inc(&adapter->refcount); - WARN_ON_ONCE(adapter->device_serial_number != dsn); + WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev)); return adapter; } if (err) return ERR_PTR(err); - adapter = ice_adapter_new(dsn); + adapter = ice_adapter_new(pdev); if (!adapter) return ERR_PTR(-ENOMEM); xa_store(&ice_adapters, index, adapter, GFP_KERNEL); @@ -100,11 +128,10 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) */ void ice_adapter_put(struct pci_dev *pdev) { - u64 dsn = pci_get_dsn(pdev); struct ice_adapter *adapter; unsigned long index; - index = ice_adapter_index(dsn); + index = ice_adapter_xa_index(pdev); scoped_guard(mutex, &ice_adapters_mutex) { adapter = xa_load(&ice_adapters, index); if (WARN_ON(!adapter)) diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h index db66d03c9f96..e95266c7f20b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.h +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -33,7 +33,7 @@ struct ice_port_list { * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register * @ctrl_pf: Control PF of the adapter * @ports: Ports list - * @device_serial_number: DSN cached for collision detection on 32bit systems + * @index: 64-bit index cached for collision detection on 32bit systems */ struct ice_adapter { refcount_t refcount; @@ -44,7 +44,7 @@ struct ice_adapter { struct ice_pf *ctrl_pf; struct ice_port_list ports; - u64 device_serial_number; + u64 index; }; struct ice_adapter *ice_adapter_get(struct pci_dev *pdev); diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index e2a036ce76ca..3b2d9c436979 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2377,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, * The function will apply the new Tx topology from the package buffer * if available. * - * Return: zero when update was successful, negative values otherwise. + * Return: + * * 0 - Successfully applied topology configuration. + * * -EBUSY - Failed to acquire global configuration lock. + * * -EEXIST - Topology configuration has already been applied. 
+ * * -EIO - Unable to apply topology configuration. + * * -ENODEV - Failed to re-initialize device after applying configuration. + * * Other negative error codes indicate unexpected failures. */ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len) { @@ -2410,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len) if (status) { ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); - return status; + return -EIO; } /* Is default topology already applied ? */ @@ -2497,31 +2503,45 @@ update_topo: ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); - return status; + return -EBUSY; } /* Check if reset was triggered already. */ reg = rd32(hw, GLGEN_RSTAT); if (reg & GLGEN_RSTAT_DEVSTATE_M) { - /* Reset is in progress, re-init the HW again */ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n"); ice_check_reset(hw); - return 0; + /* Reset is in progress, re-init the HW again */ + goto reinit_hw; } /* Set new topology */ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n"); - return status; + ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n", + ERR_PTR(status)); + /* only report -EIO here as the caller checks the error value + * and reports an informational error message informing that + * the driver failed to program Tx topology. + */ + status = -EIO; } - /* New topology is updated, delay 1 second before issuing the CORER */ + /* Even if Tx topology config failed, we need to CORE reset here to + * clear the global configuration lock. Delay 1 second to allow + * hardware to settle then issue a CORER + */ msleep(1000); ice_reset(hw, ICE_RESET_CORER); - /* CORER will clear the global lock, so no explicit call - * required for release. 
- */ + ice_check_reset(hw); + +reinit_hw: + /* Since we triggered a CORER, re-initialize hardware */ + ice_deinit_hw(hw); + if (ice_init_hw(hw)) { + ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n"); + return -ENODEV; + } - return 0; + return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 6ab53e430f91..420d45c2558b 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -336,6 +336,7 @@ int ice_plug_aux_dev(struct ice_pf *pf) mutex_lock(&pf->adev_mutex); cdev->adev = adev; mutex_unlock(&pf->adev_mutex); + set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags); return 0; } @@ -347,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf) { struct auxiliary_device *adev; + if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags)) + return; + mutex_lock(&pf->adev_mutex); adev = pf->cdev_info->adev; pf->cdev_info->adev = NULL; mutex_unlock(&pf->adev_mutex); - if (adev) { - auxiliary_device_delete(adev); - auxiliary_device_uninit(adev); - } + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 93c6e64ed940..5b1f9ca18c85 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4536,17 +4536,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) dev_info(dev, "Tx scheduling layers switching feature disabled\n"); else dev_info(dev, "Tx scheduling layers switching feature enabled\n"); - /* if there was a change in topology ice_cfg_tx_topo triggered - * a CORER and we need to re-init hw + return 0; + } else if (err == -ENODEV) { + /* If we failed to re-initialize the device, we can no longer + * continue loading. */ - ice_deinit_hw(hw); - err = ice_init_hw(hw); - + dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n"); return err; } else if (err == -EIO) { dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); + return 0; + } else if (err == -EEXIST) { + return 0; } + /* Do not treat this as a fatal error. 
*/ + dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n", + ERR_PTR(err)); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 29e0088ab6b2..d2871757ec94 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1352,7 +1352,7 @@ construct_skb: skb = ice_construct_skb(rx_ring, xdp); /* exit if we failed to retrieve a buffer */ if (!skb) { - rx_ring->ring_stats->rx_stats.alloc_page_failed++; + rx_ring->ring_stats->rx_stats.alloc_buf_failed++; xdp_verdict = ICE_XDP_CONSUMED; } ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 555879b1248d..b19b462e0bb6 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -180,6 +180,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb, } /** + * idpf_tx_singleq_dma_map_error - handle TX DMA map errors + * @txq: queue to send buffer on + * @skb: send buffer + * @first: original first buffer info buffer for packet + * @idx: starting point on ring to unwind + */ +static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq, + struct sk_buff *skb, + struct idpf_tx_buf *first, u16 idx) +{ + struct libeth_sq_napi_stats ss = { }; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = &ss, + }; + + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.dma_map_errs); + u64_stats_update_end(&txq->stats_sync); + + /* clear dma mappings for failed tx_buf map */ + for (;;) { + struct idpf_tx_buf *tx_buf; + + tx_buf = &txq->tx_buf[idx]; + libeth_tx_complete(tx_buf, &cp); + if (tx_buf == first) + break; + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + if (skb_is_gso(skb)) { + union idpf_tx_flex_desc *tx_desc; + + /* If we failed a DMA mapping for a TSO packet, we will have + * used one additional descriptor for a context + * descriptor. Reset that here. 
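
/*
 * The unwind loop above walks the TX buffer ring backwards, wrapping
 * at index 0, until the packet's first buffer is reached. The same
 * step shown as a standalone helper, with a hypothetical name:
 */
static inline u16 tx_ring_prev(u16 idx, u16 ring_size)
{
	return idx ? idx - 1 : ring_size - 1;
}
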
+ */ + tx_desc = &txq->flex_tx[idx]; + memset(tx_desc, 0, sizeof(*tx_desc)); + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + /* Update tail in case netdev_xmit_more was previously true */ + idpf_tx_buf_hw_update(txq, idx, false); +} + +/** * idpf_tx_singleq_map - Build the Tx base descriptor * @tx_q: queue to send buffer on * @first: first buffer info buffer to use @@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, for (frag = &skb_shinfo(skb)->frags[0];; frag++) { unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; - if (dma_mapping_error(tx_q->dev, dma)) - return idpf_tx_dma_map_error(tx_q, skb, first, i); + if (unlikely(dma_mapping_error(tx_q->dev, dma))) + return idpf_tx_singleq_dma_map_error(tx_q, skb, + first, i); /* record length, and DMA address */ dma_unmap_len_set(tx_buf, len, size); @@ -362,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; + u32 count, buf_count = 1; int csum, tso, needed; - unsigned int count; __be16 protocol; - count = idpf_tx_desc_count_required(tx_q, skb); + count = idpf_tx_res_count_required(tx_q, skb, &buf_count); if (unlikely(!count)) return idpf_tx_drop_skb(tx_q, skb); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index b868761fad4d..194f924d2bd6 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -8,12 +8,7 @@ #include "idpf_ptp.h" #include "idpf_virtchnl.h" -struct idpf_tx_stash { - struct hlist_node hlist; - struct libeth_sqe buf; -}; - -#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv) +#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv) LIBETH_SQE_CHECK_PRIV(u32); /** @@ -40,36 +35,6 @@ static bool idpf_chk_linearize(const struct sk_buff *skb, } /** - * idpf_buf_lifo_push - push a buffer pointer onto stack - * @stack: pointer to stack struct - * @buf: pointer to buf to push - * - * Returns 0 on success, negative on failure - **/ -static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, - struct idpf_tx_stash *buf) -{ - if (unlikely(stack->top == stack->size)) - return -ENOSPC; - - stack->bufs[stack->top++] = buf; - - return 0; -} - -/** - * idpf_buf_lifo_pop - pop a buffer pointer from stack - * @stack: pointer to stack struct - **/ -static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack) -{ - if (unlikely(!stack->top)) - return NULL; - - return stack->bufs[--stack->top]; -} - -/** * idpf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: TX queue @@ -97,52 +62,22 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) { struct libeth_sq_napi_stats ss = { }; - struct idpf_buf_lifo *buf_stack; - struct idpf_tx_stash *stash; struct libeth_cq_pp cp = { .dev = txq->dev, .ss = &ss, }; - struct hlist_node *tmp; - u32 i, tag; + u32 i; /* Buffers already cleared, nothing to do */ if (!txq->tx_buf) return; /* Free all the Tx buffer sk_buffs */ - for (i = 0; i < txq->desc_count; i++) + for (i = 0; i < txq->buf_pool_size; i++) libeth_tx_complete(&txq->tx_buf[i], &cp); kfree(txq->tx_buf); txq->tx_buf = NULL; - - if (!idpf_queue_has(FLOW_SCH_EN, txq)) - return; - - buf_stack = &txq->stash->buf_stack; - if (!buf_stack->bufs) - return; - - /* - * If a Tx timeout occurred, there are potentially still bufs in the - * hash table, free them here. 
- */ - hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash, - hlist) { - if (!stash) - continue; - - libeth_tx_complete(&stash->buf, &cp); - hash_del(&stash->hlist); - idpf_buf_lifo_push(buf_stack, stash); - } - - for (i = 0; i < buf_stack->size; i++) - kfree(buf_stack->bufs[i]); - - kfree(buf_stack->bufs); - buf_stack->bufs = NULL; } /** @@ -159,6 +94,9 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq) if (!txq->desc_ring) return; + if (txq->refillq) + kfree(txq->refillq->ring); + dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); txq->desc_ring = NULL; txq->next_to_use = 0; @@ -215,41 +153,18 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) */ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) { - struct idpf_buf_lifo *buf_stack; - int buf_size; - int i; - /* Allocate book keeping buffers only. Buffers to be supplied to HW * are allocated by kernel network stack and received as part of skb */ - buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; - tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) + tx_q->buf_pool_size = U16_MAX; + else + tx_q->buf_pool_size = tx_q->desc_count; + tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf), + GFP_KERNEL); if (!tx_q->tx_buf) return -ENOMEM; - if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) - return 0; - - buf_stack = &tx_q->stash->buf_stack; - - /* Initialize tx buf stack for out-of-order completions if - * flow scheduling offload is enabled - */ - buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), - GFP_KERNEL); - if (!buf_stack->bufs) - return -ENOMEM; - - buf_stack->size = tx_q->desc_count; - buf_stack->top = tx_q->desc_count; - - for (i = 0; i < tx_q->desc_count; i++) { - buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]), - GFP_KERNEL); - if (!buf_stack->bufs[i]) - return -ENOMEM; - } - return 0; } @@ -264,6 +179,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport, struct idpf_tx_queue *tx_q) { struct device *dev = tx_q->dev; + struct idpf_sw_queue *refillq; int err; err = idpf_tx_buf_alloc_all(tx_q); @@ -287,6 +203,31 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport, tx_q->next_to_clean = 0; idpf_queue_set(GEN_CHK, tx_q); + if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) + return 0; + + refillq = tx_q->refillq; + refillq->desc_count = tx_q->buf_pool_size; + refillq->ring = kcalloc(refillq->desc_count, sizeof(u32), + GFP_KERNEL); + if (!refillq->ring) { + err = -ENOMEM; + goto err_alloc; + } + + for (unsigned int i = 0; i < refillq->desc_count; i++) + refillq->ring[i] = + FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) | + FIELD_PREP(IDPF_RFL_BI_GEN_M, + idpf_queue_has(GEN_CHK, refillq)); + + /* Go ahead and flip the GEN bit since this counts as filling + * up the ring, i.e. we already ring wrapped. 
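
/*
 * Sketch of the generation-bit scheme the refill queue relies on.
 * Each ring slot encodes a buffer ID plus a GEN bit; a consumer only
 * trusts a slot while the slot's GEN matches the GEN it expects, and
 * the producer flips its GEN on every wrap, as in the loop above.
 * Simplified illustration using the hunk's field masks; not driver
 * code.
 */
static void refillq_post_sketch(u32 *ring, u32 count, u32 *ntu,
				bool *gen, u32 buf_id)
{
	ring[*ntu] = FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
		     FIELD_PREP(IDPF_RFL_BI_GEN_M, *gen);

	if (unlikely(++(*ntu) == count)) {
		*ntu = 0;
		*gen = !*gen;	/* ring wrapped: flip producer GEN */
	}
}
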
+ */ + idpf_queue_change(GEN_CHK, refillq); + + tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP; + return 0; err_alloc: @@ -337,8 +278,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) for (i = 0; i < vport->num_txq_grp; i++) { for (j = 0; j < vport->txq_grps[i].num_txq; j++) { struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; - u8 gen_bits = 0; - u16 bufidx_mask; err = idpf_tx_desc_alloc(vport, txq); if (err) { @@ -347,34 +286,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) i); goto err_out; } - - if (!idpf_is_queue_model_split(vport->txq_model)) - continue; - - txq->compl_tag_cur_gen = 0; - - /* Determine the number of bits in the bufid - * mask and add one to get the start of the - * generation bits - */ - bufidx_mask = txq->desc_count - 1; - while (bufidx_mask >> 1) { - txq->compl_tag_gen_s++; - bufidx_mask = bufidx_mask >> 1; - } - txq->compl_tag_gen_s++; - - gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - - txq->compl_tag_gen_s; - txq->compl_tag_gen_max = GETMAXVAL(gen_bits); - - /* Set bufid mask based on location of first - * gen bit; it cannot simply be the descriptor - * ring size-1 since we can have size values - * where not all of those bits are set. - */ - txq->compl_tag_bufid_m = - GETMAXVAL(txq->compl_tag_gen_s); } if (!idpf_is_queue_model_split(vport->txq_model)) @@ -623,18 +534,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq) } /** - * idpf_rx_post_buf_refill - Post buffer id to refill queue + * idpf_post_buf_refill - Post buffer id to refill queue * @refillq: refill queue to post to * @buf_id: buffer id to post */ -static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) +static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) { u32 nta = refillq->next_to_use; /* store the buffer ID and the SW maintained GEN bit to the refillq */ refillq->ring[nta] = - FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | - FIELD_PREP(IDPF_RX_BI_GEN_M, + FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) | + FIELD_PREP(IDPF_RFL_BI_GEN_M, idpf_queue_has(GEN_CHK, refillq)); if (unlikely(++nta == refillq->desc_count)) { @@ -1015,6 +926,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) { + if (flow_sch_en) { + kfree(txq_grp->txqs[j]->refillq); + txq_grp->txqs[j]->refillq = NULL; + } + kfree(txq_grp->txqs[j]); txq_grp->txqs[j] = NULL; } @@ -1024,9 +940,6 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) kfree(txq_grp->complq); txq_grp->complq = NULL; - - if (flow_sch_en) - kfree(txq_grp->stashes); } kfree(vport->txq_grps); vport->txq_grps = NULL; @@ -1387,7 +1300,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; - struct idpf_txq_stash *stashes; int j; tx_qgrp->vport = vport; @@ -1400,15 +1312,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) goto err_alloc; } - if (split && flow_sch_en) { - stashes = kcalloc(num_txq, sizeof(*stashes), - GFP_KERNEL); - if (!stashes) - goto err_alloc; - - tx_qgrp->stashes = stashes; - } - for (j = 0; j < tx_qgrp->num_txq; j++) { struct idpf_tx_queue *q = tx_qgrp->txqs[j]; @@ -1428,12 +1331,14 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) if (!flow_sch_en) continue; - if (split) { - q->stash = &stashes[j]; - 
hash_init(q->stash->sched_buf_hash); - } - idpf_queue_set(FLOW_SCH_EN, q); + + q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL); + if (!q->refillq) + goto err_alloc; + + idpf_queue_set(GEN_CHK, q->refillq); + idpf_queue_set(RFL_GEN_CHK, q->refillq); } if (!split) @@ -1717,87 +1622,6 @@ static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb) spin_unlock_bh(&tx_tstamp_caps->status_lock); } -/** - * idpf_tx_clean_stashed_bufs - clean bufs that were stored for - * out of order completions - * @txq: queue to clean - * @compl_tag: completion tag of packet to clean (from completion descriptor) - * @cleaned: pointer to stats struct to track cleaned packets/bytes - * @budget: Used to determine if we are in netpoll - */ -static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, - u16 compl_tag, - struct libeth_sq_napi_stats *cleaned, - int budget) -{ - struct idpf_tx_stash *stash; - struct hlist_node *tmp_buf; - struct libeth_cq_pp cp = { - .dev = txq->dev, - .ss = cleaned, - .napi = budget, - }; - - /* Buffer completion */ - hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, - hlist, compl_tag) { - if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag)) - continue; - - hash_del(&stash->hlist); - - if (stash->buf.type == LIBETH_SQE_SKB && - (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS)) - idpf_tx_read_tstamp(txq, stash->buf.skb); - - libeth_tx_complete(&stash->buf, &cp); - - /* Push shadow buf back onto stack */ - idpf_buf_lifo_push(&txq->stash->buf_stack, stash); - } -} - -/** - * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a - * later time (only relevant for flow scheduling mode) - * @txq: Tx queue to clean - * @tx_buf: buffer to store - */ -static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, - struct idpf_tx_buf *tx_buf) -{ - struct idpf_tx_stash *stash; - - if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) - return 0; - - stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); - if (unlikely(!stash)) { - net_err_ratelimited("%s: No out-of-order TX buffers left!\n", - netdev_name(txq->netdev)); - - return -ENOMEM; - } - - /* Store buffer params in shadow buffer */ - stash->buf.skb = tx_buf->skb; - stash->buf.bytes = tx_buf->bytes; - stash->buf.packets = tx_buf->packets; - stash->buf.type = tx_buf->type; - stash->buf.nr_frags = tx_buf->nr_frags; - dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); - dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); - idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf); - - /* Add buffer to buf_hash table to be freed later */ - hash_add(txq->stash->sched_buf_hash, &stash->hlist, - idpf_tx_buf_compl_tag(&stash->buf)); - - tx_buf->type = LIBETH_SQE_EMPTY; - - return 0; -} - #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ do { \ if (unlikely(++(ntc) == (txq)->desc_count)) { \ @@ -1825,14 +1649,8 @@ do { \ * Separate packet completion events will be reported on the completion queue, * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling. - * - * Furthermore, in flow scheduling mode, check to make sure there are enough - * reserve buffers to stash the packet. If there are not, return early, which - * will leave next_to_clean pointing to the packet that failed to be stashed. - * - * Return: false in the scenario above, true otherwise. 
*/ -static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, +static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, struct libeth_sq_napi_stats *cleaned, bool descs_only) @@ -1846,7 +1664,12 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, .napi = napi_budget, }; struct idpf_tx_buf *tx_buf; - bool clean_complete = true; + + if (descs_only) { + /* Bump ring index to mark as cleaned. */ + tx_q->next_to_clean = end; + return; + } tx_desc = &tx_q->flex_tx[ntc]; next_pending_desc = &tx_q->flex_tx[end]; @@ -1866,136 +1689,61 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, break; eop_idx = tx_buf->rs_idx; + libeth_tx_complete(tx_buf, &cp); - if (descs_only) { - if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) { - clean_complete = false; - goto tx_splitq_clean_out; - } - - idpf_stash_flow_sch_buffers(tx_q, tx_buf); + /* unmap remaining buffers */ + while (ntc != eop_idx) { + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, + tx_desc, tx_buf); - while (ntc != eop_idx) { - idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, - tx_desc, tx_buf); - idpf_stash_flow_sch_buffers(tx_q, tx_buf); - } - } else { + /* unmap any remaining paged data */ libeth_tx_complete(tx_buf, &cp); - - /* unmap remaining buffers */ - while (ntc != eop_idx) { - idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, - tx_desc, tx_buf); - - /* unmap any remaining paged data */ - libeth_tx_complete(tx_buf, &cp); - } } fetch_next_txq_desc: idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); } -tx_splitq_clean_out: tx_q->next_to_clean = ntc; - - return clean_complete; } -#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ -do { \ - (buf)++; \ - (ntc)++; \ - if (unlikely((ntc) == (txq)->desc_count)) { \ - buf = (txq)->tx_buf; \ - ntc = 0; \ - } \ -} while (0) - /** - * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers + * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers * @txq: queue to clean - * @compl_tag: completion tag of packet to clean (from completion descriptor) + * @buf_id: packet's starting buffer ID, from completion descriptor * @cleaned: pointer to stats struct to track cleaned packets/bytes * @budget: Used to determine if we are in netpoll * - * Cleans all buffers associated with the input completion tag either from the - * TX buffer ring or from the hash table if the buffers were previously - * stashed. Returns the byte/segment count for the cleaned packet associated - * this completion tag. + * Clean all buffers associated with the packet starting at buf_id. Returns the + * byte/segment count for the cleaned packet. 
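+ *
+ * The buffers of one packet are not contiguous in the pool; they are
+ * chained through idpf_tx_buf_next() until IDPF_TXBUF_NULL terminates
+ * the walk. A sketch with hypothetical buffer IDs:
+ *
+ *	tx_buf[7] -> tx_buf[42] -> tx_buf[3] -> IDPF_TXBUF_NULL
+ *
+ * Each ID is posted back to the refill queue as soon as its buffer has
+ * been completed, so it can be handed out again on a later transmit.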
*/ -static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, - struct libeth_sq_napi_stats *cleaned, - int budget) +static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id, + struct libeth_sq_napi_stats *cleaned, + int budget) { - u16 idx = compl_tag & txq->compl_tag_bufid_m; struct idpf_tx_buf *tx_buf = NULL; struct libeth_cq_pp cp = { .dev = txq->dev, .ss = cleaned, .napi = budget, }; - u16 ntc, orig_idx = idx; - - tx_buf = &txq->tx_buf[idx]; - - if (unlikely(tx_buf->type <= LIBETH_SQE_CTX || - idpf_tx_buf_compl_tag(tx_buf) != compl_tag)) - return false; + tx_buf = &txq->tx_buf[buf_id]; if (tx_buf->type == LIBETH_SQE_SKB) { if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS) idpf_tx_read_tstamp(txq, tx_buf->skb); libeth_tx_complete(tx_buf, &cp); + idpf_post_buf_refill(txq->refillq, buf_id); } - idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); + while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) { + buf_id = idpf_tx_buf_next(tx_buf); - while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) { + tx_buf = &txq->tx_buf[buf_id]; libeth_tx_complete(tx_buf, &cp); - idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); + idpf_post_buf_refill(txq->refillq, buf_id); } - - /* - * It's possible the packet we just cleaned was an out of order - * completion, which means we can stash the buffers starting from - * the original next_to_clean and reuse the descriptors. We need - * to compare the descriptor ring next_to_clean packet's "first" buffer - * to the "first" buffer of the packet we just cleaned to determine if - * this is the case. Howevever, next_to_clean can point to either a - * reserved buffer that corresponds to a context descriptor used for the - * next_to_clean packet (TSO packet) or the "first" buffer (single - * packet). The orig_idx from the packet we just cleaned will always - * point to the "first" buffer. If next_to_clean points to a reserved - * buffer, let's bump ntc once and start the comparison from there. - */ - ntc = txq->next_to_clean; - tx_buf = &txq->tx_buf[ntc]; - - if (tx_buf->type == LIBETH_SQE_CTX) - idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf); - - /* - * If ntc still points to a different "first" buffer, clean the - * descriptor ring and stash all of the buffers for later cleaning. If - * we cannot stash all of the buffers, next_to_clean will point to the - * "first" buffer of the packet that could not be stashed and cleaning - * will start there next time. - */ - if (unlikely(tx_buf != &txq->tx_buf[orig_idx] && - !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned, - true))) - return true; - - /* - * Otherwise, update next_to_clean to reflect the cleaning that was - * done above. - */ - txq->next_to_clean = idx; - - return true; } /** @@ -2014,22 +1762,17 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct libeth_sq_napi_stats *cleaned, int budget) { - u16 compl_tag; + /* RS completion contains queue head for queue based scheduling or + * completion tag for flow based scheduling. + */ + u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head); if (!idpf_queue_has(FLOW_SCH_EN, txq)) { - u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); - - idpf_tx_splitq_clean(txq, head, budget, cleaned, false); + idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false); return; } - compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); - - /* If we didn't clean anything on the ring, this packet must be - * in the hash table. Go clean it there. 
- */ - if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget)) - idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget); + idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget); } /** @@ -2146,8 +1889,7 @@ fetch_next_desc: /* Update BQL */ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || - np->state != __IDPF_VPORT_UP || + dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP || !netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, @@ -2204,15 +1946,21 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); } -/* Global conditions to tell whether the txq (and related resources) - * has room to allow the use of "size" descriptors. +/** + * idpf_txq_has_room - check if enough Tx splitq resources are available + * @tx_q: the queue to be checked + * @descs_needed: number of descriptors required for this packet + * @bufs_needed: number of Tx buffers required for this packet + * + * Return: 0 if no room available, 1 otherwise */ -static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) +static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed, + u32 bufs_needed) { - if (IDPF_DESC_UNUSED(tx_q) < size || + if (IDPF_DESC_UNUSED(tx_q) < descs_needed || IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) || - IDPF_TX_BUF_RSV_LOW(tx_q)) + idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed) return 0; return 1; } @@ -2221,14 +1969,21 @@ static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked * @descs_needed: number of descriptors required for this packet + * @bufs_needed: number of buffers needed for this packet * - * Returns 0 if stop is not needed + * Return: 0 if stop is not needed */ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, - unsigned int descs_needed) + u32 descs_needed, + u32 bufs_needed) { + /* Since we have multiple resources to check for splitq, our + * start,stop_thrs becomes a boolean check instead of a count + * threshold. + */ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, - idpf_txq_has_room(tx_q, descs_needed), + idpf_txq_has_room(tx_q, descs_needed, + bufs_needed), 1, 1)) return 0; @@ -2270,14 +2025,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, } /** - * idpf_tx_desc_count_required - calculate number of Tx descriptors needed + * idpf_tx_res_count_required - get number of Tx resources needed for this pkt * @txq: queue to send buffer on * @skb: send buffer + * @bufs_needed: (output) number of buffers needed for this skb. * - * Returns number of data descriptors needed for this skb. + * Return: number of data descriptors and buffers needed for this skb.
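+ *
+ * A short usage sketch mirroring the splitq xmit path: the caller seeds
+ * the buffer count with 1 for the skb head, and this helper adds one
+ * more buffer per page fragment on top of the descriptor count:
+ *
+ *	u32 count, buf_count = 1;
+ *
+ *	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
+ *	if (unlikely(!count))
+ *		return idpf_tx_drop_skb(tx_q, skb);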
*/ -unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, - struct sk_buff *skb) +unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq, + struct sk_buff *skb, + u32 *bufs_needed) { const struct skb_shared_info *shinfo; unsigned int count = 0, i; @@ -2288,6 +2045,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, return count; shinfo = skb_shinfo(skb); + *bufs_needed += shinfo->nr_frags; for (i = 0; i < shinfo->nr_frags; i++) { unsigned int size; @@ -2317,71 +2075,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, } /** - * idpf_tx_dma_map_error - handle TX DMA map errors - * @txq: queue to send buffer on - * @skb: send buffer - * @first: original first buffer info buffer for packet - * @idx: starting point on ring to unwind + * idpf_tx_splitq_bump_ntu - adjust NTU and generation + * @txq: the tx ring to wrap + * @ntu: ring index to bump */ -void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, - struct idpf_tx_buf *first, u16 idx) +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) { - struct libeth_sq_napi_stats ss = { }; - struct libeth_cq_pp cp = { - .dev = txq->dev, - .ss = &ss, - }; + ntu++; - u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.dma_map_errs); - u64_stats_update_end(&txq->stats_sync); + if (ntu == txq->desc_count) + ntu = 0; - /* clear dma mappings for failed tx_buf map */ - for (;;) { - struct idpf_tx_buf *tx_buf; + return ntu; +} - tx_buf = &txq->tx_buf[idx]; - libeth_tx_complete(tx_buf, &cp); - if (tx_buf == first) - break; - if (idx == 0) - idx = txq->desc_count; - idx--; - } +/** + * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue + * @refillq: refill queue to get buffer ID from + * @buf_id: return buffer ID + * + * Return: true if a buffer ID was found, false if not + */ +static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq, + u32 *buf_id) +{ + u32 ntc = refillq->next_to_clean; + u32 refill_desc; - if (skb_is_gso(skb)) { - union idpf_tx_flex_desc *tx_desc; + refill_desc = refillq->ring[ntc]; - /* If we failed a DMA mapping for a TSO packet, we will have - * used one additional descriptor for a context - * descriptor. Reset that here. 
- */ - tx_desc = &txq->flex_tx[idx]; - memset(tx_desc, 0, sizeof(*tx_desc)); - if (idx == 0) - idx = txq->desc_count; - idx--; + if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) != + !!(refill_desc & IDPF_RFL_BI_GEN_M))) + return false; + + *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc); + + if (unlikely(++ntc == refillq->desc_count)) { + idpf_queue_change(RFL_GEN_CHK, refillq); + ntc = 0; } - /* Update tail in case netdev_xmit_more was previously true */ - idpf_tx_buf_hw_update(txq, idx, false); + refillq->next_to_clean = ntc; + + return true; } /** - * idpf_tx_splitq_bump_ntu - adjust NTU and generation - * @txq: the tx ring to wrap - * @ntu: ring index to bump + * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error + * @txq: Tx queue to unwind + * @params: pointer to splitq params struct + * @first: starting buffer for packet to unmap */ -static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) +static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq, + struct idpf_tx_splitq_params *params, + struct idpf_tx_buf *first) { - ntu++; + struct idpf_sw_queue *refillq = txq->refillq; + struct libeth_sq_napi_stats ss = { }; + struct idpf_tx_buf *tx_buf = first; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = &ss, + }; - if (ntu == txq->desc_count) { - ntu = 0; - txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.dma_map_errs); + u64_stats_update_end(&txq->stats_sync); + + libeth_tx_complete(tx_buf, &cp); + while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) { + tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)]; + libeth_tx_complete(tx_buf, &cp); } - return ntu; + /* Update tail in case netdev_xmit_more was previously true. */ + idpf_tx_buf_hw_update(txq, params->prev_ntu, false); + + if (!refillq) + return; + + /* Restore refillq state to avoid leaking tags. */ + if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq)) + idpf_queue_change(RFL_GEN_CHK, refillq); + refillq->next_to_clean = params->prev_refill_ntc; } /** @@ -2405,6 +2181,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, struct netdev_queue *nq; struct sk_buff *skb; skb_frag_t *frag; + u32 next_buf_id; u16 td_cmd = 0; dma_addr_t dma; @@ -2422,17 +2199,16 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, tx_buf = first; first->nr_frags = 0; - params->compl_tag = - (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; - for (frag = &skb_shinfo(skb)->frags[0];; frag++) { unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; - if (dma_mapping_error(tx_q->dev, dma)) - return idpf_tx_dma_map_error(tx_q, skb, first, i); + if (unlikely(dma_mapping_error(tx_q->dev, dma))) { + idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL; + return idpf_tx_splitq_pkt_err_unmap(tx_q, params, + first); + } first->nr_frags++; - idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag; tx_buf->type = LIBETH_SQE_FRAG; /* record length, and DMA address */ @@ -2488,29 +2264,12 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, max_data); if (unlikely(++i == tx_q->desc_count)) { - tx_buf = tx_q->tx_buf; tx_desc = &tx_q->flex_tx[0]; i = 0; - tx_q->compl_tag_cur_gen = - IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } else { - tx_buf++; tx_desc++; } - /* Since this packet has a buffer that is going to span - * multiple descriptors, it's going to leave holes in - * to the TX buffer ring. 
To ensure these holes do not - * cause issues in the cleaning routines, we will clear - * them of any stale data and assign them the same - * completion tag as the current packet. Then when the - * packet is being cleaned, the cleaning routines will - * simply pass over these holes and finish cleaning the - * rest of the packet. - */ - tx_buf->type = LIBETH_SQE_EMPTY; - idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag; - /* Adjust the DMA offset and the remaining size of the * fragment. On the first iteration of this loop, * max_data will be >= 12K and <= 16K-1. On any @@ -2535,15 +2294,25 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); if (unlikely(++i == tx_q->desc_count)) { - tx_buf = tx_q->tx_buf; tx_desc = &tx_q->flex_tx[0]; i = 0; - tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } else { - tx_buf++; tx_desc++; } + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { + if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, + &next_buf_id))) { + idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL; + return idpf_tx_splitq_pkt_err_unmap(tx_q, params, + first); + } + } else { + next_buf_id = i; + } + idpf_tx_buf_next(tx_buf) = next_buf_id; + tx_buf = &tx_q->tx_buf[next_buf_id]; + size = skb_frag_size(frag); data_len -= size; @@ -2558,6 +2327,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, /* write last descriptor with RS and EOP bits */ first->rs_idx = i; + idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL; td_cmd |= params->eop_cmd; idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); i = idpf_tx_splitq_bump_ntu(tx_q, i); @@ -2661,8 +2431,6 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) union idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; - txq->tx_buf[i].type = LIBETH_SQE_CTX; - /* grab the next descriptor */ desc = &txq->flex_ctx[i]; txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); @@ -2756,6 +2524,21 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, #endif /* CONFIG_PTP_1588_CLOCK */ /** + * idpf_tx_splitq_need_re - check whether RE bit needs to be set + * @tx_q: pointer to Tx queue + * + * Return: true if RE bit needs to be set, false otherwise + */ +static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q) +{ + int gap = tx_q->next_to_use - tx_q->last_re; + + gap += (gap < 0) ? 
tx_q->desc_count : 0; + + return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP; +} + +/** * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors * @skb: send buffer * @tx_q: queue to send buffer on @@ -2765,13 +2548,16 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q) { - struct idpf_tx_splitq_params tx_params = { }; + struct idpf_tx_splitq_params tx_params = { + .prev_ntu = tx_q->next_to_use, + }; union idpf_flex_tx_ctx_desc *ctx_desc; struct idpf_tx_buf *first; - unsigned int count; + u32 count, buf_count = 1; int tso, idx; + u32 buf_id; - count = idpf_tx_desc_count_required(tx_q, skb); + count = idpf_tx_res_count_required(tx_q, skb, &buf_count); if (unlikely(!count)) return idpf_tx_drop_skb(tx_q, skb); @@ -2781,7 +2567,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, /* Check for splitq specific TX resources */ count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso); - if (idpf_tx_maybe_stop_splitq(tx_q, count)) { + if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) { idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); return NETDEV_TX_BUSY; @@ -2813,36 +2599,47 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, idpf_tx_set_tstamp_desc(ctx_desc, idx); } - /* record the location of the first descriptor for this packet */ - first = &tx_q->tx_buf[tx_q->next_to_use]; - first->skb = skb; + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { + struct idpf_sw_queue *refillq = tx_q->refillq; - if (tso) { - first->packets = tx_params.offload.tso_segs; - first->bytes = skb->len + - ((first->packets - 1) * tx_params.offload.tso_hdr_len); - } else { - first->packets = 1; - first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - } + /* Save refillq state in case of a packet rollback. Otherwise, + * the tags will be leaked since they will be popped from the + * refillq but never reposted during cleaning. + */ + tx_params.prev_refill_gen = + idpf_queue_has(RFL_GEN_CHK, refillq); + tx_params.prev_refill_ntc = refillq->next_to_clean; + + if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, + &buf_id))) { + if (tx_params.prev_refill_gen != + idpf_queue_has(RFL_GEN_CHK, refillq)) + idpf_queue_change(RFL_GEN_CHK, refillq); + refillq->next_to_clean = tx_params.prev_refill_ntc; + + tx_q->next_to_use = tx_params.prev_ntu; + return idpf_tx_drop_skb(tx_q, skb); + } + tx_params.compl_tag = buf_id; - if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; - /* Set the RE bit to catch any packets that may have not been - * stashed during RS completion cleaning. MIN_GAP is set to - * MIN_RING size to ensure it will be set at least once each - * time around the ring. + /* Set the RE bit to periodically "clean" the descriptor ring. + * MIN_GAP is set to MIN_RING size to ensure it will be set at + * least once each time around the ring. 
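+ *
+ * For example (hypothetical values), with a 512-descriptor ring and a
+ * MIN_GAP of 64: if last_re is 480 and next_to_use has wrapped around
+ * to 30, the gap is 30 - 480 + 512 = 62 and the RE bit stays clear;
+ * once next_to_use reaches 32 the gap hits 64, the RE bit is set and
+ * last_re is moved up to 32.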
*/ - if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { + if (idpf_tx_splitq_need_re(tx_q)) { tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE; tx_q->txq_grp->num_completions_pending++; + tx_q->last_re = tx_q->next_to_use; } if (skb->ip_summed == CHECKSUM_PARTIAL) tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; } else { + buf_id = tx_q->next_to_use; + tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2; tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD; @@ -2850,6 +2647,18 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; } + first = &tx_q->tx_buf[buf_id]; + first->skb = skb; + + if (tso) { + first->packets = tx_params.offload.tso_segs; + first->bytes = skb->len + + ((first->packets - 1) * tx_params.offload.tso_hdr_len); + } else { + first->packets = 1; + first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + } + idpf_tx_splitq_map(tx_q, &tx_params, first); return NETDEV_TX_OK; @@ -3387,7 +3196,7 @@ payload: skip_data: rx_buf->netmem = 0; - idpf_rx_post_buf_refill(refillq, buf_id); + idpf_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); /* skip if it is non EOP desc */ @@ -3495,10 +3304,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq, bool failure; if (idpf_queue_has(RFL_GEN_CHK, refillq) != - !!(refill_desc & IDPF_RX_BI_GEN_M)) + !!(refill_desc & IDPF_RFL_BI_GEN_M)) break; - buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc); failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc); if (failure) break; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 281de655a813..52753dff381c 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -108,8 +108,8 @@ do { \ */ #define IDPF_TX_SPLITQ_RE_MIN_GAP 64 -#define IDPF_RX_BI_GEN_M BIT(16) -#define IDPF_RX_BI_BUFID_M GENMASK(15, 0) +#define IDPF_RFL_BI_GEN_M BIT(16) +#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0) #define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M @@ -118,10 +118,6 @@ do { \ ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \ (txq)->next_to_clean - (txq)->next_to_use - 1) -#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top) -#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ - (txq)->desc_count >> 2) - #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1) /* Determine the absolute number of completions pending, i.e. the number of * completions that are expected to arrive on the TX completion queue. @@ -131,11 +127,7 @@ do { \ 0 : U32_MAX) + \ (txq)->num_completions_pending - (txq)->complq->num_completions) -#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16 -/* Adjust the generation for the completion tag and wrap if necessary */ -#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \ - ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? 
\ - 0 : (txq)->compl_tag_cur_gen) +#define IDPF_TXBUF_NULL U32_MAX #define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS) @@ -153,18 +145,6 @@ union idpf_tx_flex_desc { #define idpf_tx_buf libeth_sqe /** - * struct idpf_buf_lifo - LIFO for managing OOO completions - * @top: Used to know how many buffers are left - * @size: Total size of LIFO - * @bufs: Backing array - */ -struct idpf_buf_lifo { - u16 top; - u16 size; - struct idpf_tx_stash **bufs; -}; - -/** * struct idpf_tx_offload_params - Offload parameters for a given packet * @tx_flags: Feature flags enabled for this packet * @hdr_offsets: Offset parameter for single queue model @@ -196,6 +176,9 @@ struct idpf_tx_offload_params { * @compl_tag: Associated tag for completion * @td_tag: Descriptor tunneling tag * @offload: Offload parameters + * @prev_ntu: stored TxQ next_to_use in case of rollback + * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback + * @prev_refill_gen: stored refillq generation bit in case of packet rollback */ struct idpf_tx_splitq_params { enum idpf_tx_desc_dtype_value dtype; @@ -206,6 +189,10 @@ struct idpf_tx_splitq_params { }; struct idpf_tx_offload_params offload; + + u16 prev_ntu; + u16 prev_refill_ntc; + bool prev_refill_gen; }; enum idpf_tx_ctx_desc_eipt_offload { @@ -468,17 +455,6 @@ struct idpf_tx_queue_stats { #define IDPF_DIM_DEFAULT_PROFILE_IX 1 /** - * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode - * @buf_stack: Stack of empty buffers to store buffer info for out of order - * buffer completions. See struct idpf_buf_lifo - * @sched_buf_hash: Hash table to store buffers - */ -struct idpf_txq_stash { - struct idpf_buf_lifo buf_stack; - DECLARE_HASHTABLE(sched_buf_hash, 12); -} ____cacheline_aligned; - -/** * struct idpf_rx_queue - software structure representing a receive queue * @rx: universal receive descriptor array * @single_buf: buffer descriptor array in singleq @@ -610,6 +586,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * @netdev: &net_device corresponding to this queue * @next_to_use: Next descriptor to use * @next_to_clean: Next descriptor to clean + * @last_re: last descriptor index that RE bit was set + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on * the TX completion queue, it can be for any TXQ associated * with that completion queue. This means we can clean up to @@ -620,11 +598,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * only once at the end of the cleaning routine. 
* @clean_budget: singleq only, queue cleaning budget * @cleaned_pkts: Number of packets cleaned for the above said case - * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather - * @stash: Tx buffer stash for Flow-based scheduling mode - * @compl_tag_bufid_m: Completion tag buffer id mask - * @compl_tag_cur_gen: Used to keep track of current completion tag generation - * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @refillq: Pointer to refill queue * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP * @tstamp_task: Work that handles Tx timestamp read * @stats_sync: See struct u64_stats_sync @@ -633,6 +607,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * @size: Length of descriptor ring in bytes * @dma: Physical address of ring * @q_vector: Backreference to associated vector + * @buf_pool_size: Total number of idpf_tx_buf */ struct idpf_tx_queue { __cacheline_group_begin_aligned(read_mostly); @@ -654,7 +629,6 @@ struct idpf_tx_queue { u16 desc_count; u16 tx_min_pkt_len; - u16 compl_tag_gen_s; struct net_device *netdev; __cacheline_group_end_aligned(read_mostly); @@ -662,6 +636,8 @@ struct idpf_tx_queue { __cacheline_group_begin_aligned(read_write); u16 next_to_use; u16 next_to_clean; + u16 last_re; + u16 tx_max_bufs; union { u32 cleaned_bytes; @@ -669,12 +645,7 @@ struct idpf_tx_queue { }; u16 cleaned_pkts; - u16 tx_max_bufs; - struct idpf_txq_stash *stash; - - u16 compl_tag_bufid_m; - u16 compl_tag_cur_gen; - u16 compl_tag_gen_max; + struct idpf_sw_queue *refillq; struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps; struct work_struct *tstamp_task; @@ -689,11 +660,12 @@ struct idpf_tx_queue { dma_addr_t dma; struct idpf_q_vector *q_vector; + u32 buf_pool_size; __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_tx_queue, 64, - 112 + sizeof(struct u64_stats_sync), - 24); + 104 + sizeof(struct u64_stats_sync), + 32); /** * struct idpf_buf_queue - software structure representing a buffer queue @@ -903,7 +875,6 @@ struct idpf_rxq_group { * @vport: Vport back pointer * @num_txq: Number of TX queues associated * @txqs: Array of TX queue pointers - * @stashes: array of OOO stashes for the queues * @complq: Associated completion queue pointer, split queue only * @num_completions_pending: Total number of completions pending for the * completion queue, acculumated for all TX queues @@ -918,7 +889,6 @@ struct idpf_txq_group { u16 num_txq; struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q]; - struct idpf_txq_stash *stashes; struct idpf_compl_queue *complq; @@ -1011,6 +981,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector) reg->dyn_ctl); } +/** + * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq + * @refillq: pointer to refillq containing buf_ids + */ +static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq) +{ + return (refillq->next_to_use > refillq->next_to_clean ? 
+ 0 : refillq->desc_count) + + refillq->next_to_use - refillq->next_to_clean - 1; +} + int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg); @@ -1038,10 +1019,8 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more); unsigned int idpf_size_to_txd_count(unsigned int size); netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb); -void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, - struct idpf_tx_buf *first, u16 ring_idx); -unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, - struct sk_buff *skb); +unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq, + struct sk_buff *skb, u32 *buf_count); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c index d74116441d1c..bfeef5b0b99d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c @@ -3125,7 +3125,7 @@ static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw, if (err) return err; - combo_ver = le32_to_cpu(civd.combo_ver); + combo_ver = get_unaligned_le32(&civd.combo_ver); orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver); orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h index d2f22d8558f8..ff8d640a50b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h @@ -932,7 +932,7 @@ struct ixgbe_orom_civd_info { __le32 combo_ver; /* Combo Image Version number */ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ -}; +} __packed; /* Function specific capabilities */ struct ixgbe_hw_func_caps { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 4ff19a04b23e..0c46ba8a5adc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1978,6 +1978,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_release_regions; } + if (!is_cn20k(pdev) && + !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) { + dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n", + cgx->cgx_id); + goto err_release_regions; + } + cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); if (!cgx->lmac_count) { dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 1692033b46b0..b58283341923 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -783,6 +783,20 @@ static inline bool is_cn10kb(struct rvu *rvu) return false; } +static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id) +{ + /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected + * to NIX. 
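+ * Everywhere else the check below reduces to: CGX0 is always mapped,
+ * while the higher-numbered CGX blocks are mapped only on 96XX, 98XX,
+ * CN10KA and CN10KB (any non-zero cgx_id fails on other silicons).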
+ */ + if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B) + return cgx_id <= 1; + + return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX || + id == PCI_SUBSYS_DEVID_98XX || + id == PCI_SUBSYS_DEVID_CN10K_A || + id == PCI_SUBSYS_DEVID_CN10K_B)); +} + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) { u64 npc_const3; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index f674729124e6..aff17c37ddde 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -124,7 +124,9 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf) dev_stats->rx_ucast_frames; dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); - dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); + dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP) + + (unsigned long)atomic_long_read(&dev_stats->tx_discards); + dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index e3765b73c434..1c8a3c078a64 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -153,6 +153,7 @@ struct otx2_dev_stats { u64 tx_bcast_frames; u64 tx_mcast_frames; u64 tx_drops; + atomic_long_t tx_discards; }; /* Driver counted stats */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index b23585c5e5c2..5027fae0aa77 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2220,6 +2220,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); int qidx = skb_get_queue_mapping(skb); + struct otx2_dev_stats *dev_stats; struct otx2_snd_queue *sq; struct netdev_queue *txq; int sq_idx; @@ -2232,6 +2233,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) /* Check for minimum and maximum packet length */ if (skb->len <= ETH_HLEN || (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { + dev_stats = &pf->hw.dev_stats; + atomic_long_inc(&dev_stats->tx_discards); dev_kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 5589fccd370b..7ebb6e656884 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -417,9 +417,19 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev) { struct otx2_nic *vf = netdev_priv(netdev); int qidx = skb_get_queue_mapping(skb); + struct otx2_dev_stats *dev_stats; struct otx2_snd_queue *sq; struct netdev_queue *txq; + /* Check for minimum and maximum packet length */ + if (skb->len <= ETH_HLEN || + (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) { + dev_stats = &vf->hw.dev_stats; + atomic_long_inc(&dev_stats->tx_discards); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + sq = &vf->qset.sq[qidx]; txq = netdev_get_tx_queue(netdev, qidx); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 25af98034e2e..b476733a0234 100644 
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -371,7 +371,8 @@ static void rvu_rep_get_stats(struct work_struct *work) stats->rx_mcast_frames = rsp->rx.mcast; stats->tx_bytes = rsp->tx.octs; stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast; - stats->tx_drops = rsp->tx.drop; + stats->tx_drops = rsp->tx.drop + + (unsigned long)atomic_long_read(&stats->tx_discards); exit: mutex_unlock(&priv->mbox.lock); } @@ -418,6 +419,16 @@ static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev) struct otx2_nic *pf = rep->mdev; struct otx2_snd_queue *sq; struct netdev_queue *txq; + struct rep_stats *stats; + + /* Check for minimum and maximum packet length */ + if (skb->len <= ETH_HLEN || + (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { + stats = &rep->stats; + atomic_long_inc(&stats->tx_discards); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } sq = &pf->qset.sq[rep->rep_id]; txq = netdev_get_tx_queue(dev, 0); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h index 38446b3e4f13..5bc9e2c7d800 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h @@ -27,6 +27,7 @@ struct rep_stats { u64 tx_bytes; u64 tx_frames; u64 tx_drops; + atomic_long_t tx_discards; }; struct rep_dev { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 3ffa3fbacd16..2c0e0c16ca90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -160,7 +160,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli if (err) return err; - mlx5_unload_one_devl_locked(dev, true); + mlx5_sync_reset_unload_flow(dev, true); err = mlx5_health_wait_pci_up(dev); if (err) NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 3efa8bf1d14e..4720523813b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -575,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; } - priv->dcbx.xoff = xoff; /* Apply the settings */ if (update_buffer) { @@ -584,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return err; } + priv->dcbx.xoff = xoff; + if (update_prio2buffer) err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index f4a19ffbb641..66d276a1be83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -66,11 +66,23 @@ struct mlx5e_port_buffer { struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER]; }; +#ifdef CONFIG_MLX5_CORE_EN_DCB int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, struct ieee_pfc *pfc, u32 *buffer_size, u8 *prio2buffer); +#else +static inline int +mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + u32 change, unsigned int mtu, + void *pfc, + u32 *buffer_size, + u8 *prio2buffer) +{ + return 0; +} +#endif int mlx5e_port_query_buffer(struct mlx5e_priv *priv, struct 
mlx5e_port_buffer *port_buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0e48065a46eb..3970d0ddbcdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -49,6 +49,7 @@ #include "en.h" #include "en/dim.h" #include "en/txrx.h" +#include "en/port_buffer.h" #include "en_tc.h" #include "en_rep.h" #include "en_accel/ipsec.h" @@ -138,6 +139,8 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv) if (up) { netdev_info(priv->netdev, "Link up\n"); netif_carrier_on(priv->netdev); + mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu, + NULL, NULL, NULL); } else { netdev_info(priv->netdev, "Link down\n"); netif_carrier_off(priv->netdev); @@ -3040,9 +3043,11 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - u16 mtu; + u16 mtu, prev_mtu; int err; + mlx5e_query_mtu(mdev, params, &prev_mtu); + err = mlx5e_set_mtu(mdev, params, params->sw_mtu); if (err) return err; @@ -3052,6 +3057,18 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", __func__, mtu, params->sw_mtu); + if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) { + err = mlx5e_port_manual_buffer_config(priv, 0, mtu, + NULL, NULL, NULL); + if (err) { + netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n", + __func__, mtu, err, prev_mtu); + + mlx5e_set_mtu(mdev, params, prev_mtu); + return err; + } + } + params->sw_mtu = mtu; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index d87392360dbd..cb165085a4c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -3734,6 +3734,13 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id, char *value = val.vstr; u8 eswitch_mode; + eswitch_mode = mlx5_eswitch_mode(dev); + if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Changing fs mode is not supported when eswitch offloads enabled."); + return -EOPNOTSUPP; + } + if (!strcmp(value, "dmfs")) return 0; @@ -3759,14 +3766,6 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id, return -EINVAL; } - eswitch_mode = mlx5_eswitch_mode(dev); - if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) { - NL_SET_ERR_MSG_FMT_MOD(extack, - "Moving to %s is not supported when eswitch offloads enabled.", - value); - return -EOPNOTSUPP; - } - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 69933addd921..22995131824a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -6,13 +6,15 @@ #include "fw_reset.h" #include "diag/fw_tracer.h" #include "lib/tout.h" +#include "sf/sf.h" enum { MLX5_FW_RESET_FLAGS_RESET_REQUESTED, MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, MLX5_FW_RESET_FLAGS_PENDING_COMP, MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, - MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED + MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, + MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, }; struct mlx5_fw_reset { @@ -219,7 +221,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 
0, false); } -static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded) +static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; struct devlink *devlink = priv_to_devlink(dev); @@ -228,8 +230,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { complete(&fw_reset->done); } else { - if (!unloaded) - mlx5_unload_one(dev, false); + mlx5_sync_reset_unload_flow(dev, false); if (mlx5_health_wait_pci_up(dev)) mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); else @@ -272,7 +273,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) mlx5_sync_reset_clear_reset_requested(dev, false); mlx5_enter_error_state(dev, true); - mlx5_fw_reset_complete_reload(dev, false); + mlx5_fw_reset_complete_reload(dev); } #define MLX5_RESET_POLL_INTERVAL (HZ / 10) @@ -428,6 +429,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev, return false; } + if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) { + mlx5_core_warn(dev, "SFs should be removed before reset\n"); + return false; + } + #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) { err = mlx5_check_hotplug_interrupt(dev, bridge); @@ -586,6 +592,65 @@ static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method) return err; } +void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked) +{ + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + unsigned long timeout; + int poll_freq = 20; + bool reset_action; + u8 rst_state; + int err; + + if (locked) + mlx5_unload_one_devl_locked(dev, false); + else + mlx5_unload_one(dev, false); + + if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags)) + return; + + mlx5_set_fw_rst_ack(dev); + mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n"); + + reset_action = false; + timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD)); + do { + rst_state = mlx5_get_fw_rst_state(dev); + if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ || + rst_state == MLX5_FW_RST_STATE_IDLE) { + reset_action = true; + break; + } + if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) { + mlx5_core_info(dev, "Sync Reset Drop mode ack\n"); + mlx5_set_fw_rst_ack(dev); + poll_freq = 1000; + } + msleep(poll_freq); + } while (!time_after(jiffies, timeout)); + + if (!reset_action) { + mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n", + rst_state); + fw_reset->ret = -ETIMEDOUT; + goto done; + } + + mlx5_core_warn(dev, "Sync Reset, got reset action. 
rst_state = %u\n", + rst_state); + if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) { + err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); + if (err) { + mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", + err); + fw_reset->ret = err; + } + } + +done: + clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags); +} + static void mlx5_sync_reset_now_event(struct work_struct *work) { struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, @@ -613,17 +678,13 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) mlx5_enter_error_state(dev, true); done: fw_reset->ret = err; - mlx5_fw_reset_complete_reload(dev, false); + mlx5_fw_reset_complete_reload(dev); } static void mlx5_sync_reset_unload_event(struct work_struct *work) { struct mlx5_fw_reset *fw_reset; struct mlx5_core_dev *dev; - unsigned long timeout; - int poll_freq = 20; - bool reset_action; - u8 rst_state; int err; fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work); @@ -632,6 +693,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) if (mlx5_sync_reset_clear_reset_requested(dev, false)) return; + set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags); mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n"); err = mlx5_cmd_fast_teardown_hca(dev); @@ -640,49 +702,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) else mlx5_enter_error_state(dev, true); - if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) - mlx5_unload_one_devl_locked(dev, false); - else - mlx5_unload_one(dev, false); - - mlx5_set_fw_rst_ack(dev); - mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n"); - - reset_action = false; - timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD)); - do { - rst_state = mlx5_get_fw_rst_state(dev); - if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ || - rst_state == MLX5_FW_RST_STATE_IDLE) { - reset_action = true; - break; - } - if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) { - mlx5_core_info(dev, "Sync Reset Drop mode ack\n"); - mlx5_set_fw_rst_ack(dev); - poll_freq = 1000; - } - msleep(poll_freq); - } while (!time_after(jiffies, timeout)); - - if (!reset_action) { - mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n", - rst_state); - fw_reset->ret = -ETIMEDOUT; - goto done; - } - - mlx5_core_warn(dev, "Sync Reset, got reset action. 
rst_state = %u\n", rst_state); - if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) { - err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); - if (err) { - mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err); - fw_reset->ret = err; - } - } - -done: - mlx5_fw_reset_complete_reload(dev, true); + mlx5_fw_reset_complete_reload(dev); } static void mlx5_sync_reset_abort_event(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h index ea527d06a85f..d5b28525c960 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h @@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev); int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); +void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked); int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev, struct netlink_ext_ack *extack); void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 0864ba625c07..3304f25cc805 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -518,3 +518,13 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) WARN_ON(!xa_empty(&table->function_ids)); kfree(table); } + +bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table = dev->priv.sf_table; + + if (!table) + return true; + + return xa_empty(&table->function_ids); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 860f9ddb7107..89559a37997a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -17,6 +17,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev); int mlx5_sf_table_init(struct mlx5_core_dev *dev); void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); +bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev); int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *add_attr, @@ -61,6 +62,11 @@ static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) { } +static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev) +{ + return true; +} + #endif #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c index 396804369b00..6b36a4a7d895 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c @@ -117,7 +117,7 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type); pr_warn("HWS: Invalid stc_type: %d\n", stc_type); ret = -EINVAL; - goto unlock_and_out; + goto free_shared_stc; } ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c index 51e4c551e0ef..d56271a9e4f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c @@ -279,7 +279,7 @@ int mlx5hws_pat_get_pattern(struct 
mlx5hws_context *ctx, return ret; clean_pattern: - mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id); + mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id); out_unlock: mutex_unlock(&ctx->pattern_cache->lock); return ret; @@ -527,7 +527,6 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions, u32 *nop_locations, __be64 *new_pat) { u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD; - u16 src_field, dst_field; u8 action_type; bool dependent; size_t i, j; @@ -539,6 +538,9 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions, return 0; for (i = 0, j = 0; i < num_actions; i++, j++) { + u16 src_field = INVALID_FIELD; + u16 dst_field = INVALID_FIELD; + if (j >= max_actions) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c index 7e37d6e9eb83..7b5071c3df36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c @@ -124,6 +124,7 @@ static int hws_pool_buddy_init(struct mlx5hws_pool *pool) mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n", pool->type, pool->alloc_log_sz); mlx5hws_buddy_cleanup(buddy); + kfree(buddy); return -ENOMEM; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 0cf1ea927cc0..6dac42ca28c6 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -52,6 +52,8 @@ int __fbnic_open(struct fbnic_net *fbn) fbnic_bmc_rpc_init(fbd); fbnic_rss_reinit(fbd, fbn); + phylink_resume(fbn->phylink); + return 0; time_stop: fbnic_time_stop(fbn); @@ -84,6 +86,8 @@ static int fbnic_stop(struct net_device *netdev) { struct fbnic_net *fbn = netdev_priv(netdev); + phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd)); + fbnic_down(fbn); fbnic_pcs_free_irq(fbn->fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index b3c27c566f52..a4db97652fb4 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -118,14 +118,12 @@ static void fbnic_service_task_start(struct fbnic_net *fbn) struct fbnic_dev *fbd = fbn->fbd; schedule_delayed_work(&fbd->service_task, HZ); - phylink_resume(fbn->phylink); } static void fbnic_service_task_stop(struct fbnic_net *fbn) { struct fbnic_dev *fbd = fbn->fbd; - phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd)); cancel_delayed_work(&fbd->service_task); } @@ -444,11 +442,10 @@ static int __fbnic_pm_resume(struct device *dev) /* Re-enable mailbox */ err = fbnic_fw_request_mbx(fbd); + devl_unlock(priv_to_devlink(fbd)); if (err) goto err_free_irqs; - devl_unlock(priv_to_devlink(fbd)); - /* Only send log history if log buffer is empty to prevent duplicate * log entries. 
*/ @@ -465,20 +462,20 @@ static int __fbnic_pm_resume(struct device *dev) rtnl_lock(); - if (netif_running(netdev)) { + if (netif_running(netdev)) err = __fbnic_open(fbn); - if (err) - goto err_free_mbx; - } rtnl_unlock(); + if (err) + goto err_free_mbx; return 0; err_free_mbx: fbnic_fw_log_disable(fbd); - rtnl_unlock(); + devl_lock(priv_to_devlink(fbd)); fbnic_fw_free_mbx(fbd); + devl_unlock(priv_to_devlink(fbd)); err_free_irqs: fbnic_free_irqs(fbd); err_invalidate_uc_addr: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 6cadf8de4fdf..00e929bf280b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -49,6 +49,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); } +static void dwxgmac2_update_caps(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.mbps_10_100) + priv->hw->link.caps &= ~(MAC_10 | MAC_100); + else if (!priv->dma_cap.half_duplex) + priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD); +} + static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) { u32 tx = readl(ioaddr + XGMAC_TX_CONFIG); @@ -1424,6 +1432,7 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, const struct stmmac_ops dwxgmac210_ops = { .core_init = dwxgmac2_core_init, + .update_caps = dwxgmac2_update_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxgmac2_rx_queue_enable, @@ -1532,8 +1541,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv) mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_1000FD | MAC_2500FD | MAC_5000FD | - MAC_10000FD; + MAC_10 | MAC_100 | MAC_1000FD | + MAC_2500FD | MAC_5000FD | MAC_10000FD; mac->link.duplex = 0; mac->link.speed10 = XGMAC_CONFIG_SS_10_MII; mac->link.speed100 = XGMAC_CONFIG_SS_100_MII; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 5dcc95bc0ad2..4d6bb995d8d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -203,10 +203,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr, } writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); - - /* Enable MTL RX overflow */ - value = readl(ioaddr + XGMAC_MTL_QINTEN(channel)); - writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel)); } static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -386,8 +382,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, struct dma_features *dma_cap) { + struct stmmac_priv *priv; u32 hw_cap; + priv = container_of(dma_cap, struct stmmac_priv, dma_cap); + /* MAC HW feature 0 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31; @@ -410,6 +409,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4; dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3; dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; + if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20) + dma_cap->mbps_10_100 = 1; /* MAC HW feature 1 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 
ebf278263d5e..f0abd99fd137 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2586,6 +2586,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; + bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; struct xsk_buff_pool *pool = tx_q->xsk_pool; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc = NULL; @@ -2673,7 +2674,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) } stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, - true, priv->mode, true, true, + csum, priv->mode, true, true, xdp_desc.len); stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); @@ -4988,6 +4989,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc; dma_addr_t dma_addr; @@ -5039,7 +5041,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, stmmac_set_desc_addr(priv, tx_desc, dma_addr); stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, - true, priv->mode, true, true, + csum, priv->mode, true, true, xdpf->len); tx_q->tx_count_frames++; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 720104661d7f..60a4629fe6ba 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1812,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, /* Enable NAPI handler before init callbacks */ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll); + napi_enable(&net_device->chan_table[0].napi); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, + &net_device->chan_table[0].napi); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, + &net_device->chan_table[0].napi); /* Open the channel */ device->channel->next_request_id_callback = vmbus_next_request_id; @@ -1831,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, /* Channel is opened */ netdev_dbg(ndev, "hv_netvsc channel opened successfully\n"); - napi_enable(&net_device->chan_table[0].napi); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, - &net_device->chan_table[0].napi); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, - &net_device->chan_table[0].napi); - /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { @@ -1854,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, close: RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL); - napi_disable(&net_device->chan_table[0].napi); /* Now, we can close the channel safely */ vmbus_close(device->channel); cleanup: + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL); + napi_disable(&net_device->chan_table[0].napi); netif_napi_del(&net_device->chan_table[0].napi); cleanup2: diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9e73959e61ee..c35f9685b6bf 100644 --- 
a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes); new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE; + /* Enable napi before opening the vmbus channel to avoid races: + * data the host places on the host->guest ring could otherwise be + * missed if napi is not yet enabled. + */ + napi_enable(&nvchan->napi); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, + &nvchan->napi); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, + &nvchan->napi); + ret = vmbus_open(new_sc, netvsc_ring_bytes, netvsc_ring_bytes, NULL, 0, netvsc_channel_cb, nvchan); - if (ret == 0) { - napi_enable(&nvchan->napi); - netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, - &nvchan->napi); - netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, - &nvchan->napi); - } else { + if (ret != 0) { netdev_notice(ndev, "sub channel open failed: %d\n", ret); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, + NULL); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, + NULL); + napi_disable(&nvchan->napi); } if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index b8c6ba7c7834..2d8eca54c40a 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -484,6 +484,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev) void vsc85xx_link_change_notify(struct phy_device *phydev); void vsc8584_config_ts_intr(struct phy_device *phydev); int vsc8584_ptp_init(struct phy_device *phydev); +void vsc8584_ptp_deinit(struct phy_device *phydev); int vsc8584_ptp_probe_once(struct phy_device *phydev); int vsc8584_ptp_probe(struct phy_device *phydev); irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev); @@ -498,6 +499,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev) { return 0; } +static inline void vsc8584_ptp_deinit(struct phy_device *phydev) +{ +} static inline int vsc8584_ptp_probe_once(struct phy_device *phydev) { return 0; diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 2407281297d3..ef0ef1570d39 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2359,9 +2359,7 @@ static int vsc85xx_probe(struct phy_device *phydev) static void vsc85xx_remove(struct phy_device *phydev) { - struct vsc8531_private *priv = phydev->priv; - - skb_queue_purge(&priv->rx_skbs_list); + vsc8584_ptp_deinit(phydev); } /* Microsemi VSC85xx PHYs */ diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index de6c7312e8f2..72847320cb65 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1298,7 +1298,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev) static int __vsc8584_init_ptp(struct phy_device *phydev) { - struct vsc8531_private *vsc8531 = phydev->priv; static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 }; static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 }; u32 val; @@ -1515,17 +1514,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev) vsc85xx_ts_eth_cmp1_sig(phydev); - vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp; - vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp; - vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp; - vsc8531->mii_ts.ts_info = vsc85xx_ts_info; - phydev->mii_ts = &vsc8531->mii_ts; - - memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, 
sizeof(vsc85xx_clk_caps)); - - vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps, - &phydev->mdio.dev); - return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock); + return 0; } void vsc8584_config_ts_intr(struct phy_device *phydev) @@ -1552,6 +1541,16 @@ int vsc8584_ptp_init(struct phy_device *phydev) return 0; } +void vsc8584_ptp_deinit(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + + if (vsc8531->ptp->ptp_clock) { + ptp_clock_unregister(vsc8531->ptp->ptp_clock); + skb_queue_purge(&vsc8531->rx_skbs_list); + } +} + irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev) { struct vsc8531_private *priv = phydev->priv; @@ -1612,7 +1611,16 @@ int vsc8584_ptp_probe(struct phy_device *phydev) vsc8531->ptp->phydev = phydev; - return 0; + vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp; + vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp; + vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp; + vsc8531->mii_ts.ts_info = vsc85xx_ts_info; + phydev->mii_ts = &vsc8531->mii_ts; + + memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps)); + vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps, + &phydev->mdio.dev); + return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock); } int vsc8584_ptp_probe_once(struct phy_device *phydev) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e56901bb6ebc..11352d85475a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1355,6 +1355,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d14e6d602273..975bdc5dab84 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -5758,14 +5758,15 @@ static void virtnet_freeze_down(struct virtio_device *vdev) disable_rx_mode_work(vi); flush_work(&vi->rx_mode_work); - netif_tx_lock_bh(vi->dev); - netif_device_detach(vi->dev); - netif_tx_unlock_bh(vi->dev); if (netif_running(vi->dev)) { rtnl_lock(); virtnet_close(vi->dev); rtnl_unlock(); } + + netif_tx_lock_bh(vi->dev); + netif_device_detach(vi->dev); + netif_tx_unlock_bh(vi->dev); } static int init_vqs(struct virtnet_info *vi); diff --git a/drivers/of/device.c b/drivers/of/device.c index c80426510ec2..f7e75e527667 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -17,8 +17,8 @@ /** * of_match_device - Tell if a struct device matches an of_device_id list - @matches: array of of device match structures to search in - @dev: the of device structure to match against + @matches: array of of_device_id match structures to search in + @dev: the OF device structure to match against * * Used by a driver to check whether a platform_device present in the * system is in its list of supported devices. 
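A minimal, illustrative sketch of the of_match_device() usage pattern described in the kernel-doc above (the "vendor,example" compatible, example_ids table, and example_probe() are invented for illustration and are not part of this series):

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_ids[] = {
	{ .compatible = "vendor,example" },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	/* NULL means the device is not in this driver's supported list. */
	const struct of_device_id *match =
		of_match_device(example_ids, &pdev->dev);

	if (!match)
		return -ENODEV;

	return 0;
}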
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 0aba760f7577..2eaaddcb0ec4 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -935,10 +935,15 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs, return -ENOMEM; ret = of_changeset_add_property(ocs, np, new_pp); - if (ret) + if (ret) { __of_prop_free(new_pp); + return ret; + } - return ret; + new_pp->next = np->deadprops; + np->deadprops = new_pp; + + return 0; } /** diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 77016c0cc296..2e9ea751ed2d 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -25,6 +25,7 @@ #include <linux/memblock.h> #include <linux/kmemleak.h> #include <linux/cma.h> +#include <linux/dma-map-ops.h> #include "of_private.h" @@ -175,13 +176,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, base = dt_mem_next_cell(dt_root_addr_cells, &prop); size = dt_mem_next_cell(dt_root_size_cells, &prop); - if (size && - early_init_dt_reserve_memory(base, size, nomap) == 0) + if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) { + /* Architecture specific contiguous memory fixup. */ + if (of_flat_dt_is_compatible(node, "shared-dma-pool") && + of_get_flat_dt_prop(node, "reusable", NULL)) + dma_contiguous_early_fixup(base, size); pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n", uname, &base, (unsigned long)(size / SZ_1M)); - else + } else { pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n", uname, &base, (unsigned long)(size / SZ_1M)); + } len -= t_len; } @@ -472,7 +477,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam uname, (unsigned long)(size / SZ_1M)); return -ENOMEM; } - + /* Architecture specific contiguous memory fixup. 
*/ + if (of_flat_dt_is_compatible(node, "shared-dma-pool") && + of_get_flat_dt_prop(node, "reusable", NULL)) + dma_contiguous_early_fixup(base, size); /* Save region in the reserved_mem array */ fdt_reserved_mem_save_node(node, uname, base, size); return 0; @@ -771,6 +779,7 @@ int of_reserved_mem_region_to_resource(const struct device_node *np, return -EINVAL; resource_set_range(res, rmem->base, rmem->size); + res->flags = IORESOURCE_MEM; res->name = rmem->name; return 0; } diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index ddd11668457c..be1ca8e85754 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -539,6 +539,7 @@ config PINCTRL_STMFX tristate "STMicroelectronics STMFX GPIO expander pinctrl driver" depends on I2C depends on OF_GPIO + depends on HAS_IOMEM select GENERIC_PINCONF select GPIOLIB_IRQCHIP select MFD_STMFX diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c index 5f1ec9e0de21..1b2f132d76f0 100644 --- a/drivers/pinctrl/mediatek/pinctrl-airoha.c +++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c @@ -2696,7 +2696,7 @@ static int airoha_pinconf_get(struct pinctrl_dev *pctrl_dev, arg = 1; break; default: - return -EOPNOTSUPP; + return -ENOTSUPP; } *config = pinconf_to_config_packed(param, arg); @@ -2788,7 +2788,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev, break; } default: - return -EOPNOTSUPP; + return -ENOTSUPP; } } @@ -2805,10 +2805,10 @@ static int airoha_pinconf_group_get(struct pinctrl_dev *pctrl_dev, if (airoha_pinconf_get(pctrl_dev, airoha_pinctrl_groups[group].pins[i], config)) - return -EOPNOTSUPP; + return -ENOTSUPP; if (i && cur_config != *config) - return -EOPNOTSUPP; + return -ENOTSUPP; cur_config = *config; } diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c index e34e984c2b38..6132710aff68 100644 --- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c +++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c @@ -1093,7 +1093,7 @@ static const struct of_device_id aml_pctl_of_match[] = { { .compatible = "amlogic,pinctrl-s6", .data = &s6_priv_data, }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, aml_pctl_dt_match); +MODULE_DEVICE_TABLE(of, aml_pctl_of_match); static struct platform_driver aml_pctl_driver = { .driver = { diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c index 54986a752f7d..a94009203e01 100644 --- a/drivers/platform/x86/amd/hsmp/acpi.c +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -504,7 +504,7 @@ static int init_acpi(struct device *dev) dev_set_drvdata(dev, &hsmp_pdev->sock[sock_ind]); - return ret; + return 0; } static const struct bin_attribute hsmp_metric_tbl_attr = { diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c index 885e2f8136fd..19f82c1d3090 100644 --- a/drivers/platform/x86/amd/hsmp/hsmp.c +++ b/drivers/platform/x86/amd/hsmp/hsmp.c @@ -356,6 +356,11 @@ ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size) if (!sock || !buf) return -EINVAL; + if (!sock->metric_tbl_addr) { + dev_err(sock->dev, "Metrics table address not available\n"); + return -ENOMEM; + } + /* Do not support lseek(), also don't allow more than the size of metric table */ if (size != sizeof(struct hsmp_metric_table)) { dev_err(sock->dev, "Wrong buffer size\n"); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index ded4c84f5ed1..7ffc659b2794 100644 --- 
a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -28,10 +28,15 @@ static struct quirk_entry quirk_spurious_8042 = { .spurious_8042 = true, }; +static struct quirk_entry quirk_s2idle_spurious_8042 = { + .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH, + .spurious_8042 = true, +}; + static const struct dmi_system_id fwbug_list[] = { { .ident = "L14 Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20X5"), @@ -39,7 +44,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14s Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20XF"), @@ -47,7 +52,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "X13 Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20XH"), @@ -55,7 +60,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14 Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20XK"), @@ -63,7 +68,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14 Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UD"), @@ -71,7 +76,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14 Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UE"), @@ -79,7 +84,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14s Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UH"), @@ -87,7 +92,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14s Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"), @@ -95,7 +100,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "P14s Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"), @@ -103,7 +108,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "P14s Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21A0"), @@ -111,7 +116,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "P14s Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), @@ -152,7 +157,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad 1 14AMN7", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, 
"82VF"), @@ -160,7 +165,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad 1 15AMN7", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82VG"), @@ -168,7 +173,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad 1 15AMN7", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82X5"), @@ -176,7 +181,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad Slim 3 14AMN8", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82XN"), @@ -184,7 +189,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad Slim 3 15AMN8", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), @@ -193,7 +198,7 @@ static const struct dmi_system_id fwbug_list[] = { /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ { .ident = "Lenovo Yoga 6 13ALC6", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82ND"), @@ -202,7 +207,7 @@ static const struct dmi_system_id fwbug_list[] = { /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ { .ident = "HP Laptop 15s-eq2xxx", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"), @@ -285,6 +290,16 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev) { const struct dmi_system_id *dmi_id; + /* + * IRQ1 may cause an interrupt during resume even without a keyboard + * press. + * + * Affects Renoir, Cezanne and Barcelo SoCs + * + * A solution is available in PMFW 64.66.0, but it must be activated by + * SBIOS. If SBIOS is known to have the fix a quirk can be added for + * a given system to avoid workaround. 
+ */ if (dev->cpu_id == AMD_CPU_ID_CZN) dev->disable_8042_wakeup = true; @@ -295,6 +310,5 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev) if (dev->quirks->s2idle_bug_mmio) pr_info("Using s2idle quirk to avoid %s platform firmware bug\n", dmi_id->ident); - if (dev->quirks->spurious_8042) - dev->disable_8042_wakeup = true; + dev->disable_8042_wakeup = dev->quirks->spurious_8042; } diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index 0b9b23eb7c2c..bd318fd02ccf 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -530,19 +530,6 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev) { struct device *d; - int rc; - - /* cezanne platform firmware has a fix in 64.66.0 */ - if (pdev->cpu_id == AMD_CPU_ID_CZN) { - if (!pdev->major) { - rc = amd_pmc_get_smu_version(pdev); - if (rc) - return rc; - } - - if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65)) - return 0; - } d = bus_find_device_by_name(&serio_bus, NULL, "serio0"); if (!d) diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index 01c72b91a50d..444786102f02 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -39,6 +39,7 @@ struct token_sysfs_data { struct smbios_device { struct list_head list; struct device *device; + int priority; int (*call_fn)(struct calling_interface_buffer *arg); }; @@ -145,7 +146,7 @@ int dell_smbios_error(int value) } EXPORT_SYMBOL_GPL(dell_smbios_error); -int dell_smbios_register_device(struct device *d, void *call_fn) +int dell_smbios_register_device(struct device *d, int priority, void *call_fn) { struct smbios_device *priv; @@ -154,6 +155,7 @@ int dell_smbios_register_device(struct device *d, void *call_fn) return -ENOMEM; get_device(d); priv->device = d; + priv->priority = priority; priv->call_fn = call_fn; mutex_lock(&smbios_mutex); list_add_tail(&priv->list, &smbios_device_list); @@ -292,28 +294,25 @@ EXPORT_SYMBOL_GPL(dell_smbios_call_filter); int dell_smbios_call(struct calling_interface_buffer *buffer) { - int (*call_fn)(struct calling_interface_buffer *) = NULL; - struct device *selected_dev = NULL; + struct smbios_device *selected = NULL; struct smbios_device *priv; int ret; mutex_lock(&smbios_mutex); list_for_each_entry(priv, &smbios_device_list, list) { - if (!selected_dev || priv->device->id >= selected_dev->id) { - dev_dbg(priv->device, "Trying device ID: %d\n", - priv->device->id); - call_fn = priv->call_fn; - selected_dev = priv->device; + if (!selected || priv->priority >= selected->priority) { + dev_dbg(priv->device, "Trying device ID: %d\n", priv->priority); + selected = priv; } } - if (!selected_dev) { + if (!selected) { ret = -ENODEV; pr_err("No dell-smbios drivers are loaded\n"); goto out_smbios_call; } - ret = call_fn(buffer); + ret = selected->call_fn(buffer); out_smbios_call: mutex_unlock(&smbios_mutex); diff --git a/drivers/platform/x86/dell/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c index 4d375985c85f..7055e2c40f34 100644 --- a/drivers/platform/x86/dell/dell-smbios-smm.c +++ b/drivers/platform/x86/dell/dell-smbios-smm.c @@ -125,8 +125,7 @@ int init_dell_smbios_smm(void) if (ret) goto fail_platform_device_add; - ret = dell_smbios_register_device(&platform_device->dev, - &dell_smbios_smm_call); + ret = dell_smbios_register_device(&platform_device->dev, 0, &dell_smbios_smm_call); if (ret) goto fail_register; 
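To make the explicit-priority scheme introduced above concrete, here is a stand-alone sketch of the selection rule dell_smbios_call() now applies: the backend registered with the highest priority wins (SMM registers with 0 above; WMI registers with 1 below), and the ">=" comparison lets a later registration take over on a tie. Illustrative only; struct backend and pick_backend() are invented names, not the kernel's types.

#include <stddef.h>
#include <stdio.h>

struct backend {
	const char *name;
	int priority;
};

static const struct backend *pick_backend(const struct backend *list, size_t n)
{
	const struct backend *selected = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		/* ">=" lets a later, equal-priority registration take over. */
		if (!selected || list[i].priority >= selected->priority)
			selected = &list[i];
	}
	return selected;
}

int main(void)
{
	const struct backend backends[] = {
		{ "dell-smbios-smm", 0 },	/* registers with priority 0 */
		{ "dell-smbios-wmi", 1 },	/* registers with priority 1 */
	};
	const struct backend *sel = pick_backend(backends, 2);

	printf("dispatching via %s\n", sel->name);
	return 0;
}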
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c index ae9012549560..a7dca8c59d60 100644 --- a/drivers/platform/x86/dell/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell/dell-smbios-wmi.c @@ -264,9 +264,7 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context) if (ret) return ret; - /* ID is used by dell-smbios to set priority of drivers */ - wdev->dev.id = 1; - ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call); + ret = dell_smbios_register_device(&wdev->dev, 1, &dell_smbios_wmi_call); if (ret) return ret; diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h index 77baa15eb523..f421b8533a9e 100644 --- a/drivers/platform/x86/dell/dell-smbios.h +++ b/drivers/platform/x86/dell/dell-smbios.h @@ -64,7 +64,7 @@ struct calling_interface_structure { struct calling_interface_token tokens[]; } __packed; -int dell_smbios_register_device(struct device *d, void *call_fn); +int dell_smbios_register_device(struct device *d, int priority, void *call_fn); void dell_smbios_unregister_device(struct device *d); int dell_smbios_error(int value); diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index db5fdee2109c..60c8ac8d902c 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -92,9 +92,9 @@ static const char * const victus_thermal_profile_boards[] = { "8A25" }; -/* DMI Board names of Victus 16-s1000 laptops */ +/* DMI Board names of Victus 16-r1000 and Victus 16-s1000 laptops */ static const char * const victus_s_thermal_profile_boards[] = { - "8C9C" + "8C99", "8C9C" }; enum hp_wmi_radio { diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 4c0aed6e626f..bdfb8a800c54 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -193,6 +193,10 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3 *con_id = "privacy-led"; *gpio_flags = GPIO_ACTIVE_HIGH; break; + case INT3472_GPIO_TYPE_HOTPLUG_DETECT: + *con_id = "hpd"; + *gpio_flags = GPIO_ACTIVE_HIGH; + break; case INT3472_GPIO_TYPE_POWER_ENABLE: *con_id = "avdd"; *gpio_flags = GPIO_ACTIVE_HIGH; @@ -223,6 +227,7 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3 * 0x0b Power enable * 0x0c Clock enable * 0x0d Privacy LED + * 0x13 Hotplug detect * * There are some known platform specific quirks where that does not quite * hold up; for example where a pin with type 0x01 (Power down) is mapped to @@ -292,6 +297,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, switch (type) { case INT3472_GPIO_TYPE_RESET: case INT3472_GPIO_TYPE_POWERDOWN: + case INT3472_GPIO_TYPE_HOTPLUG_DETECT: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags); if (ret) err_msg = "Failed to map GPIO pin to sensor\n"; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 6df55c8e16b7..bfcf92aa4d69 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -192,9 +192,14 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index) { struct 
tpmi_uncore_cluster_info *cluster_info; + struct tpmi_uncore_struct *uncore_root; u64 control; cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + uncore_root = cluster_info->uncore_root; + + if (uncore_root->write_blocked) + return -EPERM; if (cluster_info->root_domain) return -ENODATA; diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index feadb21a8f30..4be270f4d6c3 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -40,7 +40,6 @@ struct pca9450 { struct device *dev; struct regmap *regmap; struct gpio_desc *sd_vsel_gpio; - struct notifier_block restart_nb; enum pca9450_chip_type type; unsigned int rcnt; int irq; @@ -1100,10 +1099,9 @@ static irqreturn_t pca9450_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int pca9450_i2c_restart_handler(struct notifier_block *nb, - unsigned long action, void *data) +static int pca9450_i2c_restart_handler(struct sys_off_data *data) { - struct pca9450 *pca9450 = container_of(nb, struct pca9450, restart_nb); + struct pca9450 *pca9450 = data->cb_data; struct i2c_client *i2c = container_of(pca9450->dev, struct i2c_client, dev); dev_dbg(&i2c->dev, "Restarting device..\n"); @@ -1261,10 +1259,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) pca9450->sd_vsel_fixed_low = of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low"); - pca9450->restart_nb.notifier_call = pca9450_i2c_restart_handler; - pca9450->restart_nb.priority = PCA9450_RESTART_HANDLER_PRIORITY; - - if (register_restart_handler(&pca9450->restart_nb)) + if (devm_register_sys_off_handler(&i2c->dev, SYS_OFF_MODE_RESTART, + PCA9450_RESTART_HANDLER_PRIORITY, + pca9450_i2c_restart_handler, pca9450)) dev_warn(&i2c->dev, "Failed to register restart handler\n"); dev_info(&i2c->dev, "%s probed.\n", diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c index 5e67fdc88f49..d77ca486879f 100644 --- a/drivers/regulator/tps65219-regulator.c +++ b/drivers/regulator/tps65219-regulator.c @@ -454,9 +454,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev) irq_type->irq_name, irq_data); if (error) - return dev_err_probe(tps->dev, PTR_ERR(rdev), - "Failed to request %s IRQ %d: %d\n", - irq_type->irq_name, irq, error); + return dev_err_probe(tps->dev, error, + "Failed to request %s IRQ %d\n", + irq_type->irq_name, irq); } for (i = 0; i < pmic->dev_irq_size; ++i) { @@ -477,9 +477,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev) irq_type->irq_name, irq_data); if (error) - return dev_err_probe(tps->dev, PTR_ERR(rdev), - "Failed to request %s IRQ %d: %d\n", - irq_type->irq_name, irq, error); + return dev_err_probe(tps->dev, error, + "Failed to request %s IRQ %d\n", + irq_type->irq_name, irq); } return 0; diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f2e42c1d51aa..98e334724a62 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -77,6 +77,13 @@ unsigned long sclp_console_full; /* The currently active SCLP command word. 
*/ static sclp_cmdw_t active_cmd; +static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) +{ + if (sccb_int) + return __va(sccb_int); + return NULL; +} + static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) { struct sclp_trace_entry e; @@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb) static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) { - struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int); + struct sccb_header *sccb = sclpint_to_sccb(sccb_int); struct evbuf_header *evbuf; u16 response; @@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code, /* INT: Interrupt received (a=intparm, b=cmd) */ sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, - (struct sccb_header *)__va(finished_sccb), + sclpint_to_sccb(finished_sccb), !ok_response(finished_sccb, active_cmd)); if (finished_sccb) { diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index c2fdc6553e62..1199d701c3f5 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -323,8 +323,6 @@ enum fnic_state { FNIC_IN_ETH_TRANS_FC_MODE, }; -struct mempool; - enum fnic_role_e { FNIC_ROLE_FCP_INITIATOR = 0, }; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index a39f1da4ce47..a761c0aa5127 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); vfree(dst_addr); + if (IS_ERR(ep)) + return NULL; return ep; } diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 67d4000c3cef..313e444a34f3 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) } if (config.speed_hz > perclk_rate / 2) { - dev_err(fsl_lpspi->dev, - "per-clk should be at least two times of transfer speed"); - return -EINVAL; + div = 2; + } else { + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); } - div = DIV_ROUND_UP(perclk_rate, config.speed_hz); - for (prescale = 0; prescale <= prescale_max; prescale++) { scldiv = div / (1 << prescale) - 2; if (scldiv >= 0 && scldiv < 256) { diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index d3b7e857b377..064b99204d9a 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -265,6 +265,9 @@ static bool spi_mem_internal_supports_op(struct spi_mem *mem, */ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + /* Make sure the operation frequency is correct before going further */ + spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op); + if (spi_mem_check_op(op)) return false; @@ -577,6 +580,7 @@ EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq); * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an * operation. This helps find the best variant * among a list of possible choices. 
+ * @mem: the SPI memory * @op: the operation to benchmark * * Some chips have per-op frequency limitations, PCBs usually have their own diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index a8c4eb1cbde1..0ceaad7dba3c 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -210,13 +210,21 @@ static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section, struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); struct qpic_ecc *qecc = snandc->qspi->ecc; - if (section > 1) - return -ERANGE; - - oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes; - oobregion->offset = mtd->oobsize - oobregion->length; + switch (section) { + case 0: + oobregion->offset = 0; + oobregion->length = qecc->bytes * (qecc->steps - 1) + + qecc->bbm_size; + return 0; + case 1: + oobregion->offset = qecc->bytes * (qecc->steps - 1) + + qecc->bbm_size + + qecc->steps * 4; + oobregion->length = mtd->oobsize - oobregion->offset; + return 0; + } - return 0; + return -ERANGE; } static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section, @@ -1196,7 +1204,7 @@ static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | - FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + FIELD_PREP(CW_PER_PAGE_MASK, 0); cfg1 = ecc_cfg->cfg1; ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 49ab4c515156..c07c61dc4938 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -378,7 +378,7 @@ static void spi_st_remove(struct platform_device *pdev) pinctrl_pm_select_sleep_state(&pdev->dev); } -static int __maybe_unused spi_st_runtime_suspend(struct device *dev) +static int spi_st_runtime_suspend(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); struct spi_st *spi_st = spi_controller_get_devdata(host); @@ -391,7 +391,7 @@ static int __maybe_unused spi_st_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused spi_st_runtime_resume(struct device *dev) +static int spi_st_runtime_resume(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); struct spi_st *spi_st = spi_controller_get_devdata(host); @@ -428,8 +428,8 @@ static int __maybe_unused spi_st_resume(struct device *dev) } static const struct dev_pm_ops spi_st_pm = { - SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) - SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) + RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) }; static const struct of_device_id stm_spi_match[] = { @@ -441,7 +441,7 @@ MODULE_DEVICE_TABLE(of, stm_spi_match); static struct platform_driver spi_st_driver = { .driver = { .name = "spi-st", - .pm = pm_sleep_ptr(&spi_st_pm), + .pm = pm_ptr(&spi_st_pm), .of_match_table = of_match_ptr(stm_spi_match), }, .probe = spi_st_probe, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index efd7a811a002..9a43102b2b21 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1303,7 +1303,7 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) * * Return: 0 upon success; -EBUSY upon timeout. 
*/ -static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, +static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba, u64 wait_timeout_us) { int ret = 0; @@ -1431,7 +1431,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) down_write(&hba->clk_scaling_lock); if (!hba->clk_scaling.is_allowed || - ufshcd_wait_for_doorbell_clr(hba, timeout_us)) { + ufshcd_wait_for_pending_cmds(hba, timeout_us)) { ret = -EBUSY; up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); @@ -3199,7 +3199,8 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) } /* - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) @@ -3275,7 +3276,6 @@ retry: } } - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3294,7 +3294,8 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba) } /* - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, const u32 tag, int timeout) @@ -3317,7 +3318,8 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, * @cmd_type: specifies the type (NOP, Query...) * @timeout: timeout in milliseconds * - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. * * NOTE: Since there is only one available tag for device management commands, * it is expected you hold the hba->dev_cmd.lock mutex. @@ -3363,6 +3365,10 @@ static inline void ufshcd_init_query(struct ufs_hba *hba, (*request)->upiu_req.selector = selector; } +/* + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. + */ static int ufshcd_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) { @@ -3383,7 +3389,6 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", __func__, opcode, idn, ret, retries); - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3395,7 +3400,8 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, * @index: flag index to access * @flag_res: the flag value after the query request completes * - * Return: 0 for success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) @@ -3451,7 +3457,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3464,8 +3469,9 @@ out_unlock: * @selector: selector field * @attr_val: the attribute value after the query request completes * - * Return: 0 upon success; < 0 upon failure. -*/ + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. 
+ */ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) { @@ -3513,7 +3519,6 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3528,8 +3533,9 @@ out_unlock: * @attr_val: the attribute value after the query request * completes * - * Return: 0 for success; < 0 upon failure. -*/ + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. + */ int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) @@ -3551,12 +3557,12 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query attribute, idn %d, failed with error %d after %d retries\n", __func__, idn, ret, QUERY_REQ_RETRIES); - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } /* - * Return: 0 if successful; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, @@ -3615,7 +3621,6 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: hba->dev_cmd.query.descriptor = NULL; ufshcd_dev_man_unlock(hba); - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3632,7 +3637,8 @@ out_unlock: * The buf_len parameter will contain, on return, the length parameter * received on the response. * - * Return: 0 for success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, @@ -3650,7 +3656,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, break; } - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3663,7 +3668,8 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, @@ -3730,7 +3736,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, out: if (is_kmalloc) kfree(desc_buf); - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -4781,7 +4786,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); * * Set fDeviceInit flag and poll until device toggles it. * - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_complete_dev_init(struct ufs_hba *hba) { @@ -5135,7 +5141,8 @@ out: * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. * - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. 
*/ static int ufshcd_verify_dev_init(struct ufs_hba *hba) { @@ -5559,9 +5566,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) irqreturn_t retval = IRQ_NONE; struct uic_command *cmd; - spin_lock(hba->host->host_lock); + guard(spinlock_irqsave)(hba->host->host_lock); cmd = hba->active_uic_cmd; - if (WARN_ON_ONCE(!cmd)) + if (!cmd) goto unlock; if (ufshcd_is_auto_hibern8_error(hba, intr_status)) @@ -5586,8 +5593,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); unlock: - spin_unlock(hba->host->host_lock); - return retval; } @@ -5869,7 +5874,8 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) * as the device is allowed to manage its own way of handling background * operations. * - * Return: zero on success, non-zero on failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) { @@ -5908,7 +5914,8 @@ out: * host is idle so that BKOPS are managed effectively without any negative * impacts. * - * Return: zero on success, non-zero on failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) { @@ -6058,6 +6065,10 @@ out: __func__, err); } +/* + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. + */ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id) { struct utp_upiu_query_v4_0 *upiu_resp; @@ -6920,7 +6931,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) bool queue_eh_work = false; irqreturn_t retval = IRQ_NONE; - spin_lock(hba->host->host_lock); + guard(spinlock_irqsave)(hba->host->host_lock); hba->errors |= UFSHCD_ERROR_MASK & intr_status; if (hba->errors & INT_FATAL_ERRORS) { @@ -6979,7 +6990,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) */ hba->errors = 0; hba->uic_error = 0; - spin_unlock(hba->host->host_lock); + return retval; } @@ -7454,7 +7465,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation * @dir: DMA direction * - * Return: zero on success, non-zero on failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. 
*/ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs, diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 76fc70503a62..9574fdc2bb0f 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -2070,17 +2070,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) return IRQ_HANDLED; } -static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi) -{ - for (struct ufs_qcom_irq *q = uqi; q->irq; q++) - devm_free_irq(q->hba->dev, q->irq, q->hba); - - platform_device_msi_free_irqs_all(uqi->hba->dev); - devm_kfree(uqi->hba->dev, uqi); -} - -DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T)) - static int ufs_qcom_config_esi(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -2095,18 +2084,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) */ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; - struct ufs_qcom_irq *qi __free(ufs_qcom_irq) = - devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); - if (!qi) - return -ENOMEM; - /* Preset so __free() has a pointer to hba in all error paths */ - qi[0].hba = hba; - ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, ufs_qcom_write_msi_msg); if (ret) { - dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); - return ret; + dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n"); + return ret; /* Continue without ESI */ + } + + struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); + + if (!qi) { + platform_device_msi_free_irqs_all(hba->dev); + return -ENOMEM; } for (int idx = 0; idx < nr_irqs; idx++) { @@ -2117,15 +2106,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler, IRQF_SHARED, "qcom-mcq-esi", qi + idx); if (ret) { - dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", + dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n", __func__, qi[idx].irq, ret); - qi[idx].irq = 0; + /* Free previously allocated IRQs */ + for (int j = 0; j < idx; j++) + devm_free_irq(hba->dev, qi[j].irq, qi + j); + platform_device_msi_free_irqs_all(hba->dev); + devm_kfree(hba->dev, qi); return ret; } } - retain_and_null_ptr(qi); - if (host->hw_ver.major >= 6) { ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), REG_UFS_CFG3); diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index b39239f641f2..b87e03777395 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -630,6 +630,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index e1ec9b38f5b9..d7c2a1a3c271 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -338,7 +338,8 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) schedule_work(&ci->usb_phy->chg_work); break; case CI_HDRC_CONTROLLER_PULLUP_EVENT: - if (ci->role == CI_ROLE_GADGET) + if (ci->role == CI_ROLE_GADGET && + 
ci->gadget.speed == USB_SPEED_HIGH) imx_usbmisc_pullup(data->usbmisc_data, ci->gadget.connected); break; diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 3d20c5e76c6a..b1418885707c 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -1068,15 +1068,24 @@ static void usbmisc_imx7d_pullup(struct imx_usbmisc_data *data, bool on) unsigned long flags; u32 val; + if (on) + return; + spin_lock_irqsave(&usbmisc->lock, flags); val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2); - if (!on) { - val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK; - val |= MX7D_USBNC_USB_CTRL2_OPMODE(1); - val |= MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; - } else { - val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; - } + val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK; + val |= MX7D_USBNC_USB_CTRL2_OPMODE(1); + val |= MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; + writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2); + spin_unlock_irqrestore(&usbmisc->lock, flags); + + /* Last for at least 1 micro-frame to let host see disconnect signal */ + usleep_range(125, 150); + + spin_lock_irqsave(&usbmisc->lock, flags); + val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK; + val |= MX7D_USBNC_USB_CTRL2_OPMODE(0); + val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2); spin_unlock_irqrestore(&usbmisc->lock, flags); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 03771bbc6c01..9dd79769cad1 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1636,7 +1636,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); struct usb_anchor *anchor = urb->anchor; int status = urb->unlinked; - unsigned long flags; urb->hcpriv = NULL; if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && @@ -1654,14 +1653,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb) /* pass ownership to the completion handler */ urb->status = status; /* - * Only collect coverage in the softirq context and disable interrupts - * to avoid scenarios with nested remote coverage collection sections - * that KCOV does not support. - * See the comment next to kcov_remote_start_usb_softirq() for details. + * This function can be called in task context inside another remote + * coverage collection section, but kcov doesn't support that kind of + * recursion yet. Only collect coverage in softirq context for now. */ - flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); + kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); urb->complete(urb); - kcov_remote_stop_softirq(flags); + kcov_remote_stop_softirq(); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); @@ -1719,10 +1717,10 @@ static void usb_giveback_urb_bh(struct work_struct *work) * @urb: urb being returned to the USB device driver. * @status: completion status code for the URB. * - * Context: atomic. The completion callback is invoked in caller's context. - * For HCDs with HCD_BH flag set, the completion callback is invoked in BH - * context (except for URBs submitted to the root hub which always complete in - * caller's context). + * Context: atomic. The completion callback is invoked either in a work queue + * (BH) context or in the caller's context, depending on whether the HCD_BH + * flag is set in the @hcd structure, except that URBs submitted to the + * root hub always complete in BH context. 
* * This hands the URB from HCD to its USB device driver, using its * completion function. The HCD has freed all per-urb resources @@ -2166,7 +2164,7 @@ static struct urb *request_single_step_set_feature_urb( urb->complete = usb_ehset_completion; urb->status = -EINPROGRESS; urb->actual_length = 0; - urb->transfer_flags = URB_DIR_IN; + urb->transfer_flags = URB_DIR_IN | URB_NO_TRANSFER_DMA_MAP; usb_get_urb(urb); atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); @@ -2230,9 +2228,15 @@ int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) /* Complete remaining DATA and STATUS stages using the same URB */ urb->status = -EINPROGRESS; + urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; usb_get_urb(urb); atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); + if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) { + usb_put_urb(urb); + goto out1; + } + retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0); if (!retval && !wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index ff0ff95d5cca..f5bc53875330 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -371,6 +371,7 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, /* SanDisk Corp. SanDisk 3.2Gen1 */ + { USB_DEVICE(0x0781, 0x5596), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, /* SanDisk Extreme 55AE */ diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 54a4ee2b90b7..39c72cb52ce7 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -41,6 +41,7 @@ #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee #define PCI_DEVICE_ID_INTEL_JSP 0x4dee +#define PCI_DEVICE_ID_INTEL_WCL 0x4d7e #define PCI_DEVICE_ID_INTEL_ADL 0x460e #define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee #define PCI_DEVICE_ID_INTEL_ADLN 0x465e @@ -431,6 +432,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, WCL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) }, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 666ac432f52d..b4229aa13f37 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -288,7 +288,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8, DWC3_TRBCTL_CONTROL_SETUP, false); ret = dwc3_ep0_start_trans(dep); - WARN_ON(ret < 0); + if (ret < 0) + dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret); + for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) { struct dwc3_ep *dwc3_ep; @@ -1061,7 +1063,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, ret = dwc3_ep0_start_trans(dep); } - WARN_ON(ret < 0); + if (ret < 0) + dev_err(dwc->dev, + "ep0 data phase start transfer failed: %d\n", ret); } static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) @@ -1078,7 +1082,12 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) { - WARN_ON(dwc3_ep0_start_control_status(dep)); + int ret; + + ret = 
dwc3_ep0_start_control_status(dep); + if (ret) + dev_err(dwc->dev, + "ep0 status phase start transfer failed: %d\n", ret); } static void dwc3_ep0_do_control_status(struct dwc3 *dwc, @@ -1121,7 +1130,10 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); memset(¶ms, 0, sizeof(params)); ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); - WARN_ON_ONCE(ret); + if (ret) + dev_err_ratelimited(dwc->dev, + "ep0 data phase end transfer failed: %d\n", ret); + dep->resource_index = 0; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 25db36c63951..554f997eb8c4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1772,7 +1772,11 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int dep->flags |= DWC3_EP_DELAY_STOP; return 0; } - WARN_ON_ONCE(ret); + + if (ret) + dev_err_ratelimited(dep->dwc->dev, + "end transfer failed: %d\n", ret); + dep->resource_index = 0; if (!interrupt) @@ -3777,6 +3781,15 @@ static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep, static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, const struct dwc3_event_depevt *event) { + /* + * During a device-initiated disconnect, a late xferNotReady event can + * be generated after the End Transfer command resets the event filter, + * but before the controller is halted. Ignore it to prevent a new + * transfer from starting. + */ + if (!dep->dwc->connected) + return; + dwc3_gadget_endpoint_frame_from_event(dep, event); /* @@ -4039,7 +4052,9 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) dep->flags &= ~DWC3_EP_STALL; ret = dwc3_send_clear_stall_ep_cmd(dep); - WARN_ON_ONCE(ret); + if (ret) + dev_err_ratelimited(dwc->dev, + "failed to clear STALL on %s\n", dep->name); } } diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 2957316fd3d0..1d3085cc9d22 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -502,6 +502,7 @@ struct tegra_xudc { struct clk_bulk_data *clks; bool device_mode; + bool current_device_mode; struct work_struct usb_role_sw_work; struct phy **usb3_phy; @@ -715,6 +716,8 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc) phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_DEVICE); + + xudc->current_device_mode = true; } static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) @@ -725,6 +728,8 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) dev_dbg(xudc->dev, "device mode off\n"); + xudc->current_device_mode = false; + connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS); reinit_completion(&xudc->disconnect_complete); @@ -4044,10 +4049,10 @@ static int __maybe_unused tegra_xudc_resume(struct device *dev) spin_lock_irqsave(&xudc->lock, flags); xudc->suspended = false; + if (xudc->device_mode != xudc->current_device_mode) + schedule_work(&xudc->usb_role_sw_work); spin_unlock_irqrestore(&xudc->lock, flags); - schedule_work(&xudc->usb_role_sw_work); - pm_runtime_enable(dev); return 0; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 92bb84f8132a..b3a59ce1b3f4 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -704,8 +704,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, if (!xhci->devs[i]) continue; - retval = xhci_disable_slot(xhci, i); - xhci_free_virt_device(xhci, i); + retval = xhci_disable_and_free_slot(xhci, i); if (retval) xhci_err(xhci, "Failed 
to disable slot %d, %d. Enter test mode anyway\n", i, retval); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 07289333a1e8..81eaad87a3d9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -865,21 +865,20 @@ free_tts: * will be manipulated by the configure endpoint, allocate device, or update * hub functions while this function is removing the TT entries from the list. */ -void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) +void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, + int slot_id) { - struct xhci_virt_device *dev; int i; int old_active_eps = 0; /* Slot ID 0 is reserved */ - if (slot_id == 0 || !xhci->devs[slot_id]) + if (slot_id == 0 || !dev) return; - dev = xhci->devs[slot_id]; - - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; - if (!dev) - return; + /* If device ctx array still points to _this_ device, clear it */ + if (dev->out_ctx && + xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma)) + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; trace_xhci_free_virt_device(dev); @@ -920,8 +919,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) dev->udev->slot_id = 0; if (dev->rhub_port && dev->rhub_port->slot_id == slot_id) dev->rhub_port->slot_id = 0; - kfree(xhci->devs[slot_id]); - xhci->devs[slot_id] = NULL; + if (xhci->devs[slot_id] == dev) + xhci->devs[slot_id] = NULL; + kfree(dev); } /* @@ -962,7 +962,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i out: /* we are now at a leaf device */ xhci_debugfs_remove_slot(xhci, slot_id); - xhci_free_virt_device(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); } int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 620f8f0febb8..86df80399c9f 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -47,8 +47,9 @@ #define RENESAS_ROM_ERASE_MAGIC 0x5A65726F #define RENESAS_ROM_WRITE_MAGIC 0x53524F4D -#define RENESAS_RETRY 10000 -#define RENESAS_DELAY 10 +#define RENESAS_RETRY 50000 /* 50000 * RENESAS_DELAY ~= 500ms */ +#define RENESAS_CHIP_ERASE_RETRY 500000 /* 500000 * RENESAS_DELAY ~= 5s */ +#define RENESAS_DELAY 10 #define RENESAS_FW_NAME "renesas_usb_fw.mem" @@ -407,7 +408,7 @@ static void renesas_rom_erase(struct pci_dev *pdev) /* sleep a bit while ROM is erased */ msleep(20); - for (i = 0; i < RENESAS_RETRY; i++) { + for (i = 0; i < RENESAS_CHIP_ERASE_RETRY; i++) { retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status); status &= RENESAS_ROM_STATUS_ERASE; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index ecd757d482c5..4f8f5aab109d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1592,7 +1592,8 @@ static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *comman command->slot_id = 0; } -static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) +static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id, + u32 cmd_comp_code) { struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; @@ -1607,6 +1608,10 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ xhci_free_device_endpoint_resources(xhci, virt_dev, true); + if (cmd_comp_code == COMP_SUCCESS) { + 
xhci->dcbaa->dev_context_ptrs[slot_id] = 0; + xhci->devs[slot_id] = NULL; + } } static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id) @@ -1856,7 +1861,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT: - xhci_handle_cmd_disable_slot(xhci, slot_id); + xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code); break; case TRB_CONFIG_EP: if (!cmd->completion) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 47151ca527bf..742c23826e17 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -309,6 +309,7 @@ int xhci_enable_interrupter(struct xhci_interrupter *ir) return -EINVAL; iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IP; iman |= IMAN_IE; writel(iman, &ir->ir_set->iman); @@ -325,6 +326,7 @@ int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) return -EINVAL; iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IP; iman &= ~IMAN_IE; writel(iman, &ir->ir_set->iman); @@ -3932,8 +3934,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, * Obtaining a new device slot to inform the xHCI host that * the USB device has been reset. */ - ret = xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); if (!ret) { ret = xhci_alloc_dev(hcd, udev); if (ret == 1) @@ -4090,7 +4091,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_disable_slot(xhci, udev->slot_id); spin_lock_irqsave(&xhci->lock, flags); - xhci_free_virt_device(xhci, udev->slot_id); + xhci_free_virt_device(xhci, virt_dev, udev->slot_id); spin_unlock_irqrestore(&xhci->lock, flags); } @@ -4139,6 +4140,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) return 0; } +int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id) +{ + struct xhci_virt_device *vdev = xhci->devs[slot_id]; + int ret; + + ret = xhci_disable_slot(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); + return ret; +} + /* * Checks if we have enough host controller resources for the default control * endpoint. 
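
A note on the helper introduced above: with this series, a successful Disable Slot command clears xhci->devs[slot_id] from the command-completion handler (see xhci_handle_cmd_disable_slot() earlier in xhci-ring.c), so callers must snapshot the virt-device pointer before issuing the command and pass that snapshot to xhci_free_virt_device(). The following minimal, self-contained C sketch illustrates the snapshot-before-disable pattern; every name in it (devs, disable_slot, free_vdev, disable_and_free_slot) is a hypothetical stand-in, not xHCI code.

#include <stdlib.h>

struct vdev { int slot_id; };

/* Hypothetical shared slot table; a completion path may clear entries. */
static struct vdev *devs[8];

/* Stand-in for the Disable Slot command: on success, the completion
 * side clears the table entry, as xhci_handle_cmd_disable_slot() now
 * does on COMP_SUCCESS. */
static int disable_slot(int slot_id)
{
	devs[slot_id] = NULL;
	return 0;
}

/* Stand-in for xhci_free_virt_device(): works on the snapshotted
 * pointer, so the device is freed even though the table entry may
 * already be gone. */
static void free_vdev(struct vdev *dev, int slot_id)
{
	if (!dev)
		return;
	if (devs[slot_id] == dev)	/* clear only if it still points at us */
		devs[slot_id] = NULL;
	free(dev);
}

static int disable_and_free_slot(int slot_id)
{
	struct vdev *vdev = devs[slot_id];	/* snapshot before disabling */
	int ret = disable_slot(slot_id);

	free_vdev(vdev, slot_id);	/* safe even after the entry is cleared */
	return ret;
}

int main(void)
{
	devs[1] = malloc(sizeof(*devs[1]));
	if (!devs[1])
		return 1;
	devs[1]->slot_id = 1;
	return disable_and_free_slot(1);
}

Re-reading devs[slot_id] after the disable, as the old two-call pattern did, would find NULL and leak the allocation; passing the snapshot is exactly what the new xhci_free_virt_device() signature enables.
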
@@ -4245,8 +4256,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) return 1; disable_slot: - xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_and_free_slot(xhci, udev->slot_id); return 0; } @@ -4382,8 +4392,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); mutex_unlock(&xhci->mutex); - ret = xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); if (!ret) { if (xhci_alloc_dev(hcd, udev) == 1) xhci_setup_addressable_virt_dev(xhci, udev); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index a20f4e7cd43a..85d5b964bf1e 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1791,7 +1791,7 @@ void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), /* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); -void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); +void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, int slot_id); int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, @@ -1888,6 +1888,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); +int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 7dea28c2b8ee..cb5bbb19060e 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -252,7 +252,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun, return USB_STOR_TRANSPORT_ERROR; } - residue = bcs->Residue; + residue = le32_to_cpu(bcs->Residue); if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 54f0b1c83317..dfa5276a5a43 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -934,6 +934,13 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_SANE_SENSE ), +/* Added by Maël GUERIN <mael.guerin@murena.io> */ +UNUSUAL_DEV( 0x0603, 0x8611, 0x0000, 0xffff, + "Novatek", + "NTK96550-based camera", + USB_SC_SCSI, USB_PR_BULK, NULL, + US_FL_BULK_IGNORE_TAG ), + /* * Reported by Hanno Boeck <hanno@gmx.de> * Taken from the Lycoris Kernel */ @@ -1494,6 +1501,28 @@ UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_WP_DETECT ), +/* + * Reported by Zenm Chen <zenmchen@gmail.com> + * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch + * the device into Wi-Fi mode. 
+ */ +UNUSUAL_DEV( 0x0bda, 0x1a2b, 0x0000, 0xffff, + "Realtek", + "DISK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + +/* + * Reported by Zenm Chen <zenmchen@gmail.com> + * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch + * the device into Wi-Fi mode. + */ +UNUSUAL_DEV( 0x0bda, 0xa192, 0x0000, 0xffff, + "Realtek", + "DISK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, "Maxtor", "USB to SATA", diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index a4ff2403ddd6..870a71f953f6 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -1485,6 +1485,9 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id) struct fusb302_chip *chip = dev_id; unsigned long flags; + /* Disable our level-triggered IRQ until our irq_work has cleared it */ + disable_irq_nosync(chip->gpio_int_n_irq); + spin_lock_irqsave(&chip->irq_lock, flags); if (chip->irq_suspended) chip->irq_while_suspended = true; @@ -1627,6 +1630,7 @@ static void fusb302_irq_work(struct work_struct *work) } done: mutex_unlock(&chip->lock); + enable_irq(chip->gpio_int_n_irq); } static int init_gpio(struct fusb302_chip *chip) @@ -1751,10 +1755,9 @@ static int fusb302_probe(struct i2c_client *client) goto destroy_workqueue; } - ret = devm_request_threaded_irq(dev, chip->gpio_int_n_irq, - NULL, fusb302_irq_intn, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, - "fsc_interrupt_int_n", chip); + ret = request_irq(chip->gpio_int_n_irq, fusb302_irq_intn, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + "fsc_interrupt_int_n", chip); if (ret < 0) { dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret); goto tcpm_unregister_port; @@ -1779,6 +1782,7 @@ static void fusb302_remove(struct i2c_client *client) struct fusb302_chip *chip = i2c_get_clientdata(client); disable_irq_wake(chip->gpio_int_n_irq); + free_irq(chip->gpio_int_n_irq, chip); cancel_work_sync(&chip->irq_work); cancel_delayed_work_sync(&chip->bc_lvl_handler); tcpm_unregister_port(chip->tcpm_port); diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c index 0cdda06592fd..af8da6dc60ae 100644 --- a/drivers/usb/typec/tcpm/maxim_contaminant.c +++ b/drivers/usb/typec/tcpm/maxim_contaminant.c @@ -188,6 +188,11 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven if (ret < 0) return ret; + /* Disable low power mode */ + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL, + FIELD_PREP(CCLPMODESEL, + LOW_POWER_MODE_DISABLE)); + /* Sleep to allow the comparators to settle */ usleep_range(5000, 6000); ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1); @@ -324,6 +329,39 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) return 0; } +static int max_contaminant_enable_toggling(struct max_tcpci_chip *chip) +{ + struct regmap *regmap = chip->data.regmap; + int ret; + + /* Disable dry detection if enabled. 
*/ + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL, + FIELD_PREP(CCLPMODESEL, + LOW_POWER_MODE_DISABLE)); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, 0); + if (ret) + return ret; + + ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP | + FIELD_PREP(TCPC_ROLE_CTRL_CC1, + TCPC_ROLE_CTRL_CC_RD) | + FIELD_PREP(TCPC_ROLE_CTRL_CC2, + TCPC_ROLE_CTRL_CC_RD)); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, + TCPC_TCPC_CTRL_EN_LK4CONN_ALRT, + TCPC_TCPC_CTRL_EN_LK4CONN_ALRT); + if (ret) + return ret; + + return max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION); +} + bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce, bool *cc_handled) { @@ -340,6 +378,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect if (ret < 0) return false; + if (cc_status & TCPC_CC_STATUS_TOGGLING) { + if (chip->contaminant_state == DETECTED) + return true; + return false; + } + if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) { if (!disconnect_while_debounce) msleep(100); @@ -372,6 +416,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect max_contaminant_enable_dry_detection(chip); return true; } + + ret = max_contaminant_enable_toggling(chip); + if (ret) + dev_err(chip->dev, + "Failed to enable toggling, ret=%d", + ret); } } else if (chip->contaminant_state == DETECTED) { if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) { @@ -379,6 +429,14 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect if (chip->contaminant_state == DETECTED) { max_contaminant_enable_dry_detection(chip); return true; + } else { + ret = max_contaminant_enable_toggling(chip); + if (ret) { + dev_err(chip->dev, + "Failed to enable toggling, ret=%d", + ret); + return true; + } } } } diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h index 76270d5c2838..b33540a42a95 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim.h +++ b/drivers/usb/typec/tcpm/tcpci_maxim.h @@ -21,6 +21,7 @@ #define CCOVPDIS BIT(6) #define SBURPCTRL BIT(5) #define CCLPMODESEL GENMASK(4, 3) +#define LOW_POWER_MODE_DISABLE 0 #define ULTRA_LOW_POWER_MODE 1 #define CCRPCTRL GENMASK(2, 0) #define UA_1_SRC 1 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 6edac0c1ba9b..c6508fe0d5c8 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -99,6 +99,7 @@ struct vhost_net_ubuf_ref { atomic_t refcount; wait_queue_head_t wait; struct vhost_virtqueue *vq; + struct rcu_head rcu; }; #define VHOST_NET_BATCH 64 @@ -250,9 +251,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) { - int r = atomic_sub_return(1, &ubufs->refcount); + int r; + + rcu_read_lock(); + r = atomic_sub_return(1, &ubufs->refcount); if (unlikely(!r)) wake_up(&ubufs->wait); + rcu_read_unlock(); return r; } @@ -265,7 +270,7 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) { vhost_net_ubuf_put_and_wait(ubufs); - kfree(ubufs); + kfree_rcu(ubufs, rcu); } static void vhost_net_clear_ubuf_info(struct vhost_net *n) diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index a5d63269f20b..d0728285b6ce 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -360,11 
+360,15 @@ static int virtinput_freeze(struct virtio_device *vdev) { struct virtio_input *vi = vdev->priv; unsigned long flags; + void *buf; spin_lock_irqsave(&vi->lock, flags); vi->ready = false; spin_unlock_irqrestore(&vi->lock, flags); + virtio_reset_device(vdev); + while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL) + kfree(buf); vdev->config->del_vqs(vdev); return 0; } diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c index 677d1f68bc9b..bbbf89c22880 100644 --- a/drivers/virtio/virtio_pci_legacy_dev.c +++ b/drivers/virtio/virtio_pci_legacy_dev.c @@ -140,9 +140,9 @@ EXPORT_SYMBOL_GPL(vp_legacy_set_status); * vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue * @ldev: the legacy virtio-pci device * @index: queue index - * @vector: the config vector + * @vector: the queue vector * - * Returns the config vector read from the device + * Returns the queue vector read from the device */ u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev, u16 index, u16 vector) diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index d665f8f73ea8..9e503b7a58d8 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -546,9 +546,9 @@ EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset); * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue * @mdev: the modern virtio-pci device * @index: queue index - * @vector: the config vector + * @vector: the queue vector * - * Returns the config vector read from the device + * Returns the queue vector read from the device */ u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, u16 index, u16 vector) diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 3c9da446b85d..528682bf0c7f 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -718,26 +718,6 @@ int xs_watch_msg(struct xs_watch_event *event) return 0; } -/* - * Certain older XenBus toolstack cannot handle reading values that are - * not populated. Some Xen 3.4 installation are incapable of doing this - * so if we are running on anything older than 4 do not attempt to read - * control/platform-feature-xs_reset_watches. - */ -static bool xen_strict_xenbus_quirk(void) -{ -#ifdef CONFIG_X86 - uint32_t eax, ebx, ecx, edx, base; - - base = xen_cpuid_base(); - cpuid(base + 1, &eax, &ebx, &ecx, &edx); - - if ((eax >> 16) < 4) - return true; -#endif - return false; - -} static void xs_reset_watches(void) { int err; @@ -745,9 +725,6 @@ static void xs_reset_watches(void) if (!xen_hvm_domain() || xen_initial_domain()) return; - if (xen_strict_xenbus_quirk()) - return; - if (!xenbus_read_unsigned("control", "platform-feature-xs_reset_watches", 0)) return; |
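
A closing note on the virtio_input freeze hunk above: virtio suspend paths generally follow a reset, reclaim, delete order. Reset the device so it stops touching the vrings, detach any buffers the device never consumed (they would otherwise be leaked once the virtqueues are deleted), and only then delete the vqs. The sketch below shows that order for a hypothetical single-queue driver; example_priv and its vq field are invented for illustration, while the virtio calls are the same ones used in the virtio_input change.

#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical driver state: one queue filled with kmalloc'ed buffers. */
struct example_priv {
	struct virtqueue *vq;
};

static int example_freeze(struct virtio_device *vdev)
{
	struct example_priv *priv = vdev->priv;
	void *buf;

	/* 1) Reset first so the device stops using the vrings. */
	virtio_reset_device(vdev);

	/* 2) Reclaim buffers the device never consumed; without this
	 *    step they are leaked when the vqs are deleted below. */
	while ((buf = virtqueue_detach_unused_buf(priv->vq)) != NULL)
		kfree(buf);

	/* 3) Only now tear down the virtqueues. */
	vdev->config->del_vqs(vdev);
	return 0;
}
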