author	Jakub Kicinski <kuba@kernel.org>	2026-01-21 18:57:09 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2026-01-21 18:57:10 -0800
commit	938b404e25d67be9004b3c853afa4a2e9723fe5b (patch)
tree	80a6fc6d395fa9e05381407bbd10dedc4e16022a /drivers/net
parent	d8f87aa5fa0a4276491fa8ef436cd22605a3f9ba (diff)
parent	808d077f5b538065c524b76b340007c51fad215c (diff)
Merge branch 'add-devm_clk_bulk_get_optional_enable-helper-and-use-in-axi-ethernet-driver'
Suraj Gupta says:

====================
Add devm_clk_bulk_get_optional_enable() helper and use in AXI Ethernet driver

This patch series introduces a new managed clock framework helper function and demonstrates its usage in the AXI Ethernet driver.

Device drivers frequently need to get optional bulk clocks, prepare them, and enable them during probe, while ensuring automatic cleanup on device unbind. Currently, this requires three separate operations with manual cleanup handling. The new devm_clk_bulk_get_optional_enable() helper combines these operations into a single managed call, eliminating boilerplate code and following the established pattern of devm_clk_bulk_get_all_enabled().
====================

Link: https://patch.msgid.link/20260116192725.972966-1-suraj.gupta2@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
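Before the diff, a minimal probe-time sketch (not taken from this series) of the pattern the helper collapses. It assumes the new helper's signature mirrors devm_clk_bulk_get_optional(); example_probe_clocks() and EXAMPLE_NUM_CLOCKS are hypothetical names used only for illustration.

#include <linux/clk.h>
#include <linux/device.h>

#define EXAMPLE_NUM_CLOCKS	3

static int example_probe_clocks(struct device *dev,
				struct clk_bulk_data *clks)
{
	/*
	 * Previously this took two calls at probe time, plus a matching
	 * clk_bulk_disable_unprepare() in every error path and in remove():
	 *
	 *	ret = devm_clk_bulk_get_optional(dev, EXAMPLE_NUM_CLOCKS, clks);
	 *	if (ret)
	 *		return ret;
	 *	ret = clk_bulk_prepare_enable(EXAMPLE_NUM_CLOCKS, clks);
	 *	if (ret)
	 *		return ret;
	 *
	 * With the new helper, acquisition, prepare and enable are one
	 * managed call; devres disables and unprepares the clocks
	 * automatically on device unbind.
	 */
	return devm_clk_bulk_get_optional_enable(dev, EXAMPLE_NUM_CLOCKS, clks);
}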
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_main.c	83
1 file changed, 27 insertions(+), 56 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 284031fb2e2c..998bacd508b8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2787,7 +2787,7 @@ static int axienet_probe(struct platform_device *pdev)
int addr_width = 32;
u32 value;
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
if (!ndev)
return -ENOMEM;
@@ -2815,41 +2815,32 @@ static int axienet_probe(struct platform_device *pdev)
seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev,
+ "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
* treat the first clock specified as the AXI clock.
*/
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
- }
- if (IS_ERR(lp->axi_clk)) {
- ret = PTR_ERR(lp->axi_clk);
- goto free_netdev;
- }
- ret = clk_prepare_enable(lp->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
- goto free_netdev;
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
}
+ if (IS_ERR(lp->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk),
+ "could not get AXI clock\n");
lp->misc_clks[0].id = "axis_clk";
lp->misc_clks[1].id = "ref_clk";
lp->misc_clks[2].id = "mgt_clk";
- ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- if (ret)
- goto cleanup_clk;
-
- ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS,
+ lp->misc_clks);
if (ret)
- goto cleanup_clk;
+ return dev_err_probe(&pdev->dev, ret,
+ "could not get/enable misc. clocks\n");
/* Map device registers */
lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
- if (IS_ERR(lp->regs)) {
- ret = PTR_ERR(lp->regs);
- goto cleanup_clk;
- }
+ if (IS_ERR(lp->regs))
+ return PTR_ERR(lp->regs);
lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
@@ -2918,19 +2909,17 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
break;
default:
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
} else {
ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
if (ret)
- goto cleanup_clk;
+ return ret;
}
if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
if (!of_property_present(pdev->dev.of_node, "dmas")) {
@@ -2945,7 +2934,7 @@ static int axienet_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"unable to get DMA resource\n");
of_node_put(np);
- goto cleanup_clk;
+ return ret;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev,
&dmares);
@@ -2962,19 +2951,17 @@ static int axienet_probe(struct platform_device *pdev)
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto cleanup_clk;
+ return PTR_ERR(lp->dma_regs);
}
if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto cleanup_clk;
+ return -ENOMEM;
}
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
if (ret)
- goto cleanup_clk;
+ return ret;
/* Autodetect the need for 64-bit DMA pointers.
* When the IP is configured for a bus width bigger than 32 bits,
@@ -3001,14 +2988,13 @@ static int axienet_probe(struct platform_device *pdev)
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto cleanup_clk;
+ return ret;
}
netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
@@ -3018,15 +3004,12 @@ static int axienet_probe(struct platform_device *pdev)
lp->eth_irq = platform_get_irq_optional(pdev, 0);
if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
- ret = lp->eth_irq;
- goto cleanup_clk;
+ return lp->eth_irq;
}
tx_chan = dma_request_chan(lp->dev, "tx_chan0");
- if (IS_ERR(tx_chan)) {
- ret = PTR_ERR(tx_chan);
- dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
- goto cleanup_clk;
- }
+ if (IS_ERR(tx_chan))
+ return dev_err_probe(lp->dev, PTR_ERR(tx_chan),
+ "No Ethernet DMA (TX) channel found\n");
cfg.reset = 1;
/* As name says VDMA but it has support for DMA channel reset */
@@ -3034,7 +3017,7 @@ static int axienet_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Reset channel failed\n");
dma_release_channel(tx_chan);
- goto cleanup_clk;
+ return ret;
}
dma_release_channel(tx_chan);
@@ -3139,13 +3122,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
-cleanup_clk:
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
-free_netdev:
- free_netdev(ndev);
-
return ret;
}
@@ -3163,11 +3139,6 @@ static void axienet_remove(struct platform_device *pdev)
put_device(&lp->pcs_phy->dev);
axienet_mdio_teardown(lp);
-
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
- free_netdev(ndev);
}
static void axienet_shutdown(struct platform_device *pdev)