 drivers/clk/clk-devres.c                          | 50
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 83
 include/linux/clk.h                               | 23
 3 files changed, 100 insertions(+), 56 deletions(-)
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 5368d92d9b39..994d5bc5168b 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -179,6 +179,56 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
+static void devm_clk_bulk_release_enable(struct device *dev, void *res)
+{
+ struct clk_bulk_devres *devres = res;
+
+ clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
+ clk_bulk_put(devres->num_clks, devres->clks);
+}
+
+static int __devm_clk_bulk_get_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks, bool optional)
+{
+ struct clk_bulk_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_bulk_release_enable,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ if (optional)
+ ret = clk_bulk_get_optional(dev, num_clks, clks);
+ else
+ ret = clk_bulk_get(dev, num_clks, clks);
+ if (ret)
+ goto err_clk_get;
+
+ ret = clk_bulk_prepare_enable(num_clks, clks);
+ if (ret)
+ goto err_clk_prepare;
+
+ devres->clks = clks;
+ devres->num_clks = num_clks;
+ devres_add(dev, devres);
+
+ return 0;
+
+err_clk_prepare:
+ clk_bulk_put(num_clks, clks);
+err_clk_get:
+ devres_free(devres);
+ return ret;
+}
+
+int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return __devm_clk_bulk_get_enable(dev, num_clks, clks, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional_enable);
+
static void devm_clk_bulk_release_all(struct device *dev, void *res)
{
struct clk_bulk_devres *devres = res;
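
For illustration, here is a minimal consumer sketch of the new helper (the
driver name, clock IDs and three-clock count are hypothetical, not part of
this patch). One subtlety worth noting: devm_clk_bulk_release_enable() reuses
the caller's clk_bulk_data pointer at unbind time, so the table must outlive
probe(), e.g. by living in devm-allocated driver data:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct clk_bulk_data clks[3];
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clks[0].id = "bus_clk";
	priv->clks[1].id = "ref_clk";
	priv->clks[2].id = "aux_clk";

	/* Absent optional clocks come back as NULL and are skipped by the
	 * bulk enable; everything is undone automatically on unbind.
	 */
	ret = devm_clk_bulk_get_optional_enable(&pdev->dev,
						ARRAY_SIZE(priv->clks),
						priv->clks);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "could not get/enable clocks\n");

	return 0;
}
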
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 284031fb2e2c..998bacd508b8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2787,7 +2787,7 @@ static int axienet_probe(struct platform_device *pdev)
int addr_width = 32;
u32 value;
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
if (!ndev)
return -ENOMEM;
@@ -2815,41 +2815,32 @@ static int axienet_probe(struct platform_device *pdev)
seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev,
+ "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
* treat the first clock specified as the AXI clock.
*/
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
- }
- if (IS_ERR(lp->axi_clk)) {
- ret = PTR_ERR(lp->axi_clk);
- goto free_netdev;
- }
- ret = clk_prepare_enable(lp->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
- goto free_netdev;
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
}
+ if (IS_ERR(lp->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk),
+ "could not get AXI clock\n");
lp->misc_clks[0].id = "axis_clk";
lp->misc_clks[1].id = "ref_clk";
lp->misc_clks[2].id = "mgt_clk";
- ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- if (ret)
- goto cleanup_clk;
-
- ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS,
+ lp->misc_clks);
if (ret)
- goto cleanup_clk;
+ return dev_err_probe(&pdev->dev, ret,
+ "could not get/enable misc. clocks\n");
/* Map device registers */
lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
- if (IS_ERR(lp->regs)) {
- ret = PTR_ERR(lp->regs);
- goto cleanup_clk;
- }
+ if (IS_ERR(lp->regs))
+ return PTR_ERR(lp->regs);
lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
@@ -2918,19 +2909,17 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
break;
default:
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
} else {
ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
if (ret)
- goto cleanup_clk;
+ return ret;
}
if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
if (!of_property_present(pdev->dev.of_node, "dmas")) {
@@ -2945,7 +2934,7 @@ static int axienet_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"unable to get DMA resource\n");
of_node_put(np);
- goto cleanup_clk;
+ return ret;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev,
&dmares);
@@ -2962,19 +2951,17 @@ static int axienet_probe(struct platform_device *pdev)
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto cleanup_clk;
+ return PTR_ERR(lp->dma_regs);
}
if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto cleanup_clk;
+ return -ENOMEM;
}
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
if (ret)
- goto cleanup_clk;
+ return ret;
/* Autodetect the need for 64-bit DMA pointers.
* When the IP is configured for a bus width bigger than 32 bits,
@@ -3001,14 +2988,13 @@ static int axienet_probe(struct platform_device *pdev)
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto cleanup_clk;
+ return ret;
}
netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
@@ -3018,15 +3004,12 @@ static int axienet_probe(struct platform_device *pdev)
lp->eth_irq = platform_get_irq_optional(pdev, 0);
if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
- ret = lp->eth_irq;
- goto cleanup_clk;
+ return lp->eth_irq;
}
tx_chan = dma_request_chan(lp->dev, "tx_chan0");
- if (IS_ERR(tx_chan)) {
- ret = PTR_ERR(tx_chan);
- dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
- goto cleanup_clk;
- }
+ if (IS_ERR(tx_chan))
+ return dev_err_probe(lp->dev, PTR_ERR(tx_chan),
+ "No Ethernet DMA (TX) channel found\n");
cfg.reset = 1;
/* As name says VDMA but it has support for DMA channel reset */
@@ -3034,7 +3017,7 @@ static int axienet_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Reset channel failed\n");
dma_release_channel(tx_chan);
- goto cleanup_clk;
+ return ret;
}
dma_release_channel(tx_chan);
@@ -3139,13 +3122,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
-cleanup_clk:
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
-free_netdev:
- free_netdev(ndev);
-
return ret;
}
@@ -3163,11 +3139,6 @@ static void axienet_remove(struct platform_device *pdev)
put_device(&lp->pcs_phy->dev);
axienet_mdio_teardown(lp);
-
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
- free_netdev(ndev);
}
static void axienet_shutdown(struct platform_device *pdev)
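
Dropping the clock cleanup from the error paths and from axienet_remove() is
safe because devres releases run only after the driver's remove() callback
returns, in reverse order of registration. A hypothetical sketch of that
ordering (driver and clock names are illustrative):

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct clk *clk;

	/* Registered first, released last. */
	ndev = devm_alloc_etherdev(&pdev->dev, 0);
	if (!ndev)
		return -ENOMEM;

	/* Registered second, released first. */
	clk = devm_clk_get_optional_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "could not get bus clock\n");

	/* On unbind: bar_remove() runs first, then the clock is disabled,
	 * unprepared and put, and only then is the netdev freed -- the same
	 * order the old goto-based cleanup enforced by hand.
	 */
	return 0;
}
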
diff --git a/include/linux/clk.h b/include/linux/clk.h
index b607482ca77e..c571e294f0ef 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -479,6 +479,22 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
+ * devm_clk_bulk_get_optional_enable - Get and enable optional bulk clocks (managed)
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Behaves the same as devm_clk_bulk_get_optional() but also prepares and
+ * enables the clocks in one managed operation. The clocks are automatically
+ * disabled, unprepared and released when the device is unbound.
+ *
+ * Return: 0 if all clocks specified in the clk_bulk_data table were obtained
+ * and enabled successfully, or if no clock provider was available for an
+ * optional clock. Otherwise a negative errno is returned.
+ */
+int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
@@ -1029,6 +1045,13 @@ static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
return 0;
}
+static inline int __must_check devm_clk_bulk_get_optional_enable(struct device *dev,
+ int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{