}
EXPORT_SYMBOL(devm_clk_get);

+struct clk_bulk_devres {
+	struct clk_bulk_data *clks;
+	int num_clks;
+};
+
+static void devm_clk_bulk_release(struct device *dev, void *res)
+{
+	struct clk_bulk_devres *devres = res;
+
+	clk_bulk_put(devres->num_clks, devres->clks);
+}
+
+int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+				   struct clk_bulk_data *clks)
+{
+	struct clk_bulk_devres *devres;
+	int ret;
+
+	devres = devres_alloc(devm_clk_bulk_release,
+			      sizeof(*devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	ret = clk_bulk_get(dev, num_clks, clks);
+	if (!ret) {
+		devres->clks = clks;
+		devres->num_clks = num_clks;
+		devres_add(dev, devres);
+	} else {
+		devres_free(devres);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get);
+
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk **c = res;
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);

+/**
+ * devm_clk_bulk_get - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Returns 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to get several clk
+ * consumers in one operation with management; the clks will
+ * automatically be freed when the device is unbound.
+ */
+int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+				   struct clk_bulk_data *clks);
+
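As a consumer-side illustration only (not part of this patch): a minimal, hypothetical platform driver sketch. The driver name "foo" and the clock ids "ipg"/"ahb" are invented, and the clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() helpers from the clk bulk API are assumed to be available alongside this change.

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/*
 * The table must outlive probe(): devm_clk_bulk_release() walks it on
 * unbind, so a stack-local array would be a use-after-free.  The clock
 * ids "ipg" and "ahb" are made up for this sketch.
 */
static struct clk_bulk_data foo_clks[] = {
	{ .id = "ipg" },
	{ .id = "ahb" },
};

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Managed lookup: no clk_bulk_put() needed on any exit path. */
	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(foo_clks), foo_clks);
	if (ret)
		return ret;

	/* Only the get is managed; prepare/enable must still be balanced. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks), foo_clks);
}

static int foo_remove(struct platform_device *pdev)
{
	/* Balances probe(); the puts happen via devres after this returns. */
	clk_bulk_disable_unprepare(ARRAY_SIZE(foo_clks), foo_clks);
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= { .name = "foo" },
	.probe	= foo_probe,
	.remove	= foo_remove,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

Note that devm_clk_bulk_get() only records a pointer to the caller's clk_bulk_data table, so the table itself must stay valid until the device is unbound; a static or devm-allocated array satisfies that.
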
/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
	return NULL;
}

+static inline int __must_check devm_clk_bulk_get(struct device *dev,
+						 int num_clks,
+						 struct clk_bulk_data *clks)
+{
+	return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{