From 8caab75fd2c2a92667cbb1cd315720bede3feaa9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 13 Jun 2017 13:23:52 +0200 Subject: [PATCH] spi: Generalize SPI "master" to "controller" Now struct spi_master is used for both SPI master and slave controllers, it makes sense to rename it to struct spi_controller, and replace "master" by "controller" where appropriate. For now this conversion is done for SPI core infrastructure only. Wrappers are provided for backwards compatibility, until all SPI drivers have been converted. Noteworthy details: - SPI_MASTER_GPIO_SS is retained, as it only makes sense for SPI master controllers, - spi_busnum_to_master() is retained, as it looks up masters only, - A new field spi_device.controller is added, but spi_device.master is retained for compatibility (both are always initialized by spi_alloc_device()), - spi_flash_read() is used by SPI masters only. Signed-off-by: Geert Uytterhoeven Signed-off-by: Mark Brown --- drivers/spi/spi.c | 1081 ++++++++++++++++++------------------ include/linux/spi/spi.h | 198 ++++--- include/trace/events/spi.h | 26 +- 3 files changed, 668 insertions(+), 637 deletions(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c3f6b524b3ce..4fcbb0aa71d3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -48,11 +48,11 @@ static void spidev_release(struct device *dev) { struct spi_device *spi = to_spi_device(dev); - /* spi masters may cleanup for released devices */ - if (spi->master->cleanup) - spi->master->cleanup(spi); + /* spi controllers may cleanup for released devices */ + if (spi->controller->cleanup) + spi->controller->cleanup(spi); - spi_master_put(spi->master); + spi_controller_put(spi->controller); kfree(spi); } @@ -71,17 +71,17 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf) static DEVICE_ATTR_RO(modalias); #define SPI_STATISTICS_ATTRS(field, file) \ -static ssize_t spi_master_##field##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ +static ssize_t spi_controller_##field##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ { \ - struct spi_master *master = container_of(dev, \ - struct spi_master, dev); \ - return spi_statistics_##field##_show(&master->statistics, buf); \ + struct spi_controller *ctlr = container_of(dev, \ + struct spi_controller, dev); \ + return spi_statistics_##field##_show(&ctlr->statistics, buf); \ } \ -static struct device_attribute dev_attr_spi_master_##field = { \ +static struct device_attribute dev_attr_spi_controller_##field = { \ .attr = { .name = file, .mode = 0444 }, \ - .show = spi_master_##field##_show, \ + .show = spi_controller_##field##_show, \ }; \ static ssize_t spi_device_##field##_show(struct device *dev, \ struct device_attribute *attr, \ @@ -201,51 +201,51 @@ static const struct attribute_group *spi_dev_groups[] = { NULL, }; -static struct attribute *spi_master_statistics_attrs[] = { - &dev_attr_spi_master_messages.attr, - &dev_attr_spi_master_transfers.attr, - &dev_attr_spi_master_errors.attr, - &dev_attr_spi_master_timedout.attr, - &dev_attr_spi_master_spi_sync.attr, - &dev_attr_spi_master_spi_sync_immediate.attr, - &dev_attr_spi_master_spi_async.attr, - &dev_attr_spi_master_bytes.attr, - &dev_attr_spi_master_bytes_rx.attr, - &dev_attr_spi_master_bytes_tx.attr, - &dev_attr_spi_master_transfer_bytes_histo0.attr, - &dev_attr_spi_master_transfer_bytes_histo1.attr, - &dev_attr_spi_master_transfer_bytes_histo2.attr, - &dev_attr_spi_master_transfer_bytes_histo3.attr, - 
&dev_attr_spi_master_transfer_bytes_histo4.attr, - &dev_attr_spi_master_transfer_bytes_histo5.attr, - &dev_attr_spi_master_transfer_bytes_histo6.attr, - &dev_attr_spi_master_transfer_bytes_histo7.attr, - &dev_attr_spi_master_transfer_bytes_histo8.attr, - &dev_attr_spi_master_transfer_bytes_histo9.attr, - &dev_attr_spi_master_transfer_bytes_histo10.attr, - &dev_attr_spi_master_transfer_bytes_histo11.attr, - &dev_attr_spi_master_transfer_bytes_histo12.attr, - &dev_attr_spi_master_transfer_bytes_histo13.attr, - &dev_attr_spi_master_transfer_bytes_histo14.attr, - &dev_attr_spi_master_transfer_bytes_histo15.attr, - &dev_attr_spi_master_transfer_bytes_histo16.attr, - &dev_attr_spi_master_transfers_split_maxsize.attr, +static struct attribute *spi_controller_statistics_attrs[] = { + &dev_attr_spi_controller_messages.attr, + &dev_attr_spi_controller_transfers.attr, + &dev_attr_spi_controller_errors.attr, + &dev_attr_spi_controller_timedout.attr, + &dev_attr_spi_controller_spi_sync.attr, + &dev_attr_spi_controller_spi_sync_immediate.attr, + &dev_attr_spi_controller_spi_async.attr, + &dev_attr_spi_controller_bytes.attr, + &dev_attr_spi_controller_bytes_rx.attr, + &dev_attr_spi_controller_bytes_tx.attr, + &dev_attr_spi_controller_transfer_bytes_histo0.attr, + &dev_attr_spi_controller_transfer_bytes_histo1.attr, + &dev_attr_spi_controller_transfer_bytes_histo2.attr, + &dev_attr_spi_controller_transfer_bytes_histo3.attr, + &dev_attr_spi_controller_transfer_bytes_histo4.attr, + &dev_attr_spi_controller_transfer_bytes_histo5.attr, + &dev_attr_spi_controller_transfer_bytes_histo6.attr, + &dev_attr_spi_controller_transfer_bytes_histo7.attr, + &dev_attr_spi_controller_transfer_bytes_histo8.attr, + &dev_attr_spi_controller_transfer_bytes_histo9.attr, + &dev_attr_spi_controller_transfer_bytes_histo10.attr, + &dev_attr_spi_controller_transfer_bytes_histo11.attr, + &dev_attr_spi_controller_transfer_bytes_histo12.attr, + &dev_attr_spi_controller_transfer_bytes_histo13.attr, + &dev_attr_spi_controller_transfer_bytes_histo14.attr, + &dev_attr_spi_controller_transfer_bytes_histo15.attr, + &dev_attr_spi_controller_transfer_bytes_histo16.attr, + &dev_attr_spi_controller_transfers_split_maxsize.attr, NULL, }; -static const struct attribute_group spi_master_statistics_group = { +static const struct attribute_group spi_controller_statistics_group = { .name = "statistics", - .attrs = spi_master_statistics_attrs, + .attrs = spi_controller_statistics_attrs, }; static const struct attribute_group *spi_master_groups[] = { - &spi_master_statistics_group, + &spi_controller_statistics_group, NULL, }; void spi_statistics_add_transfer_stats(struct spi_statistics *stats, struct spi_transfer *xfer, - struct spi_master *master) + struct spi_controller *ctlr) { unsigned long flags; int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; @@ -260,10 +260,10 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, stats->bytes += xfer->len; if ((xfer->tx_buf) && - (xfer->tx_buf != master->dummy_tx)) + (xfer->tx_buf != ctlr->dummy_tx)) stats->bytes_tx += xfer->len; if ((xfer->rx_buf) && - (xfer->rx_buf != master->dummy_rx)) + (xfer->rx_buf != ctlr->dummy_rx)) stats->bytes_rx += xfer->len; spin_unlock_irqrestore(&stats->lock, flags); @@ -405,7 +405,7 @@ EXPORT_SYMBOL_GPL(__spi_register_driver); /*-------------------------------------------------------------------------*/ /* SPI devices should normally not be created by SPI device drivers; that - * would make them board-specific. Similarly with SPI master drivers. 
+ * would make them board-specific. Similarly with SPI controller drivers. * Device registration normally goes into like arch/.../mach.../board-YYY.c * with other readonly (flashable) information about mainboard devices. */ @@ -416,17 +416,17 @@ struct boardinfo { }; static LIST_HEAD(board_list); -static LIST_HEAD(spi_master_list); +static LIST_HEAD(spi_controller_list); /* * Used to protect add/del opertion for board_info list and - * spi_master list, and their matching process + * spi_controller list, and their matching process */ static DEFINE_MUTEX(board_lock); /** * spi_alloc_device - Allocate a new SPI device - * @master: Controller to which device is connected + * @ctlr: Controller to which device is connected * Context: can sleep * * Allows a driver to allocate and initialize a spi_device without @@ -435,27 +435,27 @@ static DEFINE_MUTEX(board_lock); * spi_add_device() on it. * * Caller is responsible to call spi_add_device() on the returned - * spi_device structure to add it to the SPI master. If the caller + * spi_device structure to add it to the SPI controller. If the caller * needs to discard the spi_device without adding it, then it should * call spi_dev_put() on it. * * Return: a pointer to the new device, or NULL. */ -struct spi_device *spi_alloc_device(struct spi_master *master) +struct spi_device *spi_alloc_device(struct spi_controller *ctlr) { struct spi_device *spi; - if (!spi_master_get(master)) + if (!spi_controller_get(ctlr)) return NULL; spi = kzalloc(sizeof(*spi), GFP_KERNEL); if (!spi) { - spi_master_put(master); + spi_controller_put(ctlr); return NULL; } - spi->master = master; - spi->dev.parent = &master->dev; + spi->master = spi->controller = ctlr; + spi->dev.parent = &ctlr->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; spi->cs_gpio = -ENOENT; @@ -476,7 +476,7 @@ static void spi_dev_set_name(struct spi_device *spi) return; } - dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), + dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev), spi->chip_select); } @@ -485,7 +485,7 @@ static int spi_dev_check(struct device *dev, void *data) struct spi_device *spi = to_spi_device(dev); struct spi_device *new_spi = data; - if (spi->master == new_spi->master && + if (spi->controller == new_spi->controller && spi->chip_select == new_spi->chip_select) return -EBUSY; return 0; @@ -503,15 +503,14 @@ static int spi_dev_check(struct device *dev, void *data) int spi_add_device(struct spi_device *spi) { static DEFINE_MUTEX(spi_add_lock); - struct spi_master *master = spi->master; - struct device *dev = master->dev.parent; + struct spi_controller *ctlr = spi->controller; + struct device *dev = ctlr->dev.parent; int status; /* Chipselects are numbered 0..max; validate. */ - if (spi->chip_select >= master->num_chipselect) { - dev_err(dev, "cs%d >= max %d\n", - spi->chip_select, - master->num_chipselect); + if (spi->chip_select >= ctlr->num_chipselect) { + dev_err(dev, "cs%d >= max %d\n", spi->chip_select, + ctlr->num_chipselect); return -EINVAL; } @@ -531,8 +530,8 @@ int spi_add_device(struct spi_device *spi) goto done; } - if (master->cs_gpios) - spi->cs_gpio = master->cs_gpios[spi->chip_select]; + if (ctlr->cs_gpios) + spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; /* Drivers may modify this initial i/o setup, but will * normally rely on the device being setup. 
Devices @@ -561,7 +560,7 @@ EXPORT_SYMBOL_GPL(spi_add_device); /** * spi_new_device - instantiate one new SPI device - * @master: Controller to which device is connected + * @ctlr: Controller to which device is connected * @chip: Describes the SPI device * Context: can sleep * @@ -573,7 +572,7 @@ EXPORT_SYMBOL_GPL(spi_add_device); * * Return: the new device, or NULL. */ -struct spi_device *spi_new_device(struct spi_master *master, +struct spi_device *spi_new_device(struct spi_controller *ctlr, struct spi_board_info *chip) { struct spi_device *proxy; @@ -586,7 +585,7 @@ struct spi_device *spi_new_device(struct spi_master *master, * suggests syslogged diagnostics are best here (ugh). */ - proxy = spi_alloc_device(master); + proxy = spi_alloc_device(ctlr); if (!proxy) return NULL; @@ -604,7 +603,7 @@ struct spi_device *spi_new_device(struct spi_master *master, if (chip->properties) { status = device_add_properties(&proxy->dev, chip->properties); if (status) { - dev_err(&master->dev, + dev_err(&ctlr->dev, "failed to add properties to '%s': %d\n", chip->modalias, status); goto err_dev_put; @@ -631,7 +630,7 @@ EXPORT_SYMBOL_GPL(spi_new_device); * @spi: spi_device to unregister * * Start making the passed SPI device vanish. Normally this would be handled - * by spi_unregister_master(). + * by spi_unregister_controller(). */ void spi_unregister_device(struct spi_device *spi) { @@ -648,17 +647,17 @@ void spi_unregister_device(struct spi_device *spi) } EXPORT_SYMBOL_GPL(spi_unregister_device); -static void spi_match_master_to_boardinfo(struct spi_master *master, - struct spi_board_info *bi) +static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, + struct spi_board_info *bi) { struct spi_device *dev; - if (master->bus_num != bi->bus_num) + if (ctlr->bus_num != bi->bus_num) return; - dev = spi_new_device(master, bi); + dev = spi_new_device(ctlr, bi); if (!dev) - dev_err(master->dev.parent, "can't create new device for %s\n", + dev_err(ctlr->dev.parent, "can't create new device for %s\n", bi->modalias); } @@ -697,7 +696,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n) return -ENOMEM; for (i = 0; i < n; i++, bi++, info++) { - struct spi_master *master; + struct spi_controller *ctlr; memcpy(&bi->board_info, info, sizeof(*info)); if (info->properties) { @@ -709,8 +708,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n) mutex_lock(&board_lock); list_add_tail(&bi->list, &board_list); - list_for_each_entry(master, &spi_master_list, list) - spi_match_master_to_boardinfo(master, &bi->board_info); + list_for_each_entry(ctlr, &spi_controller_list, list) + spi_match_controller_to_boardinfo(ctlr, + &bi->board_info); mutex_unlock(&board_lock); } @@ -727,16 +727,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable) if (gpio_is_valid(spi->cs_gpio)) { gpio_set_value(spi->cs_gpio, !enable); /* Some SPI masters need both GPIO CS & slave_select */ - if ((spi->master->flags & SPI_MASTER_GPIO_SS) && - spi->master->set_cs) - spi->master->set_cs(spi, !enable); - } else if (spi->master->set_cs) { - spi->master->set_cs(spi, !enable); + if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && + spi->controller->set_cs) + spi->controller->set_cs(spi, !enable); + } else if (spi->controller->set_cs) { + spi->controller->set_cs(spi, !enable); } } #ifdef CONFIG_HAS_DMA -static int spi_map_buf(struct spi_master *master, struct device *dev, +static int spi_map_buf(struct spi_controller *ctlr, struct device *dev, struct sg_table *sgt, void *buf, 
size_t len, enum dma_data_direction dir) { @@ -761,7 +761,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, desc_len = min_t(int, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); } else if (virt_addr_valid(buf)) { - desc_len = min_t(int, max_seg_size, master->max_dma_len); + desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); } else { return -EINVAL; @@ -811,7 +811,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, return 0; } -static void spi_unmap_buf(struct spi_master *master, struct device *dev, +static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, struct sg_table *sgt, enum dma_data_direction dir) { if (sgt->orig_nents) { @@ -820,31 +820,31 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev, } } -static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) +static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) { struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; int ret; - if (!master->can_dma) + if (!ctlr->can_dma) return 0; - if (master->dma_tx) - tx_dev = master->dma_tx->device->dev; + if (ctlr->dma_tx) + tx_dev = ctlr->dma_tx->device->dev; else - tx_dev = master->dev.parent; + tx_dev = ctlr->dev.parent; - if (master->dma_rx) - rx_dev = master->dma_rx->device->dev; + if (ctlr->dma_rx) + rx_dev = ctlr->dma_rx->device->dev; else - rx_dev = master->dev.parent; + rx_dev = ctlr->dev.parent; list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if (!master->can_dma(master, msg->spi, xfer)) + if (!ctlr->can_dma(ctlr, msg->spi, xfer)) continue; if (xfer->tx_buf != NULL) { - ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, + ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg, (void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE); if (ret != 0) @@ -852,79 +852,78 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) } if (xfer->rx_buf != NULL) { - ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, + ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg, xfer->rx_buf, xfer->len, DMA_FROM_DEVICE); if (ret != 0) { - spi_unmap_buf(master, tx_dev, &xfer->tx_sg, + spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); return ret; } } } - master->cur_msg_mapped = true; + ctlr->cur_msg_mapped = true; return 0; } -static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) +static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) { struct spi_transfer *xfer; struct device *tx_dev, *rx_dev; - if (!master->cur_msg_mapped || !master->can_dma) + if (!ctlr->cur_msg_mapped || !ctlr->can_dma) return 0; - if (master->dma_tx) - tx_dev = master->dma_tx->device->dev; + if (ctlr->dma_tx) + tx_dev = ctlr->dma_tx->device->dev; else - tx_dev = master->dev.parent; + tx_dev = ctlr->dev.parent; - if (master->dma_rx) - rx_dev = master->dma_rx->device->dev; + if (ctlr->dma_rx) + rx_dev = ctlr->dma_rx->device->dev; else - rx_dev = master->dev.parent; + rx_dev = ctlr->dev.parent; list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if (!master->can_dma(master, msg->spi, xfer)) + if (!ctlr->can_dma(ctlr, msg->spi, xfer)) continue; - spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); - spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); + spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } return 0; } #else /* !CONFIG_HAS_DMA */ -static inline int 
spi_map_buf(struct spi_master *master, - struct device *dev, struct sg_table *sgt, - void *buf, size_t len, +static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev, + struct sg_table *sgt, void *buf, size_t len, enum dma_data_direction dir) { return -EINVAL; } -static inline void spi_unmap_buf(struct spi_master *master, +static inline void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, struct sg_table *sgt, enum dma_data_direction dir) { } -static inline int __spi_map_msg(struct spi_master *master, +static inline int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) { return 0; } -static inline int __spi_unmap_msg(struct spi_master *master, +static inline int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) { return 0; } #endif /* !CONFIG_HAS_DMA */ -static inline int spi_unmap_msg(struct spi_master *master, +static inline int spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) { struct spi_transfer *xfer; @@ -934,63 +933,63 @@ static inline int spi_unmap_msg(struct spi_master *master, * Restore the original value of tx_buf or rx_buf if they are * NULL. */ - if (xfer->tx_buf == master->dummy_tx) + if (xfer->tx_buf == ctlr->dummy_tx) xfer->tx_buf = NULL; - if (xfer->rx_buf == master->dummy_rx) + if (xfer->rx_buf == ctlr->dummy_rx) xfer->rx_buf = NULL; } - return __spi_unmap_msg(master, msg); + return __spi_unmap_msg(ctlr, msg); } -static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) { struct spi_transfer *xfer; void *tmp; unsigned int max_tx, max_rx; - if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { + if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) { max_tx = 0; max_rx = 0; list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if ((master->flags & SPI_MASTER_MUST_TX) && + if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && !xfer->tx_buf) max_tx = max(xfer->len, max_tx); - if ((master->flags & SPI_MASTER_MUST_RX) && + if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && !xfer->rx_buf) max_rx = max(xfer->len, max_rx); } if (max_tx) { - tmp = krealloc(master->dummy_tx, max_tx, + tmp = krealloc(ctlr->dummy_tx, max_tx, GFP_KERNEL | GFP_DMA); if (!tmp) return -ENOMEM; - master->dummy_tx = tmp; + ctlr->dummy_tx = tmp; memset(tmp, 0, max_tx); } if (max_rx) { - tmp = krealloc(master->dummy_rx, max_rx, + tmp = krealloc(ctlr->dummy_rx, max_rx, GFP_KERNEL | GFP_DMA); if (!tmp) return -ENOMEM; - master->dummy_rx = tmp; + ctlr->dummy_rx = tmp; } if (max_tx || max_rx) { list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (!xfer->tx_buf) - xfer->tx_buf = master->dummy_tx; + xfer->tx_buf = ctlr->dummy_tx; if (!xfer->rx_buf) - xfer->rx_buf = master->dummy_rx; + xfer->rx_buf = ctlr->dummy_rx; } } } - return __spi_map_msg(master, msg); + return __spi_map_msg(ctlr, msg); } /* @@ -1000,14 +999,14 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) * drivers which implement a transfer_one() operation. It provides * standard handling of delays and chip select management. 
*/ -static int spi_transfer_one_message(struct spi_master *master, +static int spi_transfer_one_message(struct spi_controller *ctlr, struct spi_message *msg) { struct spi_transfer *xfer; bool keep_cs = false; int ret = 0; unsigned long long ms = 1; - struct spi_statistics *statm = &master->statistics; + struct spi_statistics *statm = &ctlr->statistics; struct spi_statistics *stats = &msg->spi->statistics; spi_set_cs(msg->spi, true); @@ -1018,13 +1017,13 @@ static int spi_transfer_one_message(struct spi_master *master, list_for_each_entry(xfer, &msg->transfers, transfer_list) { trace_spi_transfer_start(msg, xfer); - spi_statistics_add_transfer_stats(statm, xfer, master); - spi_statistics_add_transfer_stats(stats, xfer, master); + spi_statistics_add_transfer_stats(statm, xfer, ctlr); + spi_statistics_add_transfer_stats(stats, xfer, ctlr); if (xfer->tx_buf || xfer->rx_buf) { - reinit_completion(&master->xfer_completion); + reinit_completion(&ctlr->xfer_completion); - ret = master->transfer_one(master, msg->spi, xfer); + ret = ctlr->transfer_one(ctlr, msg->spi, xfer); if (ret < 0) { SPI_STATISTICS_INCREMENT_FIELD(statm, errors); @@ -1044,7 +1043,7 @@ static int spi_transfer_one_message(struct spi_master *master, if (ms > UINT_MAX) ms = UINT_MAX; - ms = wait_for_completion_timeout(&master->xfer_completion, + ms = wait_for_completion_timeout(&ctlr->xfer_completion, msecs_to_jiffies(ms)); } @@ -1099,33 +1098,33 @@ out: if (msg->status == -EINPROGRESS) msg->status = ret; - if (msg->status && master->handle_err) - master->handle_err(master, msg); + if (msg->status && ctlr->handle_err) + ctlr->handle_err(ctlr, msg); - spi_res_release(master, msg); + spi_res_release(ctlr, msg); - spi_finalize_current_message(master); + spi_finalize_current_message(ctlr); return ret; } /** * spi_finalize_current_transfer - report completion of a transfer - * @master: the master reporting completion + * @ctlr: the controller reporting completion * * Called by SPI drivers using the core transfer_one_message() * implementation to notify it that the current interrupt driven * transfer has finished and the next one may be scheduled. */ -void spi_finalize_current_transfer(struct spi_master *master) +void spi_finalize_current_transfer(struct spi_controller *ctlr) { - complete(&master->xfer_completion); + complete(&ctlr->xfer_completion); } EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); /** * __spi_pump_messages - function which processes spi message queue - * @master: master to process queue for + * @ctlr: controller to process queue for * @in_kthread: true if we are in the context of the message pump thread * * This function checks if there is any spi message in the queue that @@ -1136,136 +1135,136 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); * inside spi_sync(); the queue extraction handling at the top of the * function should deal with this safely. 
*/ -static void __spi_pump_messages(struct spi_master *master, bool in_kthread) +static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) { unsigned long flags; bool was_busy = false; int ret; /* Lock queue */ - spin_lock_irqsave(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); /* Make sure we are not already running a message */ - if (master->cur_msg) { - spin_unlock_irqrestore(&master->queue_lock, flags); + if (ctlr->cur_msg) { + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; } /* If another context is idling the device then defer */ - if (master->idling) { - kthread_queue_work(&master->kworker, &master->pump_messages); - spin_unlock_irqrestore(&master->queue_lock, flags); + if (ctlr->idling) { + kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; } /* Check if the queue is idle */ - if (list_empty(&master->queue) || !master->running) { - if (!master->busy) { - spin_unlock_irqrestore(&master->queue_lock, flags); + if (list_empty(&ctlr->queue) || !ctlr->running) { + if (!ctlr->busy) { + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; } /* Only do teardown in the thread */ if (!in_kthread) { - kthread_queue_work(&master->kworker, - &master->pump_messages); - spin_unlock_irqrestore(&master->queue_lock, flags); + kthread_queue_work(&ctlr->kworker, + &ctlr->pump_messages); + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; } - master->busy = false; - master->idling = true; - spin_unlock_irqrestore(&master->queue_lock, flags); - - kfree(master->dummy_rx); - master->dummy_rx = NULL; - kfree(master->dummy_tx); - master->dummy_tx = NULL; - if (master->unprepare_transfer_hardware && - master->unprepare_transfer_hardware(master)) - dev_err(&master->dev, + ctlr->busy = false; + ctlr->idling = true; + spin_unlock_irqrestore(&ctlr->queue_lock, flags); + + kfree(ctlr->dummy_rx); + ctlr->dummy_rx = NULL; + kfree(ctlr->dummy_tx); + ctlr->dummy_tx = NULL; + if (ctlr->unprepare_transfer_hardware && + ctlr->unprepare_transfer_hardware(ctlr)) + dev_err(&ctlr->dev, "failed to unprepare transfer hardware\n"); - if (master->auto_runtime_pm) { - pm_runtime_mark_last_busy(master->dev.parent); - pm_runtime_put_autosuspend(master->dev.parent); + if (ctlr->auto_runtime_pm) { + pm_runtime_mark_last_busy(ctlr->dev.parent); + pm_runtime_put_autosuspend(ctlr->dev.parent); } - trace_spi_master_idle(master); + trace_spi_controller_idle(ctlr); - spin_lock_irqsave(&master->queue_lock, flags); - master->idling = false; - spin_unlock_irqrestore(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); + ctlr->idling = false; + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; } /* Extract head of queue */ - master->cur_msg = - list_first_entry(&master->queue, struct spi_message, queue); + ctlr->cur_msg = + list_first_entry(&ctlr->queue, struct spi_message, queue); - list_del_init(&master->cur_msg->queue); - if (master->busy) + list_del_init(&ctlr->cur_msg->queue); + if (ctlr->busy) was_busy = true; else - master->busy = true; - spin_unlock_irqrestore(&master->queue_lock, flags); + ctlr->busy = true; + spin_unlock_irqrestore(&ctlr->queue_lock, flags); - mutex_lock(&master->io_mutex); + mutex_lock(&ctlr->io_mutex); - if (!was_busy && master->auto_runtime_pm) { - ret = pm_runtime_get_sync(master->dev.parent); + if (!was_busy && ctlr->auto_runtime_pm) { + ret = pm_runtime_get_sync(ctlr->dev.parent); if (ret < 0) { - dev_err(&master->dev, "Failed to power device: 
%d\n", + dev_err(&ctlr->dev, "Failed to power device: %d\n", ret); - mutex_unlock(&master->io_mutex); + mutex_unlock(&ctlr->io_mutex); return; } } if (!was_busy) - trace_spi_master_busy(master); + trace_spi_controller_busy(ctlr); - if (!was_busy && master->prepare_transfer_hardware) { - ret = master->prepare_transfer_hardware(master); + if (!was_busy && ctlr->prepare_transfer_hardware) { + ret = ctlr->prepare_transfer_hardware(ctlr); if (ret) { - dev_err(&master->dev, + dev_err(&ctlr->dev, "failed to prepare transfer hardware\n"); - if (master->auto_runtime_pm) - pm_runtime_put(master->dev.parent); - mutex_unlock(&master->io_mutex); + if (ctlr->auto_runtime_pm) + pm_runtime_put(ctlr->dev.parent); + mutex_unlock(&ctlr->io_mutex); return; } } - trace_spi_message_start(master->cur_msg); + trace_spi_message_start(ctlr->cur_msg); - if (master->prepare_message) { - ret = master->prepare_message(master, master->cur_msg); + if (ctlr->prepare_message) { + ret = ctlr->prepare_message(ctlr, ctlr->cur_msg); if (ret) { - dev_err(&master->dev, - "failed to prepare message: %d\n", ret); - master->cur_msg->status = ret; - spi_finalize_current_message(master); + dev_err(&ctlr->dev, "failed to prepare message: %d\n", + ret); + ctlr->cur_msg->status = ret; + spi_finalize_current_message(ctlr); goto out; } - master->cur_msg_prepared = true; + ctlr->cur_msg_prepared = true; } - ret = spi_map_msg(master, master->cur_msg); + ret = spi_map_msg(ctlr, ctlr->cur_msg); if (ret) { - master->cur_msg->status = ret; - spi_finalize_current_message(master); + ctlr->cur_msg->status = ret; + spi_finalize_current_message(ctlr); goto out; } - ret = master->transfer_one_message(master, master->cur_msg); + ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg); if (ret) { - dev_err(&master->dev, + dev_err(&ctlr->dev, "failed to transfer one message from queue\n"); goto out; } out: - mutex_unlock(&master->io_mutex); + mutex_unlock(&ctlr->io_mutex); /* Prod the scheduler in case transfer_one() was busy waiting */ if (!ret) @@ -1274,44 +1273,43 @@ out: /** * spi_pump_messages - kthread work function which processes spi message queue - * @work: pointer to kthread work struct contained in the master struct + * @work: pointer to kthread work struct contained in the controller struct */ static void spi_pump_messages(struct kthread_work *work) { - struct spi_master *master = - container_of(work, struct spi_master, pump_messages); + struct spi_controller *ctlr = + container_of(work, struct spi_controller, pump_messages); - __spi_pump_messages(master, true); + __spi_pump_messages(ctlr, true); } -static int spi_init_queue(struct spi_master *master) +static int spi_init_queue(struct spi_controller *ctlr) { struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; - master->running = false; - master->busy = false; + ctlr->running = false; + ctlr->busy = false; - kthread_init_worker(&master->kworker); - master->kworker_task = kthread_run(kthread_worker_fn, - &master->kworker, "%s", - dev_name(&master->dev)); - if (IS_ERR(master->kworker_task)) { - dev_err(&master->dev, "failed to create message pump task\n"); - return PTR_ERR(master->kworker_task); + kthread_init_worker(&ctlr->kworker); + ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, + "%s", dev_name(&ctlr->dev)); + if (IS_ERR(ctlr->kworker_task)) { + dev_err(&ctlr->dev, "failed to create message pump task\n"); + return PTR_ERR(ctlr->kworker_task); } - kthread_init_work(&master->pump_messages, spi_pump_messages); + kthread_init_work(&ctlr->pump_messages, 
spi_pump_messages); /* - * Master config will indicate if this controller should run the + * Controller config will indicate if this controller should run the * message pump with high (realtime) priority to reduce the transfer * latency on the bus by minimising the delay between a transfer * request and the scheduling of the message pump thread. Without this * setting the message pump thread will remain at default priority. */ - if (master->rt) { - dev_info(&master->dev, + if (ctlr->rt) { + dev_info(&ctlr->dev, "will run message pump with realtime priority\n"); - sched_setscheduler(master->kworker_task, SCHED_FIFO, &param); + sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param); } return 0; @@ -1320,23 +1318,23 @@ static int spi_init_queue(struct spi_master *master) /** * spi_get_next_queued_message() - called by driver to check for queued * messages - * @master: the master to check for queued messages + * @ctlr: the controller to check for queued messages * * If there are more messages in the queue, the next message is returned from * this call. * * Return: the next message in the queue, else NULL if the queue is empty. */ -struct spi_message *spi_get_next_queued_message(struct spi_master *master) +struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) { struct spi_message *next; unsigned long flags; /* get a pointer to the next message, if any */ - spin_lock_irqsave(&master->queue_lock, flags); - next = list_first_entry_or_null(&master->queue, struct spi_message, + spin_lock_irqsave(&ctlr->queue_lock, flags); + next = list_first_entry_or_null(&ctlr->queue, struct spi_message, queue); - spin_unlock_irqrestore(&master->queue_lock, flags); + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return next; } @@ -1344,36 +1342,36 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message); /** * spi_finalize_current_message() - the current message is complete - * @master: the master to return the message to + * @ctlr: the controller to return the message to * * Called by the driver to notify the core that the message in the front of the * queue is complete and can be removed from the queue.
*/ -void spi_finalize_current_message(struct spi_master *master) +void spi_finalize_current_message(struct spi_controller *ctlr) { struct spi_message *mesg; unsigned long flags; int ret; - spin_lock_irqsave(&master->queue_lock, flags); - mesg = master->cur_msg; - spin_unlock_irqrestore(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); + mesg = ctlr->cur_msg; + spin_unlock_irqrestore(&ctlr->queue_lock, flags); - spi_unmap_msg(master, mesg); + spi_unmap_msg(ctlr, mesg); - if (master->cur_msg_prepared && master->unprepare_message) { - ret = master->unprepare_message(master, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { + ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { - dev_err(&master->dev, - "failed to unprepare message: %d\n", ret); + dev_err(&ctlr->dev, "failed to unprepare message: %d\n", + ret); } } - spin_lock_irqsave(&master->queue_lock, flags); - master->cur_msg = NULL; - master->cur_msg_prepared = false; - kthread_queue_work(&master->kworker, &master->pump_messages); - spin_unlock_irqrestore(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); + ctlr->cur_msg = NULL; + ctlr->cur_msg_prepared = false; + kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); + spin_unlock_irqrestore(&ctlr->queue_lock, flags); trace_spi_message_done(mesg); @@ -1383,66 +1381,65 @@ void spi_finalize_current_message(struct spi_master *master) } EXPORT_SYMBOL_GPL(spi_finalize_current_message); -static int spi_start_queue(struct spi_master *master) +static int spi_start_queue(struct spi_controller *ctlr) { unsigned long flags; - spin_lock_irqsave(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); - if (master->running || master->busy) { - spin_unlock_irqrestore(&master->queue_lock, flags); + if (ctlr->running || ctlr->busy) { + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return -EBUSY; } - master->running = true; - master->cur_msg = NULL; - spin_unlock_irqrestore(&master->queue_lock, flags); + ctlr->running = true; + ctlr->cur_msg = NULL; + spin_unlock_irqrestore(&ctlr->queue_lock, flags); - kthread_queue_work(&master->kworker, &master->pump_messages); + kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); return 0; } -static int spi_stop_queue(struct spi_master *master) +static int spi_stop_queue(struct spi_controller *ctlr) { unsigned long flags; unsigned limit = 500; int ret = 0; - spin_lock_irqsave(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); /* * This is a bit lame, but is optimized for the common execution path. - * A wait_queue on the master->busy could be used, but then the common + * A wait_queue on the ctlr->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead. 
*/ - while ((!list_empty(&master->queue) || master->busy) && limit--) { - spin_unlock_irqrestore(&master->queue_lock, flags); + while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { + spin_unlock_irqrestore(&ctlr->queue_lock, flags); usleep_range(10000, 11000); - spin_lock_irqsave(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); } - if (!list_empty(&master->queue) || master->busy) + if (!list_empty(&ctlr->queue) || ctlr->busy) ret = -EBUSY; else - master->running = false; + ctlr->running = false; - spin_unlock_irqrestore(&master->queue_lock, flags); + spin_unlock_irqrestore(&ctlr->queue_lock, flags); if (ret) { - dev_warn(&master->dev, - "could not stop message queue\n"); + dev_warn(&ctlr->dev, "could not stop message queue\n"); return ret; } return ret; } -static int spi_destroy_queue(struct spi_master *master) +static int spi_destroy_queue(struct spi_controller *ctlr) { int ret; - ret = spi_stop_queue(master); + ret = spi_stop_queue(ctlr); /* * kthread_flush_worker will block until all work is done. @@ -1451,12 +1448,12 @@ static int spi_destroy_queue(struct spi_master *master) * return anyway. */ if (ret) { - dev_err(&master->dev, "problem destroying queue\n"); + dev_err(&ctlr->dev, "problem destroying queue\n"); return ret; } - kthread_flush_worker(&master->kworker); - kthread_stop(master->kworker_task); + kthread_flush_worker(&ctlr->kworker); + kthread_stop(ctlr->kworker_task); return 0; } @@ -1465,23 +1462,23 @@ static int __spi_queued_transfer(struct spi_device *spi, struct spi_message *msg, bool need_pump) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; unsigned long flags; - spin_lock_irqsave(&master->queue_lock, flags); + spin_lock_irqsave(&ctlr->queue_lock, flags); - if (!master->running) { - spin_unlock_irqrestore(&master->queue_lock, flags); + if (!ctlr->running) { + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; - list_add_tail(&msg->queue, &master->queue); - if (!master->busy && need_pump) - kthread_queue_work(&master->kworker, &master->pump_messages); + list_add_tail(&msg->queue, &ctlr->queue); + if (!ctlr->busy && need_pump) + kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); - spin_unlock_irqrestore(&master->queue_lock, flags); + spin_unlock_irqrestore(&ctlr->queue_lock, flags); return 0; } @@ -1497,31 +1494,31 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) return __spi_queued_transfer(spi, msg, true); } -static int spi_master_initialize_queue(struct spi_master *master) +static int spi_controller_initialize_queue(struct spi_controller *ctlr) { int ret; - master->transfer = spi_queued_transfer; - if (!master->transfer_one_message) - master->transfer_one_message = spi_transfer_one_message; + ctlr->transfer = spi_queued_transfer; + if (!ctlr->transfer_one_message) + ctlr->transfer_one_message = spi_transfer_one_message; /* Initialize and start queue */ - ret = spi_init_queue(master); + ret = spi_init_queue(ctlr); if (ret) { - dev_err(&master->dev, "problem initializing queue\n"); + dev_err(&ctlr->dev, "problem initializing queue\n"); goto err_init_queue; } - master->queued = true; - ret = spi_start_queue(master); + ctlr->queued = true; + ret = spi_start_queue(ctlr); if (ret) { - dev_err(&master->dev, "problem starting queue\n"); + dev_err(&ctlr->dev, "problem starting queue\n"); goto err_start_queue; } return 0; err_start_queue: - spi_destroy_queue(master); + 
spi_destroy_queue(ctlr); err_init_queue: return ret; } @@ -1529,7 +1526,7 @@ err_init_queue: /*-------------------------------------------------------------------------*/ #if defined(CONFIG_OF) -static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, +static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, struct device_node *nc) { u32 value; @@ -1559,7 +1556,7 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, spi->mode |= SPI_TX_QUAD; break; default: - dev_warn(&master->dev, + dev_warn(&ctlr->dev, "spi-tx-bus-width %d not supported\n", value); break; @@ -1577,16 +1574,16 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, spi->mode |= SPI_RX_QUAD; break; default: - dev_warn(&master->dev, + dev_warn(&ctlr->dev, "spi-rx-bus-width %d not supported\n", value); break; } } - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(ctlr)) { if (strcmp(nc->name, "slave")) { - dev_err(&master->dev, "%s is not called 'slave'\n", + dev_err(&ctlr->dev, "%s is not called 'slave'\n", nc->full_name); return -EINVAL; } @@ -1596,7 +1593,7 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, /* Device address */ rc = of_property_read_u32(nc, "reg", &value); if (rc) { - dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", + dev_err(&ctlr->dev, "%s has no valid 'reg' property (%d)\n", nc->full_name, rc); return rc; } @@ -1605,7 +1602,8 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, /* Device speed */ rc = of_property_read_u32(nc, "spi-max-frequency", &value); if (rc) { - dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", + dev_err(&ctlr->dev, + "%s has no valid 'spi-max-frequency' property (%d)\n", nc->full_name, rc); return rc; } @@ -1615,15 +1613,15 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, } static struct spi_device * -of_register_spi_device(struct spi_master *master, struct device_node *nc) +of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) { struct spi_device *spi; int rc; /* Alloc an spi_device */ - spi = spi_alloc_device(master); + spi = spi_alloc_device(ctlr); if (!spi) { - dev_err(&master->dev, "spi_device alloc error for %s\n", + dev_err(&ctlr->dev, "spi_device alloc error for %s\n", nc->full_name); rc = -ENOMEM; goto err_out; @@ -1633,12 +1631,12 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc) rc = of_modalias_node(nc, spi->modalias, sizeof(spi->modalias)); if (rc < 0) { - dev_err(&master->dev, "cannot find modalias for %s\n", + dev_err(&ctlr->dev, "cannot find modalias for %s\n", nc->full_name); goto err_out; } - rc = of_spi_parse_dt(master, spi, nc); + rc = of_spi_parse_dt(ctlr, spi, nc); if (rc) goto err_out; @@ -1649,7 +1647,7 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc) /* Register the new device */ rc = spi_add_device(spi); if (rc) { - dev_err(&master->dev, "spi_device register error %s\n", + dev_err(&ctlr->dev, "spi_device register error %s\n", nc->full_name); goto err_of_node_put; } @@ -1665,39 +1663,40 @@ err_out: /** * of_register_spi_devices() - Register child devices onto the SPI bus - * @master: Pointer to spi_master device + * @ctlr: Pointer to spi_controller device * * Registers an spi_device for each child node of controller node which * represents a valid SPI slave. 
*/ -static void of_register_spi_devices(struct spi_master *master) +static void of_register_spi_devices(struct spi_controller *ctlr) { struct spi_device *spi; struct device_node *nc; - if (!master->dev.of_node) + if (!ctlr->dev.of_node) return; - for_each_available_child_of_node(master->dev.of_node, nc) { + for_each_available_child_of_node(ctlr->dev.of_node, nc) { if (of_node_test_and_set_flag(nc, OF_POPULATED)) continue; - spi = of_register_spi_device(master, nc); + spi = of_register_spi_device(ctlr, nc); if (IS_ERR(spi)) { - dev_warn(&master->dev, "Failed to create SPI device for %s\n", - nc->full_name); + dev_warn(&ctlr->dev, + "Failed to create SPI device for %s\n", + nc->full_name); of_node_clear_flag(nc, OF_POPULATED); } } } #else -static void of_register_spi_devices(struct spi_master *master) { } +static void of_register_spi_devices(struct spi_controller *ctlr) { } #endif #ifdef CONFIG_ACPI static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) { struct spi_device *spi = data; - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { struct acpi_resource_spi_serialbus *sb; @@ -1711,8 +1710,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) * 0 .. max - 1 so we need to ask the driver to * translate between the two schemes. */ - if (master->fw_translate_cs) { - int cs = master->fw_translate_cs(master, + if (ctlr->fw_translate_cs) { + int cs = ctlr->fw_translate_cs(ctlr, sb->device_selection); if (cs < 0) return cs; @@ -1741,7 +1740,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) return 1; } -static acpi_status acpi_register_spi_device(struct spi_master *master, +static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, struct acpi_device *adev) { struct list_head resource_list; @@ -1752,9 +1751,9 @@ static acpi_status acpi_register_spi_device(struct spi_master *master, acpi_device_enumerated(adev)) return AE_OK; - spi = spi_alloc_device(master); + spi = spi_alloc_device(ctlr); if (!spi) { - dev_err(&master->dev, "failed to allocate SPI device for %s\n", + dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", dev_name(&adev->dev)); return AE_NO_MEMORY; } @@ -1783,7 +1782,7 @@ static acpi_status acpi_register_spi_device(struct spi_master *master, adev->power.flags.ignore_parent = true; if (spi_add_device(spi)) { adev->power.flags.ignore_parent = false; - dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", + dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", dev_name(&adev->dev)); spi_dev_put(spi); } @@ -1794,46 +1793,45 @@ static acpi_status acpi_register_spi_device(struct spi_master *master, static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { - struct spi_master *master = data; + struct spi_controller *ctlr = data; struct acpi_device *adev; if (acpi_bus_get_device(handle, &adev)) return AE_OK; - return acpi_register_spi_device(master, adev); + return acpi_register_spi_device(ctlr, adev); } -static void acpi_register_spi_devices(struct spi_master *master) +static void acpi_register_spi_devices(struct spi_controller *ctlr) { acpi_status status; acpi_handle handle; - handle = ACPI_HANDLE(master->dev.parent); + handle = ACPI_HANDLE(ctlr->dev.parent); if (!handle) return; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, - acpi_spi_add_device, NULL, - master, NULL); + acpi_spi_add_device, NULL, ctlr, NULL); if 
(ACPI_FAILURE(status)) - dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); + dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); } #else -static inline void acpi_register_spi_devices(struct spi_master *master) {} +static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} #endif /* CONFIG_ACPI */ -static void spi_master_release(struct device *dev) +static void spi_controller_release(struct device *dev) { - struct spi_master *master; + struct spi_controller *ctlr; - master = container_of(dev, struct spi_master, dev); - kfree(master); + ctlr = container_of(dev, struct spi_controller, dev); + kfree(ctlr); } static struct class spi_master_class = { .name = "spi_master", .owner = THIS_MODULE, - .dev_release = spi_master_release, + .dev_release = spi_controller_release, .dev_groups = spi_master_groups, }; @@ -1845,10 +1843,10 @@ static struct class spi_master_class = { */ int spi_slave_abort(struct spi_device *spi) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; - if (spi_controller_is_slave(master) && master->slave_abort) - return master->slave_abort(master); + if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) + return ctlr->slave_abort(ctlr); return -ENOTSUPP; } @@ -1862,7 +1860,8 @@ static int match_true(struct device *dev, void *data) static ssize_t spi_slave_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct spi_master *ctlr = container_of(dev, struct spi_master, dev); + struct spi_controller *ctlr = container_of(dev, struct spi_controller, + dev); struct device *child; child = device_find_child(&ctlr->dev, NULL, match_true); @@ -1874,7 +1873,8 @@ static ssize_t spi_slave_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct spi_master *ctlr = container_of(dev, struct spi_master, dev); + struct spi_controller *ctlr = container_of(dev, struct spi_controller, + dev); struct spi_device *spi; struct device *child; char name[32]; @@ -1921,7 +1921,7 @@ static const struct attribute_group spi_slave_group = { }; static const struct attribute_group *spi_slave_groups[] = { - &spi_master_statistics_group, + &spi_controller_statistics_group, &spi_slave_group, NULL, }; @@ -1929,7 +1929,7 @@ static const struct attribute_group *spi_slave_groups[] = { static struct class spi_slave_class = { .name = "spi_slave", .owner = THIS_MODULE, - .dev_release = spi_master_release, + .dev_release = spi_controller_release, .dev_groups = spi_slave_groups, }; #else @@ -1941,62 +1941,63 @@ extern struct class spi_slave_class; /* dummy */ * @dev: the controller, possibly using the platform_bus * @size: how much zeroed driver-private data to allocate; the pointer to this * memory is in the driver_data field of the returned device, - * accessible with spi_master_get_devdata(). + * accessible with spi_controller_get_devdata(). * @slave: flag indicating whether to allocate an SPI master (false) or SPI * slave (true) controller * Context: can sleep * * This call is used only by SPI controller drivers, which are the * only ones directly touching chip registers. It's how they allocate - * an spi_master structure, prior to calling spi_register_master(). + * an spi_controller structure, prior to calling spi_register_controller(). * * This must be called from context that can sleep. 
* * The caller is responsible for assigning the bus number and initializing the - * controller's methods before calling spi_register_master(); and (after errors - * adding the device) calling spi_master_put() to prevent a memory leak. + * controller's methods before calling spi_register_controller(); and (after + * errors adding the device) calling spi_controller_put() to prevent a memory + * leak. * * Return: the SPI controller structure on success, else NULL. */ -struct spi_master *__spi_alloc_controller(struct device *dev, - unsigned int size, bool slave) +struct spi_controller *__spi_alloc_controller(struct device *dev, + unsigned int size, bool slave) { - struct spi_master *master; + struct spi_controller *ctlr; if (!dev) return NULL; - master = kzalloc(size + sizeof(*master), GFP_KERNEL); - if (!master) + ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL); + if (!ctlr) return NULL; - device_initialize(&master->dev); - master->bus_num = -1; - master->num_chipselect = 1; - master->slave = slave; + device_initialize(&ctlr->dev); + ctlr->bus_num = -1; + ctlr->num_chipselect = 1; + ctlr->slave = slave; if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) - master->dev.class = &spi_slave_class; + ctlr->dev.class = &spi_slave_class; else - master->dev.class = &spi_master_class; - master->dev.parent = dev; - pm_suspend_ignore_children(&master->dev, true); - spi_master_set_devdata(master, &master[1]); + ctlr->dev.class = &spi_master_class; + ctlr->dev.parent = dev; + pm_suspend_ignore_children(&ctlr->dev, true); + spi_controller_set_devdata(ctlr, &ctlr[1]); - return master; + return ctlr; } EXPORT_SYMBOL_GPL(__spi_alloc_controller); #ifdef CONFIG_OF -static int of_spi_register_master(struct spi_master *master) +static int of_spi_register_master(struct spi_controller *ctlr) { int nb, i, *cs; - struct device_node *np = master->dev.of_node; + struct device_node *np = ctlr->dev.of_node; if (!np) return 0; nb = of_gpio_named_count(np, "cs-gpios"); - master->num_chipselect = max_t(int, nb, master->num_chipselect); + ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); /* Return error only for an incorrectly formed cs-gpios property */ if (nb == 0 || nb == -ENOENT) @@ -2004,15 +2005,14 @@ static int of_spi_register_master(struct spi_master *master) else if (nb < 0) return nb; - cs = devm_kzalloc(&master->dev, - sizeof(int) * master->num_chipselect, + cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect, GFP_KERNEL); - master->cs_gpios = cs; + ctlr->cs_gpios = cs; - if (!master->cs_gpios) + if (!ctlr->cs_gpios) return -ENOMEM; - for (i = 0; i < master->num_chipselect; i++) + for (i = 0; i < ctlr->num_chipselect; i++) cs[i] = -ENOENT; for (i = 0; i < nb; i++) @@ -2021,20 +2021,21 @@ static int of_spi_register_master(struct spi_master *master) return 0; } #else -static int of_spi_register_master(struct spi_master *master) +static int of_spi_register_master(struct spi_controller *ctlr) { return 0; } #endif /** - * spi_register_master - register SPI master controller - * @master: initialized master, originally from spi_alloc_master() + * spi_register_controller - register SPI master or slave controller + * @ctlr: initialized master, originally from spi_alloc_master() or + * spi_alloc_slave() * Context: can sleep * - * SPI master controllers connect to their drivers using some non-SPI bus, + * SPI controllers connect to their drivers using some non-SPI bus, * such as the platform bus. 
The final stage of probe() in that code - * includes calling spi_register_master() to hook up to this SPI bus glue. + * includes calling spi_register_controller() to hook up to this SPI bus glue. * * SPI controllers use board specific (often SOC specific) bus numbers, * and board-specific addressing for SPI devices combines those numbers @@ -2043,16 +2044,16 @@ static int of_spi_register_master(struct spi_master *master) * chip is at which address. * * This must be called from context that can sleep. It returns zero on - * success, else a negative error code (dropping the master's refcount). + * success, else a negative error code (dropping the controller's refcount). * After a successful return, the caller is responsible for calling - * spi_unregister_master(). + * spi_unregister_controller(). * * Return: zero on success, else a negative error code. */ -int spi_register_master(struct spi_master *master) +int spi_register_controller(struct spi_controller *ctlr) { static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); - struct device *dev = master->dev.parent; + struct device *dev = ctlr->dev.parent; struct boardinfo *bi; int status = -ENODEV; int dynamic = 0; @@ -2060,8 +2061,8 @@ int spi_register_master(struct spi_master *master) if (!dev) return -ENODEV; - if (!spi_controller_is_slave(master)) { - status = of_spi_register_master(master); + if (!spi_controller_is_slave(ctlr)) { + status = of_spi_register_master(ctlr); if (status) return status; } @@ -2069,97 +2070,100 @@ int spi_register_master(struct spi_master *master) /* even if it's just one always-selected device, there must * be at least one chipselect */ - if (master->num_chipselect == 0) + if (ctlr->num_chipselect == 0) return -EINVAL; - if ((master->bus_num < 0) && master->dev.of_node) - master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); + if ((ctlr->bus_num < 0) && ctlr->dev.of_node) + ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi"); /* convention: dynamically assigned bus IDs count down from the max */ - if (master->bus_num < 0) { + if (ctlr->bus_num < 0) { /* FIXME switch to an IDR based scheme, something like * I2C now uses, so we can't run out of "dynamic" IDs */ - master->bus_num = atomic_dec_return(&dyn_bus_id); + ctlr->bus_num = atomic_dec_return(&dyn_bus_id); dynamic = 1; } - INIT_LIST_HEAD(&master->queue); - spin_lock_init(&master->queue_lock); - spin_lock_init(&master->bus_lock_spinlock); - mutex_init(&master->bus_lock_mutex); - mutex_init(&master->io_mutex); - master->bus_lock_flag = 0; - init_completion(&master->xfer_completion); - if (!master->max_dma_len) - master->max_dma_len = INT_MAX; + INIT_LIST_HEAD(&ctlr->queue); + spin_lock_init(&ctlr->queue_lock); + spin_lock_init(&ctlr->bus_lock_spinlock); + mutex_init(&ctlr->bus_lock_mutex); + mutex_init(&ctlr->io_mutex); + ctlr->bus_lock_flag = 0; + init_completion(&ctlr->xfer_completion); + if (!ctlr->max_dma_len) + ctlr->max_dma_len = INT_MAX; /* register the device, then userspace will see it. * registration fails if the bus ID is in use. */ - dev_set_name(&master->dev, "spi%u", master->bus_num); - status = device_add(&master->dev); + dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); + status = device_add(&ctlr->dev); if (status < 0) goto done; dev_dbg(dev, "registered %s %s%s\n", - spi_controller_is_slave(master) ? "slave" : "master", - dev_name(&master->dev), dynamic ? " (dynamic)" : ""); + spi_controller_is_slave(ctlr) ? "slave" : "master", + dev_name(&ctlr->dev), dynamic ? 
" (dynamic)" : ""); /* If we're using a queued driver, start the queue */ - if (master->transfer) - dev_info(dev, "master is unqueued, this is deprecated\n"); + if (ctlr->transfer) + dev_info(dev, "controller is unqueued, this is deprecated\n"); else { - status = spi_master_initialize_queue(master); + status = spi_controller_initialize_queue(ctlr); if (status) { - device_del(&master->dev); + device_del(&ctlr->dev); goto done; } } /* add statistics */ - spin_lock_init(&master->statistics.lock); + spin_lock_init(&ctlr->statistics.lock); mutex_lock(&board_lock); - list_add_tail(&master->list, &spi_master_list); + list_add_tail(&ctlr->list, &spi_controller_list); list_for_each_entry(bi, &board_list, list) - spi_match_master_to_boardinfo(master, &bi->board_info); + spi_match_controller_to_boardinfo(ctlr, &bi->board_info); mutex_unlock(&board_lock); /* Register devices from the device tree and ACPI */ - of_register_spi_devices(master); - acpi_register_spi_devices(master); + of_register_spi_devices(ctlr); + acpi_register_spi_devices(ctlr); done: return status; } -EXPORT_SYMBOL_GPL(spi_register_master); +EXPORT_SYMBOL_GPL(spi_register_controller); static void devm_spi_unregister(struct device *dev, void *res) { - spi_unregister_master(*(struct spi_master **)res); + spi_unregister_controller(*(struct spi_controller **)res); } /** - * devm_spi_register_master - register managed SPI master controller - * @dev: device managing SPI master - * @master: initialized master, originally from spi_alloc_master() + * devm_spi_register_controller - register managed SPI master or slave + * controller + * @dev: device managing SPI controller + * @ctlr: initialized controller, originally from spi_alloc_master() or + * spi_alloc_slave() * Context: can sleep * - * Register a SPI device as with spi_register_master() which will + * Register a SPI device as with spi_register_controller() which will * automatically be unregister * * Return: zero on success, else a negative error code. */ -int devm_spi_register_master(struct device *dev, struct spi_master *master) +int devm_spi_register_controller(struct device *dev, + struct spi_controller *ctlr) { - struct spi_master **ptr; + struct spi_controller **ptr; int ret; ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; - ret = spi_register_master(master); + ret = spi_register_controller(ctlr); if (!ret) { - *ptr = master; + *ptr = ctlr; devres_add(dev, ptr); } else { devres_free(ptr); @@ -2167,7 +2171,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master) return ret; } -EXPORT_SYMBOL_GPL(devm_spi_register_master); +EXPORT_SYMBOL_GPL(devm_spi_register_controller); static int __unregister(struct device *dev, void *null) { @@ -2176,71 +2180,71 @@ static int __unregister(struct device *dev, void *null) } /** - * spi_unregister_master - unregister SPI master controller - * @master: the master being unregistered + * spi_unregister_controller - unregister SPI master or slave controller + * @ctlr: the controller being unregistered * Context: can sleep * - * This call is used only by SPI master controller drivers, which are the + * This call is used only by SPI controller drivers, which are the * only ones directly touching chip registers. * * This must be called from context that can sleep. 
*/ -void spi_unregister_master(struct spi_master *master) +void spi_unregister_controller(struct spi_controller *ctlr) { int dummy; - if (master->queued) { - if (spi_destroy_queue(master)) - dev_err(&master->dev, "queue remove failed\n"); + if (ctlr->queued) { + if (spi_destroy_queue(ctlr)) + dev_err(&ctlr->dev, "queue remove failed\n"); } mutex_lock(&board_lock); - list_del(&master->list); + list_del(&ctlr->list); mutex_unlock(&board_lock); - dummy = device_for_each_child(&master->dev, NULL, __unregister); - device_unregister(&master->dev); + dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); + device_unregister(&ctlr->dev); } -EXPORT_SYMBOL_GPL(spi_unregister_master); +EXPORT_SYMBOL_GPL(spi_unregister_controller); -int spi_master_suspend(struct spi_master *master) +int spi_controller_suspend(struct spi_controller *ctlr) { int ret; - /* Basically no-ops for non-queued masters */ - if (!master->queued) + /* Basically no-ops for non-queued controllers */ + if (!ctlr->queued) return 0; - ret = spi_stop_queue(master); + ret = spi_stop_queue(ctlr); if (ret) - dev_err(&master->dev, "queue stop failed\n"); + dev_err(&ctlr->dev, "queue stop failed\n"); return ret; } -EXPORT_SYMBOL_GPL(spi_master_suspend); +EXPORT_SYMBOL_GPL(spi_controller_suspend); -int spi_master_resume(struct spi_master *master) +int spi_controller_resume(struct spi_controller *ctlr) { int ret; - if (!master->queued) + if (!ctlr->queued) return 0; - ret = spi_start_queue(master); + ret = spi_start_queue(ctlr); if (ret) - dev_err(&master->dev, "queue restart failed\n"); + dev_err(&ctlr->dev, "queue restart failed\n"); return ret; } -EXPORT_SYMBOL_GPL(spi_master_resume); +EXPORT_SYMBOL_GPL(spi_controller_resume); -static int __spi_master_match(struct device *dev, const void *data) +static int __spi_controller_match(struct device *dev, const void *data) { - struct spi_master *m; + struct spi_controller *ctlr; const u16 *bus_num = data; - m = container_of(dev, struct spi_master, dev); - return m->bus_num == *bus_num; + ctlr = container_of(dev, struct spi_controller, dev); + return ctlr->bus_num == *bus_num; } /** @@ -2250,22 +2254,22 @@ static int __spi_master_match(struct device *dev, const void *data) * * This call may be used with devices that are registered after * arch init time. It returns a refcounted pointer to the relevant - * spi_master (which the caller must release), or NULL if there is + * spi_controller (which the caller must release), or NULL if there is * no such master registered. * * Return: the SPI master structure on success, else NULL. */ -struct spi_master *spi_busnum_to_master(u16 bus_num) +struct spi_controller *spi_busnum_to_master(u16 bus_num) { struct device *dev; - struct spi_master *master = NULL; + struct spi_controller *ctlr = NULL; dev = class_find_device(&spi_master_class, NULL, &bus_num, - __spi_master_match); + __spi_controller_match); if (dev) - master = container_of(dev, struct spi_master, dev); + ctlr = container_of(dev, struct spi_controller, dev); /* reference got in class_find_device */ - return master; + return ctlr; } EXPORT_SYMBOL_GPL(spi_busnum_to_master); @@ -2285,7 +2289,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master); * Return: the pointer to the allocated data * * This may get enhanced in the future to allocate from a memory pool - * of the @spi_device or @spi_master to avoid repeated allocations. + * of the @spi_device or @spi_controller to avoid repeated allocations. 
*/ void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release, @@ -2337,11 +2341,10 @@ EXPORT_SYMBOL_GPL(spi_res_add); /** * spi_res_release - release all spi resources for this message - * @master: the @spi_master + * @ctlr: the @spi_controller * @message: the @spi_message */ -void spi_res_release(struct spi_master *master, - struct spi_message *message) +void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) { struct spi_res *res; @@ -2350,7 +2353,7 @@ void spi_res_release(struct spi_master *master, struct spi_res, entry); if (res->release) - res->release(master, message, res->data); + res->release(ctlr, message, res->data); list_del(&res->entry); @@ -2363,7 +2366,7 @@ EXPORT_SYMBOL_GPL(spi_res_release); /* Core methods for spi_message alterations */ -static void __spi_replace_transfers_release(struct spi_master *master, +static void __spi_replace_transfers_release(struct spi_controller *ctlr, struct spi_message *msg, void *res) { @@ -2372,7 +2375,7 @@ static void __spi_replace_transfers_release(struct spi_master *master, /* call extra callback if requested */ if (rxfer->release) - rxfer->release(master, msg, res); + rxfer->release(ctlr, msg, res); /* insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); @@ -2492,7 +2495,7 @@ struct spi_replaced_transfers *spi_replace_transfers( } EXPORT_SYMBOL_GPL(spi_replace_transfers); -static int __spi_split_transfer_maxsize(struct spi_master *master, +static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, struct spi_message *msg, struct spi_transfer **xferp, size_t maxsize, @@ -2554,7 +2557,7 @@ static int __spi_split_transfer_maxsize(struct spi_master *master, *xferp = &xfers[count - 1]; /* increment statistics counters */ - SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, + SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, transfers_split_maxsize); SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, transfers_split_maxsize); @@ -2566,14 +2569,14 @@ static int __spi_split_transfer_maxsize(struct spi_master *master, * spi_split_tranfers_maxsize - split spi transfers into multiple transfers * when an individual transfer exceeds a * certain size - * @master: the @spi_master for this transfer + * @ctlr: the @spi_controller for this transfer * @msg: the @spi_message to transform * @maxsize: the maximum when to apply this * @gfp: GFP allocation flags * * Return: status of transformation */ -int spi_split_transfers_maxsize(struct spi_master *master, +int spi_split_transfers_maxsize(struct spi_controller *ctlr, struct spi_message *msg, size_t maxsize, gfp_t gfp) @@ -2589,8 +2592,8 @@ int spi_split_transfers_maxsize(struct spi_master *master, */ list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (xfer->len > maxsize) { - ret = __spi_split_transfer_maxsize( - master, msg, &xfer, maxsize, gfp); + ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, + maxsize, gfp); if (ret) return ret; } @@ -2602,18 +2605,18 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); /*-------------------------------------------------------------------------*/ -/* Core methods for SPI master protocol drivers. Some of the +/* Core methods for SPI controller protocol drivers. Some of the * other core methods are currently defined as inline functions. 
*/ -static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) +static int __spi_validate_bits_per_word(struct spi_controller *ctlr, + u8 bits_per_word) { - if (master->bits_per_word_mask) { + if (ctlr->bits_per_word_mask) { /* Only 32 bits fit in the mask */ if (bits_per_word > 32) return -EINVAL; - if (!(master->bits_per_word_mask & - SPI_BPW_MASK(bits_per_word))) + if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) return -EINVAL; } @@ -2659,9 +2662,9 @@ int spi_setup(struct spi_device *spi) (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) return -EINVAL; /* help drivers fail *cleanly* when they need options - * that aren't supported with their current master + * that aren't supported with their current controller */ - bad_bits = spi->mode & ~spi->master->mode_bits; + bad_bits = spi->mode & ~spi->controller->mode_bits; ugly_bits = bad_bits & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); if (ugly_bits) { @@ -2680,15 +2683,16 @@ int spi_setup(struct spi_device *spi) if (!spi->bits_per_word) spi->bits_per_word = 8; - status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); + status = __spi_validate_bits_per_word(spi->controller, + spi->bits_per_word); if (status) return status; if (!spi->max_speed_hz) - spi->max_speed_hz = spi->master->max_speed_hz; + spi->max_speed_hz = spi->controller->max_speed_hz; - if (spi->master->setup) - status = spi->master->setup(spi); + if (spi->controller->setup) + status = spi->controller->setup(spi); spi_set_cs(spi, false); @@ -2707,7 +2711,7 @@ EXPORT_SYMBOL_GPL(spi_setup); static int __spi_validate(struct spi_device *spi, struct spi_message *message) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; struct spi_transfer *xfer; int w_size; @@ -2719,16 +2723,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * either MOSI or MISO is missing. They can also be caused by * software limitations. 
*/ - if ((master->flags & SPI_MASTER_HALF_DUPLEX) - || (spi->mode & SPI_3WIRE)) { - unsigned flags = master->flags; + if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || + (spi->mode & SPI_3WIRE)) { + unsigned flags = ctlr->flags; list_for_each_entry(xfer, &message->transfers, transfer_list) { if (xfer->rx_buf && xfer->tx_buf) return -EINVAL; - if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) + if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) return -EINVAL; - if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) + if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) return -EINVAL; } } @@ -2748,13 +2752,12 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) if (!xfer->speed_hz) xfer->speed_hz = spi->max_speed_hz; if (!xfer->speed_hz) - xfer->speed_hz = master->max_speed_hz; + xfer->speed_hz = ctlr->max_speed_hz; - if (master->max_speed_hz && - xfer->speed_hz > master->max_speed_hz) - xfer->speed_hz = master->max_speed_hz; + if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) + xfer->speed_hz = ctlr->max_speed_hz; - if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) + if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) return -EINVAL; /* @@ -2772,8 +2775,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) if (xfer->len % w_size) return -EINVAL; - if (xfer->speed_hz && master->min_speed_hz && - xfer->speed_hz < master->min_speed_hz) + if (xfer->speed_hz && ctlr->min_speed_hz && + xfer->speed_hz < ctlr->min_speed_hz) return -EINVAL; if (xfer->tx_buf && !xfer->tx_nbits) @@ -2818,16 +2821,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) static int __spi_async(struct spi_device *spi, struct spi_message *message) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; message->spi = spi; - SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); + SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); trace_spi_message_submit(message); - return master->transfer(spi, message); + return ctlr->transfer(spi, message); } /** @@ -2863,7 +2866,7 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) */ int spi_async(struct spi_device *spi, struct spi_message *message) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; int ret; unsigned long flags; @@ -2871,14 +2874,14 @@ int spi_async(struct spi_device *spi, struct spi_message *message) if (ret != 0) return ret; - spin_lock_irqsave(&master->bus_lock_spinlock, flags); + spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); - if (master->bus_lock_flag) + if (ctlr->bus_lock_flag) ret = -EBUSY; else ret = __spi_async(spi, message); - spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); return ret; } @@ -2917,7 +2920,7 @@ EXPORT_SYMBOL_GPL(spi_async); */ int spi_async_locked(struct spi_device *spi, struct spi_message *message) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; int ret; unsigned long flags; @@ -2925,11 +2928,11 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message) if (ret != 0) return ret; - spin_lock_irqsave(&master->bus_lock_spinlock, flags); + spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ret = __spi_async(spi, message); - spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + 
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); return ret; @@ -2941,7 +2944,7 @@ int spi_flash_read(struct spi_device *spi, struct spi_flash_read_message *msg) { - struct spi_master *master = spi->master; + struct spi_controller *master = spi->controller; struct device *rx_dev = NULL; int ret; @@ -2995,7 +2998,7 @@ EXPORT_SYMBOL_GPL(spi_flash_read); /*-------------------------------------------------------------------------*/ -/* Utility methods for SPI master protocol drivers, layered on +/* Utility methods for SPI protocol drivers, layered on * top of the core. Some other utility methods are defined as * inline functions. */ @@ -3009,7 +3012,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) { DECLARE_COMPLETION_ONSTACK(done); int status; - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; unsigned long flags; status = __spi_validate(spi, message); @@ -3020,7 +3023,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) message->context = &done; message->spi = spi; - SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); + SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); /* If we're not using the legacy transfer method then we will @@ -3028,14 +3031,14 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) * This code would be less tricky if we could remove the * support for driver implemented message queues. */ - if (master->transfer == spi_queued_transfer) { - spin_lock_irqsave(&master->bus_lock_spinlock, flags); + if (ctlr->transfer == spi_queued_transfer) { + spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); trace_spi_message_submit(message); status = __spi_queued_transfer(spi, message, false); - spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); } else { status = spi_async_locked(spi, message); } @@ -3044,12 +3047,12 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) /* Push out the messages in the calling context if we * can. */ - if (master->transfer == spi_queued_transfer) { - SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, + if (ctlr->transfer == spi_queued_transfer) { + SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync_immediate); SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync_immediate); - __spi_pump_messages(master, false); + __spi_pump_messages(ctlr, false); } wait_for_completion(&done); @@ -3084,9 +3087,9 @@ int spi_sync(struct spi_device *spi, struct spi_message *message) { int ret; - mutex_lock(&spi->master->bus_lock_mutex); + mutex_lock(&spi->controller->bus_lock_mutex); ret = __spi_sync(spi, message); - mutex_unlock(&spi->master->bus_lock_mutex); + mutex_unlock(&spi->controller->bus_lock_mutex); return ret; } @@ -3116,7 +3119,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked); /** * spi_bus_lock - obtain a lock for exclusive SPI bus usage - * @master: SPI bus master that should be locked for exclusive bus access + * @ctlr: SPI bus master that should be locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. The sleep @@ -3129,15 +3132,15 @@ EXPORT_SYMBOL_GPL(spi_sync_locked); * * Return: always zero. 
*/ -int spi_bus_lock(struct spi_master *master) +int spi_bus_lock(struct spi_controller *ctlr) { unsigned long flags; - mutex_lock(&master->bus_lock_mutex); + mutex_lock(&ctlr->bus_lock_mutex); - spin_lock_irqsave(&master->bus_lock_spinlock, flags); - master->bus_lock_flag = 1; - spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); + ctlr->bus_lock_flag = 1; + spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); /* mutex remains locked until spi_bus_unlock is called */ @@ -3147,7 +3150,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock); /** * spi_bus_unlock - release the lock for exclusive SPI bus usage - * @master: SPI bus master that was locked for exclusive bus access + * @ctlr: SPI bus master that was locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. The sleep @@ -3158,11 +3161,11 @@ EXPORT_SYMBOL_GPL(spi_bus_lock); * * Return: always zero. */ -int spi_bus_unlock(struct spi_master *master) +int spi_bus_unlock(struct spi_controller *ctlr) { - master->bus_lock_flag = 0; + ctlr->bus_lock_flag = 0; - mutex_unlock(&master->bus_lock_mutex); + mutex_unlock(&ctlr->bus_lock_mutex); return 0; } @@ -3264,48 +3267,48 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node) return dev ? to_spi_device(dev) : NULL; } -static int __spi_of_master_match(struct device *dev, const void *data) +static int __spi_of_controller_match(struct device *dev, const void *data) { return dev->of_node == data; } -/* the spi masters are not using spi_bus, so we find it with another way */ -static struct spi_master *of_find_spi_master_by_node(struct device_node *node) +/* the spi controllers are not using spi_bus, so we find it with another way */ +static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) { struct device *dev; dev = class_find_device(&spi_master_class, NULL, node, - __spi_of_master_match); + __spi_of_controller_match); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) dev = class_find_device(&spi_slave_class, NULL, node, - __spi_of_master_match); + __spi_of_controller_match); if (!dev) return NULL; /* reference got in class_find_device */ - return container_of(dev, struct spi_master, dev); + return container_of(dev, struct spi_controller, dev); } static int of_spi_notify(struct notifier_block *nb, unsigned long action, void *arg) { struct of_reconfig_data *rd = arg; - struct spi_master *master; + struct spi_controller *ctlr; struct spi_device *spi; switch (of_reconfig_get_state_change(action, arg)) { case OF_RECONFIG_CHANGE_ADD: - master = of_find_spi_master_by_node(rd->dn->parent); - if (master == NULL) + ctlr = of_find_spi_controller_by_node(rd->dn->parent); + if (ctlr == NULL) return NOTIFY_OK; /* not for us */ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { - put_device(&master->dev); + put_device(&ctlr->dev); return NOTIFY_OK; } - spi = of_register_spi_device(master, rd->dn); - put_device(&master->dev); + spi = of_register_spi_device(ctlr, rd->dn); + put_device(&ctlr->dev); if (IS_ERR(spi)) { pr_err("%s: failed to create for '%s'\n", @@ -3344,7 +3347,7 @@ extern struct notifier_block spi_of_notifier; #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ #if IS_ENABLED(CONFIG_ACPI) -static int spi_acpi_master_match(struct device *dev, const void *data) +static int spi_acpi_controller_match(struct device *dev, const void *data) { return ACPI_COMPANION(dev->parent) == data; } @@ -3354,19 +3357,19 @@ static int spi_acpi_device_match(struct device 
*dev, void *data) return ACPI_COMPANION(dev) == data; } -static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) +static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) { struct device *dev; dev = class_find_device(&spi_master_class, NULL, adev, - spi_acpi_master_match); + spi_acpi_controller_match); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) dev = class_find_device(&spi_slave_class, NULL, adev, - spi_acpi_master_match); + spi_acpi_controller_match); if (!dev) return NULL; - return container_of(dev, struct spi_master, dev); + return container_of(dev, struct spi_controller, dev); } static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) @@ -3382,17 +3385,17 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, void *arg) { struct acpi_device *adev = arg; - struct spi_master *master; + struct spi_controller *ctlr; struct spi_device *spi; switch (value) { case ACPI_RECONFIG_DEVICE_ADD: - master = acpi_spi_find_master_by_adev(adev->parent); - if (!master) + ctlr = acpi_spi_find_controller_by_adev(adev->parent); + if (!ctlr) break; - acpi_register_spi_device(master, adev); - put_device(&master->dev); + acpi_register_spi_device(ctlr, adev); + put_device(&ctlr->dev); break; case ACPI_RECONFIG_DEVICE_REMOVE: if (!acpi_device_enumerated(adev)) diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 0a78745e5766..7b2170bfd6e7 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -24,7 +24,7 @@ struct dma_chan; struct property_entry; -struct spi_master; +struct spi_controller; struct spi_transfer; struct spi_flash_read_message; @@ -84,7 +84,7 @@ struct spi_statistics { void spi_statistics_add_transfer_stats(struct spi_statistics *stats, struct spi_transfer *xfer, - struct spi_master *master); + struct spi_controller *ctlr); #define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ do { \ @@ -98,13 +98,14 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) /** - * struct spi_device - Master side proxy for an SPI slave device + * struct spi_device - Controller side proxy for an SPI slave device * @dev: Driver model representation of the device. - * @master: SPI controller used with the device. + * @controller: SPI controller used with the device. + * @master: Copy of controller, for backwards compatibility. * @max_speed_hz: Maximum clock rate to be used with this chip * (on this board); may be changed by the device's driver. * The spi_transfer.speed_hz can override this for each transfer. - * @chip_select: Chipselect, distinguishing chips handled by @master. + * @chip_select: Chipselect, distinguishing chips handled by @controller. * @mode: The spi mode defines how data is clocked out and in. * This may be changed by the device's driver. 
* The "active low" default for chipselect mode can be overridden @@ -140,7 +141,8 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, */ struct spi_device { struct device dev; - struct spi_master *master; + struct spi_controller *controller; + struct spi_controller *master; /* compatibility layer */ u32 max_speed_hz; u8 chip_select; u8 bits_per_word; @@ -198,7 +200,7 @@ static inline void spi_dev_put(struct spi_device *spi) put_device(&spi->dev); } -/* ctldata is for the bus_master driver's runtime state */ +/* ctldata is for the bus_controller driver's runtime state */ static inline void *spi_get_ctldata(struct spi_device *spi) { return spi->controller_state; @@ -292,9 +294,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) spi_unregister_driver) /** - * struct spi_master - interface to SPI master controller + * struct spi_controller - interface to SPI master or slave controller * @dev: device interface to this driver - * @list: link with the global spi_master list + * @list: link with the global spi_controller list * @bus_num: board-specific (and often SOC-specific) identifier for a * given SPI controller. * @num_chipselect: chipselects are used to distinguish individual @@ -327,8 +329,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * the device whose settings are being modified. * @transfer: adds a message to the controller's transfer queue. * @cleanup: frees controller-specific state - * @can_dma: determine whether this master supports DMA - * @queued: whether this master is providing an internal message queue + * @can_dma: determine whether this controller supports DMA + * @queued: whether this controller is providing an internal message queue * @kworker: thread struct for message pump * @kworker_task: pointer to task for message pump kworker thread * @pump_messages: work struct for scheduling work to the message pump @@ -384,7 +386,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). - * @statistics: statistics for the spi_master + * @statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel * @dummy_rx: dummy receive buffer for full-duplex devices @@ -393,7 +395,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * what Linux expects, this optional hook can be used to translate * between the two. * - * Each SPI master controller can communicate with one or more @spi_device + * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals * but not chip select signals. Each device may be configured to use a * different clock rate, since those shared signals are ignored unless @@ -404,7 +406,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * an SPI slave device. For each such message it queues, it calls the * message's completion function when the transaction completes. 
*/ -struct spi_master { +struct spi_controller { struct device dev; struct list_head list; @@ -442,12 +444,13 @@ struct spi_master { /* other constraints relevant to this driver */ u16 flags; -#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ -#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ -#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ -#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ -#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ -#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ +#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */ +#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */ +#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */ +#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */ + +#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ /* flag indicating this is an SPI slave controller */ bool slave; @@ -485,8 +488,8 @@ struct spi_master { * any other request management * + To a given spi_device, message queueing is pure fifo * - * + The master's main job is to process its message queue, - * selecting a chip then transferring data + * + The controller's main job is to process its message queue, + * selecting a chip (for masters), then transferring data * + If there are multiple spi_device children, the i/o queue * arbitration algorithm is unspecified (round robin, fifo, * priority, reservations, preemption, etc) @@ -499,7 +502,7 @@ struct spi_master { int (*transfer)(struct spi_device *spi, struct spi_message *mesg); - /* called on release() to free memory provided by spi_master */ + /* called on release() to free memory provided by spi_controller */ void (*cleanup)(struct spi_device *spi); /* @@ -509,13 +512,13 @@ struct spi_master { * not modify or store xfer and dma_tx and dma_rx must be set * while the device is prepared. */ - bool (*can_dma)(struct spi_master *master, + bool (*can_dma)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer); /* * These hooks are for drivers that want to use the generic - * master transfer queueing mechanism. If these are used, the + * controller transfer queueing mechanism. If these are used, the * transfer() function above must NOT be specified by the driver. * Over time we expect SPI drivers to be phased over to this API. */ @@ -536,15 +539,15 @@ struct spi_master { struct completion xfer_completion; size_t max_dma_len; - int (*prepare_transfer_hardware)(struct spi_master *master); - int (*transfer_one_message)(struct spi_master *master, + int (*prepare_transfer_hardware)(struct spi_controller *ctlr); + int (*transfer_one_message)(struct spi_controller *ctlr, struct spi_message *mesg); - int (*unprepare_transfer_hardware)(struct spi_master *master); - int (*prepare_message)(struct spi_master *master, + int (*unprepare_transfer_hardware)(struct spi_controller *ctlr); + int (*prepare_message)(struct spi_controller *ctlr, struct spi_message *message); - int (*unprepare_message)(struct spi_master *master, + int (*unprepare_message)(struct spi_controller *ctlr, struct spi_message *message); - int (*slave_abort)(struct spi_master *spi); + int (*slave_abort)(struct spi_controller *ctlr); int (*spi_flash_read)(struct spi_device *spi, struct spi_flash_read_message *msg); bool (*spi_flash_can_dma)(struct spi_device *spi, @@ -556,9 +559,9 @@ struct spi_master { * of transfer_one_message() provied by the core. 
*/ void (*set_cs)(struct spi_device *spi, bool enable); - int (*transfer_one)(struct spi_master *master, struct spi_device *spi, + int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *transfer); - void (*handle_err)(struct spi_master *master, + void (*handle_err)(struct spi_controller *ctlr, struct spi_message *message); /* gpio chip select */ @@ -575,58 +578,59 @@ struct spi_master { void *dummy_rx; void *dummy_tx; - int (*fw_translate_cs)(struct spi_master *master, unsigned cs); + int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs); }; -static inline void *spi_master_get_devdata(struct spi_master *master) +static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) { - return dev_get_drvdata(&master->dev); + return dev_get_drvdata(&ctlr->dev); } -static inline void spi_master_set_devdata(struct spi_master *master, void *data) +static inline void spi_controller_set_devdata(struct spi_controller *ctlr, + void *data) { - dev_set_drvdata(&master->dev, data); + dev_set_drvdata(&ctlr->dev, data); } -static inline struct spi_master *spi_master_get(struct spi_master *master) +static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr) { - if (!master || !get_device(&master->dev)) + if (!ctlr || !get_device(&ctlr->dev)) return NULL; - return master; + return ctlr; } -static inline void spi_master_put(struct spi_master *master) +static inline void spi_controller_put(struct spi_controller *ctlr) { - if (master) - put_device(&master->dev); + if (ctlr) + put_device(&ctlr->dev); } -static inline bool spi_controller_is_slave(struct spi_master *ctlr) +static inline bool spi_controller_is_slave(struct spi_controller *ctlr) { return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave; } /* PM calls that need to be issued by the driver */ -extern int spi_master_suspend(struct spi_master *master); -extern int spi_master_resume(struct spi_master *master); +extern int spi_controller_suspend(struct spi_controller *ctlr); +extern int spi_controller_resume(struct spi_controller *ctlr); /* Calls the driver make to interact with the message queue */ -extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); -extern void spi_finalize_current_message(struct spi_master *master); -extern void spi_finalize_current_transfer(struct spi_master *master); +extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr); +extern void spi_finalize_current_message(struct spi_controller *ctlr); +extern void spi_finalize_current_transfer(struct spi_controller *ctlr); -/* the spi driver core manages memory for the spi_master classdev */ -extern struct spi_master *__spi_alloc_controller(struct device *host, - unsigned int size, bool slave); +/* the spi driver core manages memory for the spi_controller classdev */ +extern struct spi_controller *__spi_alloc_controller(struct device *host, + unsigned int size, bool slave); -static inline struct spi_master *spi_alloc_master(struct device *host, - unsigned int size) +static inline struct spi_controller *spi_alloc_master(struct device *host, + unsigned int size) { return __spi_alloc_controller(host, size, false); } -static inline struct spi_master *spi_alloc_slave(struct device *host, - unsigned int size) +static inline struct spi_controller *spi_alloc_slave(struct device *host, + unsigned int size) { if (!IS_ENABLED(CONFIG_SPI_SLAVE)) return NULL; @@ -634,18 +638,18 @@ static inline struct spi_master *spi_alloc_slave(struct device *host, return 
__spi_alloc_controller(host, size, true); } -extern int spi_register_master(struct spi_master *master); -extern int devm_spi_register_master(struct device *dev, - struct spi_master *master); -extern void spi_unregister_master(struct spi_master *master); +extern int spi_register_controller(struct spi_controller *ctlr); +extern int devm_spi_register_controller(struct device *dev, + struct spi_controller *ctlr); +extern void spi_unregister_controller(struct spi_controller *ctlr); -extern struct spi_master *spi_busnum_to_master(u16 busnum); +extern struct spi_controller *spi_busnum_to_master(u16 busnum); /* * SPI resource management while processing a SPI message */ -typedef void (*spi_res_release_t)(struct spi_master *master, +typedef void (*spi_res_release_t)(struct spi_controller *ctlr, struct spi_message *msg, void *res); @@ -670,7 +674,7 @@ extern void *spi_res_alloc(struct spi_device *spi, extern void spi_res_add(struct spi_message *message, void *res); extern void spi_res_free(void *res); -extern void spi_res_release(struct spi_master *master, +extern void spi_res_release(struct spi_controller *ctlr, struct spi_message *message); /*---------------------------------------------------------------------------*/ @@ -854,7 +858,7 @@ struct spi_message { /* for optional use by whatever driver currently owns the * spi_message ... between calls to spi_async and then later - * complete(), that's the spi_master controller driver. + * complete(), that's the spi_controller controller driver. */ struct list_head queue; void *state; @@ -943,21 +947,22 @@ extern int spi_slave_abort(struct spi_device *spi); static inline size_t spi_max_message_size(struct spi_device *spi) { - struct spi_master *master = spi->master; - if (!master->max_message_size) + struct spi_controller *ctlr = spi->controller; + + if (!ctlr->max_message_size) return SIZE_MAX; - return master->max_message_size(spi); + return ctlr->max_message_size(spi); } static inline size_t spi_max_transfer_size(struct spi_device *spi) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; size_t tr_max = SIZE_MAX; size_t msg_max = spi_max_message_size(spi); - if (master->max_transfer_size) - tr_max = master->max_transfer_size(spi); + if (ctlr->max_transfer_size) + tr_max = ctlr->max_transfer_size(spi); /* transfer size limit must not be greater than messsage size limit */ return min(tr_max, msg_max); @@ -968,7 +973,7 @@ spi_max_transfer_size(struct spi_device *spi) /* SPI transfer replacement methods which make use of spi_res */ struct spi_replaced_transfers; -typedef void (*spi_replaced_release_t)(struct spi_master *master, +typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, struct spi_message *msg, struct spi_replaced_transfers *res); /** @@ -1012,7 +1017,7 @@ extern struct spi_replaced_transfers *spi_replace_transfers( /* SPI transfer transformation methods */ -extern int spi_split_transfers_maxsize(struct spi_master *master, +extern int spi_split_transfers_maxsize(struct spi_controller *ctlr, struct spi_message *msg, size_t maxsize, gfp_t gfp); @@ -1026,8 +1031,8 @@ extern int spi_split_transfers_maxsize(struct spi_master *master, extern int spi_sync(struct spi_device *spi, struct spi_message *message); extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); -extern int spi_bus_lock(struct spi_master *master); -extern int spi_bus_unlock(struct spi_master *master); +extern int spi_bus_lock(struct spi_controller *ctlr); +extern int spi_bus_unlock(struct 
spi_controller *ctlr); /** * spi_sync_transfer - synchronous SPI data transfer @@ -1212,9 +1217,9 @@ struct spi_flash_read_message { /* SPI core interface for flash read support */ static inline bool spi_flash_read_supported(struct spi_device *spi) { - return spi->master->spi_flash_read && - (!spi->master->flash_read_supported || - spi->master->flash_read_supported(spi)); + return spi->controller->spi_flash_read && + (!spi->controller->flash_read_supported || + spi->controller->flash_read_supported(spi)); } int spi_flash_read(struct spi_device *spi, @@ -1247,7 +1252,7 @@ int spi_flash_read(struct spi_device *spi, * @irq: Initializes spi_device.irq; depends on how the board is wired. * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits * from the chip datasheet and board-specific signal quality issues. - * @bus_num: Identifies which spi_master parents the spi_device; unused + * @bus_num: Identifies which spi_controller parents the spi_device; unused * by spi_new_device(), and otherwise depends on board wiring. * @chip_select: Initializes spi_device.chip_select; depends on how * the board is wired. @@ -1288,7 +1293,7 @@ struct spi_board_info { /* bus_num is board specific and matches the bus_num of some - * spi_master that will probably be registered later. + * spi_controller that will probably be registered later. * * chip_select reflects how this chip is wired to that master; * it's less than num_chipselect. @@ -1322,7 +1327,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) /* If you're hotplugging an adapter with devices (parport, usb, etc) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but - * normally that would be handled by spi_unregister_master(). + * normally that would be handled by spi_unregister_controller(). * * You can also use spi_alloc_device() and spi_add_device() to use a two * stage registration sequence for each spi_device. This gives the caller @@ -1331,13 +1336,13 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * be defined using the board info. 
*/ extern struct spi_device * -spi_alloc_device(struct spi_master *master); +spi_alloc_device(struct spi_controller *ctlr); extern int spi_add_device(struct spi_device *spi); extern struct spi_device * -spi_new_device(struct spi_master *, struct spi_board_info *); +spi_new_device(struct spi_controller *, struct spi_board_info *); extern void spi_unregister_device(struct spi_device *spi); @@ -1345,9 +1350,32 @@ extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); static inline bool -spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) +spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) { - return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); + return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); } + +/* Compatibility layer */ +#define spi_master spi_controller + +#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX +#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX +#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX +#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX +#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX + +#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr) +#define spi_master_set_devdata(_ctlr, _data) \ + spi_controller_set_devdata(_ctlr, _data) +#define spi_master_get(_ctlr) spi_controller_get(_ctlr) +#define spi_master_put(_ctlr) spi_controller_put(_ctlr) +#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr) +#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr) + +#define spi_register_master(_ctlr) spi_register_controller(_ctlr) +#define devm_spi_register_master(_dev, _ctlr) \ + devm_spi_register_controller(_dev, _ctlr) +#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr) + #endif /* __LINUX_SPI_H */ diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h index 7e02c983bbe2..f9f702b6ae2e 100644 --- a/include/trace/events/spi.h +++ b/include/trace/events/spi.h @@ -7,37 +7,37 @@ #include #include -DECLARE_EVENT_CLASS(spi_master, +DECLARE_EVENT_CLASS(spi_controller, - TP_PROTO(struct spi_master *master), + TP_PROTO(struct spi_controller *controller), - TP_ARGS(master), + TP_ARGS(controller), TP_STRUCT__entry( __field( int, bus_num ) ), TP_fast_assign( - __entry->bus_num = master->bus_num; + __entry->bus_num = controller->bus_num; ), TP_printk("spi%d", (int)__entry->bus_num) ); -DEFINE_EVENT(spi_master, spi_master_idle, +DEFINE_EVENT(spi_controller, spi_controller_idle, - TP_PROTO(struct spi_master *master), + TP_PROTO(struct spi_controller *controller), - TP_ARGS(master) + TP_ARGS(controller) ); -DEFINE_EVENT(spi_master, spi_master_busy, +DEFINE_EVENT(spi_controller, spi_controller_busy, - TP_PROTO(struct spi_master *master), + TP_PROTO(struct spi_controller *controller), - TP_ARGS(master) + TP_ARGS(controller) ); @@ -54,7 +54,7 @@ DECLARE_EVENT_CLASS(spi_message, ), TP_fast_assign( - __entry->bus_num = msg->spi->master->bus_num; + __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = msg->spi->chip_select; __entry->msg = msg; ), @@ -95,7 +95,7 @@ TRACE_EVENT(spi_message_done, ), TP_fast_assign( - __entry->bus_num = msg->spi->master->bus_num; + __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = msg->spi->chip_select; __entry->msg = msg; __entry->frame = msg->frame_length; @@ -122,7 +122,7 @@ DECLARE_EVENT_CLASS(spi_transfer, ), TP_fast_assign( - __entry->bus_num = msg->spi->master->bus_num; + __entry->bus_num = msg->spi->controller->bus_num; 
__entry->chip_select = msg->spi->chip_select; __entry->xfer = xfer; __entry->len = xfer->len; -- 2.20.1
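
The hunks above form one contiguous diff, so rather than annotate individual hunks, a short illustration follows the patch trailer. It is not part of the patch: it is a minimal, hypothetical platform driver sketch showing how the renamed API fits together after this change — spi_alloc_master() now hands back a struct spi_controller, the SPI_CONTROLLER_* flags replace the SPI_MASTER_* ones, and registration goes through devm_spi_register_controller(). The "acme" driver name, the register-window field and the empty transfer_one() body are placeholders, not real hardware support.

/*
 * Illustrative sketch only, not part of this patch.  Assumes the renamed
 * spi_controller API introduced above; "acme" and struct acme_spi are
 * hypothetical.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct acme_spi {
	void __iomem *regs;		/* hypothetical register window */
};

static int acme_spi_transfer_one(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	/*
	 * Push xfer->tx_buf / fill xfer->rx_buf via the hardware here.
	 * Returning 0 tells the core the transfer finished synchronously;
	 * an interrupt-driven driver would return 1 and later call
	 * spi_finalize_current_transfer(ctlr).
	 */
	return 0;
}

static int acme_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct acme_spi *priv;
	int ret;

	/* spi_alloc_master() still works; it now returns a spi_controller */
	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);

	ctlr->bus_num = -1;			/* dynamically assigned */
	ctlr->num_chipselect = 1;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;  /* was SPI_MASTER_HALF_DUPLEX */
	ctlr->transfer_one = acme_spi_transfer_one;
	ctlr->dev.of_node = pdev->dev.of_node;

	/* managed registration: unregistered automatically on driver unbind */
	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);

	return ret;
}

static struct platform_driver acme_spi_driver = {
	.driver	= { .name = "acme-spi" },
	.probe	= acme_spi_probe,
};
module_platform_driver(acme_spi_driver);
MODULE_LICENSE("GPL");

Drivers that have not yet been converted keep building unchanged, because the compatibility layer added at the end of <linux/spi/spi.h> (#define spi_master spi_controller, spi_register_master() mapping to spi_register_controller(), the SPI_MASTER_* flag aliases, and so on) routes the old names to the new ones until each driver is updated.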