/*
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}
const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}
static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}
static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes in arch/.../mach.../board-YYY.c files,
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}
static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf = buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
			sg_buf += min;
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}
static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}
static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}
static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	trace_spi_message_done(mesg);

	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret)
		dev_warn(&master->dev,
			 "could not stop message queue\n");
	return ret;
}
static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued to the driver's queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* IRQ */
	spi->irq = irq_of_parse_and_map(nc, 0);

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered on driver detach.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent DUAL and QUAD being set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	spi_set_cs(spi, false);

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer)
			__spi_pump_messages(master, false);

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not on the spi_bus, so we find them another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);