Date:2010-08-31 22:06:42 (13 years 6 months ago)
Author:juhosg
Commit:c433567bd3abf153f116b01fec22538a3c9d7fe7
Message:generic: backport SPI bus locking API

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@22862 3c298f89-4303-0410-b956-a3cf2f4a3e73
Files: target/linux/generic/patches-2.6.32/910-backport-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.32/911-backport-mmc_spi-use-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.33/910-backport-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.33/911-backport-mmc_spi-use-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.34/910-backport-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.34/911-backport-mmc_spi-use-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.35/910-backport-spi-bus-locking-api.patch (1 diff)
target/linux/generic/patches-2.6.35/911-backport-mmc_spi-use-spi-bus-locking-api.patch (1 diff)

Change Details

target/linux/generic/patches-2.6.32/910-backport-spi-bus-locking-api.patch
1From cf32b71e981ca63e8f349d8585ca2a3583b556e0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Mon, 28 Jun 2010 17:49:29 -0700
4Subject: [PATCH] spi/mmc_spi: SPI bus locking API, using mutex
5
6SPI bus locking API to allow exclusive access to the SPI bus, especially, but
7not limited to, for the mmc_spi driver.
8
9Coded according to an outline from Grant Likely; here is his
10specification (accidentally swapped function names corrected):
11
12It requires 3 things to be added to struct spi_master.
13- 1 Mutex
14- 1 spin lock
15- 1 flag.
16
17The mutex protects spi_sync, and provides sleeping "for free"
18The spinlock protects the atomic spi_async call.
19The flag is set when the lock is obtained, and checked while holding
20the spinlock in spi_async(). If the flag is checked, then spi_async()
21must fail immediately.
22
23The current runtime API looks like this:
24spi_async(struct spi_device*, struct spi_message*);
25spi_sync(struct spi_device*, struct spi_message*);
26
27The API needs to be extended to this:
28spi_async(struct spi_device*, struct spi_message*)
29spi_sync(struct spi_device*, struct spi_message*)
30spi_bus_lock(struct spi_master*) /* although struct spi_device* might
31be easier */
32spi_bus_unlock(struct spi_master*)
33spi_async_locked(struct spi_device*, struct spi_message*)
34spi_sync_locked(struct spi_device*, struct spi_message*)
35
36Drivers can only call the last two if they already hold the spi_master_lock().
37
38spi_bus_lock() obtains the mutex, obtains the spin lock, sets the
39flag, and releases the spin lock before returning. It doesn't even
40need to sleep while waiting for "in-flight" spi_transactions to
41complete because its purpose is to guarantee no additional
42transactions are added. It does not guarantee that the bus is idle.
43
44spi_bus_unlock() clears the flag and releases the mutex, which will
45wake up any waiters.
46
47The difference between spi_async() and spi_async_locked() is that the
48locked version bypasses the check of the lock flag. Both versions
49need to obtain the spinlock.
50
51The difference between spi_sync() and spi_sync_locked() is that
52spi_sync() must hold the mutex while enqueuing a new transfer.
53spi_sync_locked() doesn't because the mutex is already held. Note
54however that spi_sync must *not* continue to hold the mutex while
55waiting for the transfer to complete, otherwise only one transfer
56could be queued up at a time!
57
58Almost no code needs to be written. The current spi_async() and
59spi_sync() can probably be renamed to __spi_async() and __spi_sync()
60so that spi_async(), spi_sync(), spi_async_locked() and
61spi_sync_locked() can just become wrappers around the common code.
62
63spi_sync() is protected by a mutex because it can sleep
64spi_async() needs to be protected with a flag and a spinlock because
65it can be called atomically and must not sleep
66
67Signed-off-by: Ernst Schwab <eschwab@online.de>
68[grant.likely@secretlab.ca: use spin_lock_irqsave()]
69Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
70Tested-by: Matt Fleming <matt@console-pimps.org>
71Tested-by: Antonio Ospite <ospite@studenti.unina.it>
72---
73 drivers/spi/spi.c | 225 ++++++++++++++++++++++++++++++++++++++++-------
74 include/linux/spi/spi.h | 12 +++
75 2 files changed, 204 insertions(+), 33 deletions(-)
76
77--- a/drivers/spi/spi.c
78@@ -524,6 +524,10 @@ int spi_register_master(struct spi_maste
79         dynamic = 1;
80     }
81
82+ spin_lock_init(&master->bus_lock_spinlock);
83+ mutex_init(&master->bus_lock_mutex);
84+ master->bus_lock_flag = 0;
85+
86     /* register the device, then userspace will see it.
87      * registration fails if the bus ID is in use.
88      */
89@@ -663,6 +667,35 @@ int spi_setup(struct spi_device *spi)
90 }
91 EXPORT_SYMBOL_GPL(spi_setup);
92
93+static int __spi_async(struct spi_device *spi, struct spi_message *message)
94+{
95+ struct spi_master *master = spi->master;
96+
97+ /* Half-duplex links include original MicroWire, and ones with
98+ * only one data pin like SPI_3WIRE (switches direction) or where
99+ * either MOSI or MISO is missing. They can also be caused by
100+ * software limitations.
101+ */
102+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
103+ || (spi->mode & SPI_3WIRE)) {
104+ struct spi_transfer *xfer;
105+ unsigned flags = master->flags;
106+
107+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
108+ if (xfer->rx_buf && xfer->tx_buf)
109+ return -EINVAL;
110+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
111+ return -EINVAL;
112+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
113+ return -EINVAL;
114+ }
115+ }
116+
117+ message->spi = spi;
118+ message->status = -EINPROGRESS;
119+ return master->transfer(spi, message);
120+}
121+
122 /**
123  * spi_async - asynchronous SPI transfer
124  * @spi: device with which data will be exchanged
125@@ -695,33 +728,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
126 int spi_async(struct spi_device *spi, struct spi_message *message)
127 {
128     struct spi_master *master = spi->master;
129+ int ret;
130+ unsigned long flags;
131
132- /* Half-duplex links include original MicroWire, and ones with
133- * only one data pin like SPI_3WIRE (switches direction) or where
134- * either MOSI or MISO is missing. They can also be caused by
135- * software limitations.
136- */
137- if ((master->flags & SPI_MASTER_HALF_DUPLEX)
138- || (spi->mode & SPI_3WIRE)) {
139- struct spi_transfer *xfer;
140- unsigned flags = master->flags;
141+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
142
143- list_for_each_entry(xfer, &message->transfers, transfer_list) {
144- if (xfer->rx_buf && xfer->tx_buf)
145- return -EINVAL;
146- if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
147- return -EINVAL;
148- if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
149- return -EINVAL;
150- }
151- }
152+ if (master->bus_lock_flag)
153+ ret = -EBUSY;
154+ else
155+ ret = __spi_async(spi, message);
156
157- message->spi = spi;
158- message->status = -EINPROGRESS;
159- return master->transfer(spi, message);
160+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
161+
162+ return ret;
163 }
164 EXPORT_SYMBOL_GPL(spi_async);
165
166+/**
167+ * spi_async_locked - version of spi_async with exclusive bus usage
168+ * @spi: device with which data will be exchanged
169+ * @message: describes the data transfers, including completion callback
170+ * Context: any (irqs may be blocked, etc)
171+ *
172+ * This call may be used in_irq and other contexts which can't sleep,
173+ * as well as from task contexts which can sleep.
174+ *
175+ * The completion callback is invoked in a context which can't sleep.
176+ * Before that invocation, the value of message->status is undefined.
177+ * When the callback is issued, message->status holds either zero (to
178+ * indicate complete success) or a negative error code. After that
179+ * callback returns, the driver which issued the transfer request may
180+ * deallocate the associated memory; it's no longer in use by any SPI
181+ * core or controller driver code.
182+ *
183+ * Note that although all messages to a spi_device are handled in
184+ * FIFO order, messages may go to different devices in other orders.
185+ * Some device might be higher priority, or have various "hard" access
186+ * time requirements, for example.
187+ *
188+ * On detection of any fault during the transfer, processing of
189+ * the entire message is aborted, and the device is deselected.
190+ * Until returning from the associated message completion callback,
191+ * no other spi_message queued to that device will be processed.
192+ * (This rule applies equally to all the synchronous transfer calls,
193+ * which are wrappers around this core asynchronous primitive.)
194+ */
195+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
196+{
197+ struct spi_master *master = spi->master;
198+ int ret;
199+ unsigned long flags;
200+
201+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
202+
203+ ret = __spi_async(spi, message);
204+
205+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
206+
207+ return ret;
208+
209+}
210+EXPORT_SYMBOL_GPL(spi_async_locked);
211+
212
213 /*-------------------------------------------------------------------------*/
214
215@@ -735,6 +803,32 @@ static void spi_complete(void *arg)
216     complete(arg);
217 }
218
219+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
220+ int bus_locked)
221+{
222+ DECLARE_COMPLETION_ONSTACK(done);
223+ int status;
224+ struct spi_master *master = spi->master;
225+
226+ message->complete = spi_complete;
227+ message->context = &done;
228+
229+ if (!bus_locked)
230+ mutex_lock(&master->bus_lock_mutex);
231+
232+ status = spi_async_locked(spi, message);
233+
234+ if (!bus_locked)
235+ mutex_unlock(&master->bus_lock_mutex);
236+
237+ if (status == 0) {
238+ wait_for_completion(&done);
239+ status = message->status;
240+ }
241+ message->context = NULL;
242+ return status;
243+}
244+
245 /**
246  * spi_sync - blocking/synchronous SPI data transfers
247  * @spi: device with which data will be exchanged
248@@ -758,21 +852,86 @@ static void spi_complete(void *arg)
249  */
250 int spi_sync(struct spi_device *spi, struct spi_message *message)
251 {
252- DECLARE_COMPLETION_ONSTACK(done);
253- int status;
254-
255- message->complete = spi_complete;
256- message->context = &done;
257- status = spi_async(spi, message);
258- if (status == 0) {
259- wait_for_completion(&done);
260- status = message->status;
261- }
262- message->context = NULL;
263- return status;
264+ return __spi_sync(spi, message, 0);
265 }
266 EXPORT_SYMBOL_GPL(spi_sync);
267
268+/**
269+ * spi_sync_locked - version of spi_sync with exclusive bus usage
270+ * @spi: device with which data will be exchanged
271+ * @message: describes the data transfers
272+ * Context: can sleep
273+ *
274+ * This call may only be used from a context that may sleep. The sleep
275+ * is non-interruptible, and has no timeout. Low-overhead controller
276+ * drivers may DMA directly into and out of the message buffers.
277+ *
278+ * This call should be used by drivers that require exclusive access to the
279+ * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must
280+ * be released by a spi_bus_unlock call when the exclusive access is over.
281+ *
282+ * It returns zero on success, else a negative error code.
283+ */
284+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
285+{
286+ return __spi_sync(spi, message, 1);
287+}
288+EXPORT_SYMBOL_GPL(spi_sync_locked);
289+
290+/**
291+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
292+ * @master: SPI bus master that should be locked for exclusive bus access
293+ * Context: can sleep
294+ *
295+ * This call may only be used from a context that may sleep. The sleep
296+ * is non-interruptible, and has no timeout.
297+ *
298+ * This call should be used by drivers that require exclusive access to the
299+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
300+ * exclusive access is over. Data transfer must be done by spi_sync_locked
301+ * and spi_async_locked calls when the SPI bus lock is held.
302+ *
303+ * It returns zero on success, else a negative error code.
304+ */
305+int spi_bus_lock(struct spi_master *master)
306+{
307+ unsigned long flags;
308+
309+ mutex_lock(&master->bus_lock_mutex);
310+
311+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
312+ master->bus_lock_flag = 1;
313+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
314+
315+ /* mutex remains locked until spi_bus_unlock is called */
316+
317+ return 0;
318+}
319+EXPORT_SYMBOL_GPL(spi_bus_lock);
320+
321+/**
322+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
323+ * @master: SPI bus master that was locked for exclusive bus access
324+ * Context: can sleep
325+ *
326+ * This call may only be used from a context that may sleep. The sleep
327+ * is non-interruptible, and has no timeout.
328+ *
329+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
330+ * call.
331+ *
332+ * It returns zero on success, else a negative error code.
333+ */
334+int spi_bus_unlock(struct spi_master *master)
335+{
336+ master->bus_lock_flag = 0;
337+
338+ mutex_unlock(&master->bus_lock_mutex);
339+
340+ return 0;
341+}
342+EXPORT_SYMBOL_GPL(spi_bus_unlock);
343+
344 /* portable code must never pass more than 32 bytes */
345 #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
346
347--- a/include/linux/spi/spi.h
348@@ -261,6 +261,13 @@ struct spi_master {
349 #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
350 #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
351
352+ /* lock and mutex for SPI bus locking */
353+ spinlock_t bus_lock_spinlock;
354+ struct mutex bus_lock_mutex;
355+
356+ /* flag indicating that the SPI bus is locked for exclusive use */
357+ bool bus_lock_flag;
358+
359     /* Setup mode and clock, etc (spi driver may call many times).
360      *
361      * IMPORTANT: this may be called when transfers to another
362@@ -541,6 +548,8 @@ static inline void spi_message_free(stru
363
364 extern int spi_setup(struct spi_device *spi);
365 extern int spi_async(struct spi_device *spi, struct spi_message *message);
366+extern int spi_async_locked(struct spi_device *spi,
367+ struct spi_message *message);
368
369 /*---------------------------------------------------------------------------*/
370
371@@ -550,6 +559,9 @@ extern int spi_async(struct spi_device *
372  */
373
374 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
375+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
376+extern int spi_bus_lock(struct spi_master *master);
377+extern int spi_bus_unlock(struct spi_master *master);
378
379 /**
380  * spi_write - SPI synchronous write
target/linux/generic/patches-2.6.32/911-backport-mmc_spi-use-spi-bus-locking-api.patch
1From 4751c1c74bc7b596db5de0c93be1a22a570145c0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Thu, 18 Feb 2010 12:47:46 +0100
4Subject: [PATCH] spi/mmc_spi: mmc_spi adaptations for SPI bus locking API
5
6Modification of the mmc_spi driver to use the SPI bus locking API.
7With this, the mmc_spi driver can be used together with other SPI
8devices on the same SPI bus. The exclusive access to the SPI bus is
9now managed in the SPI layer. The counting of chip selects in the probe
10function is no longer needed.
11
12Signed-off-by: Ernst Schwab <eschwab@online.de>
13Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
14Tested-by: Matt Fleming <matt@console-pimps.org>
15Tested-by: Antonio Ospite <ospite@studenti.unina.it>
16---
17 drivers/mmc/host/mmc_spi.c | 59 ++++++++-----------------------------------
18 1 files changed, 11 insertions(+), 48 deletions(-)
19
20--- a/drivers/mmc/host/mmc_spi.c
21@@ -181,7 +181,7 @@ mmc_spi_readbytes(struct mmc_spi_host *h
22                 host->data_dma, sizeof(*host->data),
23                 DMA_FROM_DEVICE);
24
25- status = spi_sync(host->spi, &host->readback);
26+ status = spi_sync_locked(host->spi, &host->readback);
27
28     if (host->dma_dev)
29         dma_sync_single_for_cpu(host->dma_dev,
30@@ -540,7 +540,7 @@ mmc_spi_command_send(struct mmc_spi_host
31                 host->data_dma, sizeof(*host->data),
32                 DMA_BIDIRECTIONAL);
33     }
34- status = spi_sync(host->spi, &host->m);
35+ status = spi_sync_locked(host->spi, &host->m);
36
37     if (host->dma_dev)
38         dma_sync_single_for_cpu(host->dma_dev,
39@@ -684,7 +684,7 @@ mmc_spi_writeblock(struct mmc_spi_host *
40                 host->data_dma, sizeof(*scratch),
41                 DMA_BIDIRECTIONAL);
42
43- status = spi_sync(spi, &host->m);
44+ status = spi_sync_locked(spi, &host->m);
45
46     if (status != 0) {
47         dev_dbg(&spi->dev, "write error (%d)\n", status);
48@@ -821,7 +821,7 @@ mmc_spi_readblock(struct mmc_spi_host *h
49                 DMA_FROM_DEVICE);
50     }
51
52- status = spi_sync(spi, &host->m);
53+ status = spi_sync_locked(spi, &host->m);
54
55     if (host->dma_dev) {
56         dma_sync_single_for_cpu(host->dma_dev,
57@@ -1017,7 +1017,7 @@ mmc_spi_data_do(struct mmc_spi_host *hos
58                     host->data_dma, sizeof(*scratch),
59                     DMA_BIDIRECTIONAL);
60
61- tmp = spi_sync(spi, &host->m);
62+ tmp = spi_sync_locked(spi, &host->m);
63
64         if (host->dma_dev)
65             dma_sync_single_for_cpu(host->dma_dev,
66@@ -1083,6 +1083,9 @@ static void mmc_spi_request(struct mmc_h
67     }
68 #endif
69
70+ /* request exclusive bus access */
71+ spi_bus_lock(host->spi->master);
72+
73     /* issue command; then optionally data and stop */
74     status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
75     if (status == 0 && mrq->data) {
76@@ -1093,6 +1096,9 @@ static void mmc_spi_request(struct mmc_h
77             mmc_cs_off(host);
78     }
79
80+ /* release the bus */
81+ spi_bus_unlock(host->spi->master);
82+
83     mmc_request_done(host->mmc, mrq);
84 }
85
86@@ -1289,23 +1295,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
87     return IRQ_HANDLED;
88 }
89
90-struct count_children {
91- unsigned n;
92- struct bus_type *bus;
93-};
94-
95-static int maybe_count_child(struct device *dev, void *c)
96-{
97- struct count_children *ccp = c;
98-
99- if (dev->bus == ccp->bus) {
100- if (ccp->n)
101- return -EBUSY;
102- ccp->n++;
103- }
104- return 0;
105-}
106-
107 static int mmc_spi_probe(struct spi_device *spi)
108 {
109     void *ones;
110@@ -1337,32 +1326,6 @@ static int mmc_spi_probe(struct spi_devi
111         return status;
112     }
113
114- /* We can use the bus safely iff nobody else will interfere with us.
115- * Most commands consist of one SPI message to issue a command, then
116- * several more to collect its response, then possibly more for data
117- * transfer. Clocking access to other devices during that period will
118- * corrupt the command execution.
119- *
120- * Until we have software primitives which guarantee non-interference,
121- * we'll aim for a hardware-level guarantee.
122- *
123- * REVISIT we can't guarantee another device won't be added later...
124- */
125- if (spi->master->num_chipselect > 1) {
126- struct count_children cc;
127-
128- cc.n = 0;
129- cc.bus = spi->dev.bus;
130- status = device_for_each_child(spi->dev.parent, &cc,
131- maybe_count_child);
132- if (status < 0) {
133- dev_err(&spi->dev, "can't share SPI bus\n");
134- return status;
135- }
136-
137- dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n");
138- }
139-
140     /* We need a supply of ones to transmit. This is the only time
141      * the CPU touches these, so cache coherency isn't a concern.
142      *
target/linux/generic/patches-2.6.33/910-backport-spi-bus-locking-api.patch
1From cf32b71e981ca63e8f349d8585ca2a3583b556e0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Mon, 28 Jun 2010 17:49:29 -0700
4Subject: [PATCH] spi/mmc_spi: SPI bus locking API, using mutex
5
6SPI bus locking API to allow exclusive access to the SPI bus, especially, but
7not limited to, for the mmc_spi driver.
8
9Coded according to an outline from Grant Likely; here is his
10specification (accidentally swapped function names corrected):
11
12It requires 3 things to be added to struct spi_master.
13- 1 Mutex
14- 1 spin lock
15- 1 flag.
16
17The mutex protects spi_sync, and provides sleeping "for free"
18The spinlock protects the atomic spi_async call.
19The flag is set when the lock is obtained, and checked while holding
20the spinlock in spi_async(). If the flag is checked, then spi_async()
21must fail immediately.
22
23The current runtime API looks like this:
24spi_async(struct spi_device*, struct spi_message*);
25spi_sync(struct spi_device*, struct spi_message*);
26
27The API needs to be extended to this:
28spi_async(struct spi_device*, struct spi_message*)
29spi_sync(struct spi_device*, struct spi_message*)
30spi_bus_lock(struct spi_master*) /* although struct spi_device* might
31be easier */
32spi_bus_unlock(struct spi_master*)
33spi_async_locked(struct spi_device*, struct spi_message*)
34spi_sync_locked(struct spi_device*, struct spi_message*)
35
36Drivers can only call the last two if they already hold the spi_master_lock().
37
38spi_bus_lock() obtains the mutex, obtains the spin lock, sets the
39flag, and releases the spin lock before returning. It doesn't even
40need to sleep while waiting for "in-flight" spi_transactions to
41complete because its purpose is to guarantee no additional
42transactions are added. It does not guarantee that the bus is idle.
43
44spi_bus_unlock() clears the flag and releases the mutex, which will
45wake up any waiters.
46
47The difference between spi_async() and spi_async_locked() is that the
48locked version bypasses the check of the lock flag. Both versions
49need to obtain the spinlock.
50
51The difference between spi_sync() and spi_sync_locked() is that
52spi_sync() must hold the mutex while enqueuing a new transfer.
53spi_sync_locked() doesn't because the mutex is already held. Note
54however that spi_sync must *not* continue to hold the mutex while
55waiting for the transfer to complete, otherwise only one transfer
56could be queued up at a time!
57
58Almost no code needs to be written. The current spi_async() and
59spi_sync() can probably be renamed to __spi_async() and __spi_sync()
60so that spi_async(), spi_sync(), spi_async_locked() and
61spi_sync_locked() can just become wrappers around the common code.
62
63spi_sync() is protected by a mutex because it can sleep
64spi_async() needs to be protected with a flag and a spinlock because
65it can be called atomically and must not sleep
66
67Signed-off-by: Ernst Schwab <eschwab@online.de>
68[grant.likely@secretlab.ca: use spin_lock_irqsave()]
69Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
70Tested-by: Matt Fleming <matt@console-pimps.org>
71Tested-by: Antonio Ospite <ospite@studenti.unina.it>
72---
73 drivers/spi/spi.c | 225 ++++++++++++++++++++++++++++++++++++++++-------
74 include/linux/spi/spi.h | 12 +++
75 2 files changed, 204 insertions(+), 33 deletions(-)
76
77--- a/drivers/spi/spi.c
78@@ -524,6 +524,10 @@ int spi_register_master(struct spi_maste
79         dynamic = 1;
80     }
81
82+ spin_lock_init(&master->bus_lock_spinlock);
83+ mutex_init(&master->bus_lock_mutex);
84+ master->bus_lock_flag = 0;
85+
86     /* register the device, then userspace will see it.
87      * registration fails if the bus ID is in use.
88      */
89@@ -663,6 +667,35 @@ int spi_setup(struct spi_device *spi)
90 }
91 EXPORT_SYMBOL_GPL(spi_setup);
92
93+static int __spi_async(struct spi_device *spi, struct spi_message *message)
94+{
95+ struct spi_master *master = spi->master;
96+
97+ /* Half-duplex links include original MicroWire, and ones with
98+ * only one data pin like SPI_3WIRE (switches direction) or where
99+ * either MOSI or MISO is missing. They can also be caused by
100+ * software limitations.
101+ */
102+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
103+ || (spi->mode & SPI_3WIRE)) {
104+ struct spi_transfer *xfer;
105+ unsigned flags = master->flags;
106+
107+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
108+ if (xfer->rx_buf && xfer->tx_buf)
109+ return -EINVAL;
110+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
111+ return -EINVAL;
112+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
113+ return -EINVAL;
114+ }
115+ }
116+
117+ message->spi = spi;
118+ message->status = -EINPROGRESS;
119+ return master->transfer(spi, message);
120+}
121+
122 /**
123  * spi_async - asynchronous SPI transfer
124  * @spi: device with which data will be exchanged
125@@ -695,33 +728,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
126 int spi_async(struct spi_device *spi, struct spi_message *message)
127 {
128     struct spi_master *master = spi->master;
129+ int ret;
130+ unsigned long flags;
131
132- /* Half-duplex links include original MicroWire, and ones with
133- * only one data pin like SPI_3WIRE (switches direction) or where
134- * either MOSI or MISO is missing. They can also be caused by
135- * software limitations.
136- */
137- if ((master->flags & SPI_MASTER_HALF_DUPLEX)
138- || (spi->mode & SPI_3WIRE)) {
139- struct spi_transfer *xfer;
140- unsigned flags = master->flags;
141+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
142
143- list_for_each_entry(xfer, &message->transfers, transfer_list) {
144- if (xfer->rx_buf && xfer->tx_buf)
145- return -EINVAL;
146- if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
147- return -EINVAL;
148- if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
149- return -EINVAL;
150- }
151- }
152+ if (master->bus_lock_flag)
153+ ret = -EBUSY;
154+ else
155+ ret = __spi_async(spi, message);
156
157- message->spi = spi;
158- message->status = -EINPROGRESS;
159- return master->transfer(spi, message);
160+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
161+
162+ return ret;
163 }
164 EXPORT_SYMBOL_GPL(spi_async);
165
166+/**
167+ * spi_async_locked - version of spi_async with exclusive bus usage
168+ * @spi: device with which data will be exchanged
169+ * @message: describes the data transfers, including completion callback
170+ * Context: any (irqs may be blocked, etc)
171+ *
172+ * This call may be used in_irq and other contexts which can't sleep,
173+ * as well as from task contexts which can sleep.
174+ *
175+ * The completion callback is invoked in a context which can't sleep.
176+ * Before that invocation, the value of message->status is undefined.
177+ * When the callback is issued, message->status holds either zero (to
178+ * indicate complete success) or a negative error code. After that
179+ * callback returns, the driver which issued the transfer request may
180+ * deallocate the associated memory; it's no longer in use by any SPI
181+ * core or controller driver code.
182+ *
183+ * Note that although all messages to a spi_device are handled in
184+ * FIFO order, messages may go to different devices in other orders.
185+ * Some device might be higher priority, or have various "hard" access
186+ * time requirements, for example.
187+ *
188+ * On detection of any fault during the transfer, processing of
189+ * the entire message is aborted, and the device is deselected.
190+ * Until returning from the associated message completion callback,
191+ * no other spi_message queued to that device will be processed.
192+ * (This rule applies equally to all the synchronous transfer calls,
193+ * which are wrappers around this core asynchronous primitive.)
194+ */
195+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
196+{
197+ struct spi_master *master = spi->master;
198+ int ret;
199+ unsigned long flags;
200+
201+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
202+
203+ ret = __spi_async(spi, message);
204+
205+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
206+
207+ return ret;
208+
209+}
210+EXPORT_SYMBOL_GPL(spi_async_locked);
211+
212
213 /*-------------------------------------------------------------------------*/
214
215@@ -735,6 +803,32 @@ static void spi_complete(void *arg)
216     complete(arg);
217 }
218
219+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
220+ int bus_locked)
221+{
222+ DECLARE_COMPLETION_ONSTACK(done);
223+ int status;
224+ struct spi_master *master = spi->master;
225+
226+ message->complete = spi_complete;
227+ message->context = &done;
228+
229+ if (!bus_locked)
230+ mutex_lock(&master->bus_lock_mutex);
231+
232+ status = spi_async_locked(spi, message);
233+
234+ if (!bus_locked)
235+ mutex_unlock(&master->bus_lock_mutex);
236+
237+ if (status == 0) {
238+ wait_for_completion(&done);
239+ status = message->status;
240+ }
241+ message->context = NULL;
242+ return status;
243+}
244+
245 /**
246  * spi_sync - blocking/synchronous SPI data transfers
247  * @spi: device with which data will be exchanged
248@@ -758,21 +852,86 @@ static void spi_complete(void *arg)
249  */
250 int spi_sync(struct spi_device *spi, struct spi_message *message)
251 {
252- DECLARE_COMPLETION_ONSTACK(done);
253- int status;
254-
255- message->complete = spi_complete;
256- message->context = &done;
257- status = spi_async(spi, message);
258- if (status == 0) {
259- wait_for_completion(&done);
260- status = message->status;
261- }
262- message->context = NULL;
263- return status;
264+ return __spi_sync(spi, message, 0);
265 }
266 EXPORT_SYMBOL_GPL(spi_sync);
267
268+/**
269+ * spi_sync_locked - version of spi_sync with exclusive bus usage
270+ * @spi: device with which data will be exchanged
271+ * @message: describes the data transfers
272+ * Context: can sleep
273+ *
274+ * This call may only be used from a context that may sleep. The sleep
275+ * is non-interruptible, and has no timeout. Low-overhead controller
276+ * drivers may DMA directly into and out of the message buffers.
277+ *
278+ * This call should be used by drivers that require exclusive access to the
279+ * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must
280+ * be released by a spi_bus_unlock call when the exclusive access is over.
281+ *
282+ * It returns zero on success, else a negative error code.
283+ */
284+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
285+{
286+ return __spi_sync(spi, message, 1);
287+}
288+EXPORT_SYMBOL_GPL(spi_sync_locked);
289+
290+/**
291+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
292+ * @master: SPI bus master that should be locked for exclusive bus access
293+ * Context: can sleep
294+ *
295+ * This call may only be used from a context that may sleep. The sleep
296+ * is non-interruptible, and has no timeout.
297+ *
298+ * This call should be used by drivers that require exclusive access to the
299+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
300+ * exclusive access is over. Data transfer must be done by spi_sync_locked
301+ * and spi_async_locked calls when the SPI bus lock is held.
302+ *
303+ * It returns zero on success, else a negative error code.
304+ */
305+int spi_bus_lock(struct spi_master *master)
306+{
307+ unsigned long flags;
308+
309+ mutex_lock(&master->bus_lock_mutex);
310+
311+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
312+ master->bus_lock_flag = 1;
313+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
314+
315+ /* mutex remains locked until spi_bus_unlock is called */
316+
317+ return 0;
318+}
319+EXPORT_SYMBOL_GPL(spi_bus_lock);
320+
321+/**
322+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
323+ * @master: SPI bus master that was locked for exclusive bus access
324+ * Context: can sleep
325+ *
326+ * This call may only be used from a context that may sleep. The sleep
327+ * is non-interruptible, and has no timeout.
328+ *
329+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
330+ * call.
331+ *
332+ * It returns zero on success, else a negative error code.
333+ */
334+int spi_bus_unlock(struct spi_master *master)
335+{
336+ master->bus_lock_flag = 0;
337+
338+ mutex_unlock(&master->bus_lock_mutex);
339+
340+ return 0;
341+}
342+EXPORT_SYMBOL_GPL(spi_bus_unlock);
343+
344 /* portable code must never pass more than 32 bytes */
345 #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
346
347--- a/include/linux/spi/spi.h
348@@ -261,6 +261,13 @@ struct spi_master {
349 #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
350 #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
351
352+ /* lock and mutex for SPI bus locking */
353+ spinlock_t bus_lock_spinlock;
354+ struct mutex bus_lock_mutex;
355+
356+ /* flag indicating that the SPI bus is locked for exclusive use */
357+ bool bus_lock_flag;
358+
359     /* Setup mode and clock, etc (spi driver may call many times).
360      *
361      * IMPORTANT: this may be called when transfers to another
362@@ -541,6 +548,8 @@ static inline void spi_message_free(stru
363
364 extern int spi_setup(struct spi_device *spi);
365 extern int spi_async(struct spi_device *spi, struct spi_message *message);
366+extern int spi_async_locked(struct spi_device *spi,
367+ struct spi_message *message);
368
369 /*---------------------------------------------------------------------------*/
370
371@@ -550,6 +559,9 @@ extern int spi_async(struct spi_device *
372  */
373
374 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
375+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
376+extern int spi_bus_lock(struct spi_master *master);
377+extern int spi_bus_unlock(struct spi_master *master);
378
379 /**
380  * spi_write - SPI synchronous write
target/linux/generic/patches-2.6.33/911-backport-mmc_spi-use-spi-bus-locking-api.patch
1From 4751c1c74bc7b596db5de0c93be1a22a570145c0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Thu, 18 Feb 2010 12:47:46 +0100
4Subject: [PATCH] spi/mmc_spi: mmc_spi adaptations for SPI bus locking API
5
6Modification of the mmc_spi driver to use the SPI bus locking API.
7With this, the mmc_spi driver can be used together with other SPI
8devices on the same SPI bus. The exclusive access to the SPI bus is
9now managed in the SPI layer. The counting of chip selects in the probe
10function is no longer needed.
11
12Signed-off-by: Ernst Schwab <eschwab@online.de>
13Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
14Tested-by: Matt Fleming <matt@console-pimps.org>
15Tested-by: Antonio Ospite <ospite@studenti.unina.it>
16---
17 drivers/mmc/host/mmc_spi.c | 59 ++++++++-----------------------------------
18 1 files changed, 11 insertions(+), 48 deletions(-)
19
20--- a/drivers/mmc/host/mmc_spi.c
21@@ -181,7 +181,7 @@ mmc_spi_readbytes(struct mmc_spi_host *h
22                 host->data_dma, sizeof(*host->data),
23                 DMA_FROM_DEVICE);
24
25- status = spi_sync(host->spi, &host->readback);
26+ status = spi_sync_locked(host->spi, &host->readback);
27
28     if (host->dma_dev)
29         dma_sync_single_for_cpu(host->dma_dev,
30@@ -540,7 +540,7 @@ mmc_spi_command_send(struct mmc_spi_host
31                 host->data_dma, sizeof(*host->data),
32                 DMA_BIDIRECTIONAL);
33     }
34- status = spi_sync(host->spi, &host->m);
35+ status = spi_sync_locked(host->spi, &host->m);
36
37     if (host->dma_dev)
38         dma_sync_single_for_cpu(host->dma_dev,
39@@ -684,7 +684,7 @@ mmc_spi_writeblock(struct mmc_spi_host *
40                 host->data_dma, sizeof(*scratch),
41                 DMA_BIDIRECTIONAL);
42
43- status = spi_sync(spi, &host->m);
44+ status = spi_sync_locked(spi, &host->m);
45
46     if (status != 0) {
47         dev_dbg(&spi->dev, "write error (%d)\n", status);
48@@ -821,7 +821,7 @@ mmc_spi_readblock(struct mmc_spi_host *h
49                 DMA_FROM_DEVICE);
50     }
51
52- status = spi_sync(spi, &host->m);
53+ status = spi_sync_locked(spi, &host->m);
54
55     if (host->dma_dev) {
56         dma_sync_single_for_cpu(host->dma_dev,
57@@ -1017,7 +1017,7 @@ mmc_spi_data_do(struct mmc_spi_host *hos
58                     host->data_dma, sizeof(*scratch),
59                     DMA_BIDIRECTIONAL);
60
61- tmp = spi_sync(spi, &host->m);
62+ tmp = spi_sync_locked(spi, &host->m);
63
64         if (host->dma_dev)
65             dma_sync_single_for_cpu(host->dma_dev,
66@@ -1083,6 +1083,9 @@ static void mmc_spi_request(struct mmc_h
67     }
68 #endif
69
70+ /* request exclusive bus access */
71+ spi_bus_lock(host->spi->master);
72+
73     /* issue command; then optionally data and stop */
74     status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
75     if (status == 0 && mrq->data) {
76@@ -1093,6 +1096,9 @@ static void mmc_spi_request(struct mmc_h
77             mmc_cs_off(host);
78     }
79
80+ /* release the bus */
81+ spi_bus_unlock(host->spi->master);
82+
83     mmc_request_done(host->mmc, mrq);
84 }
85
86@@ -1289,23 +1295,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
87     return IRQ_HANDLED;
88 }
89
90-struct count_children {
91- unsigned n;
92- struct bus_type *bus;
93-};
94-
95-static int maybe_count_child(struct device *dev, void *c)
96-{
97- struct count_children *ccp = c;
98-
99- if (dev->bus == ccp->bus) {
100- if (ccp->n)
101- return -EBUSY;
102- ccp->n++;
103- }
104- return 0;
105-}
106-
107 static int mmc_spi_probe(struct spi_device *spi)
108 {
109     void *ones;
110@@ -1337,32 +1326,6 @@ static int mmc_spi_probe(struct spi_devi
111         return status;
112     }
113
114- /* We can use the bus safely iff nobody else will interfere with us.
115- * Most commands consist of one SPI message to issue a command, then
116- * several more to collect its response, then possibly more for data
117- * transfer. Clocking access to other devices during that period will
118- * corrupt the command execution.
119- *
120- * Until we have software primitives which guarantee non-interference,
121- * we'll aim for a hardware-level guarantee.
122- *
123- * REVISIT we can't guarantee another device won't be added later...
124- */
125- if (spi->master->num_chipselect > 1) {
126- struct count_children cc;
127-
128- cc.n = 0;
129- cc.bus = spi->dev.bus;
130- status = device_for_each_child(spi->dev.parent, &cc,
131- maybe_count_child);
132- if (status < 0) {
133- dev_err(&spi->dev, "can't share SPI bus\n");
134- return status;
135- }
136-
137- dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n");
138- }
139-
140     /* We need a supply of ones to transmit. This is the only time
141      * the CPU touches these, so cache coherency isn't a concern.
142      *
target/linux/generic/patches-2.6.34/910-backport-spi-bus-locking-api.patch
1From cf32b71e981ca63e8f349d8585ca2a3583b556e0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Mon, 28 Jun 2010 17:49:29 -0700
4Subject: [PATCH] spi/mmc_spi: SPI bus locking API, using mutex
5
6SPI bus locking API to allow exclusive access to the SPI bus, especially, but
7not limited to, for the mmc_spi driver.
8
9Coded according to an outline from Grant Likely; here is his
10specification (accidentally swapped function names corrected):
11
12It requires 3 things to be added to struct spi_master.
13- 1 Mutex
14- 1 spin lock
15- 1 flag.
16
17The mutex protects spi_sync, and provides sleeping "for free"
18The spinlock protects the atomic spi_async call.
19The flag is set when the lock is obtained, and checked while holding
20the spinlock in spi_async(). If the flag is checked, then spi_async()
21must fail immediately.
22
23The current runtime API looks like this:
24spi_async(struct spi_device*, struct spi_message*);
25spi_sync(struct spi_device*, struct spi_message*);
26
27The API needs to be extended to this:
28spi_async(struct spi_device*, struct spi_message*)
29spi_sync(struct spi_device*, struct spi_message*)
30spi_bus_lock(struct spi_master*) /* although struct spi_device* might
31be easier */
32spi_bus_unlock(struct spi_master*)
33spi_async_locked(struct spi_device*, struct spi_message*)
34spi_sync_locked(struct spi_device*, struct spi_message*)
35
36Drivers can only call the last two if they already hold the spi_master_lock().
37
38spi_bus_lock() obtains the mutex, obtains the spin lock, sets the
39flag, and releases the spin lock before returning. It doesn't even
40need to sleep while waiting for "in-flight" spi_transactions to
41complete because its purpose is to guarantee no additional
42transactions are added. It does not guarantee that the bus is idle.
43
44spi_bus_unlock() clears the flag and releases the mutex, which will
45wake up any waiters.
46
47The difference between spi_async() and spi_async_locked() is that the
48locked version bypasses the check of the lock flag. Both versions
49need to obtain the spinlock.
50
51The difference between spi_sync() and spi_sync_locked() is that
52spi_sync() must hold the mutex while enqueuing a new transfer.
53spi_sync_locked() doesn't because the mutex is already held. Note
54however that spi_sync must *not* continue to hold the mutex while
55waiting for the transfer to complete, otherwise only one transfer
56could be queued up at a time!
57
58Almost no code needs to be written. The current spi_async() and
59spi_sync() can probably be renamed to __spi_async() and __spi_sync()
60so that spi_async(), spi_sync(), spi_async_locked() and
61spi_sync_locked() can just become wrappers around the common code.
62
63spi_sync() is protected by a mutex because it can sleep
64spi_async() needs to be protected with a flag and a spinlock because
65it can be called atomically and must not sleep
66
67Signed-off-by: Ernst Schwab <eschwab@online.de>
68[grant.likely@secretlab.ca: use spin_lock_irqsave()]
69Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
70Tested-by: Matt Fleming <matt@console-pimps.org>
71Tested-by: Antonio Ospite <ospite@studenti.unina.it>
72---
73 drivers/spi/spi.c | 225 ++++++++++++++++++++++++++++++++++++++++-------
74 include/linux/spi/spi.h | 12 +++
75 2 files changed, 204 insertions(+), 33 deletions(-)
76
77--- a/drivers/spi/spi.c
78@@ -527,6 +527,10 @@ int spi_register_master(struct spi_maste
79         dynamic = 1;
80     }
81
82+ spin_lock_init(&master->bus_lock_spinlock);
83+ mutex_init(&master->bus_lock_mutex);
84+ master->bus_lock_flag = 0;
85+
86     /* register the device, then userspace will see it.
87      * registration fails if the bus ID is in use.
88      */
89@@ -666,6 +670,35 @@ int spi_setup(struct spi_device *spi)
90 }
91 EXPORT_SYMBOL_GPL(spi_setup);
92
93+static int __spi_async(struct spi_device *spi, struct spi_message *message)
94+{
95+ struct spi_master *master = spi->master;
96+
97+ /* Half-duplex links include original MicroWire, and ones with
98+ * only one data pin like SPI_3WIRE (switches direction) or where
99+ * either MOSI or MISO is missing. They can also be caused by
100+ * software limitations.
101+ */
102+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
103+ || (spi->mode & SPI_3WIRE)) {
104+ struct spi_transfer *xfer;
105+ unsigned flags = master->flags;
106+
107+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
108+ if (xfer->rx_buf && xfer->tx_buf)
109+ return -EINVAL;
110+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
111+ return -EINVAL;
112+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
113+ return -EINVAL;
114+ }
115+ }
116+
117+ message->spi = spi;
118+ message->status = -EINPROGRESS;
119+ return master->transfer(spi, message);
120+}
121+
122 /**
123  * spi_async - asynchronous SPI transfer
124  * @spi: device with which data will be exchanged
125@@ -698,33 +731,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
126 int spi_async(struct spi_device *spi, struct spi_message *message)
127 {
128     struct spi_master *master = spi->master;
129+ int ret;
130+ unsigned long flags;
131
132- /* Half-duplex links include original MicroWire, and ones with
133- * only one data pin like SPI_3WIRE (switches direction) or where
134- * either MOSI or MISO is missing. They can also be caused by
135- * software limitations.
136- */
137- if ((master->flags & SPI_MASTER_HALF_DUPLEX)
138- || (spi->mode & SPI_3WIRE)) {
139- struct spi_transfer *xfer;
140- unsigned flags = master->flags;
141+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
142
143- list_for_each_entry(xfer, &message->transfers, transfer_list) {
144- if (xfer->rx_buf && xfer->tx_buf)
145- return -EINVAL;
146- if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
147- return -EINVAL;
148- if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
149- return -EINVAL;
150- }
151- }
152+ if (master->bus_lock_flag)
153+ ret = -EBUSY;
154+ else
155+ ret = __spi_async(spi, message);
156
157- message->spi = spi;
158- message->status = -EINPROGRESS;
159- return master->transfer(spi, message);
160+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
161+
162+ return ret;
163 }
164 EXPORT_SYMBOL_GPL(spi_async);
165
166+/**
167+ * spi_async_locked - version of spi_async with exclusive bus usage
168+ * @spi: device with which data will be exchanged
169+ * @message: describes the data transfers, including completion callback
170+ * Context: any (irqs may be blocked, etc)
171+ *
172+ * This call may be used in_irq and other contexts which can't sleep,
173+ * as well as from task contexts which can sleep.
174+ *
175+ * The completion callback is invoked in a context which can't sleep.
176+ * Before that invocation, the value of message->status is undefined.
177+ * When the callback is issued, message->status holds either zero (to
178+ * indicate complete success) or a negative error code. After that
179+ * callback returns, the driver which issued the transfer request may
180+ * deallocate the associated memory; it's no longer in use by any SPI
181+ * core or controller driver code.
182+ *
183+ * Note that although all messages to a spi_device are handled in
184+ * FIFO order, messages may go to different devices in other orders.
185+ * Some device might be higher priority, or have various "hard" access
186+ * time requirements, for example.
187+ *
188+ * On detection of any fault during the transfer, processing of
189+ * the entire message is aborted, and the device is deselected.
190+ * Until returning from the associated message completion callback,
191+ * no other spi_message queued to that device will be processed.
192+ * (This rule applies equally to all the synchronous transfer calls,
193+ * which are wrappers around this core asynchronous primitive.)
194+ */
195+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
196+{
197+ struct spi_master *master = spi->master;
198+ int ret;
199+ unsigned long flags;
200+
201+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
202+
203+ ret = __spi_async(spi, message);
204+
205+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
206+
207+ return ret;
208+
209+}
210+EXPORT_SYMBOL_GPL(spi_async_locked);
211+
212
213 /*-------------------------------------------------------------------------*/
214
215@@ -738,6 +806,32 @@ static void spi_complete(void *arg)
216     complete(arg);
217 }
218
219+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
220+ int bus_locked)
221+{
222+ DECLARE_COMPLETION_ONSTACK(done);
223+ int status;
224+ struct spi_master *master = spi->master;
225+
226+ message->complete = spi_complete;
227+ message->context = &done;
228+
229+ if (!bus_locked)
230+ mutex_lock(&master->bus_lock_mutex);
231+
232+ status = spi_async_locked(spi, message);
233+
234+ if (!bus_locked)
235+ mutex_unlock(&master->bus_lock_mutex);
236+
237+ if (status == 0) {
238+ wait_for_completion(&done);
239+ status = message->status;
240+ }
241+ message->context = NULL;
242+ return status;
243+}
244+
245 /**
246  * spi_sync - blocking/synchronous SPI data transfers
247  * @spi: device with which data will be exchanged
248@@ -761,21 +855,86 @@ static void spi_complete(void *arg)
249  */
250 int spi_sync(struct spi_device *spi, struct spi_message *message)
251 {
252- DECLARE_COMPLETION_ONSTACK(done);
253- int status;
254-
255- message->complete = spi_complete;
256- message->context = &done;
257- status = spi_async(spi, message);
258- if (status == 0) {
259- wait_for_completion(&done);
260- status = message->status;
261- }
262- message->context = NULL;
263- return status;
264+ return __spi_sync(spi, message, 0);
265 }
266 EXPORT_SYMBOL_GPL(spi_sync);
267
268+/**
269+ * spi_sync_locked - version of spi_sync with exclusive bus usage
270+ * @spi: device with which data will be exchanged
271+ * @message: describes the data transfers
272+ * Context: can sleep
273+ *
274+ * This call may only be used from a context that may sleep. The sleep
275+ * is non-interruptible, and has no timeout. Low-overhead controller
276+ * drivers may DMA directly into and out of the message buffers.
277+ *
278+ * This call should be used by drivers that require exclusive access to the
279+ * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
280+ * be released by a spi_bus_unlock call when the exclusive access is over.
281+ *
282+ * It returns zero on success, else a negative error code.
283+ */
284+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
285+{
286+ return __spi_sync(spi, message, 1);
287+}
288+EXPORT_SYMBOL_GPL(spi_sync_locked);
289+
290+/**
291+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
292+ * @master: SPI bus master that should be locked for exclusive bus access
293+ * Context: can sleep
294+ *
295+ * This call may only be used from a context that may sleep. The sleep
296+ * is non-interruptible, and has no timeout.
297+ *
298+ * This call should be used by drivers that require exclusive access to the
299+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
300+ * exclusive access is over. Data transfer must be done by spi_sync_locked
301+ * and spi_async_locked calls when the SPI bus lock is held.
302+ *
303+ * It returns zero on success, else a negative error code.
304+ */
305+int spi_bus_lock(struct spi_master *master)
306+{
307+ unsigned long flags;
308+
309+ mutex_lock(&master->bus_lock_mutex);
310+
311+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
312+ master->bus_lock_flag = 1;
313+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
314+
315+ /* mutex remains locked until spi_bus_unlock is called */
316+
317+ return 0;
318+}
319+EXPORT_SYMBOL_GPL(spi_bus_lock);
320+
321+/**
322+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
323+ * @master: SPI bus master that was locked for exclusive bus access
324+ * Context: can sleep
325+ *
326+ * This call may only be used from a context that may sleep. The sleep
327+ * is non-interruptible, and has no timeout.
328+ *
329+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
330+ * call.
331+ *
332+ * It returns zero on success, else a negative error code.
333+ */
334+int spi_bus_unlock(struct spi_master *master)
335+{
336+ master->bus_lock_flag = 0;
337+
338+ mutex_unlock(&master->bus_lock_mutex);
339+
340+ return 0;
341+}
342+EXPORT_SYMBOL_GPL(spi_bus_unlock);
343+
344 /* portable code must never pass more than 32 bytes */
345 #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
346
347--- a/include/linux/spi/spi.h
348@@ -262,6 +262,13 @@ struct spi_master {
349 #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
350 #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
351
352+ /* lock and mutex for SPI bus locking */
353+ spinlock_t bus_lock_spinlock;
354+ struct mutex bus_lock_mutex;
355+
356+ /* flag indicating that the SPI bus is locked for exclusive use */
357+ bool bus_lock_flag;
358+
359     /* Setup mode and clock, etc (spi driver may call many times).
360      *
361      * IMPORTANT: this may be called when transfers to another
362@@ -542,6 +549,8 @@ static inline void spi_message_free(stru
363
364 extern int spi_setup(struct spi_device *spi);
365 extern int spi_async(struct spi_device *spi, struct spi_message *message);
366+extern int spi_async_locked(struct spi_device *spi,
367+ struct spi_message *message);
368
369 /*---------------------------------------------------------------------------*/
370
371@@ -551,6 +560,9 @@ extern int spi_async(struct spi_device *
372  */
373
374 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
375+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
376+extern int spi_bus_lock(struct spi_master *master);
377+extern int spi_bus_unlock(struct spi_master *master);
378
379 /**
380  * spi_write - SPI synchronous write
target/linux/generic/patches-2.6.34/911-backport-mmc_spi-use-spi-bus-locking-api.patch
1From 4751c1c74bc7b596db5de0c93be1a22a570145c0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Thu, 18 Feb 2010 12:47:46 +0100
4Subject: [PATCH] spi/mmc_spi: mmc_spi adaptations for SPI bus locking API
5
6Modification of the mmc_spi driver to use the SPI bus locking API.
7With this, the mmc_spi driver can be used together with other SPI
8devices on the same SPI bus. The exclusive access to the SPI bus is
9now managed in the SPI layer. The counting of chip selects in the probe
10function is no longer needed.
11
12Signed-off-by: Ernst Schwab <eschwab@online.de>
13Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
14Tested-by: Matt Fleming <matt@console-pimps.org>
15Tested-by: Antonio Ospite <ospite@studenti.unina.it>
16---
17 drivers/mmc/host/mmc_spi.c | 59 ++++++++-----------------------------------
18 1 files changed, 11 insertions(+), 48 deletions(-)
19
20--- a/drivers/mmc/host/mmc_spi.c
21@@ -182,7 +182,7 @@ mmc_spi_readbytes(struct mmc_spi_host *h
22                 host->data_dma, sizeof(*host->data),
23                 DMA_FROM_DEVICE);
24
25- status = spi_sync(host->spi, &host->readback);
26+ status = spi_sync_locked(host->spi, &host->readback);
27
28     if (host->dma_dev)
29         dma_sync_single_for_cpu(host->dma_dev,
30@@ -541,7 +541,7 @@ mmc_spi_command_send(struct mmc_spi_host
31                 host->data_dma, sizeof(*host->data),
32                 DMA_BIDIRECTIONAL);
33     }
34- status = spi_sync(host->spi, &host->m);
35+ status = spi_sync_locked(host->spi, &host->m);
36
37     if (host->dma_dev)
38         dma_sync_single_for_cpu(host->dma_dev,
39@@ -685,7 +685,7 @@ mmc_spi_writeblock(struct mmc_spi_host *
40                 host->data_dma, sizeof(*scratch),
41                 DMA_BIDIRECTIONAL);
42
43- status = spi_sync(spi, &host->m);
44+ status = spi_sync_locked(spi, &host->m);
45
46     if (status != 0) {
47         dev_dbg(&spi->dev, "write error (%d)\n", status);
48@@ -822,7 +822,7 @@ mmc_spi_readblock(struct mmc_spi_host *h
49                 DMA_FROM_DEVICE);
50     }
51
52- status = spi_sync(spi, &host->m);
53+ status = spi_sync_locked(spi, &host->m);
54
55     if (host->dma_dev) {
56         dma_sync_single_for_cpu(host->dma_dev,
57@@ -1018,7 +1018,7 @@ mmc_spi_data_do(struct mmc_spi_host *hos
58                     host->data_dma, sizeof(*scratch),
59                     DMA_BIDIRECTIONAL);
60
61- tmp = spi_sync(spi, &host->m);
62+ tmp = spi_sync_locked(spi, &host->m);
63
64         if (host->dma_dev)
65             dma_sync_single_for_cpu(host->dma_dev,
66@@ -1084,6 +1084,9 @@ static void mmc_spi_request(struct mmc_h
67     }
68 #endif
69
70+ /* request exclusive bus access */
71+ spi_bus_lock(host->spi->master);
72+
73     /* issue command; then optionally data and stop */
74     status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
75     if (status == 0 && mrq->data) {
76@@ -1094,6 +1097,9 @@ static void mmc_spi_request(struct mmc_h
77             mmc_cs_off(host);
78     }
79
80+ /* release the bus */
81+ spi_bus_unlock(host->spi->master);
82+
83     mmc_request_done(host->mmc, mrq);
84 }
85
86@@ -1290,23 +1296,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
87     return IRQ_HANDLED;
88 }
89
90-struct count_children {
91- unsigned n;
92- struct bus_type *bus;
93-};
94-
95-static int maybe_count_child(struct device *dev, void *c)
96-{
97- struct count_children *ccp = c;
98-
99- if (dev->bus == ccp->bus) {
100- if (ccp->n)
101- return -EBUSY;
102- ccp->n++;
103- }
104- return 0;
105-}
106-
107 static int mmc_spi_probe(struct spi_device *spi)
108 {
109     void *ones;
110@@ -1338,32 +1327,6 @@ static int mmc_spi_probe(struct spi_devi
111         return status;
112     }
113
114- /* We can use the bus safely iff nobody else will interfere with us.
115- * Most commands consist of one SPI message to issue a command, then
116- * several more to collect its response, then possibly more for data
117- * transfer. Clocking access to other devices during that period will
118- * corrupt the command execution.
119- *
120- * Until we have software primitives which guarantee non-interference,
121- * we'll aim for a hardware-level guarantee.
122- *
123- * REVISIT we can't guarantee another device won't be added later...
124- */
125- if (spi->master->num_chipselect > 1) {
126- struct count_children cc;
127-
128- cc.n = 0;
129- cc.bus = spi->dev.bus;
130- status = device_for_each_child(spi->dev.parent, &cc,
131- maybe_count_child);
132- if (status < 0) {
133- dev_err(&spi->dev, "can't share SPI bus\n");
134- return status;
135- }
136-
137- dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n");
138- }
139-
140     /* We need a supply of ones to transmit. This is the only time
141      * the CPU touches these, so cache coherency isn't a concern.
142      *
target/linux/generic/patches-2.6.35/910-backport-spi-bus-locking-api.patch
1From cf32b71e981ca63e8f349d8585ca2a3583b556e0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Mon, 28 Jun 2010 17:49:29 -0700
4Subject: [PATCH] spi/mmc_spi: SPI bus locking API, using mutex
5
6SPI bus locking API to allow exclusive access to the SPI bus, especially, but
7not limited to, for the mmc_spi driver.
8
9Coded according to an outline from Grant Likely; here is his
10specification (accidentally swapped function names corrected):
11
12It requires 3 things to be added to struct spi_master.
13- 1 Mutex
14- 1 spin lock
15- 1 flag.
16
17The mutex protects spi_sync, and provides sleeping "for free"
18The spinlock protects the atomic spi_async call.
19The flag is set when the lock is obtained, and checked while holding
20the spinlock in spi_async(). If the flag is checked, then spi_async()
21must fail immediately.
22
23The current runtime API looks like this:
24spi_async(struct spi_device*, struct spi_message*);
25spi_sync(struct spi_device*, struct spi_message*);
26
27The API needs to be extended to this:
28spi_async(struct spi_device*, struct spi_message*)
29spi_sync(struct spi_device*, struct spi_message*)
30spi_bus_lock(struct spi_master*) /* although struct spi_device* might
31be easier */
32spi_bus_unlock(struct spi_master*)
33spi_async_locked(struct spi_device*, struct spi_message*)
34spi_sync_locked(struct spi_device*, struct spi_message*)
35
36Drivers can only call the last two if they already hold the spi_master_lock().
37
38spi_bus_lock() obtains the mutex, obtains the spin lock, sets the
39flag, and releases the spin lock before returning. It doesn't even
40need to sleep while waiting for "in-flight" spi_transactions to
41complete because its purpose is to guarantee no additional
42transactions are added. It does not guarantee that the bus is idle.
43
44spi_bus_unlock() clears the flag and releases the mutex, which will
45wake up any waiters.
46
47The difference between spi_async() and spi_async_locked() is that the
48locked version bypasses the check of the lock flag. Both versions
49need to obtain the spinlock.
50
51The difference between spi_sync() and spi_sync_locked() is that
52spi_sync() must hold the mutex while enqueuing a new transfer.
53spi_sync_locked() doesn't because the mutex is already held. Note
54however that spi_sync must *not* continue to hold the mutex while
55waiting for the transfer to complete, otherwise only one transfer
56could be queued up at a time!
57
58Almost no code needs to be written. The current spi_async() and
59spi_sync() can probably be renamed to __spi_async() and __spi_sync()
60so that spi_async(), spi_sync(), spi_async_locked() and
61spi_sync_locked() can just become wrappers around the common code.
62
63spi_sync() is protected by a mutex because it can sleep
64spi_async() needs to be protected with a flag and a spinlock because
65it can be called atomically and must not sleep
66
67Signed-off-by: Ernst Schwab <eschwab@online.de>
68[grant.likely@secretlab.ca: use spin_lock_irqsave()]
69Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
70Tested-by: Matt Fleming <matt@console-pimps.org>
71Tested-by: Antonio Ospite <ospite@studenti.unina.it>
72---
73 drivers/spi/spi.c | 225 ++++++++++++++++++++++++++++++++++++++++-------
74 include/linux/spi/spi.h | 12 +++
75 2 files changed, 204 insertions(+), 33 deletions(-)
76
77--- a/drivers/spi/spi.c
78@@ -527,6 +527,10 @@ int spi_register_master(struct spi_maste
79         dynamic = 1;
80     }
81
82+ spin_lock_init(&master->bus_lock_spinlock);
83+ mutex_init(&master->bus_lock_mutex);
84+ master->bus_lock_flag = 0;
85+
86     /* register the device, then userspace will see it.
87      * registration fails if the bus ID is in use.
88      */
89@@ -666,6 +670,35 @@ int spi_setup(struct spi_device *spi)
90 }
91 EXPORT_SYMBOL_GPL(spi_setup);
92
93+static int __spi_async(struct spi_device *spi, struct spi_message *message)
94+{
95+ struct spi_master *master = spi->master;
96+
97+ /* Half-duplex links include original MicroWire, and ones with
98+ * only one data pin like SPI_3WIRE (switches direction) or where
99+ * either MOSI or MISO is missing. They can also be caused by
100+ * software limitations.
101+ */
102+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
103+ || (spi->mode & SPI_3WIRE)) {
104+ struct spi_transfer *xfer;
105+ unsigned flags = master->flags;
106+
107+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
108+ if (xfer->rx_buf && xfer->tx_buf)
109+ return -EINVAL;
110+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
111+ return -EINVAL;
112+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
113+ return -EINVAL;
114+ }
115+ }
116+
117+ message->spi = spi;
118+ message->status = -EINPROGRESS;
119+ return master->transfer(spi, message);
120+}
121+
122 /**
123  * spi_async - asynchronous SPI transfer
124  * @spi: device with which data will be exchanged
125@@ -698,33 +731,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
126 int spi_async(struct spi_device *spi, struct spi_message *message)
127 {
128     struct spi_master *master = spi->master;
129+ int ret;
130+ unsigned long flags;
131
132- /* Half-duplex links include original MicroWire, and ones with
133- * only one data pin like SPI_3WIRE (switches direction) or where
134- * either MOSI or MISO is missing. They can also be caused by
135- * software limitations.
136- */
137- if ((master->flags & SPI_MASTER_HALF_DUPLEX)
138- || (spi->mode & SPI_3WIRE)) {
139- struct spi_transfer *xfer;
140- unsigned flags = master->flags;
141+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
142
143- list_for_each_entry(xfer, &message->transfers, transfer_list) {
144- if (xfer->rx_buf && xfer->tx_buf)
145- return -EINVAL;
146- if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
147- return -EINVAL;
148- if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
149- return -EINVAL;
150- }
151- }
152+ if (master->bus_lock_flag)
153+ ret = -EBUSY;
154+ else
155+ ret = __spi_async(spi, message);
156
157- message->spi = spi;
158- message->status = -EINPROGRESS;
159- return master->transfer(spi, message);
160+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
161+
162+ return ret;
163 }
164 EXPORT_SYMBOL_GPL(spi_async);
165
166+/**
167+ * spi_async_locked - version of spi_async with exclusive bus usage
168+ * @spi: device with which data will be exchanged
169+ * @message: describes the data transfers, including completion callback
170+ * Context: any (irqs may be blocked, etc)
171+ *
172+ * This call may be used in_irq and other contexts which can't sleep,
173+ * as well as from task contexts which can sleep.
174+ *
175+ * The completion callback is invoked in a context which can't sleep.
176+ * Before that invocation, the value of message->status is undefined.
177+ * When the callback is issued, message->status holds either zero (to
178+ * indicate complete success) or a negative error code. After that
179+ * callback returns, the driver which issued the transfer request may
180+ * deallocate the associated memory; it's no longer in use by any SPI
181+ * core or controller driver code.
182+ *
183+ * Note that although all messages to a spi_device are handled in
184+ * FIFO order, messages may go to different devices in other orders.
185+ * Some device might be higher priority, or have various "hard" access
186+ * time requirements, for example.
187+ *
188+ * On detection of any fault during the transfer, processing of
189+ * the entire message is aborted, and the device is deselected.
190+ * Until returning from the associated message completion callback,
191+ * no other spi_message queued to that device will be processed.
192+ * (This rule applies equally to all the synchronous transfer calls,
193+ * which are wrappers around this core asynchronous primitive.)
194+ */
195+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
196+{
197+ struct spi_master *master = spi->master;
198+ int ret;
199+ unsigned long flags;
200+
201+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
202+
203+ ret = __spi_async(spi, message);
204+
205+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
206+
207+ return ret;
208+
209+}
210+EXPORT_SYMBOL_GPL(spi_async_locked);
211+
212
213 /*-------------------------------------------------------------------------*/
214
215@@ -738,6 +806,32 @@ static void spi_complete(void *arg)
216     complete(arg);
217 }
218
219+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
220+ int bus_locked)
221+{
222+ DECLARE_COMPLETION_ONSTACK(done);
223+ int status;
224+ struct spi_master *master = spi->master;
225+
226+ message->complete = spi_complete;
227+ message->context = &done;
228+
229+ if (!bus_locked)
230+ mutex_lock(&master->bus_lock_mutex);
231+
232+ status = spi_async_locked(spi, message);
233+
234+ if (!bus_locked)
235+ mutex_unlock(&master->bus_lock_mutex);
236+
237+ if (status == 0) {
238+ wait_for_completion(&done);
239+ status = message->status;
240+ }
241+ message->context = NULL;
242+ return status;
243+}
244+
245 /**
246  * spi_sync - blocking/synchronous SPI data transfers
247  * @spi: device with which data will be exchanged
248@@ -761,21 +855,86 @@ static void spi_complete(void *arg)
249  */
250 int spi_sync(struct spi_device *spi, struct spi_message *message)
251 {
252- DECLARE_COMPLETION_ONSTACK(done);
253- int status;
254-
255- message->complete = spi_complete;
256- message->context = &done;
257- status = spi_async(spi, message);
258- if (status == 0) {
259- wait_for_completion(&done);
260- status = message->status;
261- }
262- message->context = NULL;
263- return status;
264+ return __spi_sync(spi, message, 0);
265 }
266 EXPORT_SYMBOL_GPL(spi_sync);
267
268+/**
269+ * spi_sync_locked - version of spi_sync with exclusive bus usage
270+ * @spi: device with which data will be exchanged
271+ * @message: describes the data transfers
272+ * Context: can sleep
273+ *
274+ * This call may only be used from a context that may sleep. The sleep
275+ * is non-interruptible, and has no timeout. Low-overhead controller
276+ * drivers may DMA directly into and out of the message buffers.
277+ *
278+ * This call should be used by drivers that require exclusive access to the
279+ * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
280+ * be released by a spi_bus_unlock call when the exclusive access is over.
281+ *
282+ * It returns zero on success, else a negative error code.
283+ */
284+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
285+{
286+ return __spi_sync(spi, message, 1);
287+}
288+EXPORT_SYMBOL_GPL(spi_sync_locked);
289+
290+/**
291+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
292+ * @master: SPI bus master that should be locked for exclusive bus access
293+ * Context: can sleep
294+ *
295+ * This call may only be used from a context that may sleep. The sleep
296+ * is non-interruptible, and has no timeout.
297+ *
298+ * This call should be used by drivers that require exclusive access to the
299+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
300+ * exclusive access is over. Data transfer must be done by spi_sync_locked
301+ * and spi_async_locked calls when the SPI bus lock is held.
302+ *
303+ * It returns zero on success, else a negative error code.
304+ */
305+int spi_bus_lock(struct spi_master *master)
306+{
307+ unsigned long flags;
308+
309+ mutex_lock(&master->bus_lock_mutex);
310+
311+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
312+ master->bus_lock_flag = 1;
313+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
314+
315+ /* mutex remains locked until spi_bus_unlock is called */
316+
317+ return 0;
318+}
319+EXPORT_SYMBOL_GPL(spi_bus_lock);
320+
321+/**
322+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
323+ * @master: SPI bus master that was locked for exclusive bus access
324+ * Context: can sleep
325+ *
326+ * This call may only be used from a context that may sleep. The sleep
327+ * is non-interruptible, and has no timeout.
328+ *
329+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
330+ * call.
331+ *
332+ * It returns zero on success, else a negative error code.
333+ */
334+int spi_bus_unlock(struct spi_master *master)
335+{
336+ master->bus_lock_flag = 0;
337+
338+ mutex_unlock(&master->bus_lock_mutex);
339+
340+ return 0;
341+}
342+EXPORT_SYMBOL_GPL(spi_bus_unlock);
343+
344 /* portable code must never pass more than 32 bytes */
345 #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
346
347--- a/include/linux/spi/spi.h
348@@ -262,6 +262,13 @@ struct spi_master {
349 #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
350 #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
351
352+ /* lock and mutex for SPI bus locking */
353+ spinlock_t bus_lock_spinlock;
354+ struct mutex bus_lock_mutex;
355+
356+ /* flag indicating that the SPI bus is locked for exclusive use */
357+ bool bus_lock_flag;
358+
359     /* Setup mode and clock, etc (spi driver may call many times).
360      *
361      * IMPORTANT: this may be called when transfers to another
362@@ -542,6 +549,8 @@ static inline void spi_message_free(stru
363
364 extern int spi_setup(struct spi_device *spi);
365 extern int spi_async(struct spi_device *spi, struct spi_message *message);
366+extern int spi_async_locked(struct spi_device *spi,
367+ struct spi_message *message);
368
369 /*---------------------------------------------------------------------------*/
370
371@@ -551,6 +560,9 @@ extern int spi_async(struct spi_device *
372  */
373
374 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
375+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
376+extern int spi_bus_lock(struct spi_master *master);
377+extern int spi_bus_unlock(struct spi_master *master);
378
379 /**
380  * spi_write - SPI synchronous write
target/linux/generic/patches-2.6.35/911-backport-mmc_spi-use-spi-bus-locking-api.patch
1From 4751c1c74bc7b596db5de0c93be1a22a570145c0 Mon Sep 17 00:00:00 2001
2From: Ernst Schwab <eschwab@online.de>
3Date: Thu, 18 Feb 2010 12:47:46 +0100
4Subject: [PATCH] spi/mmc_spi: mmc_spi adaptations for SPI bus locking API
5
6Modification of the mmc_spi driver to use the SPI bus locking API.
7With this, the mmc_spi driver can be used together with other SPI
8devices on the same SPI bus. The exclusive access to the SPI bus is
9now managed in the SPI layer. The counting of chip selects in the probe
10function is no longer needed.
11
12Signed-off-by: Ernst Schwab <eschwab@online.de>
13Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
14Tested-by: Matt Fleming <matt@console-pimps.org>
15Tested-by: Antonio Ospite <ospite@studenti.unina.it>
16---
17 drivers/mmc/host/mmc_spi.c | 59 ++++++++-----------------------------------
18 1 files changed, 11 insertions(+), 48 deletions(-)
19
20--- a/drivers/mmc/host/mmc_spi.c
21@@ -182,7 +182,7 @@ mmc_spi_readbytes(struct mmc_spi_host *h
22                 host->data_dma, sizeof(*host->data),
23                 DMA_FROM_DEVICE);
24
25- status = spi_sync(host->spi, &host->readback);
26+ status = spi_sync_locked(host->spi, &host->readback);
27
28     if (host->dma_dev)
29         dma_sync_single_for_cpu(host->dma_dev,
30@@ -541,7 +541,7 @@ mmc_spi_command_send(struct mmc_spi_host
31                 host->data_dma, sizeof(*host->data),
32                 DMA_BIDIRECTIONAL);
33     }
34- status = spi_sync(host->spi, &host->m);
35+ status = spi_sync_locked(host->spi, &host->m);
36
37     if (host->dma_dev)
38         dma_sync_single_for_cpu(host->dma_dev,
39@@ -685,7 +685,7 @@ mmc_spi_writeblock(struct mmc_spi_host *
40                 host->data_dma, sizeof(*scratch),
41                 DMA_BIDIRECTIONAL);
42
43- status = spi_sync(spi, &host->m);
44+ status = spi_sync_locked(spi, &host->m);
45
46     if (status != 0) {
47         dev_dbg(&spi->dev, "write error (%d)\n", status);
48@@ -822,7 +822,7 @@ mmc_spi_readblock(struct mmc_spi_host *h
49                 DMA_FROM_DEVICE);
50     }
51
52- status = spi_sync(spi, &host->m);
53+ status = spi_sync_locked(spi, &host->m);
54
55     if (host->dma_dev) {
56         dma_sync_single_for_cpu(host->dma_dev,
57@@ -1018,7 +1018,7 @@ mmc_spi_data_do(struct mmc_spi_host *hos
58                     host->data_dma, sizeof(*scratch),
59                     DMA_BIDIRECTIONAL);
60
61- tmp = spi_sync(spi, &host->m);
62+ tmp = spi_sync_locked(spi, &host->m);
63
64         if (host->dma_dev)
65             dma_sync_single_for_cpu(host->dma_dev,
66@@ -1084,6 +1084,9 @@ static void mmc_spi_request(struct mmc_h
67     }
68 #endif
69
70+ /* request exclusive bus access */
71+ spi_bus_lock(host->spi->master);
72+
73     /* issue command; then optionally data and stop */
74     status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
75     if (status == 0 && mrq->data) {
76@@ -1094,6 +1097,9 @@ static void mmc_spi_request(struct mmc_h
77             mmc_cs_off(host);
78     }
79
80+ /* release the bus */
81+ spi_bus_unlock(host->spi->master);
82+
83     mmc_request_done(host->mmc, mrq);
84 }
85
86@@ -1290,23 +1296,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
87     return IRQ_HANDLED;
88 }
89
90-struct count_children {
91- unsigned n;
92- struct bus_type *bus;
93-};
94-
95-static int maybe_count_child(struct device *dev, void *c)
96-{
97- struct count_children *ccp = c;
98-
99- if (dev->bus == ccp->bus) {
100- if (ccp->n)
101- return -EBUSY;
102- ccp->n++;
103- }
104- return 0;
105-}
106-
107 static int mmc_spi_probe(struct spi_device *spi)
108 {
109     void *ones;
110@@ -1338,32 +1327,6 @@ static int mmc_spi_probe(struct spi_devi
111         return status;
112     }
113
114- /* We can use the bus safely iff nobody else will interfere with us.
115- * Most commands consist of one SPI message to issue a command, then
116- * several more to collect its response, then possibly more for data
117- * transfer. Clocking access to other devices during that period will
118- * corrupt the command execution.
119- *
120- * Until we have software primitives which guarantee non-interference,
121- * we'll aim for a hardware-level guarantee.
122- *
123- * REVISIT we can't guarantee another device won't be added later...
124- */
125- if (spi->master->num_chipselect > 1) {
126- struct count_children cc;
127-
128- cc.n = 0;
129- cc.bus = spi->dev.bus;
130- status = device_for_each_child(spi->dev.parent, &cc,
131- maybe_count_child);
132- if (status < 0) {
133- dev_err(&spi->dev, "can't share SPI bus\n");
134- return status;
135- }
136-
137- dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n");
138- }
139-
140     /* We need a supply of ones to transmit. This is the only time
141      * the CPU touches these, so cache coherency isn't a concern.
142      *

Archive Download the corresponding diff file



interactive