Date: 2014-07-10 00:17:53
Author: Apelete Seketeli
Commit: 651ef2fc7a6fbe42fe9391104c95bb17ca6ee227
Message: mmc: jz4740: prepare next dma transfer in parallel with current transfer

Make use of the MMC asynchronous request capability to prepare the
next DMA transfer request in parallel with the current transfer.
This is done by adding pre-request and post-request callbacks that are
used by the MMC framework during an active data transfer.

This should help reduce the impact of DMA preparation overhead on SD
card performance.

Signed-off-by: Apelete Seketeli <apelete@seketeli.net>
Files: drivers/mmc/host/jz4740_mmc.c (9 diffs)
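
The handshake between these callbacks is driven by a cookie: pre_req maps the scatterlist for the next request ahead of time and stamps data->host_cookie, the request path reuses that mapping only when the stamped cookie matches host->next_data.cookie, and post_req unmaps the scatterlist and clears the cookie. The stand-alone sketch below models that flow; sketch_host and sketch_data are hypothetical stand-ins for the driver's jz4740_mmc_host and mmc_data structures, not code from the patch.

#include <stdio.h>

/* Stand-ins for jz4740_mmc_host/next_data and mmc_data (illustrative only) */
struct sketch_host {
    int next_cookie;     /* like host->next_data.cookie, initialized to 1 */
    int prepared_sg_len; /* like host->next_data.sg_len */
};

struct sketch_data {
    int host_cookie;     /* 0 means "not prepared in advance" */
    int sg_len;          /* scatterlist length to map */
};

/* pre_req path: map the scatterlist early and stamp the cookie */
static void sketch_pre_request(struct sketch_host *host, struct sketch_data *data)
{
    host->prepared_sg_len = data->sg_len;    /* stands in for dma_map_sg() */
    data->host_cookie = ++host->next_cookie; /* wrap guard omitted; see note after the diff */
}

/* request path: reuse the prepared mapping only when the cookies match */
static int sketch_start_transfer(struct sketch_host *host, struct sketch_data *data)
{
    if (data->host_cookie && data->host_cookie == host->next_cookie) {
        int sg_len = host->prepared_sg_len; /* already mapped in pre_req */
        host->prepared_sg_len = 0;
        return sg_len;
    }
    return data->sg_len; /* not prepared: would map the scatterlist here */
}

/* post_req path: drop the mapping and invalidate the cookie */
static void sketch_post_request(struct sketch_data *data)
{
    data->host_cookie = 0; /* stands in for dma_unmap_sg() + reset */
}

int main(void)
{
    struct sketch_host host = { .next_cookie = 1, .prepared_sg_len = 0 };
    struct sketch_data data = { .host_cookie = 0, .sg_len = 4 };

    sketch_pre_request(&host, &data);   /* prepared while previous I/O runs */
    printf("sg_len=%d (reused prepared mapping)\n",
           sketch_start_transfer(&host, &data));
    sketch_post_request(&data);
    return 0;
}

In the driver itself the expensive step being moved off the critical path is dma_map_sg()/dma_unmap_sg(), as seen in jz4740_mmc_prepare_dma_data() and jz4740_mmc_dma_unmap() in the diff below.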

Change Details

drivers/mmc/host/jz4740_mmc.c

@@ -115,6 +115,11 @@
     JZ4740_MMC_STATE_DONE,
 };
 
+struct jz4740_mmc_host_next {
+    int sg_len;
+    s32 cookie;
+};
+
 struct jz4740_mmc_host {
     struct mmc_host *mmc;
     struct platform_device *pdev;
@@ -144,6 +149,7 @@
     /* DMA support */
     struct dma_chan *dma_rx;
     struct dma_chan *dma_tx;
+    struct jz4740_mmc_host_next next_data;
     bool use_dma;
     int sg_len;
 
@@ -185,6 +191,9 @@
         goto free_master_write;
     }
 
+    /* Initialize DMA pre request cookie */
+    host->next_data.cookie = 1;
+
     return 0;
 
 free_master_write:
@@ -197,23 +206,72 @@
     return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 }
 
+static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
+                               struct mmc_data *data)
+{
+    return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
+}
+
 static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
                  struct mmc_data *data)
 {
-    struct dma_chan *chan;
+    struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
     enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 
-    if (dir == DMA_TO_DEVICE)
-        chan = host->dma_tx;
-    else
-        chan = host->dma_rx;
-
     dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 }
 
+/* Prepares DMA data for current/next transfer, returns non-zero on failure */
+static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
+                       struct mmc_data *data,
+                       struct jz4740_mmc_host_next *next,
+                       struct dma_chan *chan)
+{
+    struct jz4740_mmc_host_next *next_data = &host->next_data;
+    enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+    int sg_len;
+
+    if (!next && data->host_cookie &&
+        data->host_cookie != host->next_data.cookie) {
+        dev_warn(mmc_dev(host->mmc),
+             "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
+             __func__,
+             data->host_cookie,
+             host->next_data.cookie);
+        data->host_cookie = 0;
+    }
+
+    /* Check if next job is already prepared */
+    if (next || data->host_cookie != host->next_data.cookie) {
+        sg_len = dma_map_sg(chan->device->dev,
+                    data->sg,
+                    data->sg_len,
+                    dir);
+
+    } else {
+        sg_len = next_data->sg_len;
+        next_data->sg_len = 0;
+    }
+
+    if (sg_len <= 0) {
+        dev_err(mmc_dev(host->mmc),
+            "Failed to map scatterlist for DMA operation\n");
+        return -EINVAL;
+    }
+
+    if (next) {
+        next->sg_len = sg_len;
+        data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+    } else
+        host->sg_len = sg_len;
+
+    return 0;
+}
+
 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
                      struct mmc_data *data)
 {
+    int ret;
     struct dma_chan *chan;
     struct dma_async_tx_descriptor *desc;
     struct dma_slave_config conf = {
@@ -222,9 +280,8 @@
         .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
         .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
     };
-    enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 
-    if (dir == DMA_TO_DEVICE) {
+    if (data->flags & MMC_DATA_WRITE) {
         conf.direction = DMA_MEM_TO_DEV;
         conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
         conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
@@ -236,16 +293,9 @@
         chan = host->dma_rx;
     }
 
-    host->sg_len = dma_map_sg(chan->device->dev,
-                  data->sg,
-                  data->sg_len,
-                  dir);
-
-    if (host->sg_len == 0) {
-        dev_err(mmc_dev(host->mmc),
-            "Failed to map scatterlist for DMA operation\n");
-        return -EINVAL;
-    }
+    ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
+    if (ret)
+        return ret;
 
     dmaengine_slave_config(chan, &conf);
     desc = dmaengine_prep_slave_sg(chan,
@@ -270,6 +320,43 @@
     return -ENOMEM;
 }
 
+static void jz4740_mmc_pre_request(struct mmc_host *mmc,
+                   struct mmc_request *mrq,
+                   bool is_first_req)
+{
+    struct jz4740_mmc_host *host = mmc_priv(mmc);
+    struct mmc_data *data = mrq->data;
+    struct jz4740_mmc_host_next *next_data = &host->next_data;
+
+    BUG_ON(data->host_cookie);
+
+    if (host->use_dma) {
+        struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+        if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
+            data->host_cookie = 0;
+    }
+}
+
+static void jz4740_mmc_post_request(struct mmc_host *mmc,
+                    struct mmc_request *mrq,
+                    int err)
+{
+    struct jz4740_mmc_host *host = mmc_priv(mmc);
+    struct mmc_data *data = mrq->data;
+
+    if (host->use_dma && data->host_cookie) {
+        jz4740_mmc_dma_unmap(host, data);
+        data->host_cookie = 0;
+    }
+
+    if (err) {
+        struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+        dmaengine_terminate_all(chan);
+    }
+}
+
 /*----------------------------------------------------------------------------*/
 
 static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
@@ -628,14 +715,17 @@
 
     case JZ4740_MMC_STATE_TRANSFER_DATA:
         if (host->use_dma) {
-            /* Use DMA if enabled, data transfer direction was
-             * defined before in jz_mmc_prepare_data_transfer().
+            /* Use DMA if enabled.
+             * Data transfer direction is defined later by
+             * relying on data flags in jz4740_prepare_dma_data().
              */
             timeout = jz4740_mmc_start_dma_transfer(host, data);
            data->bytes_xfered = data->blocks * data->blksz;
         } else if (data->flags & MMC_DATA_READ)
-            /* If DMA is not enabled, rely on data flags
-             * to establish data transfer direction.
+            /* Use PIO if DMA is not enabled.
+             * Data transfer direction was defined before
+             * by relying on data flags in
+             * jz_mmc_prepare_data_transfer().
              */
             timeout = jz4740_mmc_read_data(host, data);
         else
@@ -860,6 +950,8 @@
 
 static const struct mmc_host_ops jz4740_mmc_ops = {
     .request = jz4740_mmc_request,
+    .pre_req = jz4740_mmc_pre_request,
+    .post_req = jz4740_mmc_post_request,
     .set_ios = jz4740_mmc_set_ios,
     .get_ro = mmc_gpio_get_ro,
     .get_cd = mmc_gpio_get_cd,
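
A subtle detail in jz4740_mmc_prepare_dma_data() is the cookie update, data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;. A host_cookie of 0 is reserved to mean "nothing prepared" (jz4740_mmc_post_request() only unmaps when it is non-zero), so the stamped value must never be 0 or negative. The sketch below isolates that guard with a hypothetical helper, next_cookie(), that is not part of the driver; it wraps explicitly because signed overflow is undefined in plain C, whereas the kernel build sidesteps the issue with -fno-strict-overflow.

#include <limits.h>
#include <stdio.h>

/* Returns the next value to stamp into host_cookie: always >= 1, never 0 */
static int next_cookie(int cookie)
{
    /* explicit wrap instead of relying on signed-overflow behaviour */
    cookie = (cookie == INT_MAX) ? INT_MIN : cookie + 1;
    return cookie < 0 ? 1 : cookie;
}

int main(void)
{
    printf("%d\n", next_cookie(1));       /* 2: normal increment */
    printf("%d\n", next_cookie(INT_MAX)); /* 1: wrapped negative, reset */
    return 0;
}

Either way, the value written to host_cookie lands back at 1 after a wrap instead of ever reaching 0.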
