Dmaengine是linux內核dma驅動框架,針對DMA驅動的混亂局面內核社區提出了一個全新的框架驅動,目標在統一dma API讓各個模塊使用DMA時不用關心硬件細節,同時代碼復用提高,並且實現異步的數據傳輸,降低機器負載。
dmaengine向其他模塊提供接口;virt-dma,Virtual DMA向dmaengine提供初始化函數,傳輸各階段狀態登記鏈表,desc_free函數等;dma drivers為具體DMA控制器的驅動代碼,其通過dma_async_device_register()注冊到dmaengine。
lli: linked list item, the DMA block descriptor
linux-3.4/drivers/dma
dmaengine.c
注冊dma類,在調用dma_async_device_register注冊具體平台的DMA設備時會用到。
sunxi-dma.c
sunxi_probe解析:
1.1 ret = request_irq(irq, sunxi_dma_interrupt, IRQF_SHARED,dev_name(&pdev->dev), sunxi_dev);//注冊中斷
1.2 clk_get(&pdev->dev, "dma");//獲取時鐘
drivers/clk/sunxi/clk-sun8iw7.c
SUNXI_CLK_PERIPH (dma,0,0,0,0,0,0,0,0,0,0,BUS_RST0,BUS_GATE0,0,0,6, 6,0,&clk_lock,NULL, 0)
struct periph_init_data sunxi_periphs_init[] = {
{"dma",0,ahb1mod_parents,ARRAY_SIZE(ahb1mod_parents),&sunxi_clk_periph_dma}
}
drivers/clk/sunxi/clk-periph.h定義SUNXI_CLK_PERIPH宏
sunxi_init_clocks-> sunxi_clk_register_periph-> clk_register(NULL, &periph->hw);
1.3dma_pool_create, 創建一個一致內存塊池,其參數name是DMA池的名字,用於診斷用,參數dev是將做DMA的設備,參數size是DMA池裡的塊的大小,參數align是塊的對齊要求,是2的冪,參數allocation返回沒有跨越邊界的塊數(或0)。
1.4vchan_init初始化virt_dma_chan
1.5初始化sunxi_dev->dma_dev結構成員後,調用dma_async_device_register(&sunxi_dev->dma_dev)注冊到dmaengine
1.5.1 get_dma_id(device)//建立idr機制,分配一個新idr entry,將返回值存儲在&sunxi_dev->dma_dev->dev_id中。
1.5.2 在DMA完全准備好之前,已經有客戶端在等待channel時,遍歷所有的channel,調用dma_chan_get(chan)
1.5.3 list_add_tail_rcu(&device->global_node, &dma_device_list)//將&dma_device.global_node加入靜態鏈表中。
1.5.4 dma_channel_rebalance
1.5.4.1 chan->device->device_alloc_chan_resources(chan); <=> sunxi_alloc_chan_resources//無實際作用,最後返回0
1.5.4.2 balance_ref_count
1.6最後調用sunxi_dma_hw_init(sunxi_dev);使能時鐘, -> clk_prepare_enable(sunxi_dev->ahb_clk);
dma_device結構體集成DMA控制器的核心函數,實現最底層的具體操作; 新建立的sunxi_chan對應每個通道,該結構體一面存放用戶配置的cfg成員,另外就是包含virt_dma_chan,它用virt-dma提供的函數初始化,並將vc->chan.device_node鏈接到dma_device結構體的channels鏈表中,後續實際的DMA通道的申請就是申請vc->chan通道,而vc->chan在內核中建立相應的文件節點,即chan->dev,生成的節點如下:
/sys/devices/platform/sunxi_dmac/dma/dma0chan0
在准備一次傳輸時,e.g chan->device->device_prep_dma_sg 即調用sunxi_prep_dma_sg,創建sunxi_desc結構體並初始化,該結構體可以理解為通道中具體運載工具,在傳輸完成後銷毀。
在不同的傳輸階段,會將virt_dma_desc.node鏈入不同的virt_dma_chan鏈表中,如desc_submitted,desc_issued,desc_completed。
其他模塊在調用dmaengine提供的接口來使用DMA時,是通過遍歷&dma_device_list鏈表找到相應的dma device。
在sunxi_start_desc函數中,會將txd->lli_phys寫入DMA寄存器中,之前設置的相應參數,如slave_id,起始地址等,將由DMA控制器來解析,選擇等。
include/linux/dmaengine.h
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)// mask,所有申請的傳輸類型的掩碼;fn, DMA驅動私有的過濾函數;fn_param,傳入的私有參數
1.從dma_device_list全局鏈表中找到可用的dma_device
2.private_candidate(mask, device, fn, fn_param);//根據mask判斷device是否符合要求,在滿足條件的情況下,a.如果所有的通道都是公有的,在沒有用戶使用的情況下執行b,否則沖突,返回NULL;b.遍歷dma_device.channels鏈表中的dma_chan,找到第一個client_count為0的chan.具體代碼如下:
/*
 * Find a channel on @dev that satisfies @mask and the optional filter @fn.
 * Returns the first idle matching channel, or NULL if the device's
 * capabilities don't match, a multi-channel device already has public
 * users, or no free channel passes the filter.
 */
static struct dma_chan *private_candidate(dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}

	/*
	 * Devices with multiple channels need special handling: all of
	 * their channels must be either private or public, never mixed.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
3.找到合適的chan之後,
dma_cap_set(DMA_PRIVATE, device->cap_mask);//to disable balance_ref_count as this channel will not be published in the general-purpose allocator
device->privatecnt++;
err = dma_chan_get(chan);//a.獲取dma channel’s parent driver模塊,即該模塊計數加1;
b.int desc_cnt = chan->device->device_alloc_chan_resources(chan);// <=> sunxi_alloc_chan_resources//設置schan->cyclic = false;
c.balance_ref_count(chan)//其目的是確保dma channel’s parent driver模塊數與client數一致
include/linux/dmaengine.h
static inline int dmaengine_slave_config(struct dma_chan *chan,struct dma_slave_config *config)
<=> dmaengine_device_control(chan, DMA_SLAVE_CONFIG,(unsigned long)config);
<=> chan->device->device_control(chan, cmd, arg);
<=> sunxi_control <=> sunxi_set_runtime_config(chan, (struct dma_slave_config *)arg);//最終將用戶配置的相關參數存儲到sunxi_chan.cfg中。
/*
 * Prepare a cyclic (circular-buffer) DMA transaction on @chan covering
 * @buf_len bytes at @buf_addr, split into periods of @period_len.
 * Thin wrapper: forwards to the controller driver's
 * device_prep_dma_cyclic callback with a NULL context pointer.
 */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	struct dma_device *dev = chan->device;

	return dev->device_prep_dma_cyclic(chan, buf_addr, buf_len,
					   period_len, dir, flags, NULL);
}
dmaengine_submit(struct dma_async_tx_descriptor *desc)
<=> drivers/dma/virt-dma.c : vchan_tx_submit
cookie = dma_cookie_assign(tx);//遞增cookie
list_add_tail(&vd->node, &vc->desc_submitted);//加入desc_submitted隊列
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
chan->device->device_issue_pending(chan);
}
<=> sunxi_issue_pending
a.vchan_issue_pending(&schan->vc)->
list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
__list_splice(list, head->prev, head);//
INIT_LIST_HEAD(list);//清空list
b.if (list_empty(&schan->node))
list_add_tail(&schan->node, &sdev->pending);//將schan->node加入鏈表sdev->pending
c.tasklet_schedule(&sdev->task);//調度sdev->task,執行sunxi_dma_tasklet->
sunxi_start_desc//virt_dma_desc結構變量vd為空時停止本次DMA傳輸;否則,設置對應channel的中斷號,DMA操作模式,將txd->lli_phys寫入寄存器,正式DMA傳輸。
drivers/dma/dmaengine.c
void dma_release_channel(struct dma_chan *chan)
1.dma_chan_put
chan->client_count--;
module_put(dma_chan_to_owner(chan));
if (chan->client_count == 0)
chan->device->device_free_chan_resources(chan); <=> sunxi_free_chan_resources->
vchan_free_chan_resources: 獲取所有的desc_submitted、desc_issued、desc_completed descriptors之後,調用vchan_dma_desc_free_list(vc, &head):
/* Drain @head, releasing each descriptor via the channel's desc_free
 * callback (for sunxi this is sunxi_free_desc, which frees the
 * virt_dma_desc). */
while (!list_empty(head)) {
	struct virt_dma_desc *vd =
		list_first_entry(head, struct virt_dma_desc, node);

	list_del(&vd->node);
	dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
	vc->desc_free(vd);
}
2.判斷--chan->device->privatecnt為0時,
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask)
注意事項:
回調函數裡不允許休眠,以及調度;
回調函數時間不宜過長;
Pending並不是立即傳輸而是等待軟中斷的到來,cyclic模式除外;
在dma_slave_config中的slave_id對於devices必須要指定.
drivers/char/dma_test/sunxi_dma_test.c
創建sunxi_dma_test類及其屬性文件test、help
當向test輸入0時,即調用dma_test_main(0)-> case_memcpy_single_chan()
1.buf_group *buffers = NULL;buffers = init_buf();//分配好內存
/* One DMA test buffer pair (source and destination) with both virtual
 * and physical (DMA) addresses.  Addresses are stored as unsigned int,
 * which assumes a 32-bit platform — TODO confirm for other targets. */
typedef struct {
unsigned int src_va; /* source buffer virtual address */
unsigned int src_pa; /* source buffer physical (DMA) address */
unsigned int dst_va; /* destination buffer virtual address */
unsigned int dst_pa; /* destination buffer physical (DMA) address */
unsigned int size; /* buffer size in bytes */
}buf_item;
/* A set of test buffers filled in by init_buf(). */
typedef struct {
unsigned int cnt; /* number of valid entries in item[] */
buf_item item[BUF_MAX_CNT];
}buf_group;
2. chan = dma_request_channel(mask , NULL , NULL);//根據mask申請一個可用的通道
3.sg_alloc_table(&src_sg_table, buffers->cnt, GFP_KERNEL)//Allocate and initialize an sg table
使用scatterlist的原因就是系統在運行的時候內存會產生很多碎片,比如4k,100k的,1M的,有時候對應磁盤碎片,總之就是碎片。而在網絡和磁盤操作中很多時候需要傳送大塊的數據,尤其是使用DMA的時候,因為DMA操作的物理地址必須是連續的。假設要1M內存,此時可以分配一個整的1M內存,也可以把10個10K的和9個100K的組成一塊1M的內存,當然這19個塊可能是不連續的,也可能其中某些或全部是連續的,總之情況不定,為了描述這種情況,就引入了scatterlist,其實看成一個關於內存塊構成的鏈表就OK了。
sg_set_buf(sg, phys_to_virt(buffers->item[i].src_pa), buffers->item[i].size);
sg_dma_address(sg) = buffers->item[i].src_pa;
4.dmaengine_slave_config(chan , &config);//配置參數,如傳輸方向,slave_id等。
5.tx = chan->device->device_prep_dma_sg(chan, dst_sg_table.sgl, buffers->cnt,
src_sg_table.sgl, buffers->cnt, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
准備一次多包傳輸,散列形式,返回一個傳輸描述符指針。
<=> sunxi_prep_dma_sg: //
a. vchan_tx_prep,初始化virt_dma_desc;
b.sunxi_alloc_lli//調用dma_pool_alloc從sunxi_dmadev.lli_pool內存塊池分配內存。
c.sunxi_cfg_lli//配置sunxi_dma_lli相應參數,如flag,源地址,目的地址,數據長度等。
6.設置回調函數
dma_info.chan = chan;
init_waitqueue_head(&dma_info.dma_wq);
atomic_set(&dma_info.dma_done, 0);
tx->callback = __dma_callback;//喚醒中斷;設置pinfo->dma_done為1。
tx->callback_param = &dma_info;
7.加入傳輸隊列, cookie = dmaengine_submit(tx);
8.開始傳輸,dma_async_issue_pending(chan);
9.等待傳輸結束,ret = wait_event_interruptible_timeout(dma_info.dma_wq,atomic_read(&dma_info.dma_done)==1, timeout);
10.DMA傳輸完成後,產生中斷,其中斷處理函數為sunxi_dma_interrupt,
ch->desc = NULL;
vchan_cookie_complete(&desc->vd);
sunxi_start_desc(ch);//vchan_cookie_complete會釋放virt_dma_desc,故會正常退出此次DMA傳輸。
vchan_cookie_complete解析:
a.dma_cookie_complete(&vd->tx);//設置cookie
b.list_add_tail(&vd->node, &vc->desc_completed);
c.tasklet_schedule(&vc->task); -> vchan_complete
list_splice_tail_init(&vc->desc_completed, &head);
list_del(&vd->node);
vc->desc_free(vd); <=> sunxi_free_desc//釋放virt_dma_desc
if (cb)
cb(cb_data);//執行回調函數
11.dma_release_channel(chan);
實例源碼:
sunxi_dma_test.c
/*
 * drivers/char/dma_test/sunxi_dma_test.c
 *
 * Copyright(c) 2013-2015 Allwinnertech Co., Ltd.
 * http://www.allwinnertech.com
 *
 * Author: liugang
 *
 * sunxi dma test driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "sunxi_dma_test.h"

/* wait queue for waiting dma done */
wait_queue_head_t g_dtc_queue[DTC_MAX];
atomic_t g_adma_done = ATOMIC_INIT(0); /* dma done flag */

/* int size_available[] = {SZ_4K, SZ_64K, SZ_256K, SZ_512K, SZ_512K + SZ_64K, SZ_512K + SZ_256K, SZ_1M}; */
int size_available[] = {SZ_4K, SZ_16K, SZ_32K, SZ_64K, SZ_128K, SZ_256K, SZ_512K};

/* Initialise one wait queue per test case id. */
static void __dma_test_init_waitqueue(void)
{
	u32 i = 0;

	for (i = 0; i < DTC_MAX; i++)
		init_waitqueue_head(&g_dtc_queue[i]);
}

/*
 * DMA completion callback; runs in tasklet context, must not sleep.
 *
 * FIX: set the done flag BEFORE waking the waiter.  The original woke
 * first and set the flag second, so the woken waiter could re-check the
 * condition, still see 0, and sleep through the only wakeup until the
 * timeout expired.
 */
static void __dma_callback(void *dma_async_param)
{
	chan_info *pinfo = (chan_info *)dma_async_param;

	atomic_set(&pinfo->dma_done, 1);
	wake_up_interruptible(&pinfo->dma_wq);
}

/*
 * Allocate a random number (1..BUF_MAX_CNT) of coherent src/dst buffer
 * pairs of random sizes, fill src with 0x54 and dst with 0xab.
 * Returns the group, or NULL if nothing could be allocated.
 * Caller owns the result and must release it with deinit_buf().
 */
buf_group *init_buf(void)
{
	buf_group *pbuf = kmalloc(sizeof(buf_group), GFP_KERNEL);
	int i, buf_cnt = (get_random_int() % BUF_MAX_CNT) + 1;
	int size, index;

	if (!pbuf)
		return NULL;
	for (i = 0; i < buf_cnt; i++) {
		index = get_random_int() % ARRAY_SIZE(size_available);
		size = size_available[index];
		printk("%s(%d): buf %d, index %d, size 0x%x\n", __func__, __LINE__, i, index, size);
		pbuf->item[i].src_va = (u32)dma_alloc_coherent(NULL, size,
				(dma_addr_t *)&pbuf->item[i].src_pa, GFP_KERNEL);
		if (!pbuf->item[i].src_va)
			break;
		pbuf->item[i].dst_va = (u32)dma_alloc_coherent(NULL, size,
				(dma_addr_t *)&pbuf->item[i].dst_pa, GFP_KERNEL);
		if (!pbuf->item[i].dst_va) {
			dma_free_coherent(NULL, size, (void *)pbuf->item[i].src_va,
					(dma_addr_t)pbuf->item[i].src_pa);
			break;
		}
		memset((void *)pbuf->item[i].src_va, 0x54, size);
		memset((void *)pbuf->item[i].dst_va, 0xab, size);
		pbuf->item[i].size = size;
	}
	pbuf->cnt = i;
	if (0 == pbuf->cnt) {
		kfree(pbuf); /* FIX: original leaked pbuf on total failure */
		return NULL;
	}
	printk("%s(%d): buf cnt %d, buffers:\n", __func__, __LINE__, pbuf->cnt);
	for (i = 0; i < pbuf->cnt; i++)
		printk(" src: va 0x%08x, pa 0x%08x; dst: va 0x%08x, pa 0x%08x\n",
			pbuf->item[i].src_va, pbuf->item[i].src_pa,
			pbuf->item[i].dst_va, pbuf->item[i].dst_pa);
	return pbuf;
}

/* Free every buffer pair in @pbuf and the group itself. */
void deinit_buf(buf_group *pbuf)
{
	int i;

	if (!pbuf)
		return;
	for (i = 0; i < pbuf->cnt; i++) {
		dma_free_coherent(NULL, pbuf->item[i].size, (void *)pbuf->item[i].src_va,
				(dma_addr_t)pbuf->item[i].src_pa);
		dma_free_coherent(NULL, pbuf->item[i].size, (void *)pbuf->item[i].dst_va,
				(dma_addr_t)pbuf->item[i].dst_pa);
	}
	kfree(pbuf);
}

/*
 * Compare every src buffer with its dst buffer.
 * Returns 0 when all match, -EIO on the first mismatch (dumps the
 * first 16 bytes of each side), -EINVAL for a NULL group.
 */
int check_result(buf_group *pbuf)
{
	int i, j;

	if (!pbuf)
		return -EINVAL;
	for (i = 0; i < pbuf->cnt; i++) {
		if (memcmp((void *)pbuf->item[i].src_va, (void *)pbuf->item[i].dst_va,
				pbuf->item[i].size)) {
			printk("%s(%d) err: buffer %d memcmp failed!\n", __func__, __LINE__, i);
			printk(" src buffer: ");
			for (j = 0; j < 16; j++)
				printk("%d ", *((char *)pbuf->item[i].src_va + j));
			printk("\n");
			printk(" dst buffer: ");
			for (j = 0; j < 16; j++)
				printk("%d ", *((char *)pbuf->item[i].dst_va + j));
			printk("\n");
			return -EIO;
		}
	}
	return 0;
}

/*
 * Test case 0: one scatter/gather memcpy through a single channel.
 * Returns 0 on success, negative errno on failure.
 */
int case_memcpy_single_chan(void)
{
	struct sg_table src_sg_table, dst_sg_table;
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_slave_config config;
	struct dma_chan *chan;
	struct scatterlist *sg;
	buf_group *buffers = NULL;
	long timeout = 5 * HZ;
	chan_info dma_info;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int i, ret = -EINVAL;

	buffers = init_buf();
	if (!buffers) {
		pr_err("%s(%d) err: init_buf failed!\n", __func__, __LINE__);
		return -EBUSY;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SG, mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask , NULL , NULL);
	if (!chan) {
		pr_err("%s(%d) err: dma_request_channel failed!\n", __func__, __LINE__);
		goto out1;
	}
	if (sg_alloc_table(&src_sg_table, buffers->cnt, GFP_KERNEL)) {
		pr_err("%s(%d) err: alloc src sg_table failed!\n", __func__, __LINE__);
		goto out2;
	}
	if (sg_alloc_table(&dst_sg_table, buffers->cnt, GFP_KERNEL)) {
		pr_err("%s(%d) err: alloc dst sg_table failed!\n", __func__, __LINE__);
		goto out3; /* FIX: free the src table (which did allocate), see labels below */
	}

	/* assign sg buf */
	sg = src_sg_table.sgl;
	for (i = 0; i < buffers->cnt; i++, sg = sg_next(sg)) {
		sg_set_buf(sg, phys_to_virt(buffers->item[i].src_pa), buffers->item[i].size);
		sg_dma_address(sg) = buffers->item[i].src_pa;
	}
	sg = dst_sg_table.sgl;
	for (i = 0; i < buffers->cnt; i++, sg = sg_next(sg)) {
		sg_set_buf(sg, phys_to_virt(buffers->item[i].dst_pa), buffers->item[i].size);
		sg_dma_address(sg) = buffers->item[i].dst_pa;
	}

	config.direction = DMA_MEM_TO_MEM;
	config.src_addr = 0; /* not used for memcpy */
	config.dst_addr = 0;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_maxburst = 8;
	config.dst_maxburst = 8;
	config.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SDRAM);
	dmaengine_slave_config(chan , &config);

	tx = chan->device->device_prep_dma_sg(chan, dst_sg_table.sgl, buffers->cnt,
			src_sg_table.sgl, buffers->cnt, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) { /* FIX: prep can fail; original dereferenced tx unconditionally */
		pr_err("%s(%d) err: device_prep_dma_sg failed!\n", __func__, __LINE__);
		goto out4;
	}

	/* set callback */
	dma_info.chan = chan;
	init_waitqueue_head(&dma_info.dma_wq);
	atomic_set(&dma_info.dma_done, 0);
	tx->callback = __dma_callback;
	tx->callback_param = &dma_info;

	/* enqueue */
	cookie = dmaengine_submit(tx);

	/* start dma */
	dma_async_issue_pending(chan);

	/* wait transfer over */
	ret = wait_event_interruptible_timeout(dma_info.dma_wq,
			atomic_read(&dma_info.dma_done)==1, timeout);
	if (unlikely(-ERESTARTSYS == ret || 0 == ret)) {
		pr_err("%s(%d) err: wait dma done failed!\n", __func__, __LINE__);
		/* FIX: a timeout returns 0, which the original then reported
		 * as success; use -EIO like case_memcpy_multi_chan does. */
		ret = -EIO;
		goto out4;
	}

	ret = check_result(buffers);

/* FIX: label bodies reordered so each failure path frees exactly what was
 * allocated -- the original leaked src_sg_table when the dst alloc failed. */
out4:
	sg_free_table(&dst_sg_table);
out3:
	sg_free_table(&src_sg_table);
out2:
	dma_release_channel(chan);
out1:
	if (buffers)
		deinit_buf(buffers);
	if (ret)
		printk("%s(%d) err: test failed!\n", __func__, __LINE__);
	else
		printk("%s(%d): test success!\n", __func__, __LINE__);
	return ret;
}

/*
 * Test case 1: one plain memcpy per channel, up to DMA_MAX_CHAN channels
 * in parallel, looping until every buffer pair has been copied.
 * Returns 0 on success, negative errno on failure.
 */
int case_memcpy_multi_chan(void)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_slave_config config;
	chan_info dma_chanl[DMA_MAX_CHAN], *pchan_info;
	int buf_left, cur_trans, start_index;
	int i, ret = -EINVAL, chan_cnt = 0;
	long timeout = 5 * HZ;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	buf_group *buffers = NULL;

	buffers = init_buf();
	if (!buffers) {
		pr_err("%s(%d) err: init_buf failed!\n", __func__, __LINE__);
		return -EBUSY;
	}

	/* request channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	for (i = 0; i < ARRAY_SIZE(dma_chanl); i++, chan_cnt++) {
		if (chan_cnt == buffers->cnt) /* channel enough */
			break;
		pchan_info = &dma_chanl[i];
		pchan_info->chan = dma_request_channel(mask , NULL , NULL);
		if (!pchan_info->chan)
			break;
		init_waitqueue_head(&pchan_info->dma_wq);
		atomic_set(&pchan_info->dma_done, 0);
	}
	if (0 == chan_cnt) {
		/* FIX: with zero channels, cur_trans stayed 0 and the original
		 * looped on "goto again" forever. */
		pr_err("%s(%d) err: dma_request_channel failed!\n", __func__, __LINE__);
		goto end;
	}

	buf_left = buffers->cnt;
again:
	start_index = buffers->cnt - buf_left;
	for (i = 0; i < chan_cnt; ) {
		pchan_info = &dma_chanl[i];
		/* FIX: re-arm the done flag -- channels are reused on later
		 * rounds and the flag was still 1 from the previous round. */
		atomic_set(&pchan_info->dma_done, 0);
		config.direction = DMA_MEM_TO_MEM;
		config.src_addr = 0; /* not used for memcpy */
		config.dst_addr = 0;
		config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		config.src_maxburst = 8;
		config.dst_maxburst = 8;
		config.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SDRAM);
		dmaengine_slave_config(pchan_info->chan, &config);
		tx = pchan_info->chan->device->device_prep_dma_memcpy(pchan_info->chan,
				buffers->item[start_index + i].dst_pa,
				buffers->item[start_index + i].src_pa,
				buffers->item[start_index + i].size,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx) { /* FIX: prep can fail; don't dereference NULL */
			pr_err("%s(%d) err: device_prep_dma_memcpy failed!\n", __func__, __LINE__);
			ret = -EIO;
			goto end;
		}
		tx->callback = __dma_callback;
		tx->callback_param = pchan_info;
		cookie = dmaengine_submit(tx);
		if (++i == buf_left)
			break;
	}
	cur_trans = i;

	/* start dma */
	for (i = 0; i < cur_trans; i++)
		dma_async_issue_pending(dma_chanl[i].chan);

	for (i = 0; i < cur_trans; i++) {
		ret = wait_event_interruptible_timeout(dma_chanl[i].dma_wq,
				atomic_read(&dma_chanl[i].dma_done)==1, timeout);
		if (unlikely(-ERESTARTSYS == ret || 0 == ret)) {
			pr_err("%s(%d) err: wait dma done failed!\n", __func__, __LINE__);
			ret = -EIO;
			goto end;
		}
	}

	buf_left -= cur_trans;
	if (buf_left)
		goto again;

	ret = check_result(buffers);
end:
	for (i = 0; i < chan_cnt; i++)
		dma_release_channel(dma_chanl[i].chan);
	if (buffers)
		deinit_buf(buffers);
	if (ret)
		printk("%s(%d) err: test failed!\n", __func__, __LINE__);
	else
		printk("%s(%d): test success!\n", __func__, __LINE__);
	return ret;
}

/* Dispatch a test case by @id; returns 0 on success, negative errno else. */
static int dma_test_main(int id)
{
	int ret = 0;

	switch (id) {
	case DTC_MEMCPY_SINGLE_CHAN:
		ret = case_memcpy_single_chan();
		break;
	case DTC_MEMCPY_MULTI_CHAN:
		ret = case_memcpy_multi_chan();
		break;
	default:
		ret = -EINVAL; /* FIX: was "__LINE__" stored in a u32; use a real errno */
		break;
	}
	if (0 == ret)
		printk("%s: test success!\n", __func__);
	else
		printk("%s: test failed!\n", __func__);
	return ret;
}

const char *case_name[] = {
	"DTC_MEMCPY_SINGLE_CHAN",
	"DTC_MEMCPY_MULTI_CHAN",
};

/* sysfs store for /sys/class/sunxi_dma_test/test: "echo id > test". */
ssize_t test_store(struct class *class, struct class_attribute *attr,
		const char *buf, size_t size)
{
	/* FIX: the original parsed through "(long unsigned int *)&id" with
	 * "int id" -- an 8-byte store into a 4-byte object on 64-bit. */
	unsigned long id = 0;

	if (strict_strtoul(buf, 10, &id)) {
		pr_err("%s: invalid string %s\n", __func__, buf);
		return -EINVAL;
	}
	if (id >= DTC_MAX) {
		/* FIX: original indexed case_name[id] unchecked -> OOB read */
		pr_err("%s: invalid test case id %lu\n", __func__, id);
		return -EINVAL;
	}
	pr_info("%s: string %s, test case %s\n", __func__, buf, case_name[id]);
	if (0 != dma_test_main(id))
		pr_err("%s: dma_test_main failed! id %d\n", __func__, (int)id);
	else
		pr_info("%s: dma_test_main success! id %d\n", __func__, (int)id);
	return size;
}

/* sysfs show for /sys/class/sunxi_dma_test/help: usage text. */
ssize_t help_show(struct class *class, struct class_attribute *attr, char *buf)
{
	ssize_t cnt = 0;

	cnt += sprintf(buf + cnt, "usage: echo id > test\n");
	cnt += sprintf(buf + cnt, " id for case DTC_MEMCPY_SINGLE_CHAN is %d\n",
			(int)DTC_MEMCPY_SINGLE_CHAN);
	cnt += sprintf(buf + cnt, " id for case DTC_MEMCPY_MULTI_CHAN is %d\n",
			(int)DTC_MEMCPY_MULTI_CHAN);
	cnt += sprintf(buf + cnt, "case description:\n");
	cnt += sprintf(buf + cnt, " DTC_MEMCPY_SINGLE_CHAN: case for single channel\n");
	cnt += sprintf(buf + cnt, " DTC_MEMCPY_MULTI_CHAN: case for multi channel\n");
	return cnt;
}

static struct class_attribute dma_test_class_attrs[] = {
	__ATTR(test, 0220, NULL, test_store), /* not 222, for CTS, other group cannot have write permission */
	__ATTR(help, 0444, help_show, NULL),
	__ATTR_NULL,
};

static struct class dma_test_class = {
	.name		= "sunxi_dma_test",
	.owner		= THIS_MODULE,
	.class_attrs	= dma_test_class_attrs,
};

static int __init sw_dma_test_init(void)
{
	int status;

	pr_info("%s enter\n", __func__);
	/* init dma wait queue */
	__dma_test_init_waitqueue();
	status = class_register(&dma_test_class);
	if (status < 0)
		pr_info("%s err, status %d\n", __func__, status);
	else
		pr_info("%s success\n", __func__);
	/* FIX: propagate class_register failure; original always returned 0,
	 * leaving the module loaded with no sysfs interface. */
	return status;
}

static void __exit sw_dma_test_exit(void)
{
	pr_info("sw_dma_test_exit: enter\n");
	class_unregister(&dma_test_class);
}

module_init(sw_dma_test_init);
module_exit(sw_dma_test_exit);
MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("liugang");
MODULE_DESCRIPTION ("sunxi dma test driver");
/* * drivers/char/dma_test/sunxi_dma_test.h * * Copyright(c) 2013-2015 Allwinnertech Co., Ltd. * http://www.allwinnertech.com * * Author: liugang* * sunxi dma test driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #ifndef __SUNXI_DMA_TEST_H #define __SUNXI_DMA_TEST_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum dma_test_case_e { DTC_MEMCPY_SINGLE_CHAN, DTC_MEMCPY_MULTI_CHAN, DTC_MAX }; extern wait_queue_head_t g_dtc_queue[]; extern atomic_t g_adma_done; #define BUF_MAX_CNT 8 #define DMA_MAX_CHAN 6 typedef struct { unsigned int src_va; unsigned int src_pa; unsigned int dst_va; unsigned int dst_pa; unsigned int size; }buf_item; typedef struct { unsigned int cnt; buf_item item[BUF_MAX_CNT]; }buf_group; typedef struct { struct dma_chan *chan; /* dma channel handle */ wait_queue_head_t dma_wq; /* wait dma transfer done */ atomic_t dma_done; /* dma done flag, used with dma_wq */ }chan_info; #endif /* __SUNXI_DMA_TEST_H */