/* CCCFG register */
#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
+#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
#define TCCHEN BIT(22)
#define ITCCHEN BIT(23)
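Note on the GET_NUM_QDMACH parentheses: in C, >> binds tighter than &, so the
unparenthesized form (x & 0x70 >> 4) parses as (x & (0x70 >> 4)) and extracts
the wrong field. A minimal standalone check of the two parses:

#include <assert.h>

int main(void)
{
	unsigned int x = 0x30;	/* bits 4-6 hold the value 3 */

	/* without parentheses: x & (0x70 >> 4) == x & 0x7 == 0, wrong */
	assert((x & 0x70 >> 4) == 0);
	/* with parentheses: the intended bits 4-6 field */
	assert(((x & 0x70) >> 4) == 3);
	return 0;
}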
-/*ch_status parameter of callback function possible values*/
-#define EDMA_DMA_COMPLETE 1
-#define EDMA_DMA_CC_ERROR 2
-#define EDMA_DMA_TC1_ERROR 3
-#define EDMA_DMA_TC2_ERROR 4
-
struct edma_pset {
u32 len;
dma_addr_t addr;
/* eDMA3 resource information */
unsigned num_channels;
+ unsigned num_qchannels;
unsigned num_region;
unsigned num_slots;
unsigned num_tc;
+ bool chmap_exist;
enum dma_event_q default_queue;
bool unused_chan_list_done;
- /* The edma_inuse bit for each PaRAM slot is clear unless the
+ /* The slot_inuse bit for each PaRAM slot is clear unless the
* channel is in use ... by ARM or DSP, for QDMA, or whatever.
*/
- unsigned long *edma_inuse;
+ unsigned long *slot_inuse;
- /* The edma_unused bit for each channel is clear unless
+ /* The channel_unused bit for each channel is clear unless
* it is not being used on this platform. It uses a bit
* of SOC-specific initialization code.
*/
- unsigned long *edma_unused;
-
- struct dma_interrupt_data {
- void (*callback)(unsigned channel, unsigned short ch_status,
- void *data);
- void *data;
- } *intr_data;
+ unsigned long *channel_unused;
struct dma_device dma_slave;
struct edma_chan *slave_chans;
edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
-static inline unsigned int edma_parm_read(struct edma_cc *ecc, int offset,
- int param_no)
+static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
+ int param_no)
{
return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}
-static inline void edma_parm_write(struct edma_cc *ecc, int offset,
- int param_no, unsigned val)
+static inline void edma_param_write(struct edma_cc *ecc, int offset,
+ int param_no, unsigned val)
{
edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}
-static inline void edma_parm_modify(struct edma_cc *ecc, int offset,
- int param_no, unsigned and, unsigned or)
+static inline void edma_param_modify(struct edma_cc *ecc, int offset,
+ int param_no, unsigned and, unsigned or)
{
edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
-static inline void edma_parm_and(struct edma_cc *ecc, int offset, int param_no,
- unsigned and)
+static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
+ unsigned and)
{
edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}
-static inline void edma_parm_or(struct edma_cc *ecc, int offset, int param_no,
- unsigned or)
+static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
+ unsigned or)
{
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
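Each PaRAM set is 32 bytes, which is why these helpers scale param_no by << 5.
A minimal sketch of the offset arithmetic, assuming the driver's 0x4000 base
for the PaRAM area (an assumption for illustration, not taken from this hunk):

#include <assert.h>
#include <stdint.h>

#define EDMA_PARM	0x4000	/* assumed base offset of the PaRAM area */

static uint32_t param_reg_offset(int param_no, int reg_offset)
{
	/* mirrors edma_param_read/write: EDMA_PARM + offset + (param_no << 5) */
	return EDMA_PARM + reg_offset + ((uint32_t)param_no << 5);
}

int main(void)
{
	/* slot 3, register at byte 4 within the set: 0x4000 + 4 + 3 * 32 */
	assert(param_reg_offset(3, 4) == 0x4000 + 4 + 96);
	return 0;
}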
clear_bit(offset + (len - 1), p);
}
-static void edma_map_dmach_to_queue(struct edma_cc *ecc, unsigned ch_no,
- enum dma_event_q queue_no)
-{
- int bit = (ch_no & 0x7) * 4;
-
- /* default to low priority queue */
- if (queue_no == EVENTQ_DEFAULT)
- queue_no = ecc->default_queue;
-
- queue_no &= 7;
- edma_modify_array(ecc, EDMA_DMAQNUM, (ch_no >> 3), ~(0x7 << bit),
- queue_no << bit);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
-static void edma_direct_dmach_to_param_mapping(struct edma_cc *ecc)
+static void edma_set_chmap(struct edma_chan *echan, int slot)
{
- int i;
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
- for (i = 0; i < ecc->num_channels; i++)
- edma_write_array(ecc, EDMA_DCHMAP, i, (i << 5));
+ if (ecc->chmap_exist) {
+ slot = EDMA_CHAN_SLOT(slot);
+ edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
+ }
}
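edma_set_chmap() writes (slot << 5) because the per-channel DCHMAP registers
hold the PaRAM entry number in a field starting at bit 5 (bits 13:5 on eDMA3;
inferred here from the shift, so treat it as an assumption). A small sketch of
the encoding:

#include <stdint.h>
#include <stdio.h>

static uint32_t dchmap_value(int slot)
{
	/* PaRAM entry number placed at bit 5, as in edma_set_chmap() */
	return (uint32_t)slot << 5;
}

int main(void)
{
	printf("DCHMAP value for slot 66: 0x%08x\n", dchmap_value(66));
	return 0;
}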
static int prepare_unused_channel_list(struct device *dev, void *data)
continue;
clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
- ecc->edma_unused);
+ ecc->channel_unused);
of_node_put(dma_spec.np);
}
return 0;
dma_req = (int)res->start;
if (dma_req >= dma_req_min && dma_req < dma_req_max)
clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
- ecc->edma_unused);
+ ecc->channel_unused);
}
return 0;
}
-static void edma_setup_interrupt(struct edma_cc *ecc, unsigned lch,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data)
+static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
- lch = EDMA_CHAN_SLOT(lch);
-
- if (!callback)
- edma_shadow0_write_array(ecc, SH_IECR, lch >> 5,
- BIT(lch & 0x1f));
-
- ecc->intr_data[lch].callback = callback;
- ecc->intr_data[lch].data = data;
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
- if (callback) {
- edma_shadow0_write_array(ecc, SH_ICR, lch >> 5,
- BIT(lch & 0x1f));
- edma_shadow0_write_array(ecc, SH_IESR, lch >> 5,
- BIT(lch & 0x1f));
+ if (enable) {
+ edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
+ BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
+ BIT(channel & 0x1f));
+ } else {
+ edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
+ BIT(channel & 0x1f));
}
}
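The channel >> 5 / BIT(channel & 0x1f) pattern recurs throughout: the 64 DMA
channels are spread across two 32-bit registers per function (ICR, IESR,
IECR, ...), so a channel number splits into a register index and a bit mask.
A standalone sketch of that decomposition:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int channel = 37;			/* hypothetical channel */
	int bank = channel >> 5;		/* register index: 1 */
	uint32_t mask = 1u << (channel & 0x1f);	/* bit 5: 0x00000020 */

	printf("channel %d -> bank %d, mask 0x%08x\n", channel, bank, mask);
	return 0;
}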
*/
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
- if (slot > 0)
+ if (slot > 0) {
slot = EDMA_CHAN_SLOT(slot);
+ /* Requesting an entry paRAM slot for a HW triggered channel. */
+ if (ecc->chmap_exist && slot < ecc->num_channels)
+ slot = EDMA_SLOT_ANY;
+ }
+
if (slot < 0) {
- slot = ecc->num_channels;
+ if (ecc->chmap_exist)
+ slot = 0;
+ else
+ slot = ecc->num_channels;
for (;;) {
- slot = find_next_zero_bit(ecc->edma_inuse,
+ slot = find_next_zero_bit(ecc->slot_inuse,
ecc->num_slots,
slot);
if (slot == ecc->num_slots)
return -ENOMEM;
- if (!test_and_set_bit(slot, ecc->edma_inuse))
+ if (!test_and_set_bit(slot, ecc->slot_inuse))
break;
}
- } else if (slot < ecc->num_channels || slot >= ecc->num_slots) {
+ } else if (slot >= ecc->num_slots) {
return -EINVAL;
- } else if (test_and_set_bit(slot, ecc->edma_inuse)) {
+ } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
return -EBUSY;
}
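The allocation policy above in short: with a channel map (chmap_exist) the
entry slot can be any PaRAM slot, so the scan starts at 0 and a request for a
slot below num_channels falls back to EDMA_SLOT_ANY; without a channel map,
slots below num_channels stay paired with their channels. A toy model of the
bitmap scan, standing in for find_next_zero_bit()/test_and_set_bit():

#include <stdio.h>
#include <string.h>

#define NUM_SLOTS 128

static int alloc_slot(unsigned char *inuse, int start)
{
	for (int s = start; s < NUM_SLOTS; s++) {
		if (!inuse[s]) {
			inuse[s] = 1;	/* test_and_set_bit() in the driver */
			return s;
		}
	}
	return -1;	/* -ENOMEM in the driver */
}

int main(void)
{
	unsigned char inuse[NUM_SLOTS] = { 0 };

	inuse[0] = 1;	/* pretend slot 0 is the dummy slot */
	printf("first free slot: %d\n", alloc_slot(inuse, 0));
	return 0;
}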
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
slot = EDMA_CHAN_SLOT(slot);
- if (slot < ecc->num_channels || slot >= ecc->num_slots)
+ if (slot >= ecc->num_slots)
return;
edma_write_slot(ecc, slot, &dummy_paramset);
- clear_bit(slot, ecc->edma_inuse);
+ clear_bit(slot, ecc->slot_inuse);
}
/**
if (from >= ecc->num_slots || to >= ecc->num_slots)
return;
- edma_parm_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
- PARM_OFFSET(to));
+ edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
+ PARM_OFFSET(to));
}
/**
return edma_read(ecc, offs);
}
-/*-----------------------------------------------------------------------*/
-/**
- * edma_start - start dma on a channel
- * @ecc: pointer to edma_cc struct
- * @channel: channel being activated
- *
+/*
* Channels with event associations will be triggered by their hardware
* events, and channels without such associations will be triggered by
* software. (At this writing there is no interface for using software
* triggers except with channels that don't support hardware triggers.)
- *
- * Returns zero on success, else negative errno.
*/
-static int edma_start(struct edma_cc *ecc, unsigned channel)
+static void edma_start(struct edma_chan *echan)
{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return -EINVAL;
- }
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < ecc->num_channels) {
- int j = channel >> 5;
- unsigned int mask = BIT(channel & 0x1f);
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int j = (channel >> 5);
+ unsigned int mask = BIT(channel & 0x1f);
+ if (test_bit(channel, ecc->channel_unused)) {
/* EDMA channels without event association */
- if (test_bit(channel, ecc->edma_unused)) {
- dev_dbg(ecc->dev, "ESR%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ESR, j));
- edma_shadow0_write_array(ecc, SH_ESR, j, mask);
- return 0;
- }
-
+ dev_dbg(ecc->dev, "ESR%d %08x\n", j,
+ edma_shadow0_read_array(ecc, SH_ESR, j));
+ edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+ } else {
/* EDMA channel with event association */
dev_dbg(ecc->dev, "ER%d %08x\n", j,
edma_shadow0_read_array(ecc, SH_ER, j));
edma_shadow0_write_array(ecc, SH_EESR, j, mask);
dev_dbg(ecc->dev, "EER%d %08x\n", j,
edma_shadow0_read_array(ecc, SH_EER, j));
- return 0;
}
-
- return -EINVAL;
}
-/**
- * edma_stop - stops dma on the channel passed
- * @ecc: pointer to edma_cc struct
- * @channel: channel being deactivated
- *
- * Any active transfer is paused and all pending hardware events are cleared.
- * The current transfer may not be resumed, and the channel's Parameter RAM
- * should be reinitialized before being reused.
- */
-static void edma_stop(struct edma_cc *ecc, unsigned channel)
+static void edma_stop(struct edma_chan *echan)
{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return;
- }
- channel = EDMA_CHAN_SLOT(channel);
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int j = (channel >> 5);
+ unsigned int mask = BIT(channel & 0x1f);
- if (channel < ecc->num_channels) {
- int j = channel >> 5;
- unsigned int mask = BIT(channel & 0x1f);
+ edma_shadow0_write_array(ecc, SH_EECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_write_array(ecc, EDMA_EMCR, j, mask);
- edma_shadow0_write_array(ecc, SH_EECR, j, mask);
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ /* clear possibly pending completion interrupt */
+ edma_shadow0_write_array(ecc, SH_ICR, j, mask);
- /* clear possibly pending completion interrupt */
- edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+ dev_dbg(ecc->dev, "EER%d %08x\n", j,
+ edma_shadow0_read_array(ecc, SH_EER, j));
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
-
- /* REVISIT: consider guarding against inappropriate event
- * chaining by overwriting with dummy_paramset.
- */
- }
+ /* REVISIT: consider guarding against inappropriate event
+ * chaining by overwriting with dummy_paramset.
+ */
}
/*
* Temporarily disable EDMA hardware events on the specified channel,
* preventing them from triggering new transfers
*/
-static void edma_pause(struct edma_cc *ecc, unsigned channel)
+static void edma_pause(struct edma_chan *echan)
{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return;
- }
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < ecc->num_channels) {
- unsigned int mask = BIT(channel & 0x1f);
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(ecc, SH_EECR, channel >> 5, mask);
- }
+ edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}
/* Re-enable EDMA hardware events on the specified channel. */
-static void edma_resume(struct edma_cc *ecc, unsigned channel)
+static void edma_resume(struct edma_chan *echan)
{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return;
- }
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < ecc->num_channels) {
- unsigned int mask = BIT(channel & 0x1f);
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(ecc, SH_EESR, channel >> 5, mask);
- }
+ edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}
-static int edma_trigger_channel(struct edma_cc *ecc, unsigned channel)
+static void edma_trigger_channel(struct edma_chan *echan)
{
- unsigned int mask;
-
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return -EINVAL;
- }
- channel = EDMA_CHAN_SLOT(channel);
- mask = BIT(channel & 0x1f);
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ unsigned int mask = BIT(channel & 0x1f);
edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
- return 0;
}
-static void edma_clean_channel(struct edma_cc *ecc, unsigned channel)
+static void edma_clean_channel(struct edma_chan *echan)
{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return;
- }
- channel = EDMA_CHAN_SLOT(channel);
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int j = (channel >> 5);
+ unsigned int mask = BIT(channel & 0x1f);
- if (channel < ecc->num_channels) {
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
+ edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ /* Clear the corresponding EMR bits */
+ edma_write_array(ecc, EDMA_EMCR, j, mask);
+ /* Clear any SER */
+ edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
+}
- dev_dbg(ecc->dev, "EMR%d %08x\n", j,
- edma_read_array(ecc, EDMA_EMR, j));
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
- /* Clear the corresponding EMR bits */
- edma_write_array(ecc, EDMA_EMCR, j, mask);
- /* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
- }
+/* Move channel to a specific event queue */
+static void edma_assign_channel_eventq(struct edma_chan *echan,
+ enum dma_event_q eventq_no)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int bit = (channel & 0x7) * 4;
+
+ /* default to low priority queue */
+ if (eventq_no == EVENTQ_DEFAULT)
+ eventq_no = ecc->default_queue;
+ if (eventq_no >= ecc->num_tc)
+ return;
+
+ eventq_no &= 7;
+ edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
+ eventq_no << bit);
}
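DMAQNUM packs one 3-bit queue number per channel into 4-bit fields, eight
channels per 32-bit register, which is where the (channel & 0x7) * 4 bit
position and channel >> 3 register index come from. A sketch of the field
math:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int channel = 13, queue = 2;	/* hypothetical channel and queue */
	int reg = channel >> 3;		/* DMAQNUM register index: 1 */
	int bit = (channel & 0x7) * 4;	/* field position: bit 20 */
	uint32_t clr = ~(0x7u << bit);	/* the "and" mask */
	uint32_t set = (uint32_t)(queue & 7) << bit;	/* the "or" value */

	printf("DMAQNUM%d: and 0x%08x, or 0x%08x\n", reg, clr, set);
	return 0;
}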
-/**
- * edma_alloc_channel - allocate DMA channel and paired parameter RAM
- * @ecc: pointer to edma_cc struct
- * @channel: specific channel to allocate; negative for "any unmapped channel"
- * @callback: optional; to be issued on DMA completion or errors
- * @data: passed to callback
- * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
- * Controller (TC) executes requests using this channel. Use
- * EVENTQ_DEFAULT unless you really need a high priority queue.
- *
- * This allocates a DMA channel and its associated parameter RAM slot.
- * The parameter RAM is initialized to hold a dummy transfer.
- *
- * Normal use is to pass a specific channel number as @channel, to make
- * use of hardware events mapped to that channel. When the channel will
- * be used only for software triggering or event chaining, channels not
- * mapped to hardware events (or mapped to unused events) are preferable.
- *
- * DMA transfers start from a channel using edma_start(), or by
- * chaining. When the transfer described in that channel's parameter RAM
- * slot completes, that slot's data may be reloaded through a link.
- *
- * DMA errors are only reported to the @callback associated with the
- * channel driving that transfer, but transfer completion callbacks can
- * be sent to another channel under control of the TCC field in
- * the option word of the transfer's parameter RAM set. Drivers must not
- * use DMA transfer completion callbacks for channels they did not allocate.
- * (The same applies to TCC codes used in transfer chaining.)
- *
- * Returns the number of the channel, else negative errno.
- */
-static int edma_alloc_channel(struct edma_cc *ecc, int channel,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data,
- enum dma_event_q eventq_no)
+static int edma_alloc_channel(struct edma_chan *echan,
+ enum dma_event_q eventq_no)
{
- unsigned done = 0;
- int ret = 0;
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
if (!ecc->unused_chan_list_done) {
/*
* used and clear them in the unused list, making the rest
* available for ARM usage.
*/
- ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
- prepare_unused_channel_list);
+ int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
+ prepare_unused_channel_list);
if (ret < 0)
return ret;
ecc->unused_chan_list_done = true;
}
- if (channel >= 0) {
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n",
- __func__, ecc->id, EDMA_CTLR(channel));
- return -EINVAL;
- }
- channel = EDMA_CHAN_SLOT(channel);
- }
-
- if (channel < 0) {
- channel = 0;
- for (;;) {
- channel = find_next_bit(ecc->edma_unused,
- ecc->num_channels, channel);
- if (channel == ecc->num_channels)
- break;
- if (!test_and_set_bit(channel, ecc->edma_inuse)) {
- done = 1;
- break;
- }
- channel++;
- }
- if (!done)
- return -ENOMEM;
- } else if (channel >= ecc->num_channels) {
- return -EINVAL;
- } else if (test_and_set_bit(channel, ecc->edma_inuse)) {
- return -EBUSY;
- }
-
/* ensure access through shadow region 0 */
edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
/* ensure no events are pending */
- edma_stop(ecc, EDMA_CTLR_CHAN(ecc->id, channel));
- edma_write_slot(ecc, channel, &dummy_paramset);
+ edma_stop(echan);
- if (callback)
- edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, channel),
- callback, data);
+ edma_setup_interrupt(echan, true);
- edma_map_dmach_to_queue(ecc, channel, eventq_no);
+ edma_assign_channel_eventq(echan, eventq_no);
- return EDMA_CTLR_CHAN(ecc->id, channel);
+ return 0;
}
-/**
- * edma_free_channel - deallocate DMA channel
- * @ecc: pointer to edma_cc struct
- * @channel: dma channel returned from edma_alloc_channel()
- *
- * This deallocates the DMA channel and associated parameter RAM slot
- * allocated by edma_alloc_channel().
- *
- * Callers are responsible for ensuring the channel is inactive, and
- * will not be reactivated by linking, chaining, or software calls to
- * edma_start().
- */
-static void edma_free_channel(struct edma_cc *ecc, unsigned channel)
+static void edma_free_channel(struct edma_chan *echan)
{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return;
- }
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel >= ecc->num_channels)
- return;
-
- edma_setup_interrupt(ecc, channel, NULL, NULL);
+ /* ensure no events are pending */
+ edma_stop(echan);
/* REVISIT should probably take out of shadow region 0 */
-
- edma_write_slot(ecc, channel, &dummy_paramset);
- clear_bit(channel, ecc->edma_inuse);
-}
-
-/* Move channel to a specific event queue */
-static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel,
- enum dma_event_q eventq_no)
-{
- if (ecc->id != EDMA_CTLR(channel)) {
- dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
- ecc->id, EDMA_CTLR(channel));
- return;
- }
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel >= ecc->num_channels)
- return;
-
- /* default to low priority queue */
- if (eventq_no == EVENTQ_DEFAULT)
- eventq_no = ecc->default_queue;
- if (eventq_no >= ecc->num_tc)
- return;
-
- edma_map_dmach_to_queue(ecc, channel, eventq_no);
-}
-
-/* eDMA interrupt handler */
-static irqreturn_t dma_irq_handler(int irq, void *data)
-{
- struct edma_cc *ecc = data;
- int ctlr;
- u32 sh_ier;
- u32 sh_ipr;
- u32 bank;
-
- ctlr = ecc->id;
- if (ctlr < 0)
- return IRQ_NONE;
-
- dev_dbg(ecc->dev, "dma_irq_handler\n");
-
- sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
- if (!sh_ipr) {
- sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
- if (!sh_ipr)
- return IRQ_NONE;
- sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
- bank = 1;
- } else {
- sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
- bank = 0;
- }
-
- do {
- u32 slot;
- u32 channel;
-
- dev_dbg(ecc->dev, "IPR%d %08x\n", bank, sh_ipr);
-
- slot = __ffs(sh_ipr);
- sh_ipr &= ~(BIT(slot));
-
- if (sh_ier & BIT(slot)) {
- channel = (bank << 5) | slot;
- /* Clear the corresponding IPR bits */
- edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
- if (ecc->intr_data[channel].callback)
- ecc->intr_data[channel].callback(
- EDMA_CTLR_CHAN(ctlr, channel),
- EDMA_DMA_COMPLETE,
- ecc->intr_data[channel].data);
- }
- } while (sh_ipr);
-
- edma_shadow0_write(ecc, SH_IEVAL, 1);
- return IRQ_HANDLED;
-}
-
-/* eDMA error interrupt handler */
-static irqreturn_t dma_ccerr_handler(int irq, void *data)
-{
- struct edma_cc *ecc = data;
- int i;
- int ctlr;
- unsigned int cnt = 0;
-
- ctlr = ecc->id;
- if (ctlr < 0)
- return IRQ_NONE;
-
- dev_dbg(ecc->dev, "dma_ccerr_handler\n");
-
- if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
- (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
- (edma_read(ecc, EDMA_QEMR) == 0) &&
- (edma_read(ecc, EDMA_CCERR) == 0))
- return IRQ_NONE;
-
- while (1) {
- int j = -1;
-
- if (edma_read_array(ecc, EDMA_EMR, 0))
- j = 0;
- else if (edma_read_array(ecc, EDMA_EMR, 1))
- j = 1;
- if (j >= 0) {
- dev_dbg(ecc->dev, "EMR%d %08x\n", j,
- edma_read_array(ecc, EDMA_EMR, j));
- for (i = 0; i < 32; i++) {
- int k = (j << 5) + i;
-
- if (edma_read_array(ecc, EDMA_EMR, j) &
- BIT(i)) {
- /* Clear the corresponding EMR bits */
- edma_write_array(ecc, EDMA_EMCR, j,
- BIT(i));
- /* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR,
- j, BIT(i));
- if (ecc->intr_data[k].callback) {
- ecc->intr_data[k].callback(
- EDMA_CTLR_CHAN(ctlr, k),
- EDMA_DMA_CC_ERROR,
- ecc->intr_data[k].data);
- }
- }
- }
- } else if (edma_read(ecc, EDMA_QEMR)) {
- dev_dbg(ecc->dev, "QEMR %02x\n",
- edma_read(ecc, EDMA_QEMR));
- for (i = 0; i < 8; i++) {
- if (edma_read(ecc, EDMA_QEMR) & BIT(i)) {
- /* Clear the corresponding IPR bits */
- edma_write(ecc, EDMA_QEMCR, BIT(i));
- edma_shadow0_write(ecc, SH_QSECR,
- BIT(i));
-
- /* NOTE: not reported!! */
- }
- }
- } else if (edma_read(ecc, EDMA_CCERR)) {
- dev_dbg(ecc->dev, "CCERR %08x\n",
- edma_read(ecc, EDMA_CCERR));
- /* FIXME: CCERR.BIT(16) ignored! much better
- * to just write CCERRCLR with CCERR value...
- */
- for (i = 0; i < 8; i++) {
- if (edma_read(ecc, EDMA_CCERR) & BIT(i)) {
- /* Clear the corresponding IPR bits */
- edma_write(ecc, EDMA_CCERRCLR, BIT(i));
-
- /* NOTE: not reported!! */
- }
- }
- }
- if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
- (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
- (edma_read(ecc, EDMA_QEMR) == 0) &&
- (edma_read(ecc, EDMA_CCERR) == 0))
- break;
- cnt++;
- if (cnt > 10)
- break;
- }
- edma_write(ecc, EDMA_EEVAL, 1);
- return IRQ_HANDLED;
+ edma_setup_interrupt(echan, false);
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
* transfers of MAX_NR_SG
*/
dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
- edma_clean_channel(ecc, echan->ch_num);
- edma_stop(ecc, echan->ch_num);
- edma_start(ecc, echan->ch_num);
- edma_trigger_channel(ecc, echan->ch_num);
+ edma_clean_channel(echan);
+ edma_stop(echan);
+ edma_start(echan);
+ edma_trigger_channel(echan);
echan->missed = 0;
} else if (edesc->processed <= MAX_NR_SG) {
dev_dbg(dev, "first transfer starting on channel %d\n",
echan->ch_num);
- edma_start(ecc, echan->ch_num);
+ edma_start(echan);
} else {
dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
echan->ch_num, edesc->processed);
- edma_resume(ecc, echan->ch_num);
+ edma_resume(echan);
}
}
* echan->edesc is NULL and exit.)
*/
if (echan->edesc) {
- edma_stop(echan->ecc, echan->ch_num);
+ edma_stop(echan);
/* Move the cyclic channel back to default queue */
if (echan->edesc->cyclic)
- edma_assign_channel_eventq(echan->ecc, echan->ch_num,
- EVENTQ_DEFAULT);
+ edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
/*
* free the running request descriptor
* since it is not in any of the vdesc lists
if (!echan->edesc)
return -EINVAL;
- edma_pause(echan->ecc, echan->ch_num);
+ edma_pause(echan);
return 0;
}
{
struct edma_chan *echan = to_edma_chan(chan);
- edma_resume(echan->ecc, echan->ch_num);
+ edma_resume(echan);
return 0;
}
*/
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
- enum dma_slave_buswidth dev_width,
- unsigned int dma_length,
+ unsigned int acnt, unsigned int dma_length,
enum dma_transfer_direction direction)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edmacc_param *param = &epset->param;
- int acnt, bcnt, ccnt, cidx;
+ int bcnt, ccnt, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int absync;
- acnt = dev_width;
-
/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
if (!burst)
burst = 1;
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long tx_flags)
{
- int ret;
+ int ret, nslots;
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
+ unsigned int width, pset_len;
if (unlikely(!echan || !len))
return NULL;
- edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (len < SZ_64K) {
+ /*
+ * Transfer sizes under 64K can be handled with a single paRAM
+ * slot and one burst.
+ * ACNT = length
+ */
+ width = len;
+ pset_len = len;
+ nslots = 1;
+ } else {
+ /*
+ * Transfer sizes over 64K are handled with at most two paRAM
+ * slots.
+ * slot1: (full_length / 32767) bursts of 32767 bytes.
+ * ACNT = 32767, length1 = (full_length / 32767) * 32767
+ * slot2: the remaining amount of data after slot1.
+ * ACNT = full_length - length1, length2 = ACNT
+ *
+ * When full_length is a multiple of 32767, one slot can be
+ * used to complete the transfer.
+ */
+ width = SZ_32K - 1;
+ pset_len = rounddown(len, width);
+ /* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */
+ if (unlikely(pset_len == len))
+ nslots = 1;
+ else
+ nslots = 2;
+ }
+
+ edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
+ GFP_ATOMIC);
if (!edesc) {
dev_dbg(dev, "Failed to allocate a descriptor\n");
return NULL;
}
- edesc->pset_nr = 1;
+ edesc->pset_nr = nslots;
+ edesc->residue = edesc->residue_stat = len;
+ edesc->direction = DMA_MEM_TO_MEM;
+ edesc->echan = echan;
ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
- DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
- if (ret < 0)
+ width, pset_len, DMA_MEM_TO_MEM);
+ if (ret < 0) {
+ kfree(edesc);
return NULL;
+ }
edesc->absync = ret;
- /*
- * Enable intermediate transfer chaining to re-trigger channel
- * on completion of every TR, and enable transfer-completion
- * interrupt on completion of the whole transfer.
- */
edesc->pset[0].param.opt |= ITCCHEN;
- edesc->pset[0].param.opt |= TCINTEN;
+ if (nslots == 1) {
+ /* Enable transfer complete interrupt */
+ edesc->pset[0].param.opt |= TCINTEN;
+ } else {
+ /* Enable transfer complete chaining for the first slot */
+ edesc->pset[0].param.opt |= TCCHEN;
+
+ if (echan->slot[1] < 0) {
+ echan->slot[1] = edma_alloc_slot(echan->ecc,
+ EDMA_SLOT_ANY);
+ if (echan->slot[1] < 0) {
+ kfree(edesc);
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
+ return NULL;
+ }
+ }
+ dest += pset_len;
+ src += pset_len;
+ pset_len = width = len % (SZ_32K - 1);
+
+ ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
+ width, pset_len, DMA_MEM_TO_MEM);
+ if (ret < 0) {
+ kfree(edesc);
+ return NULL;
+ }
+
+ edesc->pset[1].param.opt |= ITCCHEN;
+ edesc->pset[1].param.opt |= TCINTEN;
+ }
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
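The slot-splitting math is easier to see with numbers: a 100000-byte copy gets
slot1 covering rounddown(100000, 32767) = 98301 bytes and slot2 the remaining
1699. A standalone sketch of just that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x10000u
#define SZ_32K 0x8000u

int main(void)
{
	uint32_t len = 100000;	/* hypothetical transfer length */
	uint32_t width, pset_len, nslots;

	if (len < SZ_64K) {
		width = pset_len = len;		/* ACNT = length, one burst */
		nslots = 1;
	} else {
		width = SZ_32K - 1;		/* ACNT = 32767 */
		pset_len = len - (len % width);	/* rounddown(len, width) */
		nslots = (pset_len == len) ? 1 : 2;
	}
	printf("len=%u: slot1=%u bytes, slot2=%u bytes, %u slot(s)\n",
	       len, pset_len, len - pset_len, nslots);
	return 0;
}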
}
/* Place the cyclic channel to highest priority queue */
- edma_assign_channel_eventq(echan->ecc, echan->ch_num, EVENTQ_0);
+ edma_assign_channel_eventq(echan, EVENTQ_0);
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
-static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
+static void edma_completion_handler(struct edma_chan *echan)
+{
+ struct device *dev = echan->vchan.chan.device->dev;
+ struct edma_desc *edesc = echan->edesc;
+
+ if (!edesc)
+ return;
+
+ spin_lock(&echan->vchan.lock);
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ spin_unlock(&echan->vchan.lock);
+ return;
+ } else if (edesc->processed == edesc->pset_nr) {
+ edesc->residue = 0;
+ edma_stop(echan);
+ vchan_cookie_complete(&edesc->vdesc);
+ echan->edesc = NULL;
+
+ dev_dbg(dev, "Transfer completed on channel %d\n",
+ echan->ch_num);
+ } else {
+ dev_dbg(dev, "Sub transfer completed on channel %d\n",
+ echan->ch_num);
+
+ edma_pause(echan);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+ }
+ edma_execute(echan);
+
+ spin_unlock(&echan->vchan.lock);
+}
+
+/* eDMA interrupt handler */
+static irqreturn_t dma_irq_handler(int irq, void *data)
+{
+ struct edma_cc *ecc = data;
+ int ctlr;
+ u32 sh_ier;
+ u32 sh_ipr;
+ u32 bank;
+
+ ctlr = ecc->id;
+ if (ctlr < 0)
+ return IRQ_NONE;
+
+ dev_vdbg(ecc->dev, "dma_irq_handler\n");
+
+ sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
+ if (!sh_ipr) {
+ sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
+ if (!sh_ipr)
+ return IRQ_NONE;
+ sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
+ bank = 1;
+ } else {
+ sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
+ bank = 0;
+ }
+
+ do {
+ u32 slot;
+ u32 channel;
+
+ slot = __ffs(sh_ipr);
+ sh_ipr &= ~(BIT(slot));
+
+ if (sh_ier & BIT(slot)) {
+ channel = (bank << 5) | slot;
+ /* Clear the corresponding IPR bits */
+ edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
+ edma_completion_handler(&ecc->slave_chans[channel]);
+ }
+ } while (sh_ipr);
+
+ edma_shadow0_write(ecc, SH_IEVAL, 1);
+ return IRQ_HANDLED;
+}
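The handler's inner loop is the classic pending-word scan: take the lowest set
bit (__ffs() in the kernel), clear it, dispatch, and repeat until the word is
empty. A standalone equivalent using the compiler builtin:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sh_ipr = 0x00000812;	/* hypothetical IPR snapshot */
	int bank = 0;

	while (sh_ipr) {
		int slot = __builtin_ctz(sh_ipr);	/* lowest set bit */
		sh_ipr &= ~(1u << slot);
		printf("completion on channel %d\n", (bank << 5) | slot);
	}
	return 0;
}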
+
+static void edma_error_handler(struct edma_chan *echan)
{
- struct edma_chan *echan = data;
struct edma_cc *ecc = echan->ecc;
struct device *dev = echan->vchan.chan.device->dev;
- struct edma_desc *edesc;
struct edmacc_param p;
- edesc = echan->edesc;
+ if (!echan->edesc)
+ return;
spin_lock(&echan->vchan.lock);
- switch (ch_status) {
- case EDMA_DMA_COMPLETE:
- if (edesc) {
- if (edesc->cyclic) {
- vchan_cyclic_callback(&edesc->vdesc);
- goto out;
- } else if (edesc->processed == edesc->pset_nr) {
- dev_dbg(dev,
- "Transfer completed on channel %d\n",
- ch_num);
- edesc->residue = 0;
- edma_stop(ecc, echan->ch_num);
- vchan_cookie_complete(&edesc->vdesc);
- echan->edesc = NULL;
- } else {
- dev_dbg(dev,
- "Sub transfer completed on channel %d\n",
- ch_num);
-
- edma_pause(ecc, echan->ch_num);
-
- /* Update statistics for tx_status */
- edesc->residue -= edesc->sg_len;
- edesc->residue_stat = edesc->residue;
- edesc->processed_stat = edesc->processed;
- }
- edma_execute(echan);
- }
- break;
- case EDMA_DMA_CC_ERROR:
- edma_read_slot(ecc, echan->slot[0], &p);
+ edma_read_slot(ecc, echan->slot[0], &p);
+ /*
+ * Issue the transfer later, relying on the missed flag; this is
+ * guaranteed to happen because either:
+ * (1) we finished transmitting an intermediate slot and
+ * edma_execute is coming up, or
+ * (2) we finished the current transfer and issue will
+ * call edma_execute.
+ *
+ * Important note: issuing can be dangerous here and
+ * lead to some nasty recursion when we are in a NULL
+ * slot. So we avoid doing so and set the missed flag.
+ */
+ if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ dev_dbg(dev, "Error on null slot, setting miss\n");
+ echan->missed = 1;
+ } else {
/*
- * Issue later based on missed flag which will be sure
- * to happen as:
- * (1) we finished transmitting an intermediate slot and
- * edma_execute is coming up.
- * (2) or we finished current transfer and issue will
- * call edma_execute.
- *
- * Important note: issuing can be dangerous here and
- * lead to some nasty recursion when we are in a NULL
- * slot. So we avoid doing so and set the missed flag.
+ * The slot is already programmed but the event got
+ * missed, so it's safe to issue it here.
*/
- if (p.a_b_cnt == 0 && p.ccnt == 0) {
- dev_dbg(dev, "Error on null slot, setting miss\n");
- echan->missed = 1;
- } else {
- /*
- * The slot is already programmed but the event got
- * missed, so its safe to issue it here.
- */
- dev_dbg(dev, "Missed event, TRIGGERING\n");
- edma_clean_channel(ecc, echan->ch_num);
- edma_stop(ecc, echan->ch_num);
- edma_start(ecc, echan->ch_num);
- edma_trigger_channel(ecc, echan->ch_num);
- }
- break;
- default:
- break;
+ dev_dbg(dev, "Missed event, TRIGGERING\n");
+ edma_clean_channel(echan);
+ edma_stop(echan);
+ edma_start(echan);
+ edma_trigger_channel(echan);
}
-out:
spin_unlock(&echan->vchan.lock);
}
+static inline bool edma_error_pending(struct edma_cc *ecc)
+{
+ if (edma_read_array(ecc, EDMA_EMR, 0) ||
+ edma_read_array(ecc, EDMA_EMR, 1) ||
+ edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
+ return true;
+
+ return false;
+}
+
+/* eDMA error interrupt handler */
+static irqreturn_t dma_ccerr_handler(int irq, void *data)
+{
+ struct edma_cc *ecc = data;
+ int i, j;
+ int ctlr;
+ unsigned int cnt = 0;
+ unsigned int val;
+
+ ctlr = ecc->id;
+ if (ctlr < 0)
+ return IRQ_NONE;
+
+ dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
+
+ if (!edma_error_pending(ecc))
+ return IRQ_NONE;
+
+ while (1) {
+ /* Event missed register(s) */
+ for (j = 0; j < 2; j++) {
+ unsigned long emr;
+
+ val = edma_read_array(ecc, EDMA_EMR, j);
+ if (!val)
+ continue;
+
+ dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
+ emr = val;
+ for (i = find_next_bit(&emr, 32, 0); i < 32;
+ i = find_next_bit(&emr, 32, i + 1)) {
+ int k = (j << 5) + i;
+
+ /* Clear the corresponding EMR bits */
+ edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
+ /* Clear any SER */
+ edma_shadow0_write_array(ecc, SH_SECR, j,
+ BIT(i));
+ edma_error_handler(&ecc->slave_chans[k]);
+ }
+ }
+
+ val = edma_read(ecc, EDMA_QEMR);
+ if (val) {
+ dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
+ /* Not reported, just clear the interrupt reason. */
+ edma_write(ecc, EDMA_QEMCR, val);
+ edma_shadow0_write(ecc, SH_QSECR, val);
+ }
+
+ val = edma_read(ecc, EDMA_CCERR);
+ if (val) {
+ dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
+ /* Not reported, just clear the interrupt reason. */
+ edma_write(ecc, EDMA_CCERRCLR, val);
+ }
+
+ if (!edma_error_pending(ecc))
+ break;
+ cnt++;
+ if (cnt > 10)
+ break;
+ }
+ edma_write(ecc, EDMA_EEVAL, 1);
+ return IRQ_HANDLED;
+}
+
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
int ret;
- int a_ch_num;
- LIST_HEAD(descs);
-
- a_ch_num = edma_alloc_channel(echan->ecc, echan->ch_num,
- edma_callback, echan, EVENTQ_DEFAULT);
- if (a_ch_num < 0) {
- ret = -ENODEV;
- goto err_no_chan;
- }
+ ret = edma_alloc_channel(echan, EVENTQ_DEFAULT);
+ if (ret)
+ return ret;
- if (a_ch_num != echan->ch_num) {
- dev_err(dev, "failed to allocate requested channel %u:%u\n",
- EDMA_CTLR(echan->ch_num),
+ echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
+ if (echan->slot[0] < 0) {
+ dev_err(dev, "Entry slot allocation failed for channel %u\n",
EDMA_CHAN_SLOT(echan->ch_num));
- ret = -ENODEV;
- goto err_wrong_chan;
+ goto err_slot;
}
+ /* Set up channel -> slot mapping for the entry slot */
+ edma_set_chmap(echan, echan->slot[0]);
echan->alloced = true;
- echan->slot[0] = echan->ch_num;
dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
return 0;
-err_wrong_chan:
- edma_free_channel(echan->ecc, a_ch_num);
-err_no_chan:
+err_slot:
+ edma_free_channel(echan);
return ret;
}
int i;
/* Terminate transfers */
- edma_stop(echan->ecc, echan->ch_num);
+ edma_stop(echan);
vchan_free_chan_resources(&echan->vchan);
/* Free EDMA PaRAM slots */
- for (i = 1; i < EDMA_MAX_SLOTS; i++) {
+ for (i = 0; i < EDMA_MAX_SLOTS; i++) {
if (echan->slot[i] >= 0) {
edma_free_slot(echan->ecc, echan->slot[i]);
echan->slot[i] = -1;
}
}
+ /* Set entry slot to the dummy slot */
+ edma_set_chmap(echan, echan->ecc->dummy_slot);
+
/* Free EDMA channel */
if (echan->alloced) {
- edma_free_channel(echan->ecc, echan->ch_num);
+ edma_free_channel(echan);
echan->alloced = false;
}
return ret;
}
-static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma,
- struct edma_chan *echans)
+#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static void edma_dma_init(struct edma_cc *ecc)
{
+ struct dma_device *ddev = &ecc->dma_slave;
int i, j;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+
+ ddev->device_prep_slave_sg = edma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+ ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+ ddev->device_free_chan_resources = edma_free_chan_resources;
+ ddev->device_issue_pending = edma_issue_pending;
+ ddev->device_tx_status = edma_tx_status;
+ ddev->device_config = edma_slave_config;
+ ddev->device_pause = edma_dma_pause;
+ ddev->device_resume = edma_dma_resume;
+ ddev->device_terminate_all = edma_terminate_all;
+
+ ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+ ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ ddev->dev = ecc->dev;
+
+ INIT_LIST_HEAD(&ddev->channels);
+
for (i = 0; i < ecc->num_channels; i++) {
- struct edma_chan *echan = &echans[i];
+ struct edma_chan *echan = &ecc->slave_chans[i];
echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
echan->ecc = ecc;
echan->vchan.desc_free = edma_desc_free;
- vchan_init(&echan->vchan, dma);
+ vchan_init(&echan->vchan, ddev);
INIT_LIST_HEAD(&echan->node);
for (j = 0; j < EDMA_MAX_SLOTS; j++)
}
}
-#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
- struct device *dev)
-{
- dma->device_prep_slave_sg = edma_prep_slave_sg;
- dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
- dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
- dma->device_alloc_chan_resources = edma_alloc_chan_resources;
- dma->device_free_chan_resources = edma_free_chan_resources;
- dma->device_issue_pending = edma_issue_pending;
- dma->device_tx_status = edma_tx_status;
- dma->device_config = edma_slave_config;
- dma->device_pause = edma_dma_pause;
- dma->device_resume = edma_dma_resume;
- dma->device_terminate_all = edma_terminate_all;
-
- dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
- dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
- dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
- dma->dev = dev;
-
- /*
- * code using dma memcpy must make sure alignment of
- * length is at dma->copy_align boundary.
- */
- dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
-
- INIT_LIST_HEAD(&dma->channels);
-}
-
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
struct edma_cc *ecc)
{
value = GET_NUM_DMACH(cccfg);
ecc->num_channels = BIT(value + 1);
+ value = GET_NUM_QDMACH(cccfg);
+ ecc->num_qchannels = value * 2;
+
value = GET_NUM_PAENTRY(cccfg);
ecc->num_slots = BIT(value + 4);
value = GET_NUM_EVQUE(cccfg);
ecc->num_tc = value + 1;
+ ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
+
dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
dev_dbg(dev, "num_region: %u\n", ecc->num_region);
dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
+ dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
+ dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
/* Nothing needs to be done if queue priority is provided */
if (pdata->queue_priority_mapping)
if (!ecc->slave_chans)
return -ENOMEM;
- ecc->intr_data = devm_kcalloc(dev, ecc->num_channels,
- sizeof(*ecc->intr_data), GFP_KERNEL);
- if (!ecc->intr_data)
+ ecc->channel_unused = devm_kcalloc(dev,
+ BITS_TO_LONGS(ecc->num_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!ecc->channel_unused)
return -ENOMEM;
- ecc->edma_unused = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels),
- sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->edma_unused)
- return -ENOMEM;
-
- ecc->edma_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
+ ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->edma_inuse)
+ if (!ecc->slot_inuse)
return -ENOMEM;
ecc->default_queue = info->default_queue;
edma_write_slot(ecc, i, &dummy_paramset);
/* Mark all channels as unused */
- memset(ecc->edma_unused, 0xff, sizeof(ecc->edma_unused));
+ memset(ecc->channel_unused, 0xff,
+        BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long));
if (info->rsv) {
/* Clear the reserved channels in unused list */
for (i = 0; rsv_chans[i][0] != -1; i++) {
off = rsv_chans[i][0];
ln = rsv_chans[i][1];
- clear_bits(off, ln, ecc->edma_unused);
+ clear_bits(off, ln, ecc->channel_unused);
}
}
for (i = 0; rsv_slots[i][0] != -1; i++) {
off = rsv_slots[i][0];
ln = rsv_slots[i][1];
- set_bits(off, ln, ecc->edma_inuse);
+ set_bits(off, ln, ecc->slot_inuse);
}
}
}
if (xbar_chans) {
for (i = 0; xbar_chans[i][1] != -1; i++) {
off = xbar_chans[i][1];
- clear_bits(off, 1, ecc->edma_unused);
+ clear_bits(off, 1, ecc->channel_unused);
}
}
}
}
- for (i = 0; i < ecc->num_channels; i++)
- edma_map_dmach_to_queue(ecc, i, info->default_queue);
+ ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
+ if (ecc->dummy_slot < 0) {
+ dev_err(dev, "Can't allocate PaRAM dummy slot\n");
+ return ecc->dummy_slot;
+ }
queue_priority_mapping = info->queue_priority_mapping;
edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
queue_priority_mapping[i][1]);
- /* Map the channel to param entry if channel mapping logic exist */
- if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
- edma_direct_dmach_to_param_mapping(ecc);
-
for (i = 0; i < ecc->num_region; i++) {
edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
}
ecc->info = info;
- ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
- if (ecc->dummy_slot < 0) {
- dev_err(dev, "Can't allocate PaRAM dummy slot\n");
- return ecc->dummy_slot;
- }
-
- dma_cap_zero(ecc->dma_slave.cap_mask);
- dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
- dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
-
- edma_dma_init(ecc, &ecc->dma_slave, dev);
+ /* Init the dma device and channels */
+ edma_dma_init(ecc);
- edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
+ for (i = 0; i < ecc->num_channels; i++) {
+ /* Assign all channels to the default queue */
+ edma_assign_channel_eventq(&ecc->slave_chans[i],
+ info->default_queue);
+ /* Set entry slot to the dummy slot */
+ edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
+ }
ret = dma_async_device_register(&ecc->dma_slave);
if (ret)
static int edma_pm_resume(struct device *dev)
{
struct edma_cc *ecc = dev_get_drvdata(dev);
+ struct edma_chan *echan = ecc->slave_chans;
int i;
s8 (*queue_priority_mapping)[2];
edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
queue_priority_mapping[i][1]);
- /* Map the channel to param entry if channel mapping logic */
- if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
- edma_direct_dmach_to_param_mapping(ecc);
-
for (i = 0; i < ecc->num_channels; i++) {
- if (test_bit(i, ecc->edma_inuse)) {
+ if (echan[i].alloced) {
/* ensure access through shadow region 0 */
edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
BIT(i & 0x1f));
- edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, i),
- ecc->intr_data[i].callback,
- ecc->intr_data[i].data);
+ edma_setup_interrupt(&echan[i], true);
+
+ /* Set up channel -> slot mapping for the entry slot */
+ edma_set_chmap(&echan[i], echan[i].slot[0]);
}
}