2019-07-04 18:11:43 +02:00
|
|
|
#include <gint/hardware.h>
|
2019-03-10 15:45:34 +01:00
|
|
|
#include <gint/mpu/dma.h>
|
|
|
|
#include <gint/mpu/power.h>
|
|
|
|
#include <gint/mpu/intc.h>
|
2020-06-20 17:18:51 +02:00
|
|
|
#include <gint/intc.h>
|
2019-03-10 15:45:34 +01:00
|
|
|
#include <gint/dma.h>
|
|
|
|
#include <gint/drivers.h>
|
2021-04-23 18:50:20 +02:00
|
|
|
#include <gint/drivers/states.h>
|
2019-03-10 15:45:34 +01:00
|
|
|
#include <gint/clock.h>
|
2019-09-03 22:15:00 +02:00
|
|
|
#include <gint/exc.h>
|
2021-06-16 21:50:41 +02:00
|
|
|
#include <gint/cpu.h>
|
2019-03-10 15:45:34 +01:00
|
|
|
|
|
|
|
/* Shorthands for the SH7305 memory-mapped peripheral register banks */
#define DMA SH7305_DMA
#define POWER SH7305_POWER

/* Register block of a single DMA channel; volatile because it is
   memory-mapped hardware */
typedef volatile sh7305_dma_channel_t channel_t;

/* Callbacks for all channels */
static gint_call_t dma_callbacks[6] = { 0 };

/* Sleep blocking flags for all channels */
static bool dma_sleep_blocking[6] = { 0 };

/* ICS for dma_channel_wait() for all channels */
static cpu_csleep_t *dma_wait_ics[6] = { 0 };
|
2021-04-28 17:53:19 +02:00
|
|
|
|
2019-08-27 21:18:44 +02:00
|
|
|
/* dma_channel(): Get address of a DMA channel */
|
2019-08-08 11:20:49 +02:00
|
|
|
static channel_t *dma_channel(int channel)
|
|
|
|
{
|
|
|
|
channel_t *addr[6] = {
|
|
|
|
&DMA.DMA0, &DMA.DMA1, &DMA.DMA2,
|
|
|
|
&DMA.DMA3, &DMA.DMA4, &DMA.DMA5,
|
|
|
|
};
|
|
|
|
|
|
|
|
return ((uint)channel >= 6 ? NULL : addr[channel]);
|
|
|
|
}
|
|
|
|
|
2019-08-27 21:18:44 +02:00
|
|
|
/* dma_translate(): Translate virtual address to DMA-suitable form */
|
|
|
|
static uint32_t dma_translate(void const *address)
|
|
|
|
{
|
|
|
|
uint32_t a = (uint32_t)address;
|
|
|
|
|
|
|
|
/* Preserve RS addresses (as of SH7724 Reference, 11.2.2) */
|
|
|
|
if(a >= 0xfd800000 && a < 0xfd800800)
|
|
|
|
return a;
|
|
|
|
|
|
|
|
/* Translate virtual addresses to IL memory to physical addresses; the
|
|
|
|
same address is used (as of SH7724 Reference, 10.3.3) */
|
|
|
|
if(a >= 0xe5200000 && a < 0xe5204000)
|
|
|
|
return a;
|
|
|
|
|
|
|
|
/* First additional on-chip memory area (XRAM) */
|
2019-09-15 15:09:32 +02:00
|
|
|
if(a >= 0xe5007000 && a < 0xe5009000)
|
2019-08-27 21:18:44 +02:00
|
|
|
return a;
|
|
|
|
|
|
|
|
/* Second on-chip memory area (YRAM) */
|
|
|
|
if(a >= 0xe5017000 && a < 0xe5019000)
|
|
|
|
return a;
|
|
|
|
|
|
|
|
/* Translate P1 and P2 addresses to ROM and RAM to physical form */
|
|
|
|
if(a >= 0x80000000 && a < 0xc0000000)
|
|
|
|
return a & 0x1fffffff;
|
|
|
|
|
|
|
|
/* By default: I don't know what this is, let's preserve it */
|
|
|
|
return a;
|
|
|
|
}
|
|
|
|
|
2019-03-10 15:45:34 +01:00
|
|
|
//---
|
|
|
|
// Driver interface
|
|
|
|
//---
|
|
|
|
|
2019-08-08 11:20:49 +02:00
|
|
|
/* dma_setup(): Setup the DMA in interrupt or no-interrupt mode.
   The first parameters are as for dma_transfer() and dma_transfer_atomic().
   The last parameter indicates whether interrupts should be used.
   Returns non-zero if the DMA is busy or a configuration error occurs.
   On success, the channel is fully configured but NOT yet started; the
   caller starts it by setting CHCR.DE = 1. */
static int dma_setup(int channel, dma_size_t size, uint blocks,
	void const *src, dma_address_t src_mode,
	void *dst, dma_address_t dst_mode,
	int interrupts)
{
	channel_t *ch = dma_channel(channel);
	if(!ch) return 1;

	/* Safety guard: only start a transfer if there's not one running */
	if(ch->CHCR.DE) return 1;

	/* Disable channel and disable the master DMA switch while the
	   registers are being edited */
	ch->CHCR.DE = 0;
	DMA.OR.DME = 0;

	/* Set DMA source and target address (translated to physical form) */
	ch->SAR = dma_translate(src);
	ch->DAR = dma_translate(dst);

	/* Set the number of blocks to be transferred */
	ch->TCR = blocks;

	/* Fill in CHCR. Set RS=0100 (auto-request) and the user-provided
	   values for TS (transfer size), DM and SM (address modes). The full
	   lword is written first, then individual bitfields on top of it. */
	ch->CHCR.lword = 0x00000400;
	ch->CHCR.TS_32 = (size >> 2);
	ch->CHCR.TS_10 = (size & 3);
	ch->CHCR.DM = dst_mode;
	ch->CHCR.SM = src_mode;
	ch->CHCR.IE = !!interrupts;

	/* Prepare DMAOR by enabling the master switch and clearing the
	   blocking flags (Address Error and NMI). */
	DMA.OR.DME = 1;
	DMA.OR.AE = 0;
	DMA.OR.NMIF = 0;

	/* Block sleep when the transfer involves on-chip memory; the range
	   [0xe5007000, 0xe5204000] spans XRAM, YRAM and IL memory as
	   translated above */
	dma_sleep_blocking[channel] = false;

	if(ch->SAR >= 0xe5007000 && ch->SAR <= 0xe5204000)
		dma_sleep_blocking[channel] = true;
	if(ch->DAR >= 0xe5007000 && ch->DAR <= 0xe5204000)
		dma_sleep_blocking[channel] = true;

	/* NOTE(review): 0xfe200000..0xfe3fffff also blocks sleep; presumably
	   another on-chip/peripheral area — confirm against the SH7724
	   memory map */
	if(ch->SAR >= 0xfe200000 && ch->SAR <= 0xfe3fffff)
		dma_sleep_blocking[channel] = true;
	if(ch->DAR >= 0xfe200000 && ch->DAR <= 0xfe3fffff)
		dma_sleep_blocking[channel] = true;

	return 0;
}
|
|
|
|
|
2021-04-28 17:53:19 +02:00
|
|
|
/* dma_transfer_async(): Start a transfer and return immediately.
   Returns false if the channel is invalid or busy; true once the transfer
   has been started. The callback, if any, runs from the transfer-ended
   interrupt handler. */
bool dma_transfer_async(int channel, dma_size_t size, uint blocks,
	void const *src, dma_address_t src_mode, void *dst,
	dma_address_t dst_mode, gint_call_t callback)
{
	/* Configure with interrupts enabled (last argument = 1) */
	if(dma_setup(channel, size, blocks, src, src_mode, dst, dst_mode, 1))
		return false;

	/* Record the callback before starting the channel, so the interrupt
	   handler is guaranteed to see it */
	dma_callbacks[channel] = callback;

	/* Keep the CPU awake if the transfer involves on-chip memory */
	if(dma_sleep_blocking[channel])
		sleep_block();

	/* Enable channel, starting the DMA transfer. */
	channel_t *ch = dma_channel(channel);
	ch->CHCR.DE = 1;
	return true;
}
|
|
|
|
|
|
|
|
/* Interrupt handler for all finished DMA transfers
   Acknowledges the interrupt, re-enables sleep if it was blocked, wakes any
   synchronized waiter, then invokes the user callback. `channel` is always
   in [0..5] since handlers are installed per-channel in configure(). */
static void dma_interrupt_transfer_ended(int channel)
{
	/* Acknowledge: stop interrupt generation, disable the channel and
	   clear the Transfer Ended flag */
	channel_t *ch = dma_channel(channel);
	ch->CHCR.IE = 0;
	ch->CHCR.DE = 0;
	ch->CHCR.TE = 0;

	/* Clear the global Address Error and NMI flags */
	DMA.OR.AE = 0;
	DMA.OR.NMIF = 0;

	/* Allow sleep again if this transfer was blocking it */
	if(dma_sleep_blocking[channel])
		sleep_unblock();

	/* Cancel any sleep operation that is synchronized with this interrupt */
	if(dma_wait_ics[channel])
		cpu_csleep_cancel(dma_wait_ics[channel]);

	/* Invoke the user callback once, then forget it */
	if(dma_callbacks[channel].function)
	{
		gint_call(dma_callbacks[channel]);
		dma_callbacks[channel] = GINT_CALL_NULL;
	}
}
|
|
|
|
|
2022-01-10 13:36:57 +01:00
|
|
|
/* dma_channel_wait(): Wait for a particular channel's transfer to finish

   This function is used both during normal gint operation and during foreign
   unbinds of the DMA driver. The waiting method varies with interrupt settings
   and device ownership. */
static void dma_channel_wait(int channel, bool foreign)
{
	channel_t *ch = dma_channel(channel);
	if(!ch) return;

	/* If interrupts are disabled or we don't own the device, spin-wait by
	   checking either for TE to be set (Transfer Ended) or DE to be gone
	   (channel disabled).

	   There are definitely race conditions if the DMA is restarted between
	   our checks; only the context of the calls guarantee soundness.

	   * If interrupts are disabled, we assume there is no one that could
	     start the DMA again, since we are the only thread of execution.
	   * If the device is owned by another kernel, then we're transitioning
	     so we have to wait for *all* tasks to complete anyway. The risk is
	     rather to stop too early. */
	if(!ch->CHCR.IE || foreign)
	{
		while(ch->CHCR.DE && !ch->CHCR.TE) {}
		return;
	}

	/* Initialize an interrupt-cancellable sleep, to ensure
	   synchronization */
	cpu_csleep_t ics;
	cpu_csleep_init(&ics);
	dma_wait_ics[channel] = &ics;

	/* Now the ICS is set; if the interrupt has not occurred yet then the
	   handler is guaranteed to cancel the sleep at some point */
	if(ch->CHCR.DE && !ch->CHCR.TE) cpu_csleep(&ics);

	/* Clear the ICS pointer for next time */
	dma_wait_ics[channel] = NULL;
}
|
|
|
|
|
2022-01-10 13:36:57 +01:00
|
|
|
/* dma_transfer_wait(): Wait for a transfer to finish */
void dma_transfer_wait(int channel)
{
	/* Non-foreign wait: normal gint operation, we own the device */
	dma_channel_wait(channel, false);
}
|
|
|
|
|
2021-04-28 17:53:19 +02:00
|
|
|
/* dma_transfer_sync(): Start a transfer and wait for it to complete.
   Returns false if the transfer could not be started. */
bool dma_transfer_sync(int channel, dma_size_t size, uint length,
	void const *src, dma_address_t src_mode, void *dst,
	dma_address_t dst_mode)
{
	bool started = dma_transfer_async(channel, size, length, src,
		src_mode, dst, dst_mode, GINT_CALL_NULL);
	if(!started)
		return false;

	dma_transfer_wait(channel);
	return true;
}
|
|
|
|
|
|
|
|
/* dma_transfer_atomic(): Perform a data transfer without interruptions
   Configures the channel with interrupts disabled, then busy-waits for
   completion. Returns silently if the channel is invalid or busy. */
void dma_transfer_atomic(int channel, dma_size_t size, uint blocks,
	void const *src, dma_address_t src_mode,
	void *dst, dma_address_t dst_mode)
{
	/* Last argument 0: no interrupt at end of transfer */
	if(dma_setup(channel, size, blocks, src, src_mode, dst, dst_mode, 0))
		return;

	/* Enable channel, starting the DMA transfer. */
	channel_t *ch = dma_channel(channel);
	ch->CHCR.DE = 1;

	/* Actively wait until the transfer is finished */
	while(!ch->CHCR.TE);

	/* Disable the channel and clear the TE flag. Disable the channel first
	   as clearing the TE flag will allow the transfer to restart */
	ch->CHCR.DE = 0;
	ch->CHCR.TE = 0;

	/* Clear the AE and NMIF status flags */
	DMA.OR.AE = 0;
	DMA.OR.NMIF = 0;
}
|
|
|
|
|
2021-04-28 17:53:19 +02:00
|
|
|
/* Deprecated version of dma_transfer_async() that did not have a callback */
|
|
|
|
void dma_transfer(int channel, dma_size_t size, uint length, void const *src,
|
|
|
|
dma_address_t src_mode, void *dst, dma_address_t dst_mode)
|
|
|
|
{
|
|
|
|
dma_transfer_async(channel, size, length, src, src_mode, dst, dst_mode,
|
|
|
|
GINT_CALL_NULL);
|
|
|
|
}
|
|
|
|
|
2019-03-10 15:45:34 +01:00
|
|
|
//---
|
|
|
|
// Initialization
|
|
|
|
//---
|
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
/* configure(): Install interrupt handlers, set priorities and enable the
   DMA controller. No-op on SH3, where this driver is unsupported. */
static void configure(void)
{
	if(isSH3()) return;

	/* Install the interrupt handler from dma/inth.s; `codes` lists the
	   INTEVT codes of the 6 per-channel transfer-ended interrupts */
	int codes[] = { 0x800, 0x820, 0x840, 0x860, 0xb80, 0xba0 };
	/* NOTE(review): inth_dma_te is declared but not referenced here —
	   presumably the assembly entry point; confirm in dma/inth.s */
	extern void inth_dma_te(void);

	for(int i = 0; i < 6; i++)
	{
		intc_handler_function(codes[i],
			GINT_CALL(dma_interrupt_transfer_ended, i));

		/* Disable the channel */
		dma_channel(i)->CHCR.DE = 0;
	}

	/* Install the address error gate */
	extern void inth_dma_ae(void);
	intc_handler(0xbc0, inth_dma_ae, 32);

	/* Set interrupt priority to 3, except 9 for channels 3 and 4, which
	   are used by the USB driver. (NOTE(review): an earlier comment said
	   11, but the code sets 9.) */
	intc_priority(INTC_DMA_DEI0, 3);
	intc_priority(INTC_DMA_DEI1, 3);
	intc_priority(INTC_DMA_DEI2, 3);
	intc_priority(INTC_DMA_DEI3, 9);
	intc_priority(INTC_DMA_DEI4, 9);
	intc_priority(INTC_DMA_DEI5, 3);
	intc_priority(INTC_DMA_DADERR, 3);

	/* Clear blocking flags and enable the master switch */
	DMA.OR.AE = 0;
	DMA.OR.NMIF = 0;
	DMA.OR.DME = 1;
}
|
|
|
|
|
2022-01-10 13:36:57 +01:00
|
|
|
static void funbind(void)
|
|
|
|
{
|
|
|
|
/* Wait for all OS transfers to finish before taking over */
|
|
|
|
for(int channel = 0; channel < 6; channel++)
|
|
|
|
dma_channel_wait(channel, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void unbind(void)
|
2019-08-08 11:20:49 +02:00
|
|
|
{
|
2022-01-10 13:36:57 +01:00
|
|
|
/* Make sure all DMA transfers are finished before leaving gint */
|
|
|
|
for(int channel = 0; channel < 6; channel++)
|
|
|
|
dma_channel_wait(channel, false);
|
2019-08-08 11:20:49 +02:00
|
|
|
}
|
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
static bool hpowered(void)
|
|
|
|
{
|
|
|
|
if(isSH3()) return false;
|
|
|
|
return (POWER.MSTPCR0.DMAC0 == 0);
|
|
|
|
}
|
2019-03-10 15:45:34 +01:00
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
static void hpoweron(void)
|
2019-03-10 15:45:34 +01:00
|
|
|
{
|
2021-04-23 18:50:20 +02:00
|
|
|
if(isSH3()) return;
|
|
|
|
POWER.MSTPCR0.DMAC0 = 0;
|
|
|
|
}
|
2019-03-10 15:45:34 +01:00
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
static void hpoweroff(void)
|
|
|
|
{
|
|
|
|
if(isSH3()) return;
|
|
|
|
POWER.MSTPCR0.DMAC0 = 1;
|
|
|
|
}
|
2019-03-10 15:45:34 +01:00
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
//---
|
|
|
|
// State and driver metadata
|
|
|
|
//---
|
2019-03-10 15:45:34 +01:00
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
/* hsave(): Save the full DMA state (all 6 channels plus DMAOR) */
static void hsave(dma_state_t *s)
{
	if(isSH3()) return;

	for(int num = 0; num < 6; num++)
	{
		channel_t *regs = dma_channel(num);
		s->ch[num].SAR = regs->SAR;
		s->ch[num].DAR = regs->DAR;
		s->ch[num].TCR = regs->TCR;
		s->ch[num].CHCR.lword = regs->CHCR.lword;
	}
	s->OR = DMA.OR.word;
}
|
|
|
|
|
2021-04-23 18:50:20 +02:00
|
|
|
/* hrestore(): Restore the DMA state saved by hsave() */
static void hrestore(dma_state_t const *s)
{
	if(isSH3()) return;

	/* Disable the DMA while editing */
	DMA.OR.DME = 0;

	for(int i = 0; i < 6; i++)
	{
		channel_t *ch = dma_channel(i);
		ch->SAR = s->ch[i].SAR;
		ch->DAR = s->ch[i].DAR;
		ch->TCR = s->ch[i].TCR;
		ch->CHCR.lword = s->ch[i].CHCR.lword;
	}
	/* Restore DMAOR last; this re-enables DME if it was set at save time */
	DMA.OR.word = s->OR;
}
|
|
|
|
|
2019-08-08 11:20:49 +02:00
|
|
|
/* DMA driver record. hsave/hrestore are cast because the generic driver
   interface takes untyped state pointers. */
gint_driver_t drv_dma0 = {
	.name = "DMA",
	.configure = configure,
	.funbind = funbind,
	.unbind = unbind,
	.hpowered = hpowered,
	.hpoweron = hpoweron,
	.hpoweroff = hpoweroff,
	.hsave = (void *)hsave,
	.hrestore = (void *)hrestore,
	.state_size = sizeof(dma_state_t),
};
GINT_DECLARE_DRIVER(05, drv_dma0);