2 * dim2_hal.c - DIM2 HAL implementation
3 * (MediaLB, Device Interface Macro IP, OS62420)
5 * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * This file is licensed under GPLv2.
15 /* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
18 #include "dim2_errors.h"
20 #include <linux/stddef.h>
23 * The number of frames per sub-buffer for synchronous channels.
24 * Allowed values: 1, 2, 4, 8, 16, 32, 64.
26 #define FRAMES_PER_SUBBUFF 16
29 * Size factor for synchronous DBR buffer.
30 * Minimal value is 4*FRAMES_PER_SUBBUFF.
32 #define SYNC_DBR_FACTOR (4u * (u16)FRAMES_PER_SUBBUFF)
35 * Size factor for isochronous DBR buffer.
38 #define ISOC_DBR_FACTOR 3u
41 * Number of 32-bit units for DBR map.
43 * 1: block size is 512, max allocation is 16K
44 * 2: block size is 256, max allocation is 8K
45 * 4: block size is 128, max allocation is 4K
46 * 8: block size is 64, max allocation is 2K
48 * Min allocated space is block size.
49 * Max possible allocated space is 32 blocks.
51 #define DBR_MAP_SIZE 2
53 /* -------------------------------------------------------------------------- */
54 /* not configurable area */
61 #define DBR_SIZE (16 * 1024) /* specified by IP */
62 #define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
64 /* -------------------------------------------------------------------------- */
65 /* generic helper functions and macros */
67 #define MLBC0_FCNT_VAL_MACRO(n) MLBC0_FCNT_VAL_ ## n ## FPSB
68 #define MLBC0_FCNT_VAL(fpsb) MLBC0_FCNT_VAL_MACRO(fpsb)
70 static inline u32
bit_mask(u8 position
)
72 return (u32
)1 << position
;
75 static inline bool dim_on_error(u8 error_id
, const char *error_message
)
77 dimcb_on_error(error_id
, error_message
);
81 /* -------------------------------------------------------------------------- */
82 /* types and local variables */
/* Global HAL state shared by all routines in this file. */
struct lld_global_vars_t {
	bool dim_is_initialized;	/* set by startup, cleared by shutdown */
	bool mcm_is_initialized;
	struct dim2_regs *dim2;		/* DIM2 core base address */
	u32 dbr_map[DBR_MAP_SIZE];	/* DBR allocation bitmap, 32 blocks/word */
};

static struct lld_global_vars_t g = { false };
93 /* -------------------------------------------------------------------------- */
95 static int dbr_get_mask_size(u16 size
)
99 for (i
= 0; i
< 6; i
++)
100 if (size
<= (DBR_BLOCK_SIZE
<< i
))
106 * Allocates DBR memory.
107 * @param size Allocating memory size.
108 * @return Offset in DBR memory by success or DBR_SIZE if out of memory.
110 static int alloc_dbr(u16 size
)
113 int i
, block_idx
= 0;
116 return DBR_SIZE
; /* out of memory */
118 mask_size
= dbr_get_mask_size(size
);
120 return DBR_SIZE
; /* out of memory */
122 for (i
= 0; i
< DBR_MAP_SIZE
; i
++) {
123 u32
const blocks
= (size
+ DBR_BLOCK_SIZE
- 1) / DBR_BLOCK_SIZE
;
124 u32 mask
= ~((~(u32
)0) << blocks
);
127 if ((g
.dbr_map
[i
] & mask
) == 0) {
128 g
.dbr_map
[i
] |= mask
;
129 return block_idx
* DBR_BLOCK_SIZE
;
131 block_idx
+= mask_size
;
132 /* do shift left with 2 steps in case mask_size == 32 */
133 mask
<<= mask_size
- 1;
134 } while ((mask
<<= 1) != 0);
137 return DBR_SIZE
; /* out of memory */
140 static void free_dbr(int offs
, int size
)
142 int block_idx
= offs
/ DBR_BLOCK_SIZE
;
143 u32
const blocks
= (size
+ DBR_BLOCK_SIZE
- 1) / DBR_BLOCK_SIZE
;
144 u32 mask
= ~((~(u32
)0) << blocks
);
146 mask
<<= block_idx
% 32;
147 g
.dbr_map
[block_idx
/ 32] &= ~mask
;
150 /* -------------------------------------------------------------------------- */
152 static u32
dim2_read_ctr(u32 ctr_addr
, u16 mdat_idx
)
154 dimcb_io_write(&g
.dim2
->MADR
, ctr_addr
);
156 /* wait till transfer is completed */
157 while ((dimcb_io_read(&g
.dim2
->MCTL
) & 1) != 1)
160 dimcb_io_write(&g
.dim2
->MCTL
, 0); /* clear transfer complete */
162 return dimcb_io_read((&g
.dim2
->MDAT0
) + mdat_idx
);
165 static void dim2_write_ctr_mask(u32 ctr_addr
, const u32
*mask
, const u32
*value
)
167 enum { MADR_WNR_BIT
= 31 };
169 dimcb_io_write(&g
.dim2
->MCTL
, 0); /* clear transfer complete */
172 dimcb_io_write(&g
.dim2
->MDAT0
, value
[0]);
174 dimcb_io_write(&g
.dim2
->MDAT1
, value
[1]);
176 dimcb_io_write(&g
.dim2
->MDAT2
, value
[2]);
178 dimcb_io_write(&g
.dim2
->MDAT3
, value
[3]);
180 dimcb_io_write(&g
.dim2
->MDWE0
, mask
[0]);
181 dimcb_io_write(&g
.dim2
->MDWE1
, mask
[1]);
182 dimcb_io_write(&g
.dim2
->MDWE2
, mask
[2]);
183 dimcb_io_write(&g
.dim2
->MDWE3
, mask
[3]);
185 dimcb_io_write(&g
.dim2
->MADR
, bit_mask(MADR_WNR_BIT
) | ctr_addr
);
187 /* wait till transfer is completed */
188 while ((dimcb_io_read(&g
.dim2
->MCTL
) & 1) != 1)
191 dimcb_io_write(&g
.dim2
->MCTL
, 0); /* clear transfer complete */
194 static inline void dim2_write_ctr(u32 ctr_addr
, const u32
*value
)
196 u32
const mask
[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
198 dim2_write_ctr_mask(ctr_addr
, mask
, value
);
201 static inline void dim2_clear_ctr(u32 ctr_addr
)
203 u32
const value
[4] = { 0, 0, 0, 0 };
205 dim2_write_ctr(ctr_addr
, value
);
208 static void dim2_configure_cat(u8 cat_base
, u8 ch_addr
, u8 ch_type
,
209 bool read_not_write
, bool sync_mfe
)
212 (read_not_write
<< CAT_RNW_BIT
) |
213 (ch_type
<< CAT_CT_SHIFT
) |
214 (ch_addr
<< CAT_CL_SHIFT
) |
215 (sync_mfe
<< CAT_MFE_BIT
) |
216 (false << CAT_MT_BIT
) |
217 (true << CAT_CE_BIT
);
218 u8
const ctr_addr
= cat_base
+ ch_addr
/ 8;
219 u8
const idx
= (ch_addr
% 8) / 2;
220 u8
const shift
= (ch_addr
% 2) * 16;
221 u32 mask
[4] = { 0, 0, 0, 0 };
222 u32 value
[4] = { 0, 0, 0, 0 };
224 mask
[idx
] = (u32
)0xFFFF << shift
;
225 value
[idx
] = cat
<< shift
;
226 dim2_write_ctr_mask(ctr_addr
, mask
, value
);
229 static void dim2_clear_cat(u8 cat_base
, u8 ch_addr
)
231 u8
const ctr_addr
= cat_base
+ ch_addr
/ 8;
232 u8
const idx
= (ch_addr
% 8) / 2;
233 u8
const shift
= (ch_addr
% 2) * 16;
234 u32 mask
[4] = { 0, 0, 0, 0 };
235 u32 value
[4] = { 0, 0, 0, 0 };
237 mask
[idx
] = (u32
)0xFFFF << shift
;
238 dim2_write_ctr_mask(ctr_addr
, mask
, value
);
241 static void dim2_configure_cdt(u8 ch_addr
, u16 dbr_address
, u16 hw_buffer_size
,
244 u32 cdt
[4] = { 0, 0, 0, 0 };
247 cdt
[1] = ((packet_length
- 1) << CDT1_BS_ISOC_SHIFT
);
250 ((hw_buffer_size
- 1) << CDT3_BD_SHIFT
) |
251 (dbr_address
<< CDT3_BA_SHIFT
);
252 dim2_write_ctr(CDT
+ ch_addr
, cdt
);
255 static void dim2_clear_cdt(u8 ch_addr
)
257 u32 cdt
[4] = { 0, 0, 0, 0 };
259 dim2_write_ctr(CDT
+ ch_addr
, cdt
);
262 static void dim2_configure_adt(u8 ch_addr
)
264 u32 adt
[4] = { 0, 0, 0, 0 };
267 (true << ADT0_CE_BIT
) |
268 (true << ADT0_LE_BIT
) |
271 dim2_write_ctr(ADT
+ ch_addr
, adt
);
274 static void dim2_clear_adt(u8 ch_addr
)
276 u32 adt
[4] = { 0, 0, 0, 0 };
278 dim2_write_ctr(ADT
+ ch_addr
, adt
);
281 static void dim2_start_ctrl_async(u8 ch_addr
, u8 idx
, u32 buf_addr
,
284 u8
const shift
= idx
* 16;
286 u32 mask
[4] = { 0, 0, 0, 0 };
287 u32 adt
[4] = { 0, 0, 0, 0 };
290 bit_mask(ADT1_PS_BIT
+ shift
) |
291 bit_mask(ADT1_RDY_BIT
+ shift
) |
292 (ADT1_CTRL_ASYNC_BD_MASK
<< (ADT1_BD_SHIFT
+ shift
));
294 (true << (ADT1_PS_BIT
+ shift
)) |
295 (true << (ADT1_RDY_BIT
+ shift
)) |
296 ((buffer_size
- 1) << (ADT1_BD_SHIFT
+ shift
));
298 mask
[idx
+ 2] = 0xFFFFFFFF;
299 adt
[idx
+ 2] = buf_addr
;
301 dim2_write_ctr_mask(ADT
+ ch_addr
, mask
, adt
);
304 static void dim2_start_isoc_sync(u8 ch_addr
, u8 idx
, u32 buf_addr
,
307 u8
const shift
= idx
* 16;
309 u32 mask
[4] = { 0, 0, 0, 0 };
310 u32 adt
[4] = { 0, 0, 0, 0 };
313 bit_mask(ADT1_RDY_BIT
+ shift
) |
314 (ADT1_ISOC_SYNC_BD_MASK
<< (ADT1_BD_SHIFT
+ shift
));
316 (true << (ADT1_RDY_BIT
+ shift
)) |
317 ((buffer_size
- 1) << (ADT1_BD_SHIFT
+ shift
));
319 mask
[idx
+ 2] = 0xFFFFFFFF;
320 adt
[idx
+ 2] = buf_addr
;
322 dim2_write_ctr_mask(ADT
+ ch_addr
, mask
, adt
);
325 static void dim2_clear_ctram(void)
329 for (ctr_addr
= 0; ctr_addr
< 0x90; ctr_addr
++)
330 dim2_clear_ctr(ctr_addr
);
333 static void dim2_configure_channel(
334 u8 ch_addr
, u8 type
, u8 is_tx
, u16 dbr_address
, u16 hw_buffer_size
,
335 u16 packet_length
, bool sync_mfe
)
337 dim2_configure_cdt(ch_addr
, dbr_address
, hw_buffer_size
, packet_length
);
338 dim2_configure_cat(MLB_CAT
, ch_addr
, type
, is_tx
? 1 : 0, sync_mfe
);
340 dim2_configure_adt(ch_addr
);
341 dim2_configure_cat(AHB_CAT
, ch_addr
, type
, is_tx
? 0 : 1, sync_mfe
);
343 /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
344 dimcb_io_write(&g
.dim2
->ACMR0
,
345 dimcb_io_read(&g
.dim2
->ACMR0
) | bit_mask(ch_addr
));
348 static void dim2_clear_channel(u8 ch_addr
)
350 /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
351 dimcb_io_write(&g
.dim2
->ACMR0
,
352 dimcb_io_read(&g
.dim2
->ACMR0
) & ~bit_mask(ch_addr
));
354 dim2_clear_cat(AHB_CAT
, ch_addr
);
355 dim2_clear_adt(ch_addr
);
357 dim2_clear_cat(MLB_CAT
, ch_addr
);
358 dim2_clear_cdt(ch_addr
);
361 /* -------------------------------------------------------------------------- */
362 /* channel state helpers */
364 static void state_init(struct int_ch_state
*state
)
366 state
->request_counter
= 0;
367 state
->service_counter
= 0;
374 /* -------------------------------------------------------------------------- */
375 /* macro helper functions */
377 static inline bool check_channel_address(u32 ch_address
)
379 return ch_address
> 0 && (ch_address
% 2) == 0 &&
380 (ch_address
/ 2) <= (u32
)CAT_CL_MASK
;
383 static inline bool check_packet_length(u32 packet_length
)
385 u16
const max_size
= ((u16
)CDT3_BD_ISOC_MASK
+ 1u) / ISOC_DBR_FACTOR
;
387 if (packet_length
<= 0)
388 return false; /* too small */
390 if (packet_length
> max_size
)
391 return false; /* too big */
393 if (packet_length
- 1u > (u32
)CDT1_BS_ISOC_MASK
)
394 return false; /* too big */
399 static inline bool check_bytes_per_frame(u32 bytes_per_frame
)
401 u16
const max_size
= ((u16
)CDT3_BD_MASK
+ 1u) / SYNC_DBR_FACTOR
;
403 if (bytes_per_frame
<= 0)
404 return false; /* too small */
406 if (bytes_per_frame
> max_size
)
407 return false; /* too big */
412 static inline u16
norm_ctrl_async_buffer_size(u16 buf_size
)
414 u16
const max_size
= (u16
)ADT1_CTRL_ASYNC_BD_MASK
+ 1u;
416 if (buf_size
> max_size
)
422 static inline u16
norm_isoc_buffer_size(u16 buf_size
, u16 packet_length
)
425 u16
const max_size
= (u16
)ADT1_ISOC_SYNC_BD_MASK
+ 1u;
427 if (buf_size
> max_size
)
430 n
= buf_size
/ packet_length
;
433 return 0; /* too small buffer for given packet_length */
435 return packet_length
* n
;
438 static inline u16
norm_sync_buffer_size(u16 buf_size
, u16 bytes_per_frame
)
441 u16
const max_size
= (u16
)ADT1_ISOC_SYNC_BD_MASK
+ 1u;
442 u32
const unit
= bytes_per_frame
* (u16
)FRAMES_PER_SUBBUFF
;
444 if (buf_size
> max_size
)
450 return 0; /* too small buffer for given bytes_per_frame */
455 static void dim2_cleanup(void)
457 /* disable MediaLB */
458 dimcb_io_write(&g
.dim2
->MLBC0
, false << MLBC0_MLBEN_BIT
);
462 /* disable mlb_int interrupt */
463 dimcb_io_write(&g
.dim2
->MIEN
, 0);
465 /* clear status for all dma channels */
466 dimcb_io_write(&g
.dim2
->ACSR0
, 0xFFFFFFFF);
467 dimcb_io_write(&g
.dim2
->ACSR1
, 0xFFFFFFFF);
469 /* mask interrupts for all channels */
470 dimcb_io_write(&g
.dim2
->ACMR0
, 0);
471 dimcb_io_write(&g
.dim2
->ACMR1
, 0);
474 static void dim2_initialize(bool enable_6pin
, u8 mlb_clock
)
478 /* configure and enable MediaLB */
479 dimcb_io_write(&g
.dim2
->MLBC0
,
480 enable_6pin
<< MLBC0_MLBPEN_BIT
|
481 mlb_clock
<< MLBC0_MLBCLK_SHIFT
|
482 MLBC0_FCNT_VAL(FRAMES_PER_SUBBUFF
) << MLBC0_FCNT_SHIFT
|
483 true << MLBC0_MLBEN_BIT
);
485 /* activate all HBI channels */
486 dimcb_io_write(&g
.dim2
->HCMR0
, 0xFFFFFFFF);
487 dimcb_io_write(&g
.dim2
->HCMR1
, 0xFFFFFFFF);
490 dimcb_io_write(&g
.dim2
->HCTL
, bit_mask(HCTL_EN_BIT
));
493 dimcb_io_write(&g
.dim2
->ACTL
,
494 ACTL_DMA_MODE_VAL_DMA_MODE_1
<< ACTL_DMA_MODE_BIT
|
495 true << ACTL_SCE_BIT
);
498 static bool dim2_is_mlb_locked(void)
500 u32
const mask0
= bit_mask(MLBC0_MLBLK_BIT
);
501 u32
const mask1
= bit_mask(MLBC1_CLKMERR_BIT
) |
502 bit_mask(MLBC1_LOCKERR_BIT
);
503 u32
const c1
= dimcb_io_read(&g
.dim2
->MLBC1
);
504 u32
const nda_mask
= (u32
)MLBC1_NDA_MASK
<< MLBC1_NDA_SHIFT
;
506 dimcb_io_write(&g
.dim2
->MLBC1
, c1
& nda_mask
);
507 return (dimcb_io_read(&g
.dim2
->MLBC1
) & mask1
) == 0 &&
508 (dimcb_io_read(&g
.dim2
->MLBC0
) & mask0
) != 0;
511 /* -------------------------------------------------------------------------- */
512 /* channel help routines */
514 static inline bool service_channel(u8 ch_addr
, u8 idx
)
516 u8
const shift
= idx
* 16;
517 u32
const adt1
= dim2_read_ctr(ADT
+ ch_addr
, 1);
519 if (((adt1
>> (ADT1_DNE_BIT
+ shift
)) & 1) == 0)
523 u32 mask
[4] = { 0, 0, 0, 0 };
524 u32 adt_w
[4] = { 0, 0, 0, 0 };
527 bit_mask(ADT1_DNE_BIT
+ shift
) |
528 bit_mask(ADT1_ERR_BIT
+ shift
) |
529 bit_mask(ADT1_RDY_BIT
+ shift
);
530 dim2_write_ctr_mask(ADT
+ ch_addr
, mask
, adt_w
);
533 /* clear channel status bit */
534 dimcb_io_write(&g
.dim2
->ACSR0
, bit_mask(ch_addr
));
539 /* -------------------------------------------------------------------------- */
540 /* channel init routines */
542 static void isoc_init(struct dim_channel
*ch
, u8 ch_addr
, u16 packet_length
)
544 state_init(&ch
->state
);
548 ch
->packet_length
= packet_length
;
549 ch
->bytes_per_frame
= 0;
550 ch
->done_sw_buffers_number
= 0;
553 static void sync_init(struct dim_channel
*ch
, u8 ch_addr
, u16 bytes_per_frame
)
555 state_init(&ch
->state
);
559 ch
->packet_length
= 0;
560 ch
->bytes_per_frame
= bytes_per_frame
;
561 ch
->done_sw_buffers_number
= 0;
564 static void channel_init(struct dim_channel
*ch
, u8 ch_addr
)
566 state_init(&ch
->state
);
570 ch
->packet_length
= 0;
571 ch
->bytes_per_frame
= 0;
572 ch
->done_sw_buffers_number
= 0;
575 /* returns true if channel interrupt state is cleared */
576 static bool channel_service_interrupt(struct dim_channel
*ch
)
578 struct int_ch_state
*const state
= &ch
->state
;
580 if (!service_channel(ch
->addr
, state
->idx2
))
584 state
->request_counter
++;
588 static bool channel_start(struct dim_channel
*ch
, u32 buf_addr
, u16 buf_size
)
590 struct int_ch_state
*const state
= &ch
->state
;
593 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE
, "Bad buffer size");
595 if (ch
->packet_length
== 0 && ch
->bytes_per_frame
== 0 &&
596 buf_size
!= norm_ctrl_async_buffer_size(buf_size
))
597 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE
,
598 "Bad control/async buffer size");
600 if (ch
->packet_length
&&
601 buf_size
!= norm_isoc_buffer_size(buf_size
, ch
->packet_length
))
602 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE
,
603 "Bad isochronous buffer size");
605 if (ch
->bytes_per_frame
&&
606 buf_size
!= norm_sync_buffer_size(buf_size
, ch
->bytes_per_frame
))
607 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE
,
608 "Bad synchronous buffer size");
610 if (state
->level
>= 2u)
611 return dim_on_error(DIM_ERR_OVERFLOW
, "Channel overflow");
615 if (ch
->packet_length
|| ch
->bytes_per_frame
)
616 dim2_start_isoc_sync(ch
->addr
, state
->idx1
, buf_addr
, buf_size
);
618 dim2_start_ctrl_async(ch
->addr
, state
->idx1
, buf_addr
,
625 static u8
channel_service(struct dim_channel
*ch
)
627 struct int_ch_state
*const state
= &ch
->state
;
629 if (state
->service_counter
!= state
->request_counter
) {
630 state
->service_counter
++;
631 if (state
->level
== 0)
632 return DIM_ERR_UNDERFLOW
;
635 ch
->done_sw_buffers_number
++;
641 static bool channel_detach_buffers(struct dim_channel
*ch
, u16 buffers_number
)
643 if (buffers_number
> ch
->done_sw_buffers_number
)
644 return dim_on_error(DIM_ERR_UNDERFLOW
, "Channel underflow");
646 ch
->done_sw_buffers_number
-= buffers_number
;
650 /* -------------------------------------------------------------------------- */
653 u8
dim_startup(void *dim_base_address
, u32 mlb_clock
)
655 g
.dim_is_initialized
= false;
657 if (!dim_base_address
)
658 return DIM_INIT_ERR_DIM_ADDR
;
660 /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
661 /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
663 return DIM_INIT_ERR_MLB_CLOCK
;
665 g
.dim2
= dim_base_address
;
669 dim2_initialize(mlb_clock
>= 3, mlb_clock
);
671 g
.dim_is_initialized
= true;
676 void dim_shutdown(void)
678 g
.dim_is_initialized
= false;
682 bool dim_get_lock_state(void)
684 return dim2_is_mlb_locked();
687 static u8
init_ctrl_async(struct dim_channel
*ch
, u8 type
, u8 is_tx
,
688 u16 ch_address
, u16 hw_buffer_size
)
690 if (!g
.dim_is_initialized
|| !ch
)
691 return DIM_ERR_DRIVER_NOT_INITIALIZED
;
693 if (!check_channel_address(ch_address
))
694 return DIM_INIT_ERR_CHANNEL_ADDRESS
;
696 ch
->dbr_size
= hw_buffer_size
;
697 ch
->dbr_addr
= alloc_dbr(ch
->dbr_size
);
698 if (ch
->dbr_addr
>= DBR_SIZE
)
699 return DIM_INIT_ERR_OUT_OF_MEMORY
;
701 channel_init(ch
, ch_address
/ 2);
703 dim2_configure_channel(ch
->addr
, type
, is_tx
,
704 ch
->dbr_addr
, ch
->dbr_size
, 0, false);
709 u16
DIM_NormCtrlAsyncBufferSize(u16 buf_size
)
711 return norm_ctrl_async_buffer_size(buf_size
);
715 * Retrieves maximal possible correct buffer size for isochronous data type
716 * conform to given packet length and not bigger than given buffer size.
718 * Returns non-zero correct buffer size or zero by error.
720 u16
DIM_NormIsocBufferSize(u16 buf_size
, u16 packet_length
)
722 if (!check_packet_length(packet_length
))
725 return norm_isoc_buffer_size(buf_size
, packet_length
);
729 * Retrieves maximal possible correct buffer size for synchronous data type
730 * conform to given bytes per frame and not bigger than given buffer size.
732 * Returns non-zero correct buffer size or zero by error.
734 u16
DIM_NormSyncBufferSize(u16 buf_size
, u16 bytes_per_frame
)
736 if (!check_bytes_per_frame(bytes_per_frame
))
739 return norm_sync_buffer_size(buf_size
, bytes_per_frame
);
742 u8
DIM_InitControl(struct dim_channel
*ch
, u8 is_tx
, u16 ch_address
,
745 return init_ctrl_async(ch
, CAT_CT_VAL_CONTROL
, is_tx
, ch_address
,
749 u8
DIM_InitAsync(struct dim_channel
*ch
, u8 is_tx
, u16 ch_address
,
752 return init_ctrl_async(ch
, CAT_CT_VAL_ASYNC
, is_tx
, ch_address
,
756 u8
DIM_InitIsoc(struct dim_channel
*ch
, u8 is_tx
, u16 ch_address
,
759 if (!g
.dim_is_initialized
|| !ch
)
760 return DIM_ERR_DRIVER_NOT_INITIALIZED
;
762 if (!check_channel_address(ch_address
))
763 return DIM_INIT_ERR_CHANNEL_ADDRESS
;
765 if (!check_packet_length(packet_length
))
766 return DIM_ERR_BAD_CONFIG
;
768 ch
->dbr_size
= packet_length
* ISOC_DBR_FACTOR
;
769 ch
->dbr_addr
= alloc_dbr(ch
->dbr_size
);
770 if (ch
->dbr_addr
>= DBR_SIZE
)
771 return DIM_INIT_ERR_OUT_OF_MEMORY
;
773 isoc_init(ch
, ch_address
/ 2, packet_length
);
775 dim2_configure_channel(ch
->addr
, CAT_CT_VAL_ISOC
, is_tx
, ch
->dbr_addr
,
776 ch
->dbr_size
, packet_length
, false);
781 u8
DIM_InitSync(struct dim_channel
*ch
, u8 is_tx
, u16 ch_address
,
784 if (!g
.dim_is_initialized
|| !ch
)
785 return DIM_ERR_DRIVER_NOT_INITIALIZED
;
787 if (!check_channel_address(ch_address
))
788 return DIM_INIT_ERR_CHANNEL_ADDRESS
;
790 if (!check_bytes_per_frame(bytes_per_frame
))
791 return DIM_ERR_BAD_CONFIG
;
793 ch
->dbr_size
= bytes_per_frame
* SYNC_DBR_FACTOR
;
794 ch
->dbr_addr
= alloc_dbr(ch
->dbr_size
);
795 if (ch
->dbr_addr
>= DBR_SIZE
)
796 return DIM_INIT_ERR_OUT_OF_MEMORY
;
798 sync_init(ch
, ch_address
/ 2, bytes_per_frame
);
800 dim2_configure_channel(ch
->addr
, CAT_CT_VAL_SYNC
, is_tx
,
801 ch
->dbr_addr
, ch
->dbr_size
, 0, true);
806 u8
DIM_DestroyChannel(struct dim_channel
*ch
)
808 if (!g
.dim_is_initialized
|| !ch
)
809 return DIM_ERR_DRIVER_NOT_INITIALIZED
;
811 dim2_clear_channel(ch
->addr
);
812 if (ch
->dbr_addr
< DBR_SIZE
)
813 free_dbr(ch
->dbr_addr
, ch
->dbr_size
);
814 ch
->dbr_addr
= DBR_SIZE
;
819 void DIM_ServiceIrq(struct dim_channel
*const *channels
)
823 if (!g
.dim_is_initialized
) {
824 dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED
,
825 "DIM is not initialized");
830 dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED
, "Bad channels");
835 * Use while-loop and a flag to make sure the age is changed back at
836 * least once, otherwise the interrupt may never come if CPU generates
837 * interrupt on changing age.
838 * This cycle runs not more than number of channels, because
839 * channel_service_interrupt() routine doesn't start the channel again.
842 struct dim_channel
*const *ch
= channels
;
844 state_changed
= false;
847 state_changed
|= channel_service_interrupt(*ch
);
850 } while (state_changed
);
852 /* clear pending Interrupts */
853 dimcb_io_write(&g
.dim2
->MS0
, 0);
854 dimcb_io_write(&g
.dim2
->MS1
, 0);
857 u8
DIM_ServiceChannel(struct dim_channel
*ch
)
859 if (!g
.dim_is_initialized
|| !ch
)
860 return DIM_ERR_DRIVER_NOT_INITIALIZED
;
862 return channel_service(ch
);
865 struct dim_ch_state_t
*DIM_GetChannelState(struct dim_channel
*ch
,
866 struct dim_ch_state_t
*state_ptr
)
868 if (!ch
|| !state_ptr
)
871 state_ptr
->ready
= ch
->state
.level
< 2;
872 state_ptr
->done_buffers
= ch
->done_sw_buffers_number
;
877 bool DIM_EnqueueBuffer(struct dim_channel
*ch
, u32 buffer_addr
, u16 buffer_size
)
880 return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED
,
883 return channel_start(ch
, buffer_addr
, buffer_size
);
886 bool dim_detach_buffers(struct dim_channel
*ch
, u16 buffers_number
)
889 return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED
,
892 return channel_detach_buffers(ch
, buffers_number
);