/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label __cpuinitdata labels[5];
static struct uasm_reloc __cpuinitdata relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

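/*
 * The PRId comparisons above mask off the low four bits (the patch
 * revision), so they match any R4600 V1.x or V2.x core; both have HIT
 * cacheop errata that are worked around when cache ops are emitted
 * below.
 */
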
static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;

static u32 pref_src_mode __cpuinitdata;
static u32 pref_dst_mode __cpuinitdata;

static int clear_word_size __cpuinitdata;
static int copy_word_size __cpuinitdata;

static int half_clear_loop_size __cpuinitdata;
static int half_copy_loop_size __cpuinitdata;

static int cache_line_size __cpuinitdata;
#define cache_line_mask() (cache_line_size - 1)

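/*
 * pg_addiu() emits code computing "reg1 = reg2 + off". An offset that
 * does not fit a signed 16-bit immediate is synthesized into scratch
 * register T9 with a lui/addiu pair; on 64-bit CPUs affected by the
 * daddiu erratum the sum is always formed in T9 and combined with an
 * erratum-safe daddu.
 */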
static inline void __cpuinit
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}

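/*
 * Illustrative example (not verbatim generated code): on a 32-bit CPU,
 * pg_addiu(&buf, A2, A0, 0x9000) emits
 *
 *	lui	t9, 0x1		# t9 = 0x10000
 *	addiu	t9, t9, -0x7000	# t9 = 0x9000
 *	addu	a2, a0, t9
 *
 * because 0x9000 does not fit a signed 16-bit immediate.
 */
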
static void __cpuinit set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The prefetches used here employ "streaming" hints, which cause
	 * the copied data to be kicked out of the cache sooner. A page copy
	 * often ends up copying a lot more data than is commonly used, so
	 * this seems to make sense in terms of reducing cache pollution,
	 * but I've no real performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load hint. */
			pref_bias_copy_load = 256;
			break;

		case CPU_RM9000:
			/*
			 * As a workaround for erratum G105, which makes the
			 * PrepareForStore hint unusable, we fall back to
			 * StoreRetained on the RM9000. Once it is known which
			 * versions of the RM9000 are affected we'll be able
			 * to conditionalize this.
			 */

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * These values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the available space in the
	 * reserved clear_page / copy_page code regions
	 * (__clear_page_start .. __clear_page_end and
	 * __copy_page_start .. __copy_page_end).
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}

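/*
 * Worked example (assuming cache_line_size = 32 and 64-bit stores,
 * i.e. clear_word_size = 8):
 *
 *	half_clear_loop_size = min(16 * 8, max(32 >> 1, 4 * 8))
 *	                     = min(128, max(16, 32)) = 32
 *
 * so each half of the unrolled clear loop covers one full cache line
 * (four sd instructions), and a full iteration clears 64 bytes.
 */
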
static void __cpuinit build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

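/*
 * build_clear_pref() emits at most one prefetch (or cache op) per
 * cache line; calls for offsets within a line return early. When no
 * store prefetch bias is set, a "create dirty exclusive" cacheop marks
 * the line dirty without fetching it from memory. That is only safe if
 * every loop iteration overwrites whole lines, which is what the
 * cache_line_size == (half_clear_loop_size << 1) check enforces.
 */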
static inline void __cpuinit build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

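/*
 * The handlers are assembled in place into regions reserved in the
 * kernel image between these start/end markers; build_clear_page()
 * and build_copy_page() BUG() if the synthesized code would overrun
 * its region.
 */
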
void __cpuinit build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

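/*
 * For illustration only (a sketch, not the exact emitted sequence):
 * on a 64-bit CPU with pref_bias_clear_store = 128 and a 32-byte
 * cache line, the synthesized handler is shaped roughly like
 *
 *	addiu	a2, a0, PAGE_SIZE - 128	# end of prefetched region
 *	pref	...			# warm-up prefetches
 * 1:	pref	pref_dst_mode, 128(a0)
 *	sd	zero, 0(a0) ... sd zero, 24(a0)
 *	daddiu	a0, a0, 64
 *	sd	zero, -32(a0) ...
 *	bne	a0, a2, 1b
 *	 sd	zero, -8(a0)		# store in the branch delay slot
 *
 * followed by a prefetch-free tail over the last 128 bytes and a
 * final jr ra / nop. This is why uasm_il_bne() is emitted before the
 * last store of each loop: that store fills the delay slot.
 */
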
static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

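/*
 * build_copy_page() below can emit up to three unrolled loop variants:
 * one prefetching both source and destination (label_copy_pref_both),
 * one prefetching only the destination once source prefetches would
 * run past the end of the page (label_copy_pref_store), and a final
 * stretch with no prefetching at all (label_copy_nopref). Which
 * variants appear depends on the bias values chosen by
 * set_prefetch_parameters().
 */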
void __cpuinit build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

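/*
 * On SB1250-family SOCs the "data mover" engine can zero or copy a
 * page without the data passing through the CPU. Each CPU owns a
 * private single-entry descriptor ring; sb1_dma_init() programs each
 * channel's base register three times: once with the ring base and
 * size, once with the RESET bit to clear channel state, and once with
 * the ENABL bit to start it.
 */
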
/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

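/*
 * clear_page()/copy_page() below each post one descriptor to the
 * calling CPU's channel, then poll that channel's DSCR_BASE_DEBUG
 * register until the interrupt status bit is set; the trailing read
 * of DSCR_BASE acknowledges the completed descriptor.
 */
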
void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* If the page is not in KSEG0, fall back to the CPU routine. */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* If either page is not in KSEG0, fall back to the CPU routine. */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */