/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>
struct msg_desc {
	struct bau_payload_queue_entry *msg;
	int msg_slot;
	int sw_ack_slot;
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
};
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;
static struct dentry *tunables_dir;
static struct dentry *tunables_file;
static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);
/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;
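/*
 * Per-cpu state: shootdown statistics, the per-cpu BAU control structure,
 * and a scratch cpumask used by uv_flush_tlb_others() to build the set of
 * cpus still to be flushed.
 */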
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
				msg->sw_ack_vector;
		uv_write_local_mmr(
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
				dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}
/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = &per_cpu(ptcstats, bcp->cpu);
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = ((msg2->sw_ack_vector << 8) |
				   msg2->sw_ack_vector);
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << 8)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
				    (msg_res << 8) | msg_res);
			} else
				printk(KERN_INFO "note bau retry: no effect\n");
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = &per_cpu(ptcstats, bcp->cpu);
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}
}
/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = &per_cpu(ptcstats, bcp->cpu);
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
					(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = ((msg->sw_ack_vector << 8) |
				   msg->sw_ack_vector);
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
				    msg_res);
			}
		}
	}
	return;
}
/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
		    uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
		    uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; Preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
	return;
}
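/*
 * Translate a cycle count to microseconds: the per-cpu cyc2ns factor
 * scaled by CYC2NS_SCALE_FACTOR converts cycles to nanoseconds, which are
 * then divided down to microseconds.
 */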
static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;

	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		 &hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}
/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	int relaxes = 0;
	unsigned long descriptor_status;
	unsigned long mmr;
	unsigned long mask;
	cycles_t ttime;
	cycles_t timeout_time;
	struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	timeout_time = get_cycles() + bcp->timeout_interval;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because there are
		 * no swack resources available. As long as none of them
		 * has timed out hardware will NACK our message and its
		 * state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
					DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
			relaxes++;
			if (relaxes >= 10000) {
				relaxes = 0;
				if (get_cycles() > timeout_time) {
					quiesce_local_uvhub(hmaster);

					/* single-thread the register change */
					spin_lock(&hmaster->masks_lock);
					mmr = uv_read_local_mmr(mmr_offset);
					mask = 0UL;
					mask |= (3UL << right_shift);
					mask = ~mask;
					mmr &= mask;
					uv_write_local_mmr(mmr_offset, mmr);
					spin_unlock(&hmaster->masks_lock);
					end_uvhub_quiesce(hmaster);
					stat->s_busy++;
					bcp->conseccompletes = 0;
					return FLUSH_GIVEUP;
				}
			}
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
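/*
 * Convert a number of seconds to processor cycles (the inverse of the
 * cyc2ns scaling used by cycles_2_us() above).
 */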
static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is done under the spinlock 'lock'
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'. atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}
/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}
/*
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to, plus
 * cpus that are on the local uvhub.
 *
 * Returns NULL if all flushing represented in the mask was done.
 *
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set, representing any cpus on the local
 * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
 */
const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
					     struct cpumask *flush_mask,
					     struct bau_control *bcp)
{
	int right_shift;
	int uvhub;
	int bit;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	int this_uvhub = bcp->uvhub;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	/*
	 * Spin here while there are hmaster->max_bau_concurrent or more active
	 * descriptors. This is the per-uvhub 'throttle'.
	 */
	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		/*
		 * Every message from any given cpu gets a unique message
		 * sequence number. But retries use that same number.
		 * Our message may have timed out at the destination because
		 * all sw-ack resources are in use and there is a timeout
		 * pending there. In that case, our last send never got
		 * placed into the queue and we need to persist until it
		 * does.
		 *
		 * Make any retry a type MSG_RETRY so that the destination will
		 * free any resource held by a previous message from this cpu.
		 */
		if (try == 0) {
			/* use message type set by the caller the first time */
			seq_number = bcp->message_number++;
		} else {
			/* use RETRY type on all the rest; same sequence */
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);

		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			/*
			 * Our retries may be blocked by all destination swack
			 * resources being consumed, and a timeout pending. In
			 * that case hardware immediately returns the ERROR
			 * that looks like a destination timeout.
			 */
			udelay(bcp->plugged_delay);
			bcp->plugged_tries++;
			if (bcp->plugged_tries >= bcp->plugsb4reset) {
				bcp->plugged_tries = 0;
				quiesce_local_uvhub(hmaster);
				spin_lock(&hmaster->queue_lock);
				uv_reset_with_ipi(&bau_desc->distribution,
							this_cpu);
				spin_unlock(&hmaster->queue_lock);
				end_uvhub_quiesce(hmaster);
				bcp->ipi_attempts++;
				stat->s_resets_plug++;
			}
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			hmaster->max_bau_concurrent = 1;
			bcp->timeout_tries++;
			udelay(TIMEOUT_DELAY);
			if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
				bcp->timeout_tries = 0;
				quiesce_local_uvhub(hmaster);
				spin_lock(&hmaster->queue_lock);
				uv_reset_with_ipi(&bau_desc->distribution,
							this_cpu);
				spin_unlock(&hmaster->queue_lock);
				end_uvhub_quiesce(hmaster);
				bcp->ipi_attempts++;
				stat->s_resets_timeout++;
			}
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();

	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;

	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;

	/*
	 * hold any cpu not timing out here; no other cpu currently held by
	 * the 'throttle' should enter the activation code
	 */
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);

	/* guard against cycles wrap */
	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;
		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->congested_reps)) {
				disable_for_congestion(bcp, stat);
			}
		}
	} else
		stat->s_requestor--; /* don't count this one */
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the target cpu's, all of which are still in the mask.
		 */
		stat->s_giveup++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpu's from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		uvhub = uv_cpu_to_blade_id(bit);
		if (uvhub == this_uvhub)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}
/*
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int remotes;
	int tcpu;
	int uvhub;
	int locals = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = &per_cpu(ptcstats, cpu);

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. Only remote cpus are converted to uvhubs and copied.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/*
	 * copy cpumask to flush_mask, removing current cpu
	 * (current cpu should already have been flushed by the caller and
	 *  should never be returned if we return flush_mask)
	 */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
	if (cpu_isset(cpu, *cpumask))
		locals++;  /* current cpu was targeted */

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;

	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	remotes = 0;
	for_each_cpu(tcpu, flush_mask) {
		uvhub = uv_cpu_to_blade_id(tcpu);
		if (uvhub == bcp->uvhub) {
			locals++;
			continue;
		}
		bau_uvhub_set(uvhub, &bau_desc->distribution);
		remotes++;
	}
	if (remotes == 0) {
		/*
		 * No off_hub flushing; return status for local hub.
		 * Return the caller's mask if all were local (the current
		 * cpu may be in that mask).
		 */
		if (!locals)
			return NULL;
		return cpumask;
	}
	stat->s_requestor++;
	stat->s_ntargcpu += remotes;
	remotes = bau_uvhub_weight(&bau_desc->distribution);
	stat->s_ntarguvhub += remotes;
	if (remotes >= 16)
		stat->s_ntarguvhub16++;
	else if (remotes >= 8)
		stat->s_ntarguvhub8++;
	else if (remotes >= 4)
		stat->s_ntarguvhub4++;
	else if (remotes >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns null if all cpu's were messaged, or
	 * the adjusted flush_mask if any cpu's were not messaged.
	 */
	return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
}
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = &per_cpu(ptcstats, smp_processor_id());
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}
/*
 * Each target uvhub (i.e. a uvhub that has no cpu's) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}
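/*
 * seq_file iterators for the ptc statistics /proc interface: one record
 * per possible cpu, indexed by *offset.
 */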
static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
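/*
 * Convert microseconds to processor cycles, again via the per-cpu cyc2ns
 * scale factor.
 */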
static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
/*
 * Display the statistics thru /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
			"# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
		seq_printf(file,
			"retries rok resetp resett giveup sto bz throt ");
		seq_printf(file,
			"sw_ack recv rtime all ");
		seq_printf(file,
			"one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntarguvhub, stat->s_ntarguvhub16,
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_ntargcpu, stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}

	return 0;
}
/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char buf[300];
	int ret;

	ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_bau_concurrent plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_bau_concurrent, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_response_us, congested_reps, congested_period);

	return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
}
/*
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		printk(KERN_DEBUG "sent: number of shootdown messages sent\n");
		printk(KERN_DEBUG "stime: time spent sending messages\n");
		printk(KERN_DEBUG "numuvhubs: number of hubs targeted with shootdown\n");
		printk(KERN_DEBUG "numuvhubs16: number times 16 or more hubs targeted\n");
		printk(KERN_DEBUG "numuvhubs8: number times 8 or more hubs targeted\n");
		printk(KERN_DEBUG "numuvhubs4: number times 4 or more hubs targeted\n");
		printk(KERN_DEBUG "numuvhubs2: number times 2 or more hubs targeted\n");
		printk(KERN_DEBUG "numuvhubs1: number times 1 hub targeted\n");
		printk(KERN_DEBUG "numcpus: number of cpus targeted with shootdown\n");
		printk(KERN_DEBUG "dto: number of destination timeouts\n");
		printk(KERN_DEBUG "retries: destination timeout retries sent\n");
		printk(KERN_DEBUG "rok: destination timeouts successfully retried\n");
		printk(KERN_DEBUG "resetp: ipi-style resource resets for plugs\n");
		printk(KERN_DEBUG "resett: ipi-style resource resets for timeouts\n");
		printk(KERN_DEBUG "giveup: fall-backs to ipi-style shootdowns\n");
		printk(KERN_DEBUG "sto: number of source timeouts\n");
		printk(KERN_DEBUG "bz: number of stay-busy's\n");
		printk(KERN_DEBUG "throt: number times spun in throttle\n");
		printk(KERN_DEBUG "Destination side statistics:\n");
		printk(KERN_DEBUG "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG "recv: shootdown messages received\n");
		printk(KERN_DEBUG "rtime: time spent processing messages\n");
		printk(KERN_DEBUG "all: shootdown all-tlb messages\n");
		printk(KERN_DEBUG "one: shootdown one-tlb messages\n");
		printk(KERN_DEBUG "mult: interrupts that found multiple messages\n");
		printk(KERN_DEBUG "none: interrupts that found no messages\n");
		printk(KERN_DEBUG "retry: number of retry messages processed\n");
		printk(KERN_DEBUG "canc: number messages canceled by retries\n");
		printk(KERN_DEBUG "nocan: number retries that found nothing to cancel\n");
		printk(KERN_DEBUG "reset: number of ipi-style reset requests processed\n");
		printk(KERN_DEBUG "rcan: number messages canceled by reset requests\n");
		printk(KERN_DEBUG "disable: number times use of the BAU was disabled\n");
		printk(KERN_DEBUG "enable: number times use of the BAU was re-enabled\n");
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}
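/*
 * Parse a non-negative decimal integer from the start of a string
 * (used by tunables_write() below).
 */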
static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}
/*
 * 0 values reset them to defaults
 */
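/*
 * For example (assuming debugfs is mounted at /sys/kernel/debug and the
 * tunables file was created as sgi_uv/bau_tunables), writing nine zeroes:
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 * restores every tunable to its compiled-in default.
 */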
static ssize_t tunables_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	int cnt = 0;
	int val;
	char *p;
	char *q;
	char instr[64];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';
	/* count the fields */
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != 9) {
		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_bau_concurrent = MAX_BAU_CONCURRENT;
				max_bau_concurrent_constant =
							MAX_BAU_CONCURRENT;
				continue;
			}
			bcp = &per_cpu(bau_control, smp_processor_id());
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_bau_concurrent = val;
			max_bau_concurrent_constant = val;
			continue;
		case 1:
			if (val == 0)
				plugged_delay = PLUGGED_DELAY;
			else
				plugged_delay = val;
			continue;
		case 2:
			if (val == 0)
				plugsb4reset = PLUGSB4RESET;
			else
				plugsb4reset = val;
			continue;
		case 3:
			if (val == 0)
				timeoutsb4reset = TIMEOUTSB4RESET;
			else
				timeoutsb4reset = val;
			continue;
		case 4:
			if (val == 0)
				ipi_reset_limit = IPI_RESET_LIMIT;
			else
				ipi_reset_limit = val;
			continue;
		case 5:
			if (val == 0)
				complete_threshold = COMPLETE_THRESHOLD;
			else
				complete_threshold = val;
			continue;
		case 6:
			if (val == 0)
				congested_response_us = CONGESTED_RESPONSE_US;
			else
				congested_response_us = val;
			continue;
		case 7:
			if (val == 0)
				congested_reps = CONGESTED_REPS;
			else
				congested_reps = val;
			continue;
		case 8:
			if (val == 0)
				congested_period = CONGESTED_PERIOD;
			else
				congested_period = val;
			continue;
		}
		if (q == p)
			break;
	}
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return count;
}
static const struct seq_operations uv_ptc_seq_ops = {
	.start = uv_ptc_seq_start,
	.next = uv_ptc_seq_next,
	.stop = uv_ptc_seq_stop,
	.show = uv_ptc_seq_show
};
static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}
static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}
static const struct file_operations proc_uv_ptc_operations = {
	.open = uv_ptc_proc_open,
	.read = seq_read,
	.write = uv_ptc_proc_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations tunables_fops = {
	.open = tunables_open,
	.read = tunables_read,
	.write = tunables_write,
};
static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
			tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}
/*
 * initialize the sending side's sending buffers
 */
static void
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
	 */
	bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
		UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid*/
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      (n << UV_DESC_BASE_PNODE_SHIFT | m));

	/*
	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
		 * in the partition. The bit map will indicate uvhub numbers,
		 * which are 0-N in a partition. Pnodes are unique system-wide.
		 */
		bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
		bd2->header.dest_subnodeid = 0x10; /* the LB */
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
	int pn;
	int cpu;
	char *cp;
	unsigned long pa;
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *pqp_malloc;
	struct bau_control *bcp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);
	pqp_malloc = pqp;

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->va_queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}
/*
 * Initialization of each UV hub's structures
 */
static void __init uv_init_uvhub(int uvhub, int vector)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);
	uv_activation_descriptor_init(node, pnode);
	uv_payload_queue_init(node, pnode);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = uvhub_to_first_apicid(uvhub);
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
			      ((apicid << 32) | vector));
}
/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int
calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
	mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
	index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
	mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
	mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
	base = timeout_base_ns[index];
	ts_ns = base * mult1 * mult2;
	ret = ts_ns / 1000;
	return ret;
}
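/*
 * For example, if the BIOS-selected base period is 10240 ns and both
 * multipliers are 15, the destination timeout is 10240 * 15 * 15 ns,
 * about 2.3 ms, so calculate_destination_timeout() returns roughly 2304
 * microseconds (illustrative numbers only).
 */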
/*
 * initialize the bau_control structure for each cpu
 */
static void uv_init_per_cpu(int nuvhubs)
{
	int i, j, k;
	int cpu;
	int pnode;
	int uvhub;
	short socket = 0;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;
	struct bau_control *hmaster = NULL;
	struct bau_control *smaster = NULL;
	struct socket_desc {
		short num_cpus;
		short cpu_number[16];
	};
	struct uvhub_desc {
		short num_sockets;
		short num_cpus;
		short uvhub;
		short pnode;
		struct socket_desc socket[2];
	};
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	uvhub_descs = (struct uvhub_desc *)
		kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));
		spin_lock_init(&bcp->masks_lock);
		pnode = uv_cpu_hub_info(cpu)->pnode;
		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		bdp = &uvhub_descs[uvhub];
		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;
		/* kludge: assume uv_hub.h is constant */
		socket = (cpu_physical_id(cpu)>>5)&1;
		if (socket >= bdp->num_sockets)
			bdp->num_sockets = socket+1;
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
	}
	for_each_possible_blade(uvhub) {
		bdp = &uvhub_descs[uvhub];
		for (i = 0; i < bdp->num_sockets; i++) {
			sdp = &bdp->socket[i];
			for (j = 0; j < sdp->num_cpus; j++) {
				cpu = sdp->cpu_number[j];
				bcp = &per_cpu(bau_control, cpu);
				bcp->cpu = cpu;
				if (j == 0) {
					smaster = bcp;
					if (i == 0)
						hmaster = bcp;
				}
				bcp->cpus_in_uvhub = bdp->num_cpus;
				bcp->cpus_in_socket = sdp->num_cpus;
				bcp->socket_master = smaster;
				bcp->uvhub_master = hmaster;
				for (k = 0; k < DEST_Q_SIZE; k++)
					bcp->socket_acknowledge_count[k] = 0;
				bcp->uvhub_cpu =
				    uv_cpu_hub_info(cpu)->blade_processor_id;
			}
		}
	}
	kfree(uvhub_descs);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
}
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int vector;
	unsigned long mmr;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				       GFP_KERNEL, cpu_to_node(cur_cpu));

	max_bau_concurrent = MAX_BAU_CONCURRENT;
	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = microsec_2_cycles(congested_response_us);

	uv_init_per_cpu(nuvhubs);

	uv_partition_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++)
		if (uv_blade_nr_possible_cpus(uvhub) &&
			(uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			uv_init_uvhub(uvhub, vector);

	uv_enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		pnode = uv_blade_to_pnode(uvhub);
		/* INIT the bau */
		uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
				      ((unsigned long)1 << 63));
		mmr = 1; /* should be 1 to broadcast to both sockets */
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);