/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;

static unsigned long uv_mmask __read_mostly;

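/*
 * Per-cpu state: shootdown statistics, plus this cpu's view of the BAU
 * payload queue and activation descriptors (per-blade tables, cached in
 * per-cpu pointers by uv_table_bases_finish()).
 */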
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Determine the first node on a blade.
 */
static int __init blade_to_first_node(int blade)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (blade == b)
			return node;
	}
	return -1; /* shouldn't happen */
}

/*
 * Determine the apicid of the first cpu on a blade.
 */
static int __init blade_to_first_apicid(int blade)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (blade == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;
	int cpu;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}

	__get_cpu_var(ptcstats).requestee++;

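	/*
	 * The last cpu on the blade to bring acknowledge_count up to
	 * number_of_cpus frees the software ack resource, which sends
	 * the reply to the broadcaster.
	 */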
	atomic_inc_short(&msg->acknowledge_count);
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpus that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;
	int count = 0;
	int i;
	int j;

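	/*
	 * Scan all DEST_Q_SIZE payload queue slots for unreplied messages
	 * from this sender, and report which cpus have not yet seen each.
	 */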
	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
	     msg++, i++) {
		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
			msp = bau_tablesp->msg_statuses + i;
			printk(KERN_DEBUG
			       "slot %d: address:%#lx %d of %d, not cpu(s): ",
			       i, msg->address, msg->acknowledge_count,
			       msg->number_of_cpus);
			for (j = 0; j < msg->number_of_cpus; j++) {
				if (!((1L << j) & msp->seen_by.bits)) {
					count++;
					printk("%d ", j);
				}
			}
			printk("\n");
		}
	}
	return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpus that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i;
	int count = 0;

	sender = smp_processor_id();
	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		count += uv_examine_destination(uv_bau_table_bases[i], sender);
	}
	return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;

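	/*
	 * Spin until this cpu's descriptor-status field in the activation
	 * status MMR returns to IDLE; retry on source-side timeouts and,
	 * after examining the destinations, on destination-side timeouts.
	 */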
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/*
		 * spin here looking for progress at the destinations
		 */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/*
				 * returns number of cpus not responding
				 */
				if (uv_examine_destinations
				    (&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
					       "uv_flush_tlb_others: giving up on cpu %d\n",
					       smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				   udelay(1000);
				 */
				destination_timeouts = 0;
			}
		}
		cpu_relax();
	}
	return FLUSH_COMPLETE;
}

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int pnode;
	int bit;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;

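	/*
	 * Each activation-status MMR packs one UV_ACT_STATUS_SIZE-bit
	 * field per cpu, UV_CPUS_PER_ACT_STATUS cpus to a register;
	 * locate the register and shift for this cpu's field.
	 */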
	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		tries++;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
					right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpus, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpus from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		pnode = uv_cpu_to_pnode(bit);
		if (pnode == this_pnode)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLBs
 * @cpumask: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLBs on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
	int i;
	int bit;
	int pnode;
	int uv_cpu;
	int this_pnode;
	int locals = 0;
	struct bau_desc *bau_desc;

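	/*
	 * This function handles only the remote flushes (see the comment
	 * above), so drop the requesting cpu from the working mask.
	 */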
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	uv_cpu = uv_blade_processor_id();
	this_pnode = uv_hub_info->pnode;
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	i = 0;
	for_each_cpu(bit, flush_mask) {
		pnode = uv_cpu_to_pnode(bit);
		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
		if (pnode == this_pnode) {
			locals++;
			continue;
		}
		bau_node_set(pnode - uv_partition_base_pnode,
			     &bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/*
		 * no off_node flushing; return status for local node
		 */
		if (locals)
			return flush_mask;
		else
			return NULL;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptible cpus see this
 * interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1;
	cycles_t time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();

	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
	va_queue_last = __get_cpu_var(bau_control).va_queue_last;

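	/*
	 * Walk the circular payload queue from this cpu's saved head,
	 * handling every message whose sw_ack_vector is still set, and
	 * wrapping from va_queue_last back to va_queue_first.
	 */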
	msg = __get_cpu_var(bau_control).bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - va_queue_first;
		sw_ack_slot = ffs(fw) - 1;

		uv_bau_process_message(msg, msg_slot, sw_ack_slot);

		msg++;
		if (msg > va_queue_last)
			msg = va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * uv_enable_timeouts
 *
 * Each target blade (i.e. each blade that has cpus) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int blade;
	int nblades;
	int pnode;
	unsigned long mmr_image;

	nblades = uv_num_possible_blades();

	for (blade = 0; blade < nblades; blade++) {
		if (!uv_blade_nr_possible_cpus(blade))
			continue;

		pnode = uv_blade_to_pnode(blade);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
			       UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
			       UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
			      UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
			      UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics through /proc
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}

	return 0;
}

/*
 * 0: display the meaning of the statistics
 * >0: set the retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	long newmode;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG
		"requestor: times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee: times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one: times requested to flush a single address\n");
		printk(KERN_DEBUG
		"all: times requested to flush all TLBs\n");
		printk(KERN_DEBUG
		"sretry: number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry: number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i: times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us: cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us: cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok: successes on retry\n");
		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
		printk(KERN_DEBUG
		"dmult: interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget: nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start	= uv_ptc_seq_start,
	.next	= uv_ptc_seq_next,
	.stop	= uv_ptc_seq_stop,
	.show	= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
	return 0;
}

/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
	int i;
	struct bau_msg_status *msp;
	struct bau_control *bau_tabp;

	bau_tabp =
	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
	BUG_ON(!bau_tabp);

	bau_tabp->msg_statuses =
	    kmalloc_node(sizeof(struct bau_msg_status) *
			 DEST_Q_SIZE, GFP_KERNEL, node);
	BUG_ON(!bau_tabp->msg_statuses);

	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
		bau_cpubits_clear(&msp->seen_by, (int)
				  uv_blade_nr_possible_cpus(blade));

	uv_bau_table_bases[blade] = bau_tabp;

	return bau_tabp;
}

/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade,
		      struct bau_control *bau_tablesp,
		      struct bau_desc *adp)
{
	struct bau_control *bcp;
	int cpu;

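	/*
	 * Point every cpu on this blade at the blade's shared payload
	 * queue, message status array and descriptor table.
	 */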
	for_each_present_cpu(cpu) {
		if (blade != uv_cpu_to_blade_id(cpu))
			continue;

		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
		bcp->bau_msg_head	= bau_tablesp->va_queue_first;
		bcp->va_queue_first	= bau_tablesp->va_queue_first;
		bcp->va_queue_last	= bau_tablesp->va_queue_last;
		bcp->msg_statuses	= bau_tablesp->msg_statuses;
		bcp->descriptor_base	= adp;
	}
}

/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	unsigned long mmr_image;
	struct bau_desc *adp;
	struct bau_desc *ad2;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and up to 32 (UV_ADP_SIZE) cpus per blade
	 */
	adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
		UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!adp);

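	/*
	 * Split the descriptor table's global address into node bits (n,
	 * above uv_nshift) and node-relative offset bits (m) for the
	 * descriptor base MMR.
	 */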
	pa = uv_gpa(adp); /* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image) {
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	}

	/*
	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 nodes.
	 */
	for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the first node in the partition, so
		 * the bit map will indicate partition-relative node numbers.
		 * note that base_dest_nodeid is actually a nasid.
		 */
		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
	struct bau_payload_queue_entry *pqp;
	unsigned long pa;
	int pn;
	char *cp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);

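	/*
	 * Round pqp up to a 32-byte boundary (add 31, then clear the low
	 * 5 bits); the extra entry in the allocation above absorbs the
	 * alignment offset.
	 */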
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

	return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade)
{
	int node;
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_desc *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	node = blade_to_first_node(blade);
	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, bau_tablesp, adp);
	/*
	 * the below initialization can't be done in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = blade_to_first_apicid(blade);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int blade;
	int nblades;
	int cur_cpu;

	if (!is_uv_system())
		return 0;

	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
					GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
	nblades = uv_num_possible_blades();

	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);

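	/*
	 * Find the lowest pnode among blades that have cpus; the
	 * distribution bitmap entries in each descriptor are encoded
	 * relative to this base pnode.
	 */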
	uv_partition_base_pnode = 0x7fffffff;
	for (blade = 0; blade < nblades; blade++)
		if (uv_blade_nr_possible_cpus(blade) &&
		    (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(blade);
	for (blade = 0; blade < nblades; blade++)
		if (uv_blade_nr_possible_cpus(blade))
			uv_init_blade(blade);

	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);