Commit | Line | Data |
---|---|---|
ecd75fc8 | 1 | /* Copyright (C) 2010-2014 Free Software Foundation, Inc. |
92c9a463 JB |
2 | |
3 | This file is part of GDB. | |
4 | ||
5 | This program is free software; you can redistribute it and/or modify | |
6 | it under the terms of the GNU General Public License as published by | |
7 | the Free Software Foundation; either version 3 of the License, or | |
8 | (at your option) any later version. | |
9 | ||
10 | This program is distributed in the hope that it will be useful, | |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | GNU General Public License for more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License | |
16 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ | |
17 | ||
18 | #include "defs.h" | |
19 | #include "ia64-tdep.h" | |
20 | #include "inferior.h" | |
21 | #include "inf-ttrace.h" | |
22 | #include "regcache.h" | |
23 | #include "solib-ia64-hpux.h" | |
24 | ||
25 | #include <ia64/sys/uregs.h> | |
26 | #include <sys/ttrace.h> | |
27 | ||
28 | /* The offsets used with ttrace to read the value of the raw registers. */ | |
29 | ||
30 | static int u_offsets[] = | |
31 | { /* Static General Registers. */ | |
32 | -1, __r1, __r2, __r3, __r4, __r5, __r6, __r7, | |
33 | __r8, __r9, __r10, __r11, __r12, __r13, __r14, __r15, | |
34 | __r16, __r17, __r18, __r19, __r20, __r21, __r22, __r23, | |
35 | __r24, __r25, __r26, __r27, __r28, __r29, __r30, __r31, | |
36 | -1, -1, -1, -1, -1, -1, -1, -1, | |
37 | -1, -1, -1, -1, -1, -1, -1, -1, | |
38 | -1, -1, -1, -1, -1, -1, -1, -1, | |
39 | -1, -1, -1, -1, -1, -1, -1, -1, | |
40 | -1, -1, -1, -1, -1, -1, -1, -1, | |
41 | -1, -1, -1, -1, -1, -1, -1, -1, | |
42 | -1, -1, -1, -1, -1, -1, -1, -1, | |
43 | -1, -1, -1, -1, -1, -1, -1, -1, | |
44 | -1, -1, -1, -1, -1, -1, -1, -1, | |
45 | -1, -1, -1, -1, -1, -1, -1, -1, | |
46 | -1, -1, -1, -1, -1, -1, -1, -1, | |
47 | -1, -1, -1, -1, -1, -1, -1, -1, | |
48 | ||
49 | /* Static Floating-Point Registers. */ | |
50 | -1, -1, __f2, __f3, __f4, __f5, __f6, __f7, | |
51 | __f8, __f9, __f10, __f11, __f12, __f13, __f14, __f15, | |
52 | __f16, __f17, __f18, __f19, __f20, __f21, __f22, __f23, | |
53 | __f24, __f25, __f26, __f27, __f28, __f29, __f30, __f31, | |
54 | __f32, __f33, __f34, __f35, __f36, __f37, __f38, __f39, | |
55 | __f40, __f41, __f42, __f43, __f44, __f45, __f46, __f47, | |
56 | __f48, __f49, __f50, __f51, __f52, __f53, __f54, __f55, | |
57 | __f56, __f57, __f58, __f59, __f60, __f61, __f62, __f63, | |
58 | __f64, __f65, __f66, __f67, __f68, __f69, __f70, __f71, | |
59 | __f72, __f73, __f74, __f75, __f76, __f77, __f78, __f79, | |
60 | __f80, __f81, __f82, __f83, __f84, __f85, __f86, __f87, | |
61 | __f88, __f89, __f90, __f91, __f92, __f93, __f94, __f95, | |
62 | __f96, __f97, __f98, __f99, __f100, __f101, __f102, __f103, | |
63 | __f104, __f105, __f106, __f107, __f108, __f109, __f110, __f111, | |
64 | __f112, __f113, __f114, __f115, __f116, __f117, __f118, __f119, | |
65 | __f120, __f121, __f122, __f123, __f124, __f125, __f126, __f127, | |
66 | ||
67 | -1, -1, -1, -1, -1, -1, -1, -1, | |
68 | -1, -1, -1, -1, -1, -1, -1, -1, | |
69 | -1, -1, -1, -1, -1, -1, -1, -1, | |
70 | -1, -1, -1, -1, -1, -1, -1, -1, | |
71 | -1, -1, -1, -1, -1, -1, -1, -1, | |
72 | -1, -1, -1, -1, -1, -1, -1, -1, | |
73 | -1, -1, -1, -1, -1, -1, -1, -1, | |
74 | -1, -1, -1, -1, -1, -1, -1, -1, | |
75 | ||
76 | /* Branch Registers. */ | |
77 | __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, | |
78 | ||
79 | /* Virtual frame pointer and virtual return address pointer. */ | |
80 | -1, -1, | |
81 | ||
82 | /* Other registers. */ | |
83 | __pr, __ip, __cr_ipsr, __cfm, | |
84 | ||
85 | /* Kernel registers. */ | |
86 | -1, -1, -1, -1, | |
87 | -1, -1, -1, -1, | |
88 | ||
89 | -1, -1, -1, -1, -1, -1, -1, -1, | |
90 | ||
91 | /* Some application registers. */ | |
92 | __ar_rsc, __ar_bsp, __ar_bspstore, __ar_rnat, | |
93 | ||
94 | -1, | |
95 | -1, /* Not available: FCR, IA32 floating control register. */ | |
96 | -1, -1, | |
97 | ||
98 | -1, /* Not available: EFLAG. */ | |
99 | -1, /* Not available: CSD. */ | |
100 | -1, /* Not available: SSD. */ | |
101 | -1, /* Not available: CFLG. */ | |
102 | -1, /* Not available: FSR. */ | |
103 | -1, /* Not available: FIR. */ | |
104 | -1, /* Not available: FDR. */ | |
105 | -1, | |
106 | __ar_ccv, -1, -1, -1, __ar_unat, -1, -1, -1, | |
107 | __ar_fpsr, -1, -1, -1, | |
108 | -1, /* Not available: ITC. */ | |
109 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
110 | -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
111 | __ar_pfs, __ar_lc, __ar_ec, | |
112 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
113 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
114 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
115 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
116 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
117 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, | |
118 | -1 | |
119 | /* All following registers, starting with nat0, are handled as | |
120 | pseudo registers, and hence are handled separately. */ | |
121 | }; | |
122 | ||
/* Some registers have a fixed value and can not be modified.
   Store their value in static constant buffers that can be used
   later to fill the register cache.  */

/* r0 is always 0 (8-byte general-register image).  */
static const char r0_value[8] = {0x00, 0x00, 0x00, 0x00,
                                 0x00, 0x00, 0x00, 0x00};

/* f0 is always 0.0 (16-byte floating-register image).  */
static const char f0_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};

/* f1 is always 1.0.  NOTE(review): the byte layout is assumed to be the
   raw ia64 floating-register image in the regcache's expected order —
   confirm against ia64-tdep.c before changing.  */
static const char f1_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0xff, 0xff,
                                  0x80, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
136 | ||
/* The "to_wait" routine inherited from the "inf-ttrace" layer.
   Saved by _initialize_ia64_hpux_nat so that ia64_hpux_wait can
   delegate the actual waiting to it.  */

static ptid_t (*super_to_wait) (struct target_ops *, ptid_t,
                                struct target_waitstatus *, int);
141 | ||
142 | /* The "to_wait" target_ops routine routine for ia64-hpux. */ | |
143 | ||
144 | static ptid_t | |
145 | ia64_hpux_wait (struct target_ops *ops, ptid_t ptid, | |
146 | struct target_waitstatus *ourstatus, int options) | |
147 | { | |
148 | ptid_t new_ptid; | |
149 | ||
150 | new_ptid = super_to_wait (ops, ptid, ourstatus, options); | |
151 | ||
152 | /* If this is a DLD event (hard-coded breakpoint instruction | |
153 | that was activated by the solib-ia64-hpux module), we need to | |
154 | process it, and then resume the execution as if the event did | |
155 | not happen. */ | |
156 | if (ourstatus->kind == TARGET_WAITKIND_STOPPED | |
a493e3e2 | 157 | && ourstatus->value.sig == GDB_SIGNAL_TRAP |
92c9a463 JB |
158 | && ia64_hpux_at_dld_breakpoint_p (new_ptid)) |
159 | { | |
160 | ia64_hpux_handle_dld_breakpoint (new_ptid); | |
161 | ||
a493e3e2 | 162 | target_resume (new_ptid, 0, GDB_SIGNAL_0); |
92c9a463 JB |
163 | ourstatus->kind = TARGET_WAITKIND_IGNORE; |
164 | } | |
165 | ||
166 | return new_ptid; | |
167 | } | |
168 | ||
/* Fetch the RNAT register and supply it to the REGCACHE.

   RNAT cannot be read via the normal u_offsets path; raise an error
   if the ttrace read fails.  */

static void
ia64_hpux_fetch_rnat_register (struct regcache *regcache)
{
  CORE_ADDR addr;
  gdb_byte buf[8];
  int status;

  /* The value of RNAT is stored at bsp|0x1f8, and must be read using
     TT_LWP_RDRSEBS.  */

  regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &addr);
  addr |= 0x1f8;

  status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), addr, sizeof (buf),
                   (uintptr_t) buf);
  if (status < 0)
    error (_("failed to read RNAT register at %s"),
           paddress (get_regcache_arch(regcache), addr));

  regcache_raw_supply (regcache, IA64_RNAT_REGNUM, buf);
}
193 | ||
194 | /* Read the value of the register saved at OFFSET in the save_state_t | |
195 | structure, and store its value in BUF. LEN is the size of the register | |
196 | to be read. */ | |
197 | ||
198 | static int | |
199 | ia64_hpux_read_register_from_save_state_t (int offset, gdb_byte *buf, int len) | |
200 | { | |
201 | int status; | |
202 | ||
203 | status = ttrace (TT_LWP_RUREGS, ptid_get_pid (inferior_ptid), | |
204 | ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf); | |
205 | ||
206 | return status; | |
207 | } | |
208 | ||
209 | /* Fetch register REGNUM from the inferior. */ | |
210 | ||
211 | static void | |
212 | ia64_hpux_fetch_register (struct regcache *regcache, int regnum) | |
213 | { | |
214 | struct gdbarch *gdbarch = get_regcache_arch (regcache); | |
215 | int offset, len, status; | |
216 | gdb_byte *buf; | |
217 | ||
218 | if (regnum == IA64_GR0_REGNUM) | |
219 | { | |
220 | /* r0 is always 0. */ | |
221 | regcache_raw_supply (regcache, regnum, r0_value); | |
222 | return; | |
223 | } | |
224 | ||
225 | if (regnum == IA64_FR0_REGNUM) | |
226 | { | |
227 | /* f0 is always 0.0. */ | |
228 | regcache_raw_supply (regcache, regnum, f0_value); | |
229 | return; | |
230 | } | |
231 | ||
232 | if (regnum == IA64_FR1_REGNUM) | |
233 | { | |
234 | /* f1 is always 1.0. */ | |
235 | regcache_raw_supply (regcache, regnum, f1_value); | |
236 | return; | |
237 | } | |
238 | ||
239 | if (regnum == IA64_RNAT_REGNUM) | |
240 | { | |
241 | ia64_hpux_fetch_rnat_register (regcache); | |
242 | return; | |
243 | } | |
244 | ||
245 | /* Get the register location. If the register can not be fetched, | |
246 | then return now. */ | |
247 | offset = u_offsets[regnum]; | |
248 | if (offset == -1) | |
249 | return; | |
250 | ||
251 | len = register_size (gdbarch, regnum); | |
252 | buf = alloca (len * sizeof (gdb_byte)); | |
253 | status = ia64_hpux_read_register_from_save_state_t (offset, buf, len); | |
254 | if (status < 0) | |
a9df6b22 | 255 | warning (_("Failed to read register value for %s."), |
92c9a463 JB |
256 | gdbarch_register_name (gdbarch, regnum)); |
257 | ||
258 | regcache_raw_supply (regcache, regnum, buf); | |
259 | } | |
260 | ||
/* The "to_fetch_registers" target_ops routine for ia64-hpux.

   REGNUM == -1 means "fetch all registers".  */

static void
ia64_hpux_fetch_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum != -1)
    ia64_hpux_fetch_register (regcache, regnum);
  else
    {
      const int num_regs = gdbarch_num_regs (get_regcache_arch (regcache));

      for (regnum = 0; regnum < num_regs; regnum++)
        ia64_hpux_fetch_register (regcache, regnum);
    }
}
275 | ||
276 | /* Save register REGNUM (stored in BUF) in the save_state_t structure. | |
277 | LEN is the size of the register in bytes. | |
278 | ||
279 | Return the value from the corresponding ttrace call (a negative value | |
280 | means that the operation failed). */ | |
281 | ||
282 | static int | |
283 | ia64_hpux_write_register_to_saved_state_t (int offset, gdb_byte *buf, int len) | |
284 | { | |
285 | return ttrace (TT_LWP_WUREGS, ptid_get_pid (inferior_ptid), | |
286 | ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf); | |
287 | } | |
288 | ||
289 | /* Store register REGNUM into the inferior. */ | |
290 | ||
291 | static void | |
292 | ia64_hpux_store_register (const struct regcache *regcache, int regnum) | |
293 | { | |
294 | struct gdbarch *gdbarch = get_regcache_arch (regcache); | |
295 | int offset = u_offsets[regnum]; | |
296 | gdb_byte *buf; | |
297 | int len, status; | |
298 | ||
299 | /* If the register can not be stored, then return now. */ | |
300 | if (offset == -1) | |
301 | return; | |
302 | ||
303 | /* I don't know how to store that register for now. So just ignore any | |
304 | request to store it, to avoid an internal error. */ | |
305 | if (regnum == IA64_PSR_REGNUM) | |
306 | return; | |
307 | ||
308 | len = register_size (gdbarch, regnum); | |
309 | buf = alloca (len * sizeof (gdb_byte)); | |
310 | regcache_raw_collect (regcache, regnum, buf); | |
311 | ||
312 | status = ia64_hpux_write_register_to_saved_state_t (offset, buf, len); | |
313 | ||
314 | if (status < 0) | |
a9df6b22 | 315 | error (_("failed to write register value for %s."), |
92c9a463 JB |
316 | gdbarch_register_name (gdbarch, regnum)); |
317 | } | |
318 | ||
/* The "to_store_registers" target_ops routine for ia64-hpux.

   REGNUM == -1 means "store all registers".  */

static void
ia64_hpux_store_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum != -1)
    ia64_hpux_store_register (regcache, regnum);
  else
    {
      const int num_regs = gdbarch_num_regs (get_regcache_arch (regcache));

      for (regnum = 0; regnum < num_regs; regnum++)
        ia64_hpux_store_register (regcache, regnum);
    }
}
333 | ||
/* The "xfer_partial" routine from the "inf-ttrace" target layer.
   Ideally, we would like to use this routine for all transfer
   requests, but this platform has a lot of special cases that
   need to be handled manually.  So we override this routine and
   delegate back if we detect that we are not in a special case.  */

static target_xfer_partial_ftype *super_xfer_partial;
342 | /* The "xfer_partial" routine for a memory region that is completely | |
343 | outside of the backing-store region. */ | |
344 | ||
9b409511 | 345 | static enum target_xfer_status |
92c9a463 JB |
346 | ia64_hpux_xfer_memory_no_bs (struct target_ops *ops, const char *annex, |
347 | gdb_byte *readbuf, const gdb_byte *writebuf, | |
9b409511 YQ |
348 | CORE_ADDR addr, LONGEST len, |
349 | ULONGEST *xfered_len) | |
92c9a463 JB |
350 | { |
351 | /* Memory writes need to be aligned on 16byte boundaries, at least | |
352 | when writing in the text section. On the other hand, the size | |
353 | of the buffer does not need to be a multiple of 16bytes. | |
354 | ||
355 | No such restriction when performing memory reads. */ | |
356 | ||
357 | if (writebuf && addr & 0x0f) | |
358 | { | |
359 | const CORE_ADDR aligned_addr = addr & ~0x0f; | |
360 | const int aligned_len = len + (addr - aligned_addr); | |
361 | gdb_byte *aligned_buf = alloca (aligned_len * sizeof (gdb_byte)); | |
362 | LONGEST status; | |
363 | ||
364 | /* Read the portion of memory between ALIGNED_ADDR and ADDR, so | |
365 | that we can write it back during our aligned memory write. */ | |
366 | status = super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, | |
367 | aligned_buf /* read */, | |
368 | NULL /* write */, | |
369 | aligned_addr, addr - aligned_addr); | |
370 | if (status <= 0) | |
9b409511 | 371 | return TARGET_XFER_EOF; |
92c9a463 JB |
372 | memcpy (aligned_buf + (addr - aligned_addr), writebuf, len); |
373 | ||
374 | return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, | |
375 | NULL /* read */, aligned_buf /* write */, | |
9b409511 | 376 | aligned_addr, aligned_len, xfered_len); |
92c9a463 JB |
377 | } |
378 | else | |
379 | /* Memory read or properly aligned memory write. */ | |
380 | return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, readbuf, | |
9b409511 | 381 | writebuf, addr, len, xfered_len); |
92c9a463 JB |
382 | } |
383 | ||
/* Read LEN bytes at ADDR from memory, and store it in BUF.  This memory
   region is assumed to be inside the backing store.

   Return zero if the operation failed.  */

static int
ia64_hpux_read_memory_bs (gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  /* TT_LWP_RDRSEBS reads whole 8-byte words, so start from the aligned
     word containing ADDR.  */
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int skip_lo = 0;   /* Bytes of TMP_BUF preceding ADDR.  */
      int skip_hi = 0;   /* Bytes of TMP_BUF past ADDR + LEN.  */

      /* Fetch one aligned 8-byte word from the backing store.  */
      status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      /* The first word may start before ADDR...  */
      if (tmp_addr < addr)
        skip_lo = addr - tmp_addr;

      /* ... and the last word may extend past ADDR + LEN.  */
      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        skip_hi = (tmp_addr + sizeof (tmp_buf)) - (addr + len);

      /* Copy only the requested slice of the word into BUF.  */
      memcpy (buf + (tmp_addr + skip_lo - addr),
              tmp_buf + skip_lo,
              sizeof (tmp_buf) - skip_lo - skip_hi);

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}
422 | ||
/* Write LEN bytes from BUF in memory at ADDR.  This memory region is assumed
   to be inside the backing store.

   Return zero if the operation failed.  */

static int
ia64_hpux_write_memory_bs (const gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  /* TT_LWP_WRRSEBS writes whole 8-byte words, so operate on the aligned
     word containing ADDR.  */
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int lo = 0;   /* Index of the first byte of TMP_BUF to replace.  */
      int hi = 7;   /* Index of the last byte of TMP_BUF to replace.  */

      if (tmp_addr < addr || tmp_addr + sizeof (tmp_buf) > addr + len)
        /* Part of the 8byte region pointed by tmp_addr needs to be preserved.
           So read it in before we copy the data that needs to be changed.  */
        if (!ia64_hpux_read_memory_bs (tmp_buf, tmp_addr, sizeof (tmp_buf)))
          return 0;

      /* The first word may begin before ADDR...  */
      if (tmp_addr < addr)
        lo = addr - tmp_addr;

      /* ... and the last word may extend past ADDR + LEN.  */
      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        hi = addr - tmp_addr + len - 1;

      /* Splice the caller's bytes [lo, hi] into the preserved word.  */
      memcpy (tmp_buf + lo, buf + tmp_addr - addr + lo, hi - lo + 1);

      /* Write the full 8-byte word back to the backing store.  */
      status = ttrace (TT_LWP_WRRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}
465 | ||
466 | /* The "xfer_partial" routine for a memory region that is completely | |
467 | inside of the backing-store region. */ | |
468 | ||
469 | static LONGEST | |
470 | ia64_hpux_xfer_memory_bs (struct target_ops *ops, const char *annex, | |
471 | gdb_byte *readbuf, const gdb_byte *writebuf, | |
472 | CORE_ADDR addr, LONGEST len) | |
473 | { | |
474 | int success; | |
475 | ||
476 | if (readbuf) | |
477 | success = ia64_hpux_read_memory_bs (readbuf, addr, len); | |
478 | else | |
479 | success = ia64_hpux_write_memory_bs (writebuf, addr, len); | |
480 | ||
481 | if (success) | |
482 | return len; | |
483 | else | |
484 | return 0; | |
485 | } | |
486 | ||
/* Get a register value as a unsigned value directly from the system,
   instead of going through the regcache.

   This function is meant to be used when inferior_ptid is not
   a thread/process known to GDB.

   REGNUM is the GDB raw register number; it must have a valid entry
   in U_OFFSETS.  REG_SIZE is the register size in bytes.  */

static ULONGEST
ia64_hpux_get_register_from_save_state_t (int regnum, int reg_size)
{
  gdb_byte *buf = alloca (reg_size);
  int offset = u_offsets[regnum];
  int status;

  /* The register is assumed to be available for fetching.  */
  gdb_assert (offset != -1);

  status = ia64_hpux_read_register_from_save_state_t (offset, buf, reg_size);
  if (status < 0)
    {
      /* This really should not happen.  If it does, emit a warning
         and pretend the register value is zero.  Not exactly the best
         error recovery mechanism, but better than nothing.  We will
         try to do better if we can demonstrate that this can happen
         under normal circumstances.  */
      warning (_("Failed to read value of register number %d."), regnum);
      return 0;
    }

  /* Decode the raw bytes as a big-endian unsigned integer.  */
  return extract_unsigned_integer (buf, reg_size, BFD_ENDIAN_BIG);
}
517 | ||
92c9a463 JB |
518 | /* The "xfer_partial" target_ops routine for ia64-hpux, in the case |
519 | where the requested object is TARGET_OBJECT_MEMORY. */ | |
520 | ||
9b409511 | 521 | static enum target_xfer_status |
92c9a463 JB |
522 | ia64_hpux_xfer_memory (struct target_ops *ops, const char *annex, |
523 | gdb_byte *readbuf, const gdb_byte *writebuf, | |
9b409511 | 524 | CORE_ADDR addr, ULONGEST len, ULONGEST *xfered_len) |
92c9a463 JB |
525 | { |
526 | CORE_ADDR bsp, bspstore; | |
527 | CORE_ADDR start_addr, short_len; | |
528 | int status = 0; | |
529 | ||
530 | /* The back-store region cannot be read/written by the standard memory | |
531 | read/write operations. So we handle the memory region piecemeal: | |
532 | (1) and (2) The regions before and after the backing-store region, | |
533 | which can be treated as normal memory; | |
534 | (3) The region inside the backing-store, which needs to be | |
535 | read/written specially. */ | |
536 | ||
973e3cf7 JB |
537 | if (in_inferior_list (ptid_get_pid (inferior_ptid))) |
538 | { | |
539 | struct regcache *regcache = get_current_regcache (); | |
540 | ||
541 | regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp); | |
542 | regcache_raw_read_unsigned (regcache, IA64_BSPSTORE_REGNUM, &bspstore); | |
543 | } | |
544 | else | |
545 | { | |
546 | /* This is probably a child of our inferior created by a fork. | |
547 | Because this process has not been added to our inferior list | |
548 | (we are probably in the process of handling that child | |
549 | process), we do not have a regcache to read the registers | |
550 | from. So get those values directly from the kernel. */ | |
551 | bsp = ia64_hpux_get_register_from_save_state_t (IA64_BSP_REGNUM, 8); | |
552 | bspstore = | |
553 | ia64_hpux_get_register_from_save_state_t (IA64_BSPSTORE_REGNUM, 8); | |
554 | } | |
92c9a463 JB |
555 | |
556 | /* 1. Memory region before BSPSTORE. */ | |
557 | ||
558 | if (addr < bspstore) | |
559 | { | |
560 | short_len = len; | |
561 | if (addr + len > bspstore) | |
562 | short_len = bspstore - addr; | |
563 | ||
564 | status = ia64_hpux_xfer_memory_no_bs (ops, annex, readbuf, writebuf, | |
565 | addr, short_len); | |
566 | if (status <= 0) | |
9b409511 | 567 | return TARGET_XFER_EOF; |
92c9a463 JB |
568 | } |
569 | ||
570 | /* 2. Memory region after BSP. */ | |
571 | ||
572 | if (addr + len > bsp) | |
573 | { | |
574 | start_addr = addr; | |
575 | if (start_addr < bsp) | |
576 | start_addr = bsp; | |
577 | short_len = len + addr - start_addr; | |
578 | ||
579 | status = ia64_hpux_xfer_memory_no_bs | |
580 | (ops, annex, | |
581 | readbuf ? readbuf + (start_addr - addr) : NULL, | |
582 | writebuf ? writebuf + (start_addr - addr) : NULL, | |
583 | start_addr, short_len); | |
584 | if (status <= 0) | |
9b409511 | 585 | return TARGET_XFER_EOF; |
92c9a463 JB |
586 | } |
587 | ||
588 | /* 3. Memory region between BSPSTORE and BSP. */ | |
589 | ||
590 | if (bspstore != bsp | |
591 | && ((addr < bspstore && addr + len > bspstore) | |
592 | || (addr + len <= bsp && addr + len > bsp))) | |
593 | { | |
594 | start_addr = addr; | |
595 | if (addr < bspstore) | |
596 | start_addr = bspstore; | |
597 | short_len = len + addr - start_addr; | |
598 | ||
599 | if (start_addr + short_len > bsp) | |
600 | short_len = bsp - start_addr; | |
601 | ||
602 | gdb_assert (short_len > 0); | |
603 | ||
604 | status = ia64_hpux_xfer_memory_bs | |
605 | (ops, annex, | |
606 | readbuf ? readbuf + (start_addr - addr) : NULL, | |
607 | writebuf ? writebuf + (start_addr - addr) : NULL, | |
608 | start_addr, short_len); | |
609 | if (status < 0) | |
9b409511 | 610 | return TARGET_XFER_EOF; |
92c9a463 JB |
611 | } |
612 | ||
9b409511 YQ |
613 | *xfered_len = len; |
614 | return TARGET_XFER_OK; | |
92c9a463 JB |
615 | } |
616 | ||
77ca787b JB |
617 | /* Handle the transfer of TARGET_OBJECT_HPUX_UREGS objects on ia64-hpux. |
618 | ANNEX is currently ignored. | |
619 | ||
620 | The current implementation does not support write transfers (because | |
621 | we do not currently do not need these transfers), and will raise | |
622 | a failed assertion if WRITEBUF is not NULL. */ | |
623 | ||
9b409511 | 624 | static enum target_xfer_status |
77ca787b JB |
625 | ia64_hpux_xfer_uregs (struct target_ops *ops, const char *annex, |
626 | gdb_byte *readbuf, const gdb_byte *writebuf, | |
9b409511 | 627 | ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) |
77ca787b JB |
628 | { |
629 | int status; | |
630 | ||
631 | gdb_assert (writebuf == NULL); | |
632 | ||
633 | status = ia64_hpux_read_register_from_save_state_t (offset, readbuf, len); | |
634 | if (status < 0) | |
2ed4b548 | 635 | return TARGET_XFER_E_IO; |
9b409511 YQ |
636 | |
637 | *xfered_len = (ULONGEST) len; | |
638 | return TARGET_XFER_OK; | |
77ca787b JB |
639 | } |
640 | ||
c4de7027 JB |
641 | /* Handle the transfer of TARGET_OBJECT_HPUX_SOLIB_GOT objects on ia64-hpux. |
642 | ||
643 | The current implementation does not support write transfers (because | |
644 | we do not currently do not need these transfers), and will raise | |
645 | a failed assertion if WRITEBUF is not NULL. */ | |
646 | ||
9b409511 | 647 | static enum target_xfer_status |
c4de7027 JB |
648 | ia64_hpux_xfer_solib_got (struct target_ops *ops, const char *annex, |
649 | gdb_byte *readbuf, const gdb_byte *writebuf, | |
9b409511 | 650 | ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) |
c4de7027 JB |
651 | { |
652 | CORE_ADDR fun_addr; | |
653 | /* The linkage pointer. We use a uint64_t to make sure that the size | |
654 | of the object we are returning is always 64 bits long, as explained | |
655 | in the description of the TARGET_OBJECT_HPUX_SOLIB_GOT object. | |
656 | This is probably paranoia, but we do not use a CORE_ADDR because | |
657 | it could conceivably be larger than uint64_t. */ | |
658 | uint64_t got; | |
659 | ||
660 | gdb_assert (writebuf == NULL); | |
661 | ||
662 | if (offset > sizeof (got)) | |
9b409511 | 663 | return TARGET_XFER_EOF; |
c4de7027 JB |
664 | |
665 | fun_addr = string_to_core_addr (annex); | |
666 | got = ia64_hpux_get_solib_linkage_addr (fun_addr); | |
667 | ||
668 | if (len > sizeof (got) - offset) | |
669 | len = sizeof (got) - offset; | |
670 | memcpy (readbuf, &got + offset, len); | |
671 | ||
9b409511 YQ |
672 | *xfered_len = (ULONGEST) len; |
673 | return TARGET_XFER_OK; | |
c4de7027 JB |
674 | } |
675 | ||
92c9a463 JB |
676 | /* The "to_xfer_partial" target_ops routine for ia64-hpux. */ |
677 | ||
9b409511 | 678 | static enum target_xfer_status |
92c9a463 JB |
679 | ia64_hpux_xfer_partial (struct target_ops *ops, enum target_object object, |
680 | const char *annex, gdb_byte *readbuf, | |
9b409511 YQ |
681 | const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, |
682 | ULONGEST *xfered_len) | |
92c9a463 | 683 | { |
9b409511 | 684 | enum target_xfer_status val; |
92c9a463 JB |
685 | |
686 | if (object == TARGET_OBJECT_MEMORY) | |
9b409511 YQ |
687 | val = ia64_hpux_xfer_memory (ops, annex, readbuf, writebuf, offset, len, |
688 | xfered_len); | |
77ca787b | 689 | else if (object == TARGET_OBJECT_HPUX_UREGS) |
9b409511 YQ |
690 | val = ia64_hpux_xfer_uregs (ops, annex, readbuf, writebuf, offset, len, |
691 | xfered_len); | |
c4de7027 JB |
692 | else if (object == TARGET_OBJECT_HPUX_SOLIB_GOT) |
693 | val = ia64_hpux_xfer_solib_got (ops, annex, readbuf, writebuf, offset, | |
9b409511 | 694 | len, xfered_len); |
92c9a463 JB |
695 | else |
696 | val = super_xfer_partial (ops, object, annex, readbuf, writebuf, offset, | |
9b409511 | 697 | len, xfered_len); |
92c9a463 JB |
698 | |
699 | return val; | |
700 | } | |
701 | ||
/* The "to_can_use_hw_breakpoint" target_ops routine for ia64-hpux.

   Hardware watchpoint/breakpoint support is not implemented on this
   target yet, so decline every request.  */

static int
ia64_hpux_can_use_hw_breakpoint (struct target_ops *self,
                                 int type, int cnt, int othertype)
{
  return 0;
}
711 | ||
/* The "to_mourn_inferior" routine from the "inf-ttrace" target_ops layer.
   Saved by _initialize_ia64_hpux_nat so ia64_hpux_mourn_inferior can
   delegate to it before its platform-specific cleanup.  */

static void (*super_mourn_inferior) (struct target_ops *);
715 | ||
/* The "to_mourn_inferior" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_mourn_inferior (struct target_ops *ops)
{
  /* NOTE(review): PID is read before calling super_mourn_inferior,
     which presumably invalidates inferior_ptid — keep this ordering.  */
  const int pid = ptid_get_pid (inferior_ptid);
  int status;

  super_mourn_inferior (ops);

  /* On this platform, the process still exists even after we received
     an exit event.  Detaching from the process isn't sufficient either,
     as it only turns the process into a zombie.  So the only solution
     we found is to kill it.  */
  ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0);
  /* Reap the killed process so it does not linger as a zombie.  */
  wait (&status);
}
733 | ||
734 | /* Prevent warning from -Wmissing-prototypes. */ | |
45717bac | 735 | void _initialize_ia64_hpux_nat (void); |
92c9a463 JB |
736 | |
737 | void | |
45717bac | 738 | _initialize_ia64_hpux_nat (void) |
92c9a463 JB |
739 | { |
740 | struct target_ops *t; | |
741 | ||
742 | t = inf_ttrace_target (); | |
743 | super_to_wait = t->to_wait; | |
744 | super_xfer_partial = t->to_xfer_partial; | |
745 | super_mourn_inferior = t->to_mourn_inferior; | |
746 | ||
747 | t->to_wait = ia64_hpux_wait; | |
748 | t->to_fetch_registers = ia64_hpux_fetch_registers; | |
749 | t->to_store_registers = ia64_hpux_store_registers; | |
750 | t->to_xfer_partial = ia64_hpux_xfer_partial; | |
751 | t->to_can_use_hw_breakpoint = ia64_hpux_can_use_hw_breakpoint; | |
752 | t->to_mourn_inferior = ia64_hpux_mourn_inferior; | |
753 | t->to_attach_no_wait = 1; | |
754 | ||
755 | add_target (t); | |
756 | } |