-# Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004
-# Free Software Foundation, Inc.
+# Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004, 2005,
+# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-# Please email any bugs, comments, and/or additions to this file to:
-# bug-gdb@prep.ai.mit.edu
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is based on corefile.exp which was written by Fred
# Fish. (fnf@cygnus.com)
strace $tracelevel
}
-set prms_id 0
-set bug_id 0
# Are we on a target board? As of 2004-02-12, GDB didn't have a
# mechanism that would let it efficiently access a remote corefile.
# Can the system run this test (in particular support sparse
# corefiles)? On systems that lack sparse corefile support this test
-# consumes too many resources - gigabytes worth of disk space and and
+# consumes too many resources - gigabytes worth of disk space and
# I/O bandwith.
if { [istarget "*-*-*bsd*"]
|| [istarget "*-*-hpux*"]
- || [istarget "*-*-solaris*"] } {
+ || [istarget "*-*-solaris*"]
+ || [istarget "*-*-darwin*"]
+ || [istarget "*-*-cygwin*"] } {
untested "Kernel lacks sparse corefile support (PR gdb/1551)"
return
}
+# This testcase causes too much stress (in terms of memory usage)
+# on certain systems...
+if { [istarget "*-*-*irix*"] } {
+ untested "Testcase too stressful for this system"
+ return
+}
+
set testfile "bigcore"
set srcfile ${testfile}.c
set binfile ${objdir}/${subdir}/${testfile}
set corefile ${objdir}/${subdir}/${testfile}.corefile
if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
- gdb_suppress_entire_file "Testcase compile failed, so all tests in this file will automatically fail."
-}
-
-# Create a core file named "TESTFILE.corefile" rather than just
-# "core", to avoid problems with sys admin types that like to
-# regularly prune all files named "core" from the system.
-
-# Some systems append "core" to the name of the program; others append
-# the name of the program to "core"; still others (like Linux, as of
-# May 2003) create cores named "core.PID". In the latter case, we
-# could have many core files lying around, and it may be difficult to
-# tell which one is ours, so let's run the program in a subdirectory.
-
-set found 0
-set coredir "${objdir}/${subdir}/coredir.[getpid]"
-file mkdir $coredir
-catch "system \"(cd ${coredir}; ${binfile}; true) >/dev/null 2>&1\""
-set names [glob -nocomplain -directory $coredir *core*]
-if {[llength $names] == 1} {
- set file [file join $coredir [lindex $names 0]]
- remote_exec build "mv $file $corefile"
- set found 1
-}
-
-# Try to clean up after ourselves.
-remote_file build delete [file join $coredir coremmap.data]
-remote_exec build "rmdir $coredir"
-
-if { $found == 0 } {
- warning "can't generate a core file - core tests suppressed - check ulimit -c"
- return 0
+ untested bigcore.exp
+ return -1
}
# Run GDB on the bigcore program up-to where it will dump core.
gdb_start
gdb_reinitialize_dir $srcdir/$subdir
gdb_load ${binfile}
-gdb_test "set print sevenbit-strings" "" \
- "set print sevenbit-strings; ${testfile}"
-gdb_test "set width 0" "" \
- "set width 0; ${testfile}"
+gdb_test_no_output "set print sevenbit-strings"
+gdb_test_no_output "set width 0"
+
if { ![runto_main] } then {
gdb_suppress_tests;
}
gdb_test next ".*0 = 0.*"
# Traverse part of bigcore's linked list of memory chunks (forward or
-# backward), saving each chunk's address. I don't know why but
-# expect_out didn't work with gdb_test_multiple.
+# backward), saving each chunk's address.
proc extract_heap { dir } {
global gdb_prompt
set heap ""
set test "extract ${dir} heap"
set lim 0
- send_gdb "print heap.${dir}\n"
- gdb_expect {
+ gdb_test_multiple "print heap.${dir}" "$test" {
-re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
pass "$test"
}
-re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
set heap [concat $heap $expect_out(1,string)]
- if { $lim >= 50 } {
+ if { $lim >= 200 } {
pass "$test (stop at $lim)"
} else {
incr lim
set next_heap [extract_heap next]
set prev_heap [extract_heap prev]
+# Save the total allocated size within GDB so that we can check
+# the core size later.
+gdb_test_no_output "set \$bytes_allocated = bytes_allocated" "save heap size"
+
+# Now create a core dump
+
+# Rename the core file to "TESTFILE.corefile" rather than just "core",
+# to avoid problems with sys admin types that like to regularly prune
+# all files named "core" from the system.
+
+# Some systems append "core" to the name of the program; others append
+# the name of the program to "core"; still others (like Linux, as of
+# May 2003) create cores named "core.PID".
+
+# Save the process ID. Some systems dump the core into core.PID.
+set test "grab pid"
+gdb_test_multiple "info program" $test {
+ -re "child process (\[0-9\]+).*$gdb_prompt $" {
+ set inferior_pid $expect_out(1,string)
+ pass $test
+ }
+ -re "$gdb_prompt $" {
+ set inferior_pid unknown
+ pass $test
+ }
+}
+
+# Dump core using SIGABRT
+set oldtimeout $timeout
+set timeout 600
+gdb_test "signal SIGABRT" "Program terminated with signal SIGABRT, .*"
+set timeout $oldtimeout
+
+# Find the corefile
+set file ""
+foreach pat [list core.${inferior_pid} ${testfile}.core core] {
+ set names [glob -nocomplain $pat]
+ if {[llength $names] == 1} {
+ set file [lindex $names 0]
+ remote_exec build "mv $file $corefile"
+ break
+ }
+}
+
+if { $file == "" } {
+ untested "Can't generate a core file"
+ return 0
+}
+
+# Check that the corefile is plausibly large enough. We're trying to
+# detect the case where the operating system has truncated the file
+# just before signed wraparound. TCL, unfortunately, has a similar
+# problem - so use catch. It can handle the "bad" size but not
+# necessarily the "good" one. And we must use GDB for the comparison,
+# similarly.
+
+if {[catch {file size $corefile} core_size] == 0} {
+ set core_ok 0
+ gdb_test_multiple "print \$bytes_allocated < $core_size" "check core size" {
+ -re " = 1\r\n$gdb_prompt $" {
+ pass "check core size"
+ set core_ok 1
+ }
+ -re " = 0\r\n$gdb_prompt $" {
+ pass "check core size"
+ set core_ok 0
+ }
+ }
+} {
+ # Probably failed due to the TCL build having problems with very
+ # large values. Since GDB uses a 64-bit off_t (when possible) it
+ # shouldn't have this problem. Assume that things are going to
+    # work.  Without this assumption the test is skipped on systems
+ # (such as i386 GNU/Linux with patched kernel) which do pass.
+ pass "check core size"
+ set core_ok 1
+}
+if {! $core_ok} {
+ untested "check core size (system does not support large corefiles)"
+ return 0
+}
+
# Now load up that core file
set test "load corefile"