kernel_samsung_a34x-permissive/tools/testing/ktest/sample.conf

#
# Config file for ktest.pl
#
# Place your customized version of this, in the working directory that
# ktest.pl is run from. By default, ktest.pl will look for a file
# called "ktest.conf", but you can name it anything you like and specify
# the name of your config file as the first argument of ktest.pl.
#
# Note, all paths must be absolute
#
# Options set in the beginning of the file are considered to be
# default options. These options can be overridden by test specific
# options, with the following exceptions:
#
# LOG_FILE
# CLEAR_LOG
# POWEROFF_ON_SUCCESS
# REBOOT_ON_SUCCESS
#
# Test specific options are set after the label:
#
# TEST_START
#
# The options after a TEST_START label are specific to that test.
# Each TEST_START label will set up a new test. If you want to
# perform a test more than once, you can add the ITERATE label
# to it followed by the number of times you want that test
# to iterate. If the ITERATE is left off, the test will only
# be performed once.
#
# TEST_START ITERATE 10
#
# You can skip a test by adding SKIP (before or after the ITERATE
# and number)
#
# TEST_START SKIP
#
# TEST_START SKIP ITERATE 10
#
# TEST_START ITERATE 10 SKIP
#
# The SKIP label causes the options and the test itself to be ignored.
# This is useful for setting up several different tests in one config file,
# and only enabling the ones you want to use for the current test run.
#
# You can add default options anywhere in the file as well
# with the DEFAULTS tag. This allows you to have default options
# after the test options to keep the test options at the top
# of the file. You can even place the DEFAULTS tag between
# test cases (but not in the middle of a single test case)
#
# TEST_START
# MIN_CONFIG = /home/test/config-test1
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-default
#
# TEST_START ITERATE 10
#
# The above will run the first test with MIN_CONFIG set to
# /home/test/config-test-1. Then 10 tests will be executed
# with MIN_CONFIG with /home/test/config-default.
#
# You can also disable defaults with the SKIP option
#
# DEFAULTS SKIP
# MIN_CONFIG = /home/test/config-use-sometimes
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-most-times
#
# The above will ignore the first MIN_CONFIG. If you want to
# use the first MIN_CONFIG, remove the SKIP from the first
# DEFAULTS tag and add it to the second. Be careful, options
# may only be declared once per test or default. If you have
# the same option name under the same test or as default
# ktest will fail to execute, and no tests will run.
#
# DEFAULTS OVERRIDE
#
# Options defined in the DEFAULTS section can not be duplicated
# even if they are defined in two different DEFAULTS sections.
# This is done to catch mistakes where an option is added but
# the previous option was forgotten about and not commented.
#
# The OVERRIDE keyword can be added to a section to allow this
# section to override other DEFAULTS sections' values that have
# been defined previously. It will only override options that
# have been defined before its use. Options defined later
# in a non override section will still error. The same option
# can not be defined in the same section even if that section
# is marked OVERRIDE.
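#
# For example, a minimal sketch of OVERRIDE (the paths are hypothetical):
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-default
#
# DEFAULTS OVERRIDE
# MIN_CONFIG = /home/test/config-new
#
# Here the second section overrides the MIN_CONFIG set by the first
# section, which without OVERRIDE would be a duplicate-option error.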
#
#
#
# Both TEST_START and DEFAULTS sections can also have the IF keyword
# The value after the IF must evaluate to 0 or a non-zero positive
# integer, and can use the config variables (explained below).
#
# DEFAULTS IF ${IS_X86_32}
#
# The above will process the DEFAULTS section if the config
# variable IS_X86_32 evaluates to a non-zero positive integer;
# otherwise, if it evaluates to zero, it will act the same
# as if the SKIP keyword was used.
#
# The ELSE keyword can be used directly after a section with
# an IF statement.
#
# TEST_START IF ${RUN_NET_TESTS}
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
#
# ELSE
#
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-normal
#
#
# The ELSE keyword can also contain an IF statement to allow multiple
# if then else sections. But all the sections must be either
# DEFAULT or TEST_START, they can not be a mixture.
#
# TEST_START IF ${RUN_NET_TESTS}
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
#
# ELSE IF ${RUN_DISK_TESTS}
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-tests
#
# ELSE IF ${RUN_CPU_TESTS}
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-cpu
#
# ELSE
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
#
# The IF statement may also have comparisons. For == and !=,
# strings may be used on both sides.
#
# BOX_TYPE := x86_32
#
# DEFAULTS IF ${BOX_TYPE} == x86_32
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-32
# ELSE
# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-64
#
# The DEFINED keyword can be used by the IF statements too.
# It returns true if the given config variable or option has been defined
# or false otherwise.
#
#
# DEFAULTS IF DEFINED USE_CC
# CC := ${USE_CC}
# ELSE
# CC := gcc
#
#
# As well as NOT DEFINED.
#
# DEFAULTS IF NOT DEFINED MAKE_CMD
# MAKE_CMD := make ARCH=x86
#
#
# And/or ops (&&,||) may also be used to make complex conditionals.
#
# TEST_START IF (DEFINED ALL_TESTS || ${MYTEST} == boottest) && ${MACHINE} == gandalf
#
# Notice the use of parentheses. Without any parentheses the above would be
# processed the same as:
#
# TEST_START IF DEFINED ALL_TESTS || (${MYTEST} == boottest && ${MACHINE} == gandalf)
#
#
#
# INCLUDE file
#
# The INCLUDE keyword may be used in DEFAULT sections. This will
# read another config file and process that file as well. The included
# file can include other files, add new test cases or default
# statements. Config variables will be passed to these files and changes
# to config variables will be seen by top level config files. Including
# a file is processed just as if the contents of the file were cut and pasted
# into the top level file, except that include files that end with
# TEST_START sections will have that section ended at the end of
# the include file. That is, an included file is treated as if it were
# followed by another DEFAULTS keyword.
#
# Unlike other files referenced in this config, the file path does not need
# to be absolute. If the file does not start with '/', then the directory
# that the current config file was located in is used. If no config by the
# given name is found there, then the current directory is searched.
#
# INCLUDE myfile
# DEFAULTS
#
# is the same as:
#
# INCLUDE myfile
#
# Note, if the include file does not contain a full path, the file is
# searched first by the location of the original include file, and then
# by the location that ktest.pl was executed in.
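#
# A minimal sketch (the file name is hypothetical):
#
# INCLUDE common-defaults.conf
#
# processes common-defaults.conf as if its contents were pasted here.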
#
#### Config variables ####
#
# This config file can also contain "config variables".
# These are assigned with ":=" instead of the ktest option
# assignment "=".
#
# The difference between ktest options and config variables
# is that config variables can be used multiple times,
# where each instance will override the previous instance.
# And that they only live at time of processing this config.
#
# The advantage of config variables is that they can be used
# by any option or any other config variable to define things
# that you may use over and over again in the options.
#
# For example:
#
# USER := root
# TARGET := mybox
# TEST_CASE := ssh ${USER}@${TARGET} /path/to/my/test
#
# TEST_START
# MIN_CONFIG = config1
# TEST = ${TEST_CASE}
#
# TEST_START
# MIN_CONFIG = config2
# TEST = ${TEST_CASE}
#
# TEST_CASE := ssh ${USER}@${TARGET} /path/to/my/test2
#
# TEST_START
# MIN_CONFIG = config1
# TEST = ${TEST_CASE}
#
# TEST_START
# MIN_CONFIG = config2
# TEST = ${TEST_CASE}
#
# TEST_DIR := /home/me/test
#
# BUILD_DIR = ${TEST_DIR}/linux.git
# OUTPUT_DIR = ${TEST_DIR}/test
#
# Note, the config variables are evaluated immediately, thus
# updating TARGET after TEST_CASE has been assigned does nothing
# to TEST_CASE.
#
# As shown in the example, to evaluate a config variable, you
# use the ${X} convention. Simple $X will not work.
#
# If the config variable does not exist, the ${X} will not
# be evaluated. Thus:
#
# MAKE_CMD = PATH=/mypath:${PATH} make
#
# If PATH is not a config variable, then the ${PATH} in
# the MAKE_CMD option will be evaluated by the shell when
# the MAKE_CMD option is passed into shell processing.
#### Using options in other options ####
#
# Options that are defined in the config file may also be used
# by other options. All options are evaluated at time of
# use (except that config variables are evaluated at config
# processing time).
#
# If a ktest option is used within another option, instead of
# typing it again in that option, you can simply reference the option
# just like you can with config variables.
#
# MACHINE = mybox
#
# TEST = ssh root@${MACHINE} /path/to/test
#
# The option will be used per test case. Thus:
#
# TEST_TYPE = test
# TEST = ssh root@${MACHINE}
#
# TEST_START
# MACHINE = box1
#
# TEST_START
# MACHINE = box2
#
# For both test cases, MACHINE will be evaluated at the time
# of the test case. The first test will run ssh root@box1
# and the second will run ssh root@box2.
#### Mandatory Default Options ####
# These options must be in the default section, although most
# may be overridden by test options.
# The machine hostname that you will test
#MACHINE = target
# The box is expected to have ssh on normal bootup, provide the user
# (most likely root, since you need privileged operations)
#SSH_USER = root
# The directory that contains the Linux source code
#BUILD_DIR = /home/test/linux.git
# The directory where the objects will be built
# (cannot be the same as BUILD_DIR)
#OUTPUT_DIR = /home/test/build/target
# The location of the compiled file to copy to the target
# (relative to OUTPUT_DIR)
#BUILD_TARGET = arch/x86/boot/bzImage
# The place to put your image on the test machine
#TARGET_IMAGE = /boot/vmlinuz-test
# A script or command to reboot the box
#
# Here is a digital loggers power switch example
#POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
#
# Here is an example to reboot a virtual box on the current host
# with the name "Guest".
#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
# The script or command that reads the console
#
# If you use ttywatch server, something like the following would work.
#CONSOLE = nc -d localhost 3001
#
# For a virtual machine with guest name "Guest".
#CONSOLE = virsh console Guest
# Signal to send to kill console.
# ktest.pl will create a child process to monitor the console.
# When the console is finished, ktest will kill the child process
# with this signal.
# (default INT)
#CLOSE_CONSOLE_SIGNAL = HUP
# Required version ending to differentiate the test
# from other Linux builds on the system.
#LOCALVERSION = -test
# For REBOOT_TYPE = grub2, you must specify where the grub.cfg
# file is. This is the file that is searched to find the menu
# option to boot to with GRUB_REBOOT
#GRUB_FILE = /boot/grub2/grub.cfg
# The tool for REBOOT_TYPE = grub2 to set the next reboot kernel
# to boot into (one shot mode).
# (default grub2_reboot)
#GRUB_REBOOT = grub2_reboot
# The grub title name for the test kernel to boot
# (Only mandatory if REBOOT_TYPE = grub or grub2)
#
# Note, ktest.pl will not update the grub menu.lst; you need to
# manually add an option for the test. ktest.pl will search
# the grub menu.lst for this option to find what kernel to
# reboot into.
#
# For example, if in the /boot/grub/menu.lst the test kernel title has:
# title Test Kernel
# kernel vmlinuz-test
#
# For grub2, a search of the top level "menuentry"s is done. No
# submenus are searched. The menu is found by searching for the
# contents of GRUB_MENU in the line that starts with "menuentry".
# You may want to include the quotes around the option. For example:
# for: menuentry 'Test Kernel'
# do a: GRUB_MENU = 'Test Kernel'
# For customizing, add your entry in /etc/grub.d/40_custom.
#
#GRUB_MENU = Test Kernel
# For REBOOT_TYPE = syslinux, the name of the syslinux executable
# (on the target) to use to set up the next reboot to boot the
# test kernel.
# (default extlinux)
#SYSLINUX = syslinux
# For REBOOT_TYPE = syslinux, the path that is passed to the
# syslinux command where syslinux is installed.
# (default /boot/extlinux)
#SYSLINUX_PATH = /boot/syslinux
# For REBOOT_TYPE = syslinux, the syslinux label that references the
# test kernel in the syslinux config file.
# (default undefined)
#SYSLINUX_LABEL = "test-kernel"
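#
# A syslinux setup might look like this sketch (the label is an
# example; the other values are the documented defaults):
#
# REBOOT_TYPE = syslinux
# SYSLINUX = extlinux
# SYSLINUX_PATH = /boot/extlinux
# SYSLINUX_LABEL = test-kernel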
# A script to reboot the target into the test kernel
# This and SWITCH_TO_TEST are about the same, except
# SWITCH_TO_TEST is run even for REBOOT_TYPE = grub.
# This may be left undefined.
# (default undefined)
#REBOOT_SCRIPT =
#### Optional Config Options (all have defaults) ####
# Email options for receiving notifications. Users must set up
# the specified mailer prior to using this feature.
#
# (default undefined)
#MAILTO =
#
# Supported mailers: sendmail, mail, mailx
# (default sendmail)
#MAILER = sendmail
#
# The executable to run
# (default: for sendmail "/usr/sbin/sendmail", otherwise equals ${MAILER})
#MAIL_EXEC = /usr/sbin/sendmail
#
# The command used to send mail, which uses the above options,
# can be modified. By default if the mailer is "sendmail" then
# MAIL_COMMAND = echo \'Subject: $SUBJECT\n\n$MESSAGE\' | $MAIL_PATH/$MAILER -t $MAILTO
# For mail or mailx:
# MAIL_COMMAND = "$MAIL_PATH/$MAILER -s \'$SUBJECT\' $MAILTO <<< \'$MESSAGE\'
# ktest.pl will do the substitution for MAIL_PATH, MAILER, MAILTO at the time
# it sends the mail if "$FOO" format is used. If "${FOO}" format is used,
# then the substitutions will occur at the time the config file is read.
# But note, MAIL_PATH and MAILER require being set by the config file if
# ${MAIL_PATH} or ${MAILER} are used, but not if $MAIL_PATH or $MAILER are.
#MAIL_COMMAND = echo \'Subject: $SUBJECT\n\n$MESSAGE\' | $MAIL_PATH/$MAILER -t $MAILTO
#
# Errors are defined as those that would terminate the script
# (default 1)
#EMAIL_ON_ERROR = 1
# (default 1)
#EMAIL_WHEN_FINISHED = 1
# (default 0)
#EMAIL_WHEN_STARTED = 1
#
# Users can cancel the test with Ctrl^C
# (default 0)
#EMAIL_WHEN_CANCELED = 1
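#
# Putting the mail options together, a minimal sketch might look like
# this (the address is hypothetical):
#
# MAILTO = me@example.com
# MAILER = mailx
# EMAIL_ON_ERROR = 1
# EMAIL_WHEN_FINISHED = 1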
# Start a test setup. If you leave this off, all options
# will be default and the test will run once.
# This is a label and not really an option (it takes no value).
# You can append ITERATE and a number after it to iterate the
# test a number of times, or SKIP to ignore this test.
#
#TEST_START
#TEST_START ITERATE 5
#TEST_START SKIP
# Have the following options as default again. Used after tests
# have already been defined by TEST_START. Optionally, you can
# just define all default options before the first TEST_START
# and you do not need this option.
#
# This is a label and not really an option (it takes no value).
# You can append SKIP to this label and the options within this
# section will be ignored.
#
# DEFAULTS
# DEFAULTS SKIP
# If you want to execute some command before the first test runs
# you can set this option. Note, it can be set as a default option
# or an option in the first test case. All other test cases will
# ignore it. If both the default and first test have this option
# set, then the first test will take precedence.
#
# default (undefined)
#PRE_KTEST = ${SSH} ~/set_up_test
# If you want to execute some command after all the tests have
# completed, you can set this option. Note, it can be set as a
# default or any test case can override it. If multiple test cases
# set this option, then the last test case that set it will take
# precedence
#
# default (undefined)
#POST_KTEST = ${SSH} ~/dismantle_test
# The default test type (default test)
# The test types may be:
# build - only build the kernel, do nothing else
# install - build and install, but do nothing else (does not reboot)
# boot - build, install, and boot the kernel
# test - build, boot and if TEST is set, run the test script
# (If TEST is not set, it defaults back to boot)
# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
#TEST_TYPE = test
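#
# For example, a minimal sketch of a boot-only test (the config path
# is hypothetical):
#
# TEST_START
# TEST_TYPE = boot
# BUILD_TYPE = useconfig:/home/test/config-min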
# Test to run if there is a successful boot and TEST_TYPE is test.
# Must exit with 0 on success and non zero on error
# default (undefined)
#TEST = ssh user@machine /root/run_test
# The build type is any make config type or special command
# (default randconfig)
# nobuild - skip the clean and build step
# useconfig:/path/to/config - use the given config and run
# oldconfig on it.
# This option is ignored if TEST_TYPE is patchcheck or bisect
#BUILD_TYPE = randconfig
# The make command (default make)
# If you are building a 32bit x86 on a 64 bit host
#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
# Any build options for the make of the kernel (not for other makes, like configs)
# (default "")
#BUILD_OPTIONS = -j20
# If you need to do some special handling before installing
# you can add a script with this option.
# The environment variable KERNEL_VERSION will be set to the
# kernel version that is used.
#
# default (undefined)
#PRE_INSTALL = ssh user@target rm -rf '/lib/modules/*-test*'
# If you need an initrd, you can add a script or code here to install
# it. The environment variable KERNEL_VERSION will be set to the
# kernel version that is used. Remember to add the initrd line
# to your grub menu.lst file.
#
# Here's a couple of examples to use:
#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
#
# or on some systems:
#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
# If for some reason you just want to boot the kernel and you do not
# want the test to install anything new, you can set this option to 1.
# For example, you may just want to boot test the same kernel over and
# over and not go through the hassle of installing anything.
# (default 0)
#NO_INSTALL = 1
# If there is a command that you want to run before the individual test
# case executes, then you can set this option
#
# default (undefined)
#PRE_TEST = ${SSH} reboot_to_special_kernel
# If there is a command you want to run after the individual test case
# completes, then you can set this option.
#
# default (undefined)
#POST_TEST = cd ${BUILD_DIR}; git reset --hard
# If there is a script that you require to run before the build is done
# you can specify it with PRE_BUILD.
#
# One example may be if you must add a temporary patch to the build to
# fix an unrelated bug to perform a patchcheck test. This will apply the
# patch before each build that is made. Use the POST_BUILD to do a git reset --hard
# to remove the patch.
#
# (default undef)
#PRE_BUILD = cd ${BUILD_DIR} && patch -p1 < /tmp/temp.patch
# To specify if the test should fail if the PRE_BUILD fails,
# PRE_BUILD_DIE needs to be set to 1. Otherwise the PRE_BUILD
# result is ignored.
# (default 0)
# PRE_BUILD_DIE = 1
# If there is a script that should run after the build is done
# you can specify it with POST_BUILD.
#
# As in the example for PRE_BUILD, POST_BUILD can be used to reset modifications
# made by the PRE_BUILD.
#
# (default undef)
#POST_BUILD = cd ${BUILD_DIR} && git reset --hard
# To specify if the test should fail if the POST_BUILD fails,
# POST_BUILD_DIE needs to be set to 1. Otherwise the POST_BUILD
# result is ignored.
# (default 0)
#POST_BUILD_DIE = 1
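#
# Putting the build hooks together, a sketch for testing with a
# temporary patch (the patch path is hypothetical):
#
# PRE_BUILD = cd ${BUILD_DIR} && patch -p1 < /tmp/temp.patch
# PRE_BUILD_DIE = 1
# POST_BUILD = cd ${BUILD_DIR} && git reset --hard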
# Way to reboot the box to the test kernel.
# Only valid options so far are "grub", "grub2", "syslinux" and "script"
# (default grub)
# If you specify grub, it will assume grub version 1
# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
# and select that target to reboot to the kernel. If this is not
# your setup, then specify "script" and have a command or script
# specified in REBOOT_SCRIPT to boot to the target.
#
# For REBOOT_TYPE = grub2, you must define both GRUB_MENU and
# GRUB_FILE.
#
# For REBOOT_TYPE = syslinux, you must define SYSLINUX_LABEL, and
# perhaps modify SYSLINUX (default extlinux) and SYSLINUX_PATH
# (default /boot/extlinux)
#
# The entry in /boot/grub/menu.lst must be entered in manually.
# The test will not modify that file.
#REBOOT_TYPE = grub
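#
# For example, a grub2 setup might look like this sketch (the menu
# title and grub.cfg path are examples):
#
# REBOOT_TYPE = grub2
# GRUB_MENU = 'Test Kernel'
# GRUB_FILE = /boot/grub2/grub.cfg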
# If you are using a machine that doesn't boot with grub, and
# perhaps gets its kernel from a remote server (tftp), then
# you can use this option to update the target image with the
# test image.
#
# You could also do the same with POST_INSTALL, but the difference
# between that option and this option is that POST_INSTALL runs
# after the install, where this one runs just before a reboot.
# (default undefined)
#SWITCH_TO_TEST = cp ${OUTPUT_DIR}/${BUILD_TARGET} ${TARGET_IMAGE}
# If you are using a machine that doesn't boot with grub, and
# perhaps gets its kernel from a remote server (tftp), then
# you can use this option to update the target image with the
# known good image to reboot safely back into.
#
# This option holds a command that will execute before needing
# to reboot to a good known image.
# (default undefined)
#SWITCH_TO_GOOD = ssh ${SSH_USER}@${MACHINE} cp good_image ${TARGET_IMAGE}
# The min config that is needed to build for the machine.
# A nice way to create this is with the following:
#
# $ ssh target
# $ lsmod > mymods
# $ scp mymods host:/tmp
# $ exit
# $ cd linux.git
# $ rm .config
# $ make LSMOD=mymods localyesconfig
# $ grep '^CONFIG' .config > /home/test/config-min
#
# If you want even fewer configs:
#
# log in directly to target (do not ssh)
#
# $ su
# # lsmod | cut -d' ' -f1 | xargs rmmod
#
# repeat the above several times
#
# # lsmod > mymods
# # reboot
#
# You may need to reboot to get your network back in order to copy
# the mymods file to the host, and then remove the previous .config
# and run the localyesconfig again. The MIN_CONFIG generated like this
# will not guarantee network activity to the box, so the TEST_TYPE of
# test may fail.
#
# You might also want to set:
# CONFIG_CMDLINE="<your options here>"
# randconfig may set the above and override your real command
# line options.
# (default undefined)
#MIN_CONFIG = /home/test/config-min
# Sometimes there are options that just break the boot and
# that you do not care about. Here are a few:
# # CONFIG_STAGING is not set
# Staging drivers are horrible, and can break the build.
# # CONFIG_SCSI_DEBUG is not set
# SCSI_DEBUG may change your root partition
# # CONFIG_KGDB_SERIAL_CONSOLE is not set
# KGDB may cause oops waiting for a connection that's not there.
# This option points to the file containing config options that will be prepended
# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
#
# Note, config options in MIN_CONFIG will override these options.
#
# (default undefined)
#ADD_CONFIG = /home/test/config-broken
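#
# For example, the file given to ADD_CONFIG could simply contain the
# lines mentioned above (a sketch):
#
# # CONFIG_STAGING is not set
# # CONFIG_SCSI_DEBUG is not set
# # CONFIG_KGDB_SERIAL_CONSOLE is not set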
# The location on the host where temp files will be written
# (default /tmp/ktest/${MACHINE})
#TMP_DIR = /tmp/ktest/${MACHINE}
# Optional log file to write the status (recommended)
# Note, this is a DEFAULT section only option.
# (default undefined)
#LOG_FILE = /home/test/logfiles/target.log
# Remove old logfile if it exists before starting all tests.
# Note, this is a DEFAULT section only option.
# (default 0)
#CLEAR_LOG = 0
# Line to define a successful boot up in console output.
# This is what the line contains, not the entire line. If you need
# the entire line to match, then use regular expression syntax like:
# (do not add any quotes around it)
#
# SUCCESS_LINE = ^MyBox Login:$
#
# (default "login:")
#SUCCESS_LINE = login:
# To speed up the time between reboots, define a line that the
# default kernel produces when it has successfully booted and is
# ready to be passed a new test kernel. Otherwise ktest.pl will
# wait the full SLEEP_TIME to continue.
# (default undefined)
#REBOOT_SUCCESS_LINE = login:
# In case the console constantly fills the screen, having
# a specified time to stop the test after success is recommended.
# (in seconds)
# (default 10)
#STOP_AFTER_SUCCESS = 10
# In case the console constantly fills the screen, having
# a specified time to stop the test after failure is recommended.
# (in seconds)
# (default 60)
#STOP_AFTER_FAILURE = 60
# In case the console constantly fills the screen, having
# a specified time to stop the test if it never succeeds nor fails
# is recommended.
# Note: this is ignored if a success or failure is detected.
# (in seconds)
# (default 600, -1 is to never stop)
#STOP_TEST_AFTER = 600
# Stop testing if a build fails. If set, the script will end if
# a failure is detected, otherwise it will save off the .config,
# dmesg and bootlog in a directory called
# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
# if the STORE_FAILURES directory is set.
# (default 1)
# Note, even if this is set to zero, there are some errors that still
# stop the tests.
#DIE_ON_FAILURE = 1
# Directory to store failure directories on failure. If this is not
# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
# (default undefined)
#STORE_FAILURES = /home/test/failures
# Directory to store success directories on success. If this is not
# set, the .config, dmesg and bootlog will not be saved if a
# test succeeds.
# (default undefined)
#STORE_SUCCESSES = /home/test/successes
# Build without doing a make mrproper, or removing .config
# (default 0)
#BUILD_NOCLEAN = 0
# As the test reads the console, after it hits the SUCCESS_LINE
# the time it waits for the monitor to settle down between reads
# can usually be lowered.
# (in seconds) (default 1)
#BOOTED_TIMEOUT = 1
# The timeout in seconds when we consider the box hung after
# the console stops producing output. Be sure to leave enough
# time here to get past a reboot. Some machines may not produce
# any console output for a long time during a reboot. You do
# not want the test to fail just because the system was in
# the process of rebooting to the test kernel.
# (default 120)
#TIMEOUT = 120
# The timeout in seconds to test whether the box can be rebooted
# or not. Before issuing the reboot command, an ssh connection
# is attempted to see if the target machine is still active.
# If the target does not connect within this timeout, a power cycle
# is issued instead of a reboot.
# CONNECT_TIMEOUT = 25
# In between tests, a reboot of the box may occur, and this
# is the time to wait for the console after it stops producing
# output. Some machines may produce a large lag on reboot,
# so this should accommodate it.
# The difference between this and TIMEOUT, is that TIMEOUT happens
# when rebooting to the test kernel. This sleep time happens
# after a test has completed and we are about to start running
# another test. If a reboot to the reliable kernel happens,
# we wait SLEEP_TIME for the console to stop producing output
# before starting the next test.
#
# You can speed up reboot times even more by setting REBOOT_SUCCESS_LINE.
# (default 60)
#SLEEP_TIME = 60
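#
# For a slow-booting box, a conservative set of timing values might
# look like this sketch (the values are only illustrative):
#
# TIMEOUT = 300
# SLEEP_TIME = 120
# BOOTED_TIMEOUT = 2
# REBOOT_SUCCESS_LINE = login: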
# The time in between bisects to sleep (in seconds)
# (default 60)
#BISECT_SLEEP_TIME = 60
# The max wait time (in seconds) for waiting for the console to finish.
# If for some reason, the console is outputting content without
# ever finishing, this will cause ktest to get stuck. This
# option is the max time ktest will wait for the monitor (console)
# to settle down before continuing.
# (default 1800)
#MAX_MONITOR_WAIT = 1800
# The time in between patch checks to sleep (in seconds)
# (default 60)
#PATCHCHECK_SLEEP_TIME = 60
# Reboot the target box on error (default 0)
#REBOOT_ON_ERROR = 0
# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
# Note, this is a DEFAULT section only option.
# (default 0)
#POWEROFF_ON_ERROR = 0
# Power off the target after all tests have completed successfully
# Note, this is a DEFAULT section only option.
# (default 0)
#POWEROFF_ON_SUCCESS = 0
# Reboot the target after all test completed successfully (default 1)
# (ignored if POWEROFF_ON_SUCCESS is set)
#REBOOT_ON_SUCCESS = 1
# In case there are issues with rebooting, you can specify this
# to always powercycle after this amount of time after calling
# reboot.
# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
# makes it powercycle immediately after rebooting. Do not define
# it if you do not want it.
# (default undefined)
#POWERCYCLE_AFTER_REBOOT = 5
# In case there are issues with halting, you can specify this
# to always poweroff after this amount of time after calling
# halt.
# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
# makes it poweroff immediately after halting. Do not define
# it if you do not want it.
# (default undefined)
#POWEROFF_AFTER_HALT = 20
# A script or command to power off the box (default undefined)
# Needed for POWEROFF_ON_ERROR and SUCCESS
#
# Example for digital loggers power switch:
#POWER_OFF = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
#
# Example for a virtual guest called "Guest".
#POWER_OFF = virsh destroy Guest
# To have the build fail on "new" warnings, create a file that
# contains a list of all known warnings (they must match the lines
# with 'warning:', 'error:' or 'Error:' exactly). If the option
# WARNINGS_FILE is set, then that file will be read, and if the
# build detects a warning, it will examine this file and if the
# warning does not exist in it, it will fail the build.
#
# Note, if this option is defined to a file that does not exist
# then any warning will fail the build.
# (see make_warnings_file below)
#
# (optional, default undefined)
#WARNINGS_FILE = ${OUTPUT_DIR}/warnings_file
# The way to execute a command on the target
# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND)
# The variables SSH_USER, MACHINE and SSH_COMMAND are defined
#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
# The way to copy a file to the target (install and modules)
# The variables SSH_USER and MACHINE are defined by the config.
# SRC_FILE and DST_FILE are ktest internal variables and
# should only have '$' and not the '${}' notation.
# (default scp $SRC_FILE ${SSH_USER}@${MACHINE}:$DST_FILE)
#SCP_TO_TARGET = echo skip scp for $SRC_FILE $DST_FILE
# If install needs to be different than modules, then this
# option will override the SCP_TO_TARGET for installation.
# (default ${SCP_TO_TARGET} )
#SCP_TO_TARGET_INSTALL = scp $SRC_FILE tftp@tftpserver:$DST_FILE
# The nice way to reboot the target
# (default ssh $SSH_USER@$MACHINE reboot)
# The variables SSH_USER and MACHINE are defined.
#REBOOT = ssh $SSH_USER@$MACHINE reboot
# The way triple faults are detected is by testing the kernel
# banner. If the kernel banner for the kernel we are testing is
# found, and then later a kernel banner for another kernel version
# is found, it is considered that we encountered a triple fault,
# and there is no panic or callback, but simply a reboot.
# To disable this (because it gave a false positive) set the following
# to 0.
# (default 1)
#DETECT_TRIPLE_FAULT = 0
# All options in the config file should be either used by ktest
# or could be used within a value of another option. If an option
# in the config file is not used, ktest will warn about it and ask
# if you want to continue.
#
# If you don't care if there are non-used options, enable this
# option. Be careful though, a non-used option is usually a sign
# of an option name being typed incorrectly.
# (default 0)
#IGNORE_UNUSED = 1
# When testing a kernel that happens to have WARNINGs and call
# traces, ktest.pl will detect these and fail a boot or test run
# due to warnings. By setting this option, ktest will ignore
# call traces, and will not fail a test if the kernel produces
# an oops. Use this option with care.
# (default 0)
#IGNORE_ERRORS = 1
#### Per test run options ####
# The following options are only allowed in TEST_START sections.
# They are ignored in the DEFAULTS sections.
#
# All of these are optional and undefined by default, although
# some of these options are required for TEST_TYPE of patchcheck
# and bisect.
#
#
# CHECKOUT = branch
#
# If the BUILD_DIR is a git repository, then you can set this option
# to check out the given branch before running the TEST. If you
# specify this for the first run, that branch will be used for
# all subsequent tests until a new CHECKOUT is set.
#
#
# TEST_NAME = name
#
# If you want the test to have a name that is displayed in
# the test result banner at the end of the test, then use this
# option. This is useful to search for the RESULT keyword and
# not have to translate a test number to a test in the config.
#
# For TEST_TYPE = patchcheck
#
# This expects the BUILD_DIR to be a git repository, and
# will checkout the PATCHCHECK_START commit.
#
# The option BUILD_TYPE will be ignored.
#
# The MIN_CONFIG will be used for all builds of the patchcheck. The build type
# used for patchcheck is oldconfig.
#
# PATCHCHECK_START is required and is the first patch to
# test (the SHA1 of the commit). You may also specify anything
# that git checkout allows (branch name, tag, HEAD~3).
#
# PATCHCHECK_END is the last patch to check (default HEAD)
#
# PATCHCHECK_CHERRY if set to non zero, then git cherry will be
# performed against PATCHCHECK_START and PATCHCHECK_END. That is
#
# git cherry ${PATCHCHECK_START} ${PATCHCHECK_END}
#
# Then the changes found will be tested.
#
# Note, PATCHCHECK_CHERRY requires PATCHCHECK_END to be defined.
# (default 0)
#
# PATCHCHECK_TYPE is required and is the type of test to run:
# build, boot, test.
#
# Note, the build test will look for warnings; if a warning occurs
# in a file that a commit touches, the build will fail, unless
# IGNORE_WARNINGS is set for the given commit's SHA1
#
# IGNORE_WARNINGS can be used to disable the failure of patchcheck
# on a particular commit (SHA1). You can add more than one commit
# by adding a list of SHA1s that are space delimited.
#
# If BUILD_NOCLEAN is set, then make mrproper will not be run on
# any of the builds, just like all other TEST_TYPE tests. But
# what makes patchcheck different from the other tests, is if
# BUILD_NOCLEAN is not set, only the first and last patch run
# make mrproper. This helps speed up the test.
#
# Example:
# TEST_START
# TEST_TYPE = patchcheck
# CHECKOUT = mybranch
# PATCHCHECK_TYPE = boot
# PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
# PATCHCHECK_END = HEAD~2
# IGNORE_WARNINGS = 42f9c6b69b54946ffc0515f57d01dc7f5c0e4712 0c17ca2c7187f431d8ffc79e81addc730f33d128
#
#
#
# For TEST_TYPE = bisect
#
# You can specify a git bisect if the BUILD_DIR is a git repository.
# The MIN_CONFIG will be used for all builds of the bisect. The build type
# used for bisecting is oldconfig.
#
# The option BUILD_TYPE will be ignored.
#
# BISECT_TYPE is the type of test to perform:
# build - bad fails to build
# boot - bad builds but fails to boot
# test - bad boots but fails a test
#
# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
#
# The above three options are required for a bisect operation.
#
# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
#
# If an operation in the bisect fails when it was not expected to
# fail, then the test ends. The state of the BUILD_DIR will be
# left at the point where the failure occurred. You can examine the
# reason for the failure, and perhaps even find a git commit
# that would work to continue with. You can run:
#
# git bisect log > /path/to/replay/file
#
# Then add:
#
# BISECT_REPLAY = /path/to/replay/file
#
# and run the test again. The test will perform the initial
# git bisect start, git bisect good, and git bisect bad, and
# then it will run git bisect replay on this file, before
# continuing with the bisect.
#
# BISECT_START = commit (optional, default undefined)
#
# As with BISECT_REPLAY, this is useful if the bisect came across
# a commit in the middle of the bisect that is broken for an
# unrelated reason and you need to skip it. If BISECT_START is
# defined, it will check out that commit after doing the initial git bisect start,
# git bisect good, git bisect bad, and running the git bisect replay
# if the BISECT_REPLAY is set.
#
# BISECT_SKIP = 1 (optional, default 0)
#
# If BISECT_TYPE is set to test but the build fails, ktest will
# simply fail the test and end there. You could use BISECT_REPLAY
# and BISECT_START to resume after you found a new starting point,
# or you could set BISECT_SKIP to 1. If BISECT_SKIP is set to 1,
# when something other than the BISECT_TYPE fails, ktest.pl will
# run "git bisect skip" and try again.
#
# BISECT_FILES = <path> (optional, default undefined)
#
# To just run the git bisect on a specific path, set BISECT_FILES.
# For example:
#
# BISECT_FILES = arch/x86 kernel/time
#
# Will run the bisect with "git bisect start -- arch/x86 kernel/time"
#
# BISECT_REVERSE = 1 (optional, default 0)
#
# In those strange instances where it was broken forever
# and you are trying to find where it started to work!
# Set BISECT_GOOD to the commit that was last known to fail.
# Set BISECT_BAD to the commit that is known to start working.
# With BISECT_REVERSE = 1, the test will consider failures as
# good, and success as bad.
#
# BISECT_MANUAL = 1 (optional, default 0)
#
# In case there's a problem with automating the bisect for
# whatever reason. (Can't reboot, want to inspect each iteration)
# Doing a BISECT_MANUAL will have the test wait for you to
# tell it if the test passed or failed after each iteration.
# This is basically the same as running git bisect yourself
# but ktest will rebuild and install the kernel for you.
#
# BISECT_CHECK = 1 (optional, default 0)
#
# Just to be sure the good is good and bad is bad, setting
# BISECT_CHECK to 1 will start the bisect by first checking
# out BISECT_BAD and making sure it fails, then it will check
# out BISECT_GOOD and make sure it succeeds before starting
# the bisect (it works for BISECT_REVERSE too).
#
# You can limit the test to just check BISECT_GOOD or
# BISECT_BAD with BISECT_CHECK = good or
# BISECT_CHECK = bad, respectively.
#
# BISECT_TRIES = 5 (optional, default 1)
#
# For those cases where it takes several tries to hit a bug,
# BISECT_TRIES is useful. It is the number of times the
# test is run before it says the kernel is good. The first failure
# will stop the retries and mark the current SHA1 as bad.
#
# Note, as with all race bugs, there's no guarantee that if
# it succeeds, it is really a good bisect. But it helps in case
# the bug is somewhat reliable.
#
# You can set BISECT_TRIES to zero, and all tests will be considered
# good, unless you also set BISECT_MANUAL.
#
# BISECT_RET_GOOD = 0 (optional, default undefined)
#
# In case the specified test returns something other than just
# 0 for good, and non-zero for bad, you can override 0 being
# good by defining BISECT_RET_GOOD.
#
# BISECT_RET_BAD = 1 (optional, default undefined)
#
# In case the specified test returns something other than just
# 0 for good, and non-zero for bad, you can override non-zero being
# bad by defining BISECT_RET_BAD.
#
# BISECT_RET_ABORT = 255 (optional, default undefined)
#
# If you need to abort the bisect if the test discovers something
# that was wrong, you can define BISECT_RET_ABORT to be the error
# code returned by the test in order to abort the bisect.
#
# BISECT_RET_SKIP = 2 (optional, default undefined)
#
# If the test detects that the current commit is neither good
# nor bad, but something else happened (another bug detected)
# you can specify BISECT_RET_SKIP to an error code that the
# test returns when it should skip the current commit.
#
# BISECT_RET_DEFAULT = good (optional, default undefined)
#
# You can override the default of what to do when the above
# options are not hit. This may be one of, "good", "bad",
# "abort" or "skip" (without the quotes).
#
# Note, if you do not define any of the previous BISECT_RET_*
# and define BISECT_RET_DEFAULT, all bisect results will do
# what the BISECT_RET_DEFAULT specifies.
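#
# For example, if your test script reports results with its own exit
# codes, a sketch might be (the codes here are hypothetical):
#
# BISECT_RET_GOOD = 0
# BISECT_RET_BAD = 1
# BISECT_RET_SKIP = 2
# BISECT_RET_ABORT = 255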
#
#
# Example:
# TEST_START
# TEST_TYPE = bisect
# BISECT_GOOD = v2.6.36
# BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
# BISECT_TYPE = build
# MIN_CONFIG = /home/test/config-bisect
#
#
#
# For TEST_TYPE = config_bisect
#
# In those cases where you have two different configs, one of them
# works, the other does not, and you do not know which config causes
# the problem.
# The TEST_TYPE config_bisect will bisect the bad config looking for
# which config causes the failure.
#
# The way it works is this:
#
# You can specify a good config with CONFIG_BISECT_GOOD, otherwise it
# will use the MIN_CONFIG, and if that's not specified, it will use
# the config that comes with "make defconfig".
#
# It runs both the good and bad configs through a make oldconfig to
# make sure that they are set up for the kernel that is checked out.
#
# It then reads the configs that are set, as well as the ones that are
# not set for both the good and bad configs, and then compares them.
# It will set half of the good configs within the bad config (note,
# "set" means to make the bad config match the good config, a config
# in the good config that is off, will be turned off in the bad
# config. That is considered a "set").
#
# It tests this new config and if it works, it becomes the new good
# config, otherwise it becomes the new bad config. It continues this
# process until there's only one config left and it will report that
# config.
#
# The "bad config" can also be a config that is needed to boot but was
# disabled because it depended on something that wasn't set.
#
# During this process, it saves the current good and bad configs in
# ${TMP_DIR}/good_config and ${TMP_DIR}/bad_config respectively.
# If you stop the test, you can copy them to a new location to
# reuse them again.
#
# Although the MIN_CONFIG may be the config it starts with, the
# MIN_CONFIG is ignored.
#
# The option BUILD_TYPE will be ignored.
#
# CONFIG_BISECT_TYPE is the type of test to perform:
# build - bad fails to build
# boot - bad builds but fails to boot
# test - bad boots but fails a test
#
# CONFIG_BISECT is the config that failed to boot
#
# If BISECT_MANUAL is set, it will pause between iterations.
# This is useful if you want to use ktest.pl just for the config bisect.
# If you set the type to build, it will run the bisect and you can
# control what happens in between iterations. It will ask you if
# the test succeeded or not and continue the config bisect.
#
# CONFIG_BISECT_GOOD (optional)
# If you have a good config to start with, then you
# can specify it with CONFIG_BISECT_GOOD. Otherwise
# the MIN_CONFIG is the base; if MIN_CONFIG is not set,
# it will build a config with "make defconfig".
#
# CONFIG_BISECT_CHECK (optional)
# Set this to 1 if you want to confirm that the config ktest
# generates (the bad config with the min config) is still bad.
# It may be that the min config fixes what broke the bad config
# and the test will not return a result.
# Set it to "good" to test only the good config and set it
# to "bad" to only test the bad config.
#
# CONFIG_BISECT_EXEC (optional)
# The config bisect is a separate program that comes with ktest.pl.
# By default, it will look for:
# `pwd`/config-bisect.pl # the location ktest.pl was executed from.
# If it does not find it there, it will look for:
# `dirname <ktest.pl>`/config-bisect.pl # The directory that holds ktest.pl
# If it does not find it there, it will look for:
# ${BUILD_DIR}/tools/testing/ktest/config-bisect.pl
# Setting CONFIG_BISECT_EXEC will override where it looks.
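#
# For example, to point it at a copy outside the kernel tree
# (the path is hypothetical):
#
# CONFIG_BISECT_EXEC = /home/test/bin/config-bisect.pl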
#
# Example:
# TEST_START
# TEST_TYPE = config_bisect
# CONFIG_BISECT_TYPE = build
# CONFIG_BISECT = /home/test/config-bad
# MIN_CONFIG = /home/test/config-min
# BISECT_MANUAL = 1
#
#
#
# For TEST_TYPE = make_min_config
#
# After doing a make localyesconfig, your kernel configuration may
# not be the most useful minimum configuration. Having a true minimum
# config that you can use against other configs is very useful if
# someone else has a config that breaks on your code. Forcing only
# those configurations that are truly required to boot your machine
# gives you less of a chance that one of your set configurations
# will make the bug go away. This gives you a better chance of
# being able to reproduce the reported bug with the broken config.
#
# Note, this does take some time, and may require you to run the
# test overnight, or perhaps over the weekend. But it also allows
# you to interrupt it, and gives you the current minimum config
# that was found up to that point.
#
# Note, this test automatically assumes a BUILD_TYPE of oldconfig
# and its test type acts like boot.
# TODO: add a test version that makes the config do more than just
# boot, like having network access.
#
# To save time, the test does not just grab any option and test
# it. The Kconfig files are examined to determine the dependencies
# of the configs. If a config is chosen that depends on another
# config, that config will be checked first. By checking the
# parents first, we can eliminate whole groups of configs that
# may have been enabled.
#
# For example, if a USB device config is chosen and depends on CONFIG_USB,
# the CONFIG_USB will be tested before the device. If CONFIG_USB is
# found not to be needed, it, as well as all configs that depend on
# it, will be disabled and removed from the current min_config.
#
# OUTPUT_MIN_CONFIG is the path and filename of the file that will
# be created from the MIN_CONFIG. If you interrupt the test, set
# this file as your new min config, and use it to continue the test.
# This file does not need to exist on start of test.
# This file is not created until a config is found that can be removed.
# If this file exists, you will be prompted if you want to use it
# as the min_config (overriding MIN_CONFIG) if START_MIN_CONFIG
# is not defined.
# (required field)
#
# START_MIN_CONFIG is the config to use to start the test with.
# You can set this to the same file as OUTPUT_MIN_CONFIG, but if you
# do, the OUTPUT_MIN_CONFIG file must exist.
# (default MIN_CONFIG)
#
# IGNORE_CONFIG is used to specify a config file that has configs that
# you already know must be set. Configs are written here that have
# been tested and proved to be required. It is best to define this
# file if you intend to interrupt the test and resume it where
# it left off. New configs that it finds will be written to this file
# and will not be tested again in later runs.
# (optional)
#
# MIN_CONFIG_TYPE can be either 'boot' or 'test'. With 'boot' it will
# test if the created config can just boot the machine. If this is
# set to 'test', then the TEST option must be defined and the created
# config will not only boot the target, but also make sure that the
# config lets the test succeed. This is useful to make sure the final
# config that is generated allows network activity (ssh).
# (optional)
#
# USE_OUTPUT_MIN_CONFIG set this to 1 if you do not want to be prompted
# about using the OUTPUT_MIN_CONFIG as the MIN_CONFIG starting
# point. Set it to 0 if you want to always just use the given MIN_CONFIG.
# If it is not defined, it will prompt you to pick which config
# to start with (MIN_CONFIG or OUTPUT_MIN_CONFIG).
#
# Example:
#
# TEST_TYPE = make_min_config
# OUTPUT_MIN_CONFIG = /path/to/config-new-min
# START_MIN_CONFIG = /path/to/config-min
# IGNORE_CONFIG = /path/to/config-tested
# MIN_CONFIG_TYPE = test
# TEST = ssh ${USER}@${MACHINE} echo hi
#
#
#
#
# For TEST_TYPE = make_warnings_file
#
# If you want the build to fail when a new warning is discovered
# you set the WARNINGS_FILE to point to a file of known warnings.
#
# The test "make_warnings_file" will let you create a new warnings
# file before you run other tests, like patchcheck.
#
# What this test does is run just a build; you still need to
# specify BUILD_TYPE to tell the test what type of config to use.
# A BUILD_TYPE of nobuild will fail this test.
#
# The test will do the build and scan for all warnings. Any warning
# it discovers will be saved in the WARNINGS_FILE (required) option.
#
# It is recommended (but not necessary) to make sure BUILD_NOCLEAN is
# off, so that a full build is done (make mrproper is performed).
# That way, all warnings will be captured.
#
# Example:
#
# TEST_TYPE = make_warnings_file
# WARNINGS_FILE = ${OUTPUT_DIR}/warnings_file
# BUILD_TYPE = useconfig:oldconfig
# CHECKOUT = v3.8
# BUILD_NOCLEAN = 0
#