ompi-core-template.ini
#
# Copyright (c) 2006-2009 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2006-2007 Sun Microsystems, Inc. All rights reserved.
# Copyright (c) 2008      High Performance Computing Center Stuttgart,
#                         University of Stuttgart. All rights reserved.
# Copyright (c) 2017      IBM Corporation. All rights reserved.
#

# Template MTT configuration file for Open MPI core testers. Note
# that there are many items in this file that, while they are good for
# examples, may not work for random MTT users. For example, the
# "ompi-tests" Git repository that is used in many of the examples
# below is *not* a public repository (there's nothing really secret in
# this repository; it contains many publicly-available MPI tests and
# benchmarks, but we have never looked into the redistribution rights
# of these codes, so we keep the Git repository "closed" to the
# general public and only use it internally in the Open MPI project).

#----------------------------------------------------------------------

# The intent for this template file is to establish at least some
# loose guidelines for what Open MPI core testers should be running on
# a nightly basis. This file is not intended to be an exhaustive
# sample of all possible fields and values that MTT offers. Each site
# will undoubtedly have to edit this template for their local needs
# (e.g., pick compilers to use, etc.), but this file provides a
# baseline set of configurations that we intend you to run.

# OMPI core members will need to edit some values in this file based
# on their local testing environment. Look for comments with "OMPI
# Core:" for instructions on what to change.

# Note that this file is artificially longer than it really needs to
# be -- a bunch of values are explicitly set here that are exactly
# equivalent to their defaults. This is mainly because there is no
# reliable form of documentation for this ini file yet, so the values
# here comprise a good set of what options are settable (although it
# is not a comprehensive set).

# Also keep in mind that at the time of this writing, MTT is still
# under active development and therefore the baselines established in
# this file may change on a relatively frequent basis.

# The guidelines are as follows:
#
# 1. Download and test nightly snapshot tarballs of at least one of
#    the following:
#    - the master (highest preference)
#    - release branches (highest preference is the most recent release
#      branch; lowest preference is the oldest release branch)
# 2. Run all 4 correctness test suites from the ompi-tests Git repo
#    - trivial, as many processes as possible
#    - intel tests with all_tests_no_perf, up to 64 processes
#    - IBM, as many processes as possible
#    - IMB, as many processes as possible
# 3. Run with as many different components as possible
#    - PMLs (ob1, dr)
#    - BTLs (iterate through sm, tcp, whatever high speed network(s) you
#      have, etc. -- as relevant)

#======================================================================
# Overall configuration
#======================================================================

[MTT]

# OMPI Core: if you are not running in a scheduled environment and you
# have a fixed hostfile for what nodes you'll be running on, fill in
# the absolute pathname to it here. If you do not have a hostfile,
# leave it empty. Example:
# hostfile = /home/me/mtt-runs/mtt-hostfile
# This file will be parsed and will automatically set a valid value
# for &env_max_np() (it'll count the number of lines in the hostfile,
# adding slots/cpu counts if it finds them). The "hostfile" value is
# ignored if you are running in a recognized scheduled environment.
hostfile =

# OMPI Core: if you would rather list the hosts individually on the
# mpirun command line, list hosts here delimited by whitespace (if you
# have a hostfile listed above, this value will be ignored!). Hosts
# can optionally be suffixed with ":num", where "num" is an integer
# indicating how many processes may be started on that machine (if not
# specified, ":1" is assumed). The sum of all of these values is used
# for &env_max_np() at run time. Example (4 uniprocessors):
# hostlist = node1 node2 node3 node4
# Another example (4 2-way SMPs):
# hostlist = node1:2 node2:2 node3:2 node4:2
# The "hostlist" value is ignored if you are running in a scheduled
# environment or if you have specified a hostfile.
hostlist =

# OMPI Core: if you are running in a scheduled environment and want to
# override the scheduler and set the maximum number of processes
# returned by &env_max_procs(), you can fill in an integer here.
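# For example, to cap runs at 64 processes regardless of what the
# scheduler reports (illustrative value only):
# max_np = 64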
max_np =

# OMPI Core: Output display preference; the default width at which MTT
# output will wrap.
textwrap = 76

# OMPI Core: After the timeout for a command has passed, wait this
# many additional seconds to drain all output, and then kill it with
# extreme prejudice.
drain_timeout = 5

# OMPI Core: Whether this invocation of the client is a test of the
# client setup itself. Specifically, this value should be set to true
# (1) if you are testing your MTT client and/or INI file and do not
# want the results included in normal reporting in the MTT central
# results database. Results submitted in "trial" mode are not
# viewable (by default) on the central database, and are automatically
# deleted from the database after a short time period (e.g., a week).
# Setting this value to 1 is exactly equivalent to passing "--trial"
# on the MTT client command line. However, any value specified here
# in this INI file will override the "--trial" setting on the command
# line (i.e., if you set "trial = 0" here in the INI file, that will
# override and cancel the effect of "--trial" on the command line).
# trial = 0
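# For example, while first validating your MTT client and this INI
# file (illustrative only):
# trial = 1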

# OMPI Core: Set the scratch parameter here (if you do not want it to
# be automatically set to your current working directory). Setting
# this parameter accomplishes the same thing that the --scratch option
# does.
# scratch = &getenv("HOME")/mtt-scratch

# OMPI Core: Set local_username here if you would prefer to not have
# your local user ID in the MTT database.
# local_username =

# OMPI Core: --force can be set here, instead of at the command line.
# Useful for a developer workspace where it never makes sense to run
# without --force.
# force = 1

# OMPI Core: Specify a list of sentinel files that MTT will regularly
# check for. If these files exist, MTT will exit more-or-less
# immediately (i.e., after the current test completes) and report all
# of its results. This is a graceful mechanism to make MTT stop right
# where it is but not lose any results.
# terminate_files = &getenv("HOME")/mtt-stop,&scratch_root()/mtt-stop

# OMPI Core: Specify a default description string that is used in the
# absence of description strings in the MPI install, Test build, and
# Test run sections. The intent of this field is to record variable
# data that is outside the scope of MTT itself but that affects the
# software under test (e.g., the firmware version of a NIC). If no
# description string is specified here and no description strings are
# specified below, the description data field is left empty when
# reported.
# description = NIC firmware: &system("get_nic_firmware_rev")

# OMPI Core: Specify a logfile where you want all MTT output to be
# sent in addition to stdout / stderr.
# logfile = /tmp/my-logfile.txt

# OMPI Core: If you have additional .pm files for your own funclets,
# you can have a comma-delimited list of them here. Note that each
# .pm file *must* be a package within the MTT::Values::Functions
# namespace. For example, a Cisco.pm file must include the line:
#
#   package MTT::Values::Functions::Cisco;
#
# If this file contains a perl function named foo, you can invoke this
# funclet as &Cisco::foo(). Note that funclet files are loaded
# almost immediately, so you can use them even for other field values
# in the MTT section. A minimal sketch is shown below.
# funclet_files = /path/to/file1.pm, /path/to/file2.pm
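
# As a minimal sketch (hypothetical file and function names, following
# the packaging rule above), a funclet file might look like:
#
#   # /path/to/file1.pm
#   package MTT::Values::Functions::Cisco;
#   sub foo {
#       return "some value";
#   }
#   1;
#
# With "funclet_files = /path/to/file1.pm" set, &Cisco::foo() could
# then be used anywhere a funclet is accepted in this file.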

# OMPI Core: To ensure that MTT doesn't fill up your disk, you can
# tell MTT to stop when disk space gets too low. You can specify a
# raw number of bytes or a percentage of overall disk space. For
# example (default value is "5%"):
#
# min_disk_free = 5%      # stop when there's less than 5% disk free
# min_disk_free = 500000  # stop when there's less than 500,000 bytes free

# OMPI Core: When MTT detects a low-disk situation, it can wait a
# little while before reporting whatever results it has accumulated
# and exiting. The min_disk_free_wait field specifies the number of
# minutes to wait for enough disk space to become free. If there is
# still not enough disk space at the end of that time, MTT will report
# its accumulated results and quit.
#
# min_disk_free_wait = 60

#
# Submit results on a per-section basis, as an alternative to
# "submit_results_after_n_results".
#
submit_group_results = 1

# Code to run on MTT start
# on_start = &shell("modprobe ummunot")

# Code to run on MTT stop
# on_stop = &shell("modprobe -r ummunot")

#----------------------------------------------------------------------

[Lock]
# The only module available is MTTLockServer, which requires running
# the mtt-lock-server executable somewhere. You can leave this
# section blank and there will be no locking.
#module = MTTLockServer
#mttlockserver_host = hostname where mtt-lock-server is running
#mttlockserver_port = integer port number of the mtt-lock-server

#======================================================================
# MPI get phase
#======================================================================

[MPI get: ompi-nightly-master]
mpi_details = Open MPI

module = OMPI_Snapshot
ompi_snapshot_url = https://www.open-mpi.org/nightly/master

#----------------------------------------------------------------------

# OMPI Core: If you do not want to test nightly v1.10 tarballs, use the
# --no-section client command line flag or comment out this section.
[MPI get: ompi-nightly-v1.10]
mpi_details = Open MPI

module = OMPI_Snapshot
ompi_snapshot_url = https://www.open-mpi.org/nightly/v1.10

#----------------------------------------------------------------------

# OMPI Core: If you do not want to test nightly v2.0.x tarballs, use the
# --no-section client command line flag or comment out this section.
[MPI get: ompi-nightly-v2.0.x]
mpi_details = Open MPI
module = OMPI_Snapshot
ompi_snapshot_url = https://www.open-mpi.org/nightly/v2.x

#======================================================================
# Install MPI phase
#======================================================================

[MPI install: gcc warnings]
mpi_get = ompi-nightly-master,ompi-nightly-v2.0.x,ompi-nightly-v1.10
save_stdout_on_success = 1
merge_stdout_stderr = 0

module = OMPI
ompi_vpath_mode = none
# OMPI Core: This is a GNU make option; if you are not using GNU make,
# you'll probably want to delete this field (i.e., leave it to its
# default [empty] value).
ompi_make_all_arguments = -j 32
ompi_make_check = 1
# OMPI Core: You will likely need to update these values for whatever
# compiler you want to use. You can pass any configure flags that you
# want, including those that change which compiler to use (e.g., CC=cc
# CXX=CC F77=f77 FC=f90). Valid compiler names are: gnu, pgi, intel,
# ibm, kai, absoft, pathscale, sun. If you have other compiler names
# that you need, let us know. Note that the compiler_name flag is
# solely for classifying test results; it does not automatically pass
# values to configure to set the compiler.
ompi_compiler_name = gnu
ompi_compiler_version = &get_gcc_version()
ompi_configure_arguments = CFLAGS=-pipe --enable-picky --enable-debug

#----------------------------------------------------------------------

# Similar to the above, but using the Intel compilers and an
# environment module.
[MPI install: intel warnings]
mpi_get = ompi-nightly-master,ompi-nightly-v2.0.x,ompi-nightly-v1.10
save_stdout_on_success = 1
merge_stdout_stderr = 0
# Load the environment module named below before executing this
# section (and any test phase section that uses this MPI install
# section). This line is only suitable for those who use the
# environment modules package (http://modules.sourceforge.net/).
env_module = intel-compilers/2016-16.0.0.109

module = OMPI
ompi_vpath_mode = none
ompi_make_all_arguments = -j 8
ompi_make_check = 1
ompi_compiler_name = intel
ompi_compiler_version = &get_icc_version()
ompi_configure_arguments = CC=icc CXX=icpc F77=ifort FC=ifort CFLAGS=-g --enable-picky --enable-debug

# Sun MPI install section illustrating the use of
# $var style substitution and &perl()
[MPI install: sun-autotools]

configure_arguments = \
    $prepend_configure_arguments \
    $compiler_names \
    $compiler_flags \
    $append_configure_arguments \
    $with_mx_lib_argument \
    $with_tm_argument

# ompi-nightly-*
mpi_get =

# 32|64
bitness =

# --whatever, ...
prepend_configure_arguments =

# --whatever, ...
append_configure_arguments =

# Files and directories
arch = &shell("uname -p")
home = &getenv("HOME")

mtt_utils_dir = $home/mtt-utils
ompi_build_dir = $home/ompi-tools/share/ompi-build
compiler_names = CC=cc CXX=CC FC=f90 F77=f77
compiler_flags_file = $ompi_build_dir/comp-flags.sos.$arch.$bitness.opt
compiler_flags = &shell("cat $compiler_flags_file")

# Only use this option if the MX directory exists
mx_lib = /opt/mx/lib
with_mx_lib_argument = <<EOT
&perl("
    if (-d '$mx_lib') {
        return '--with-mx-lib=$mx_lib';
    } else {
        return '';
    }
")
EOT

# Only use this option if the OpenPBS directory exists
tm = /hpc/rte/OpenPBS-$arch
with_tm_argument = <<EOT
&perl("
    if (-d '$tm') {
        return '--with-tm=$tm';
    } else {
        return '';
    }
")
EOT

# Other settings
save_stdout_on_success = 1
merge_stdout_stderr = 1
vpath_mode = none
make_all_arguments = -j 4
make_check = 0
compiler_name = sun
compiler_version = &get_sun_cc_version()
module = OMPI


# Other compiler version funclets that are available:
#   &get_pgcc_version : PGI compiler suite
#   &get_pathcc_version : Pathscale compiler suite
#   &get_sun_version : Sun compiler suite

#======================================================================
# MPI run details
#======================================================================

[MPI Details: Open MPI]

# MPI tests
exec = mpirun @hosts@ -np &test_np() @mca@ --prefix &test_prefix() &test_executable() &test_argv()

hosts = &if(&have_hostfile(), "--hostfile " . &hostfile(), \
            &if(&have_hostlist(), "--host " . &hostlist(), ""))

# Iterate over several BTL combinations; this could also be made
# conditional on the MPI get section name (e.g., different versions of
# OMPI have different capabilities / bugs).
mca = &enumerate( \
    "--mca btl vader,tcp,self @mca_params@", \
    "--mca btl tcp,self @mca_params@")
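
# Each value produced by &enumerate() above is substituted for @mca@ in
# the exec line, so every test runs once per BTL set. Schematically (an
# illustrative sketch, not verbatim MTT expansion; "./some_test" is a
# placeholder):
#
#   mpirun ... --mca btl vader,tcp,self --mca btl_tcp_if_include eth0 ... ./some_test
#   mpirun ... --mca btl tcp,self       --mca btl_tcp_if_include eth0 ... ./some_test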

# MCA params that are suitable for your environment
mca_params = --mca btl_tcp_if_include eth0 --mca oob_tcp_if_include eth0
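
# For example (hypothetical interface name), if your TCP traffic should
# use a different interface than eth0, point both parameters at it:
# mca_params = --mca btl_tcp_if_include ib0 --mca oob_tcp_if_include ib0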

# The "OMPI" MPI Install module installs a helper script named
# "mtt_ompi_cleanup.pl". This script is orterun-able and will kill
# all rogue orteds on a node and whack any session directories.
# Invoke it via orterun just to emphasize that it is not an MPI
# application. The helper script is installed in OMPI's bin dir, so
# it'll automatically be found in the path (because OMPI's bin dir is
# in the path).

after_each_exec = <<EOT
# We can exit if the test passed or was skipped (i.e., there's no need
# to clean up).
if test "$MTT_TEST_RUN_RESULT" = "passed" -o "$MTT_TEST_RUN_RESULT" = "skipped"; then
    exit 0
fi

if test "$MTT_TEST_HOSTFILE" != ""; then
    args="--hostfile $MTT_TEST_HOSTFILE"
elif test "$MTT_TEST_HOSTLIST" != ""; then
    args="--host $MTT_TEST_HOSTLIST"
fi
orterun $args -np $MTT_TEST_NP --prefix $MTT_TEST_PREFIX mtt_ompi_cleanup.pl
EOT

#======================================================================
# Test get phase
#======================================================================

[Test get: trivial]
module = Trivial

#----------------------------------------------------------------------

[Test get: ibm]
module = SCM
scm_module = Git
# Per the note at the beginning of this file, the "ompi-tests" repo is
# private. One method of specifying a username+password for Git HTTPS
# checkouts is to replace "username:password" with a valid GitHub
# username and password that has access to this repository.
scm_url = https://username:password@github.com/open-mpi/ompi-tests.git
scm_subdir = ibm
scm_post_copy = <<EOT
./autogen.sh
EOT

#----------------------------------------------------------------------

[Test get: intel]
module = SCM
scm_module = Git
scm_url = https://username:password@github.com/open-mpi/ompi-tests.git
scm_subdir = intel_tests

#----------------------------------------------------------------------

[Test get: onesided]
module = SCM
scm_module = Git
scm_url = https://username:password@github.com/open-mpi/ompi-tests.git
scm_subdir = onesided
scm_post_copy = <<EOT
./autogen.sh
EOT

#----------------------------------------------------------------------

[Test get: mpicxx]
module = SCM
scm_module = Git
scm_url = https://username:password@github.com/open-mpi/ompi-tests.git
scm_subdir = mpicxx
scm_post_copy = <<EOT
./autogen.sh
EOT

#----------------------------------------------------------------------

[Test get: imb]
module = SCM
scm_module = Git
scm_url = https://username:password@github.com/open-mpi/ompi-tests.git
scm_subdir = imb

#----------------------------------------------------------------------

[Test get: netpipe]
module = SCM
scm_module = Git
scm_url = https://username:password@github.com/open-mpi/ompi-tests.git
scm_subdir = NetPIPE-3.7.1

#======================================================================
# Test build phase
#======================================================================

[Test build: trivial]
test_get = trivial
save_stdout_on_success = 1
merge_stdout_stderr = 1

module = Trivial

#----------------------------------------------------------------------

[Test build: ibm]
test_get = ibm
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
./configure --enable-static --disable-shared
make
EOT

#----------------------------------------------------------------------

[Test build: intel]
test_get = intel
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Intel_OMPI_Tests
intel_ompi_tests_make_arguments = -j 32
intel_ompi_tests_buildfile = all_tests_no_perf

#----------------------------------------------------------------------

[Test build: onesided]
test_get = onesided
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
./configure
make
EOT

#----------------------------------------------------------------------

[Test build: mpicxx]
test_get = mpicxx
save_stdout_on_success = 1
merge_stdout_stderr = 1

module = Shell
shell_build_command = <<EOT
./configure CC=mpicc CXX=mpic++
make
EOT

#----------------------------------------------------------------------

[Test build: imb]
test_get = imb
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
cd src
make clean IMB-MPI1 IMB-EXT
EOT

#----------------------------------------------------------------------

[Test build: netpipe]
test_get = netpipe
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
make mpi
EOT

#======================================================================
# Test Run phase
#======================================================================

# This section is not used directly; it is included in others.
[Defaults Test run]
pass = &and(&test_wifexited(), &eq(&test_wexitstatus(), 0))
skipped = &and(&test_wifexited(), &eq(&test_wexitstatus(), 77))

save_stdout_on_pass = 1
merge_stdout_stderr = 1
stdout_save_lines = 100
stderr_save_lines = 100
report_after_n_results = 100

np = &env_max_procs()

#----------------------------------------------------------------------

[Test run: trivial]
include_section = Defaults Test run

test_build = trivial
timeout = &max(10, &test_np())
skipped = 0

specify_module = Simple
simple_first:tests = &find_executables(".")

#----------------------------------------------------------------------

[Test run: ibm]
include_section = Defaults Test run

test_build = ibm
timeout = &max(30, &multiply(10, &test_np()))

specify_module = Simple
simple_first:np = &env_max_procs()
simple_first:tests = &find_executables("collective", "communicator", \
                                       "datatype", "dynamic", "environment", \
                                       "group", "info", "io", "onesided", \
                                       "pt2pt", "random", "topology")

# Tests that are supposed to fail
simple_fail:tests = environment/abort environment/final
simple_fail:pass = &and(&test_wifexited(), &ne(&test_wexitstatus(), 0))
simple_fail:exclusive = 1
simple_fail:timeout = &env_max_procs()

# Spawn tests; need to run very few
simple_spawns:tests = dynamic/spawn dynamic/spawn_multiple
simple_spawns:np = 3
simple_spawns:pass = &and(&test_wifexited(), &eq(&test_wexitstatus(),0))
simple_spawns:exclusive = 1
simple_spawns:timeout = &multiply(5,&env_max_procs())

# Big loop o' spawns
simple_loop_spawn:tests = dynamic/loop_spawn
simple_loop_spawn:np = 1
simple_loop_spawn:pass = &and(&test_wifexited(), &eq(&test_wexitstatus(),0))
simple_loop_spawn:exclusive = 1
simple_loop_spawn:timeout = 600

# Big loop o' comm splits and whatnot. It runs for 10 minutes.
simple_loop_comm_split:tests = communicator/comm_split_f
simple_loop_comm_split:np = 1
simple_loop_comm_split:pass = &and(&test_wifexited(), &eq(&test_wexitstatus(),0))
simple_loop_comm_split:exclusive = 1
simple_loop_comm_split:timeout = 660

# THREAD_MULTIPLE test will fail with the openib btl because it
# deactivates itself in the presence of THREAD_MULTIPLE. So just skip
# it. loop_child is the target for loop_spawn, so we don't need to
# run it (although it'll safely pass if you run it by itself).
simple_skip:tests = environment/init_thread_multiple dynamic/loop_child
simple_skip:exclusive = 1
simple_skip:do_not_run = 1

#----------------------------------------------------------------------

[Test run: intel]
include_section = Defaults Test run

test_build = intel
timeout = &max(30, &multiply(20, &test_np()))
np = &min("60", &env_max_procs())

specify_module = Simple
simple_successful:tests = &find_executables("src")

simple_failures:tests = &find_executables(&prepend("src/", &cat("supposed_to_fail")))
simple_failures:pass = &and(&test_wifexited(), &ne(&test_wexitstatus(), 0))
simple_failures:exclusive = 1
simple_failures:timeout = &env_max_procs()

# These tests sleep for 90 seconds (!) before attempting to process
# any messages
simple_really_slow:tests = src/MPI_Isend_flood_c src/MPI_Send_flood_c
simple_really_slow:pass = &and(&test_wifexited(), &eq(&test_wexitstatus(), 0))
simple_really_slow:exclusive = 1
simple_really_slow:timeout = &sum(180, &multiply(5, &test_np()))

# This test collectively sleeps for 26 seconds *per MCW rank*
simple_coll_slow:tests = src/MPI_collective_overlap_c
simple_coll_slow:pass = &and(&test_wifexited(), &eq(&test_wexitstatus(), 0))
simple_coll_slow:exclusive = 1
simple_coll_slow:timeout = &multiply(35, &test_np())

#----------------------------------------------------------------------

[Test run: onesided]
include_section = Defaults Test run

test_build = onesided
timeout = &max(30, &multiply(10, &test_np()))

np = &if(&gt(&env_max_procs(), 0), &step(2, &max(2, &env_max_procs()), 2), 2)

specify_module = Simple
simple_pass:tests = &cat("run_list")

#----------------------------------------------------------------------

[Test run: mpicxx]
include_section = Defaults Test run

test_build = mpicxx
timeout = &max(30, &multiply(10, &test_np()))

specify_module = Simple
simple_pass:tests = src/mpi2c++_test src/mpi2c++_dynamics_test

#----------------------------------------------------------------------

[Test run: imb-general]
include_section = Defaults Test run

test_build = imb
timeout = &max(2800, &multiply(50, &test_np()))
np = &min("32", &env_max_procs())

argv = -npmin &test_np()

specify_module = Simple
simple_pass:tests = src/IMB-MPI1 src/IMB-EXT

#----------------------------------------------------------------------

[Test run: imb-check]
include_section = Defaults Test run

test_build = imb
timeout = &max(2800, &multiply(50, &test_np()))
np = &min("32", &env_max_procs())

argv = -npmin &test_np()

specify_module = Simple
simple_pass:tests = src/IMB-MPI1 src/IMB-EXT

#----------------------------------------------------------------------

[Test run: imb performance]
include_section = Defaults Test run
test_build = imb

pass = &eq(&cmd_wexitstatus(), 0)
timeout = -1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1

argv = -npmin &test_np() &enumerate("PingPong", "PingPing", "Sendrecv", "Exchange", "Allreduce", "Reduce", "Reduce_scatter", "Allgather", "Allgatherv", "Alltoall", "Bcast", "Barrier")

specify_module = Simple
analyze_module = IMB
simple_pass:tests = src/IMB-MPI1

#----------------------------------------------------------------------

[Test run: netpipe-performance]
include_section = Defaults Test run

test_build = netpipe
skipped = 0
timeout = &multiply(&test_np(), 120)
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
np = 2

specify_module = Simple
analyze_module = NetPipe
simple_pass:tests = NPmpi

#----------------------------------------------------------------------

[Test run: nbcbench]
include_section = Defaults Test run
test_build = nbcbench

pass = &and(&test_wifexited(), &eq(&test_wexitstatus(), 0))
timeout = -1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1

specify_module = Simple
analyze_module = NBCBench
simple_pass:tests = nbcbench

argv = -p &test_np()-&test_np() -s 1-1048576 -v -t \
    &enumerate("MPI_Allgatherv", "MPI_Allgather", "MPI_Allreduce", \
    "MPI_Alltoall", "MPI_Alltoallv", "MPI_Barrier", "MPI_Bcast", \
    "MPI_Gather", "MPI_Gatherv", "MPI_Reduce", "MPI_Reduce_scatter", \
    "MPI_Scan", "MPI_Scatter", "MPI_Scatterv")

#======================================================================
# Reporter phase
#======================================================================

[Reporter: IU database]
module = MTTDatabase

mttdatabase_realm = OMPI
mttdatabase_url = https://mtt.open-mpi.org/submit/
# OMPI Core: Change this to be the username and password for your
# submit user. Get this from the OMPI MTT administrator.
mttdatabase_username = >>> you must set this value <<<
mttdatabase_password = >>> you must set this value <<<
# OMPI Core: Change this to be some short string identifying your
# cluster.
mttdatabase_platform = >>> you must set this value <<<

#----------------------------------------------------------------------
# The reporter below is in development; submissions are marked as 'trial'.
# [Reporter: IU REST Storage]
# module = MTTStorage

# mttstorage_realm = OMPI
# mttstorage_url = https://mtt.open-mpi.org/submit/api
# # OMPI Core: Change this to be the username and password for your
# # submit user. Get this from the OMPI MTT administrator.
# mttstorage_username = >>> you must set this value <<<
# mttstorage_password = >>> you must set this value <<<
# # OMPI Core: Change this to be some short string identifying your
# # cluster.
# mttstorage_platform = >>> you must set this value <<<

#----------------------------------------------------------------------

# This reporter is a backup for use while debugging MTT; it also writes
# results to a local text file.

[Reporter: text file backup]
module = TextFile

textfile_filename = $phase-$section-$mpi_name-$mpi_version.txt

textfile_summary_header = <<EOT
hostname: &shell("hostname")
uname: &shell("uname -a")
who am i: &shell("who am i")
EOT

textfile_summary_footer =
textfile_detail_header =
textfile_detail_footer =

textfile_textwrap = 78

# Send a digested summary of the MTT run by email
#email_to =
#email_subject = MTT test has completed, status: $overall_mtt_status
#email_footer = <<EOT
#Test Scratch Directory is &scratch_root()
#EOT

#----------------------------------------------------------------------

[Reporter: send email]
module = Email
email_to = fill this in
email_subject = MPI test results: &current_section()