ompi-core-perf-testing.ini
#
# Copyright (c) 2006-2009 Cisco Systems, Inc. All rights reserved.
#
# Note that there are many items in this file that, while they are
# good for examples, may not work for random MTT users. For example,
# the "ompi-tests" SVN repository that is used in the examples below
# is *not* a public repository (there's nothing really secret in this
# repository; it contains many publicly-available MPI tests and
# benchmarks, but we have never looked into the redistribution rights
# of these codes, so we keep the SVN repository "closed" to the
# general public and only use it internally in the Open MPI project).

#======================================================================
# Generic OMPI core performance testing template configuration
#======================================================================

[MTT]
# Leave this string so that we can identify this data subset in the
# database
# OMPI Core: Use a "test" label until we're ready to run real results
description = [testbake]
#description = [2007 collective performance bakeoff]
# OMPI Core: Use the "trial" flag until we're ready to run real results
trial = 1

# Put other values here as relevant to your environment.

#----------------------------------------------------------------------

[Lock]
# Put values here as relevant to your environment.

#======================================================================
# MPI get phase
#======================================================================

[MPI get: ompi-nightly-trunk]
mpi_details = OMPI

module = OMPI_Snapshot
ompi_snapshot_url = http://www.open-mpi.org/nightly/trunk

#----------------------------------------------------------------------

[MPI get: ompi-nightly-v1.2]
mpi_details = OMPI

module = OMPI_Snapshot
ompi_snapshot_url = http://www.open-mpi.org/nightly/v1.2

#----------------------------------------------------------------------

[MPI get: ompi-released-v1.2]
mpi_details = OMPI

module = OMPI_Snapshot
ompi_snapshot_url = http://www.open-mpi.org/software/ompi/v1.2/downloads

#----------------------------------------------------------------------

[MPI get: MPICH1]
mpi_details = MPICH1

module = Download
# MPICH1 from the Argonne web site
#download_url = http://www-unix.mcs.anl.gov/mpi/mpich1/downloads/mpich.tar.gz
# If you are using SLURM, use this URL -- it's exactly the same
# mpich.tar.gz, but with the SLURM 1.2.12/etc/mpich1.slurm.patch in it
# (which allows native launching under SLURM).
download_url = http://www.open-mpi.org/~jsquyres/ompi-coll-bakeoff/mpich-1.2.7p1-patched-for-slurm.tar.gz
# This version is fixed/frozen, so it's OK to hard-code it
download_version = 1.2.7p1

#----------------------------------------------------------------------

[MPI get: MPICH-MX]
mpi_details = MPICH-MX

module = Download
download_url = http://www.myri.com/ftp/pub/MPICH-MX/mpich-mx_1.2.7..5.tar.gz
# You need to obtain the username and password from Myricom
download_username = <OBTAIN THIS FROM MYRICOM>
download_password = <OBTAIN THIS FROM MYRICOM>

#----------------------------------------------------------------------

[MPI get: MPICH2]
mpi_details = MPICH2

module = Download
download_url = http://www-unix.mcs.anl.gov/mpi/mpich2/downloads/mpich2-1.0.5p4.tar.gz

#----------------------------------------------------------------------

[MPI get: MVAPICH1]
mpi_details = MVAPICH1

module = Download
download_url = http://mvapich.cse.ohio-state.edu/download/mvapich/mvapich-0.9.9.tar.gz

#----------------------------------------------------------------------

[MPI get: MVAPICH2]
mpi_details = MVAPICH2

module = Download
download_url = http://mvapich.cse.ohio-state.edu/download/mvapich2/mvapich2-0.9.8p3.tar.gz

#----------------------------------------------------------------------

[MPI get: HP MPI]
mpi_details = HP MPI

# You need to have HP MPI already installed somewhere
module = AlreadyInstalled
# Fill this in with the version of your HP MPI
alreadyinstalled_version = 2.2.5.1b1

#----------------------------------------------------------------------

[MPI get: Intel MPI]
mpi_details = Intel MPI

# You need to have Intel MPI already installed somewhere
module = AlreadyInstalled
# Fill this in with the version of your Intel MPI
alreadyinstalled_version = 3.0

#----------------------------------------------------------------------

[SKIP MPI get: Scali MPI]
mpi_details = Scali MPI

# You need to have Scali MPI already installed somewhere
module = AlreadyInstalled
# Fill this in with the version of your Scali MPI
alreadyinstalled_version = ???

#----------------------------------------------------------------------

[SKIP MPI get: Cray MPI]
mpi_details = Cray MPI

# You need to have Cray MPI already installed somewhere
module = AlreadyInstalled
# Fill this in with the version of your Cray MPI
alreadyinstalled_version = ???

#======================================================================
# Install MPI phase
#======================================================================

# All flavors of Open MPI
[MPI install: OMPI/GNU-standard]
mpi_get = ompi-nightly-trunk, ompi-nightly-v1.2, ompi-released-v1.2
save_stdout_on_success = 1
merge_stdout_stderr = 0

module = OMPI
ompi_make_all_arguments = -j 8
ompi_compiler_name = gnu
ompi_compiler_version = &get_gcc_version()
# Adjust these configure flags for your site
ompi_configure_arguments = CFLAGS=-O3 --with-openib --enable-mpirun-prefix-by-default --enable-branch-probabilities --disable-heterogeneous --without-mpi-param-check
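# For example (a hypothetical variant, not part of the original file): a
# site without InfiniBand might simply drop the --with-openib flag:
#ompi_configure_arguments = CFLAGS=-O3 --enable-mpirun-prefix-by-default --disable-heterogeneous --without-mpi-param-check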

#----------------------------------------------------------------------

[MPI install: MPICH1]
mpi_get = mpich1
save_stdout_on_success = 1
merge_stdout_stderr = 0
# Ensure that MPICH allocates enough shared memory (32MB seems to be
# enough for ppn=4; we went to 64MB to give it plenty of room)
setenv = P4_GLOBMEMSIZE 67108864

module = MPICH2
mpich2_use_all_target = 0
mpich2_apply_slurm_patch = 1
mpich2_compiler_name = gnu
mpich2_compiler_version = &get_gcc_version()
mpich2_configure_arguments = -cflags=-O3 -rsh=ssh --with-device=ch_p4 --with-comm=shared

#----------------------------------------------------------------------

[MPI install: MPICH2]
mpi_get = mpich2
save_stdout_on_success = 1
merge_stdout_stderr = 0
# Adjust this for your site (this is what works at Cisco). Needed to
# launch in SLURM; adding this to LD_LIBRARY_PATH here propagates this
# all the way through the test run phases that use this MPI install,
# where the test executables will need to have this set.
prepend_path = LD_LIBRARY_PATH /opt/slurm/current/lib

module = MPICH2
mpich2_compiler_name = gnu
mpich2_compiler_version = &get_gcc_version()
mpich2_configure_arguments = --disable-f90 CFLAGS=-O3 --enable-fast --with-device=ch3:nemesis
# These are needed to launch through SLURM; adjust as appropriate.
mpich2_additional_wrapper_ldflags = -L/opt/slurm/current/lib
mpich2_additional_wrapper_libs = -lpmi

#----------------------------------------------------------------------

[MPI install: MVAPICH1]
mpi_get = mvapich1
save_stdout_on_success = 1
merge_stdout_stderr = 0
# Setting this makes MVAPICH assume that the same HCAs are present on
# all hosts, which lets it make some optimizations
setenv = VIADEV_USE_COMPAT_MODE 0

module = MVAPICH2
# Adjust this to be where your OFED is installed
mvapich2_setenv = IBHOME /usr
# Leave COMPAT set; if you don't, MVAPICH's build script asks
# interactive questions, causing MTT to hang
mvapich2_setenv = COMPAT AUTO_DETECT
mvapich2_build_script = make.mvapich.gen2
mvapich2_compiler_name = gnu
mvapich2_compiler_version = &get_gcc_version()

#----------------------------------------------------------------------

[MPI install: MVAPICH2]
mpi_get = mvapich2
save_stdout_on_success = 1
merge_stdout_stderr = 0
# Adjust this for your site (this is what works at Cisco). Needed to
# launch in SLURM; adding this to LD_LIBRARY_PATH here propagates this
# all the way through the test run phases that use this MPI install,
# where the test executables will need to have this set.
prepend_path = LD_LIBRARY_PATH /opt/slurm/current/lib

module = MVAPICH2
# Adjust this to be where your OFED is installed
mvapich2_setenv = OPEN_IB_HOME /usr
mvapich2_build_script = make.mvapich2.ofa
mvapich2_compiler_name = gnu
mvapich2_compiler_version = &get_gcc_version()
# These are needed to launch through SLURM; adjust as appropriate.
mvapich2_additional_wrapper_ldflags = -L/opt/slurm/current/lib
mvapich2_additional_wrapper_libs = -lpmi

#----------------------------------------------------------------------

[MPI install: Intel MPI]
mpi_get = Intel MPI
# Adjust this if you need to. It guarantees that multiple MPDs
# running on the same host will not collide. If you'll ever have
# multi-job-on-the-same-host conflicts, you may want to adjust this to
# reflect some unique identifier (e.g., a resource manager ID).
setenv = MPD_CON_EXT mtt-unique-mpd.&getenv("SLURM_JOBID")
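# For example (hypothetical, not from the original file): under PBS/Torque
# you might key the MPD console extension off that resource manager's job ID:
#setenv = MPD_CON_EXT mtt-unique-mpd.&getenv("PBS_JOBID")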

module = Analyze::IntelMPI

#----------------------------------------------------------------------

[MPI install: HP MPI]
mpi_get = HP MPI

module = Analyze::HPMPI

#======================================================================
# MPI run details
#======================================================================

[MPI Details: OMPI]
# Check &test_alloc() for byslot or bynode
exec = mpirun @alloc@ -np &test_np() @mca@ &test_executable() &test_argv()
parameters = &MPI::OMPI::find_mpirun_params(&test_command_line(), \
    &test_executable())
network = &MPI::OMPI::find_network(&test_command_line(), &test_executable())

alloc = &if(&eq(&test_alloc(), "node"), "--bynode", "--byslot")
mca = &enumerate( \
    "--mca btl sm,tcp,self " . @common_params@, \
    "--mca btl tcp,self " . @common_params@, \
    "--mca btl sm,openib,self " . @common_params@, \
    "--mca btl sm,openib,self --mca mpi_leave_pinned 1 " . @common_params@, \
    "--mca btl openib,self " . @common_params@, \
    "--mca btl openib,self --mca mpi_leave_pinned 1 " . @common_params@, \
    "--mca btl openib,self --mca mpi_leave_pinned_pipeline 1 " . @common_params@, \
    "--mca btl openib,self --mca btl_openib_use_srq 1 " . @common_params@)
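# Each value that &enumerate() produces above becomes a separate run of every
# test that uses this MPI details section. For example, a hypothetical
# two-entry list such as:
#   mca = &enumerate("--mca btl sm,self", "--mca btl tcp,self")
# would launch each test executable twice, once per --mca combination.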

# v1.2 has a problem with striping across heterogeneous ports right now:
# https://svn.open-mpi.org/trac/ompi/ticket/1125. Also keep the coll
# bakeoff tests on DDR only.
common_params = "--mca btl_tcp_if_include ib0 --mca oob_tcp_if_include ib0 --mca btl_openib_if_include mthca0 --mca mpi_paffinity_alone 1" . \
    &if(&or(&eq(&mpi_get_name(), "ompi-nightly-v1.2"), \
            &eq(&mpi_get_name(), "ompi-released-v1.2")), \
        " --mca btl_openib_max_btls 1", "")

# It is important that the after_each_exec step is a single
# command/line so that MTT will launch it directly (instead of via a
# temporary script). This is because the "srun" command is
# (intentionally) difficult to kill in some cases. See
# https://svn.open-mpi.org/trac/mtt/changeset/657 for details.

after_each_exec = &if(&ne("", &getenv("SLURM_NNODES")), "srun -N " . &getenv("SLURM_NNODES")) /home/mpiteam/svn/ompi-tests/cisco/mtt/after_each_exec.pl

#----------------------------------------------------------------------

[MPI Details: MPICH1]

# Launching through SLURM. From
# http://www.llnl.gov/linux/slurm/quickstart.html.
exec = srun @alloc@ -n &test_np() --mpi=mpich1_p4 &test_executable() &test_argv()

# If not using SLURM, you'll need something like this (not tested).
# You may need different hostfiles for running by slot/by node.
#exec = mpirun -np &test_np() -machinefile &hostfile() &test_executable()
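# A machinefile for the commented-out mpirun line above is just a list of
# hosts, one per line, optionally with a process count, e.g. (hypothetical
# hostnames):
#   node01:4
#   node02:4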

network = loopback,ethernet

alloc = &if(&eq(&test_alloc(), "node"), "-m cyclic", "-m block")

#----------------------------------------------------------------------

[MPI Details: MPICH2]

# Launching through SLURM. If you use mpdboot instead, you need to
# ensure that multiple mpd daemons on the same node don't conflict (or
# never happen).
exec = srun @alloc@ -n &test_np() &test_executable() &test_argv()

# If not using SLURM, you'll need something like this (not tested).
# You may need different hostfiles for running by slot/by node.
#exec = mpiexec -np &test_np() --host &hostlist() &test_executable()
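# For illustration only (hypothetical hosts, and assuming &hostlist() expands
# to a comma-separated list of the allocated hosts), the commented-out line
# above might expand to something like:
#   mpiexec -np 4 --host node01,node01,node02,node02 ./IMB-MPI1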

network = loopback,ethernet,shmem

alloc = &if(&eq(&test_alloc(), "node"), "-m cyclic", "-m block")

#----------------------------------------------------------------------

[MPI Details: MVAPICH1]

# Launching through SLURM. From
# http://www.llnl.gov/linux/slurm/quickstart.html.
exec = srun @alloc@ -n &test_np() --mpi=mvapich &test_executable() &test_argv()

# If not using SLURM, you'll need something like this (not tested).
# You may need different hostfiles for running by slot/by node.
#exec = mpirun -np &test_np() --machinefile &hostfile() &test_executable()

network = loopback,verbs,shmem

alloc = &if(&eq(&test_alloc(), "node"), "-m cyclic", "-m block")

#----------------------------------------------------------------------

[MPI Details: MVAPICH2]

# Launching through SLURM. If you use mpdboot instead, you need to
# ensure that multiple mpd daemons on the same node don't conflict (or
# never happen).
exec = srun @alloc@ -n &test_np() &test_executable() &test_argv()

# If not using SLURM, you'll need something like this (not tested).
# You may need different hostfiles for running by slot/by node.
#exec = mpiexec -np &test_np() --host &hostlist() &test_executable()

network = loopback,verbs,shmem

alloc = &if(&eq(&test_alloc(), "node"), "-m cyclic", "-m block")

#----------------------------------------------------------------------

[MPI Details: MPICH-MX]

# Launching through SLURM. From
# http://www.llnl.gov/linux/slurm/quickstart.html.
exec = srun @alloc@ -n &test_np() --mpi=mpichgm &test_executable() &test_argv()
network = mx

# If not using SLURM, you'll need something like this (not tested).
# You may need different hostfiles for running by slot/by node.
#exec = mpirun -np &test_np() --machinefile &hostfile() &test_executable()

alloc = &if(&eq(&test_alloc(), "node"), "-m cyclic", "-m block")

#----------------------------------------------------------------------

[MPI Details: Intel MPI]

# Need a before_any_exec step to launch MPDs
before_any_exec = <<EOF
h=`hostname`
file=mtt-hostlist.$$
rm -f $file
# If we're allocating by node, get each hostname once. Otherwise, get
# each hostname as many times as we have slots on that node.
srun hostname | uniq > $file
# Add localhost if it's not in there (e.g., srun -A)
local=`grep $h $file`
# Make sure the options file exists; it is read by the "options" field below
touch /tmp/mpiexec-options.$SLURM_JOBID
if test "$local" = ""; then
    echo $h >> $file
    echo -nolocal > /tmp/mpiexec-options.$SLURM_JOBID
fi
num=`wc -l $file | awk '{ print $1 }'`
mpdboot -n $num -r ssh --verbose --file=$file
mpdtrace
rm -f $file
EOF

# Intel MPI seems to default to by-node allocation and I can't figure
# out how to override it. Sigh.
exec = mpiexec @options@ -n &test_np() ./&test_executable() &test_argv()
network = loopback,verbs,shmem

# Test both the "normal" collective algorithms and Intel's "fast"
# collective algorithms (their docs state that the "fast" algorithms
# may not be MPI conformant, and may not give the same results between
# multiple runs, presumably if the process layout is different).
options = &stringify(&cat("/tmp/mpiexec-options." . &getenv("SLURM_JOBID"))) \
    &enumerate("-genv I_MPI_DEVICE rdssm", \
               "-genv I_MPI_DEVICE rdssm -genv I_MPI_FAST_COLLECTIVES 1")

after_all_exec = <<EOT
rm -f /tmp/mpiexec-options.$SLURM_JOBID
mpdallexit
EOT

#----------------------------------------------------------------------

[MPI Details: HP MPI]

# I use MPI_IBV_NO_FORK_SAFE=1 because I'm using RHEL4U4, which
# doesn't have IBV fork() support. I also have multiple active HCA
# ports and therefore need to give HP MPI a clue on the scheduling of
# ports via MPI_IB_CARD_ORDER. I got this information by e-mailing
# HP MPI support.
#
# In SLURM, HP MPI seems to schedule first by node and then by slot.
# So if you have 2 quad-core nodes in your SLURM allocation and you
# mpirun -np 2, you'll get one proc on each node. If you mpirun -np 4,
# you'll get MCW ranks 0 and 1 on the first node and MCW ranks 2 and 3
# on the second node. This is pretty much exactly what we want, so we
# don't need to check &test_alloc() here.
exec = mpirun -IBV -e MPI_IBV_NO_FORK_SAFE=1 -e MPI_IB_CARD_ORDER=0:0 -srun -n&test_np() ./&test_executable() &test_argv()
network = loopback,verbs,shmem

#======================================================================
# Test get phase
#======================================================================

[Test get: netpipe]
module = Download
download_url = http://www.scl.ameslab.gov/netpipe/code/NetPIPE_3.6.2.tar.gz

#----------------------------------------------------------------------

[Test get: osu]
module = SVN
svn_url = https://svn.open-mpi.org/svn/ompi-tests/trunk/osu

#----------------------------------------------------------------------

[Test get: imb]
module = SVN
svn_url = https://svn.open-mpi.org/svn/ompi-tests/trunk/IMB_2.3

#----------------------------------------------------------------------

[Test get: skampi]
module = SVN
svn_url = https://svn.open-mpi.org/svn/ompi-tests/trunk/skampi-5.0.1

#----------------------------------------------------------------------

[Test get: nbcbench]
module = SVN
svn_url = https://svn.open-mpi.org/svn/ompi-tests/trunk/nbcbench

#======================================================================
# Test build phase
#======================================================================

[Test build: netpipe]
test_get = netpipe
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
make mpi
EOT

#----------------------------------------------------------------------

[Test build: osu]
test_get = osu
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
make osu_latency osu_bw osu_bibw
EOT

#----------------------------------------------------------------------

[Test build: imb]
test_get = imb
save_stdout_on_success = 1
merge_stdout_stderr = 1

module = Shell
shell_build_command = <<EOT
cd src
make clean IMB-MPI1
EOT

#----------------------------------------------------------------------

[Test build: skampi]
test_get = skampi
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
# Set EVERYONE_CAN_MPI_IO for HP MPI
shell_build_command = <<EOT
make CFLAGS="-O2 -DPRODUCE_SPARSE_OUTPUT -DEVERYONE_CAN_MPI_IO"
EOT

#----------------------------------------------------------------------

[Test build: nbcbench]
test_get = nbcbench
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 100

module = Shell
shell_build_command = <<EOT
make
EOT

#======================================================================
# Test run phase
#======================================================================

[Test run: netpipe]
test_build = netpipe
pass = &and(&cmd_wifexited(), &eq(&cmd_wexitstatus(), 0))
# Timeout heuristic: 10 minutes
timeout = 10:00
save_stdout_on_pass = 1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1
np = 2
alloc = node

specify_module = Simple
analyze_module = NetPipe

simple_pass:tests = NPmpi

#----------------------------------------------------------------------

[Test run: osu]
test_build = osu
pass = &and(&cmd_wifexited(), &eq(&cmd_wexitstatus(), 0))
# Timeout heuristic: 10 minutes
timeout = 10:00
save_stdout_on_pass = 1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1
np = 2
alloc = node

specify_module = Simple
analyze_module = OSU

simple_pass:tests = osu_bw osu_latency osu_bibw

#----------------------------------------------------------------------

[Test run: imb]
test_build = imb
pass = &and(&cmd_wifexited(), &eq(&cmd_wexitstatus(), 0))
# Timeout heuristic: 10 minutes
timeout = 10:00
save_stdout_on_pass = 1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1
np = &env_max_procs()

argv = -npmin &test_np() &enumerate("PingPong", "PingPing", "Sendrecv", "Exchange", "Allreduce", "Reduce", "Reduce_scatter", "Allgather", "Allgatherv", "Alltoall", "Bcast", "Barrier")
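# Note: -npmin &test_np() asks IMB to run only at the full process count
# (by default, as we understand IMB's behavior, it would also iterate over
# smaller process counts). Each benchmark name produced by &enumerate()
# above becomes its own test run.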

specify_module = Simple
analyze_module = IMB

simple_pass:tests = src/IMB-MPI1

#----------------------------------------------------------------------

[Test run: skampi]
test_build = skampi
pass = &and(&cmd_wifexited(), &eq(&cmd_wexitstatus(), 0))
# Timeout heuristic: 10 minutes
timeout = 10:00
save_stdout_on_pass = 1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1
np = &env_max_procs()

argv = -i &find("mtt_.+.ski", "input_files_bakeoff")

specify_module = Simple
analyze_module = SKaMPI

simple_pass:tests = skampi

#----------------------------------------------------------------------

[Test run: nbcbench]
test_build = nbcbench
pass = &and(&test_wifexited(), &eq(&test_wexitstatus(), 0))
timeout = -1
save_stdout_on_pass = 1
# Be sure to leave this value as "-1", or performance results could be lost!
stdout_save_lines = -1
merge_stdout_stderr = 1

specify_module = Simple
analyze_module = NBCBench
simple_pass:tests = nbcbench

argv = -p &test_np()-&test_np() -s 1-1048576 -v -t \
    &enumerate("MPI_Allgatherv", "MPI_Allgather", "MPI_Allreduce", \
               "MPI_Alltoall", "MPI_Alltoallv", "MPI_Barrier", "MPI_Bcast", \
               "MPI_Gather", "MPI_Gatherv", "MPI_Reduce", "MPI_Reduce_scatter", \
               "MPI_Scan", "MPI_Scatter", "MPI_Scatterv")

#======================================================================
# Reporter phase
#======================================================================

[Reporter: IU database]
module = MTTDatabase

mttdatabase_realm = OMPI
mttdatabase_url = https://www.open-mpi.org/mtt/submit/
# Change these to be the username and password for your submit user.
# Get them from the OMPI MTT administrator.
mttdatabase_username = you must set this value
mttdatabase_password = you must set this value
# Change this to be some short string identifying your cluster.
mttdatabase_platform = you must set this value

mttdatabase_debug_filename = mttdb_debug_file_perf
mttdatabase_keep_debug_files = 1

#----------------------------------------------------------------------

# This is a backup reporter; it also writes results to a local text
# file.

[Reporter: text file backup]
module = TextFile

textfile_filename = $phase-$section-$mpi_name-$mpi_version.txt

textfile_summary_header = <<EOT
Hostname: &shell("hostname")
uname: &shell("uname -a")
Username: &shell("who am i")
EOT

textfile_summary_footer =
textfile_detail_header =
textfile_detail_footer =

textfile_textwrap = 78