testlib.py 79.6 KB
Newer Older
1
# coding=utf8
2
#
3
4
5
# (c) Simon Marlow 2002
#

6
import collections
import collections.abc
import copy
import datetime
import glob
import io
import os
import re
import shutil
import subprocess
import sys
import time
import traceback
from math import ceil, trunc
from pathlib import PurePath

from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass, testing_metrics
from cpu_features import have_cpu_feature

import perf_notes as Perf
from perf_notes import MetricChange
26
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
27

28
global pool_sema
29
30
if config.use_threads:
    import threading
31
    pool_sema = threading.BoundedSemaphore(value=config.threads)
32

33
34
global wantToStop
wantToStop = False
35

36
37
38
def stopNow():
    """Request that the test driver stop scheduling further tests."""
    global wantToStop
    wantToStop = True
39

40
41
42
def stopping():
    """True once stopNow() has been called."""
    return wantToStop

43

44
45
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).
46

ei@vuokko.info's avatar
ei@vuokko.info committed
47
global testopts_local
48
49
50
51
52
53
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()
54
55

def getTestOpts():
    """Return the options object for the test currently being processed."""
    return testopts_local.x
57

ei@vuokko.info's avatar
ei@vuokko.info committed
58
59
60
def setLocalTestOpts(opts):
    """Make `opts` the current (thread-local) test options."""
    global testopts_local
    testopts_local.x = opts
61

62
63
64
65
def isCompilerStatsTest():
    """True when the current test measures compiler performance numbers."""
    return bool(getTestOpts().is_compiler_stats_test)

66
67
def isStatsTest():
    """True when the current test collects performance statistics."""
    return getTestOpts().is_stats_test
69

70
71
72
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts(f):
    """Chain setup function `f` onto the current directory-wide defaults."""
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
75
76
77
78
79
80
81
82
83
84
85

# -----------------------------------------------------------------------------
# Canned setup functions for common cases.  eg. for a test you might say
#
#      test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
#      test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
86
87
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()
88

89
def normal(name, opts):
    """Default setup function: leave the test options untouched."""
    return

92
def skip(name, opts):
    """Mark this test to be skipped entirely."""
    opts.skip = True
94

95
def expect_fail(name, opts):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

101
def reqlib(lib):
    """Require library `lib`; the test expects 'missing-lib' when absent."""
    return lambda name, opts, l=lib: _reqlib(name, opts, l)
103

104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
def stage1(name, opts):
    """Always a framework failure: stage1 tests belong in tests/stage1."""
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')

# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
#     quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.

134
135
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
136
have_lib_cache = {}
137

138
139
140
141
def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        return have_lib_cache[lib]

    cmd = strip_quotes(config.ghc_pkg)
    p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=ghc_env)
    # read from stdout and stderr to avoid blocking due to
    # buffers filling
    p.communicate()
    got_it = p.wait() == 0
    # Cache the answer: this makes quite a difference, especially on Windows.
    have_lib_cache[lib] = got_it
    return got_it
156

157
158
def _reqlib(name, opts, lib):
    # A missing library is an environment problem, not a test failure,
    # so mark the expectation rather than failing outright.
    if not have_library(lib):
        opts.expect = 'missing-lib'
160

161
162
163
164
def req_haddock(name, opts):
    """Require a haddock-capable compiler."""
    if config.haddock:
        return
    opts.expect = 'missing-lib'

165
def req_profiling(name, opts):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if config.have_profiling:
        return
    opts.expect = 'fail'

170
def req_shared_libs(name, opts):
    """Require shared-library support; expect failure otherwise."""
    if config.have_shared_libs:
        return
    opts.expect = 'fail'

174
def req_interp(name, opts):
    """Require the GHC interpreter; expect failure otherwise."""
    if config.have_interp:
        return
    opts.expect = 'fail'

178
def req_smp(name, opts):
    """Require SMP (threaded RTS) support; expect failure otherwise."""
    if config.have_smp:
        return
    opts.expect = 'fail'

182
183
184
185
186
def ignore_stdout(name, opts):
    """Do not compare this test's stdout against the expected output."""
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    """Do not compare this test's stderr against the expected output."""
    opts.ignore_stderr = True
187

188
def combined_output(name, opts):
    """Set the combined_output flag (stdout and stderr checked as one stream)."""
    opts.combined_output = True

191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
def use_specs( specs ):
    """
    use_specs allows one to override files based on suffixes. e.g. 'stdout',
    'stderr', 'asm', 'prof.sample', etc.

    Example use_specs({'stdout' : 'prof002.stdout'}) to make the test re-use
    prof002.stdout.

    Full Example:
    test('T5889', [only_ways(['normal']), req_profiling,
                   extra_files(['T5889/A.hs', 'T5889/B.hs']),
                   use_specs({'stdout' : 'prof002.stdout'})],
         multimod_compile,
         ['A B', '-O -prof -fno-prof-count-entries -v0'])

    """
    return lambda name, opts, s=specs: _use_specs( name, opts, s )

def _use_specs( name, opts, specs ):
    # The override files must also be copied into the test directory.
    opts.extra_files.extend(specs.values())
    opts.use_specs = specs

213
214
215
# -----

def expect_fail_for(ways):
    """Expect failure, but only in the given list of ways."""
    return lambda name, opts, w=ways: _expect_fail_for(name, opts, w)

def _expect_fail_for(name, opts, ways):
    opts.expect_fail_for = ways

221
def expect_broken(bug):
    # This test is expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken(name, opts, b)

def _expect_broken(name, opts, bug):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

Ian Lynagh's avatar
Ian Lynagh committed
230
def expect_broken_for(bug, ways):
    """Like expect_broken, but only for the given ways."""
    return lambda name, opts, b=bug, w=ways: _expect_broken_for(name, opts, b, w)

def _expect_broken_for(name, opts, bug, ways):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways
Ian Lynagh's avatar
Ian Lynagh committed
236

237
238
def record_broken(name, opts, bug):
    """Remember (bug, testdir, name) once, for --list-broken reporting."""
    entry = (bug, opts.testdir, name)
    if entry not in brokens:
        brokens.append(entry)

242
243
244
245
246
def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    if way in opts.expect_fail_for:
        return False
    return opts.expect == 'pass'

247
248
# -----

249
250
251
252
253
254
255
256
257
258
259
def fragile(bug):
    """
    Indicates that the test should be skipped due to fragility documented in
    the given ticket.
    """
    def helper(name, opts, bug=bug):
        record_broken(name, opts, bug)
        opts.skip = True
    return helper

260
def fragile_for(bug, ways):
    """
    Indicates that the test should be skipped due to fragility in the given
    test ways as documented in the given ticket.

    NOTE(review): despite the wording, the code omits the given ways rather
    than skipping the whole test — confirm this is intended.
    """
    def helper(name, opts, bug=bug, ways=ways):
        record_broken(name, opts, bug)
        opts.omit_ways += ways
    return helper

# -----

273
def omit_ways(ways):
    """Exclude the given list of ways from this test."""
    return lambda name, opts, w=ways: _omit_ways(name, opts, w)

def _omit_ways(name, opts, ways):
    assert ways.__class__ is list
    opts.omit_ways += ways
279
280
281

# -----

282
def only_ways(ways):
    """Run this test only in the given list of ways."""
    return lambda name, opts, w=ways: _only_ways(name, opts, w)

def _only_ways(name, opts, ways):
    opts.only_ways = ways

# -----

290
def extra_ways(ways):
    """Additionally run this test in the given list of ways."""
    return lambda name, opts, w=ways: _extra_ways(name, opts, w)

def _extra_ways(name, opts, ways):
    opts.extra_ways = ways

# -----

298
def set_stdin(file):
    """Use `file` as the test program's standard input."""
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin(name, opts, f):
    opts.stdin = f

# -----

def exit_code(val):
    """Expect the test program to exit with status `val`."""
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code(name, opts, v):
    opts.exit_code = v

312
313
def signal_exit_code(val):
    """Expect the test program to die from signal number `val`."""
    # Solaris reports the raw signal number as the exit status.  When an
    # application running on Linux receives a fatal error signal, its exit
    # code is encoded as 128 + signal value; see
    # http://www.tldp.org/LDP/abs/html/exitcodes.html.  Mac OS X appears
    # to behave the same way (at least the OS X builder suggests so).
    if opsys('solaris2'):
        return exit_code(val)
    return exit_code(val + 128)
322

323
324
# -----

325
326
def compile_timeout_multiplier(val):
    """Scale the compile-time timeout for this test by factor `val`."""
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier(name, opts, v):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier(val):
    """Scale the run-time timeout for this test by factor `val`."""
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier(name, opts, v):
    opts.run_timeout_multiplier = v
336
337
338

# -----

339
def extra_run_opts(val):
    """Pass `val` as additional command-line options when running the test."""
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts(name, opts, v):
    opts.extra_run_opts = v

345
346
# -----

Simon Marlow's avatar
Simon Marlow committed
347
def extra_hc_opts(val):
    """Pass `val` as additional options to the compiler for this test."""
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts(name, opts, v):
    opts.extra_hc_opts = v

# -----

355
def extra_clean(files):
    """Obsolete no-op, retained so existing .T files keep working.

    TODO. Remove all calls to extra_clean.
    """
    return lambda _name, _opts: None
358

359
360
361
362
363
364
def extra_files(files):
    """Declare additional files this test depends on (copied to the testdir)."""
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

365
366
# -----

367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
#     - 'all', in which case all 3 possible metrics are collected and compared.
#     - The specific metric one wants to use in the test.
#     - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all', deviation=20):
    """Collect perf metrics about the compiler itself (see comment above)."""
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    """Collect perf metrics about the generated code's runtime behaviour."""
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
394
def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
    """Configure `opts` to collect the given perf metrics with the given
    allowed deviation (percent).  `metrics` may be 'all', one metric name,
    or a list of metric names.  Internal helper for collect_stats and
    collect_compiler_stats."""
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        # Previously the result of failBecause(...) was discarded here, so
        # invalid names slipped through silently.  Report them the same way
        # test() does for bad test names.
        framework_fail(name, 'bad_name', 'This test has an invalid name.')

    # Normalize metrics to a list of strings.
    if isinstance(metrics, str):
        if metrics == 'all':
            metrics = testing_metrics()
        else:
            metrics = [metrics]

    opts.is_stats_test = True
    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the
    # results useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = True

    for metric in metrics:
        # Bind `metric` as a default argument so each closure captures its
        # own metric, not the loop variable's final value.
        def baselineByWay(way, target_commit, metric=metric):
            return Perf.baseline_metric( \
                              target_commit, name, config.test_env, metric, way)

        opts.stats_range_fields[metric] = (baselineByWay, deviation)
420

421
422
# -----

423
def when(b, f):
    """Apply setup function `f` only if condition `b` holds; otherwise no-op."""
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup function `f` only if condition `b` does NOT hold."""
    return when(not b, f)

ian@well-typed.com's avatar
ian@well-typed.com committed
434
435
436
def doing_ghci():
    """True when the 'ghci' way is among the configured run ways."""
    return 'ghci' in config.run_ways

437
438
439
440
441
442
443
444
def requires_th(name, opts):
    """
    Mark a test as requiring TemplateHaskell. Currently this means
    that we don't run the test in the profasm way when GHC is
    dynamically-linked, since we can't load profiled objects in this case.
    """
    return when(ghc_dynamic(), omit_ways(['profasm']))

445
def ghc_dynamic():
    """True if the compiler under test is dynamically linked."""
    return config.ghc_dynamic

def fast():
    """True when the testsuite is running at top speed (speed == 2)."""
    return config.speed == 2

def platform(plat):
    """True if the configured platform string equals `plat`."""
    return config.platform == plat

def opsys(os):
    """True if the configured operating system equals `os`."""
    return config.os == os

def arch(arch):
    """True if the configured architecture equals `arch`."""
    return config.arch == arch

def wordsize(ws):
    """True if the configured word size equals `ws` (compared as a string)."""
    return config.wordsize == str(ws)

def msys():
    """True when running under MSYS."""
    return config.msys

def cygwin():
    """True when running under Cygwin."""
    return config.cygwin

def have_vanilla():
    """True if vanilla (unprofiled, static) libraries are available."""
    return config.have_vanilla

def have_ncg():
    """True if the native code generator is available."""
    return config.have_ncg

def have_dynamic():
    """True if dynamic libraries are available."""
    return config.have_dynamic

def have_profiling():
    """True if profiling libraries are available."""
    return config.have_profiling

def in_tree_compiler():
    """True when testing an in-tree (freshly built) compiler."""
    return config.in_tree_compiler

def unregisterised():
    """True for an unregisterised build of the compiler."""
    return config.unregisterised

def compiler_profiled():
    """True if the compiler itself was built with profiling."""
    return config.compiler_profiled

def compiler_debugged():
    """True if the compiler itself was built with debugging."""
    return config.compiler_debugged

def have_gdb():
    """True if gdb is available on this machine."""
    return config.have_gdb

def have_readelf():
    """True if readelf is available on this machine."""
    return config.have_readelf

def integer_gmp():
    """True if the integer-gmp package is available."""
    return have_library("integer-gmp")

def integer_simple():
    """True if the integer-simple package is available."""
    return have_library("integer-simple")

def llvm_build():
    """True if the compiler under test was built by LLVM."""
    return config.ghc_built_by_llvm

508
509
# ---

510
def high_memory_usage(name, opts):
    """Run this memory-hungry test alone, not concurrently with others."""
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

def literate(name, opts):
    """Mark the test source as literate (.lhs)."""
    opts.literate = True

def c_src(name, opts):
    """Mark the test source as C."""
    opts.c_src = True

def objc_src(name, opts):
    """Mark the test source as Objective-C."""
    opts.objc_src = True

def objcpp_src(name, opts):
    """Mark the test source as Objective-C++."""
    opts.objcpp_src = True

def cmm_src(name, opts):
    """Mark the test source as C-- (.cmm)."""
    opts.cmm_src = True
533

534
def outputdir(odir):
    """Put this test's compilation output into directory `odir`."""
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir(name, opts, odir):
    opts.outputdir = odir

540
541
# ----

542
def pre_cmd(cmd):
    """Record shell command `cmd` to be run before the test itself."""
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd(name, opts, cmd):
    opts.pre_cmd = cmd

# ----

550
def cmd_prefix(prefix):
    """Prefix the test's command line with string `prefix`."""
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix(name, opts, prefix):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper(fun):
    """Transform the test's command line with function `fun`."""
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper(name, opts, fun):
    opts.cmd_wrapper = fun
563

564
565
# ----

Ian Lynagh's avatar
Ian Lynagh committed
566
def compile_cmd_prefix(prefix):
    """Prefix the compile command with string `prefix`."""
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix(name, opts, prefix):
    opts.compile_cmd_prefix = prefix

# ----

574
575
576
577
578
579
def check_stdout(f):
    """Install a custom function `f` used to check the test's stdout."""
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout(name, opts, f):
    opts.check_stdout = f

def no_check_hp(name, opts):
    """Disable checking of the heap profile for this test."""
    opts.check_hp = False

583
584
# ----

585
586
587
588
589
590
def filter_stdout_lines(regex):
    """ Filter lines of stdout with the given regular expression """
    def setup(name, opts):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return setup

591
def normalise_slashes(name, opts):
    """Register the normalise_slashes_ normaliser for this test's output."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe(name, opts):
    """Register the normalise_exe_ normaliser for this test's output."""
    _normalise_fun(name, opts, normalise_exe_)
596

597
598
def normalise_fun(*fs):
    """Register extra output normalisers for this test."""
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun(name, opts, *fs):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
602

603
604
def normalise_errmsg_fun(*fs):
    """Register extra stderr normalisers for this test."""
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun(name, opts, *fs):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

Joachim Breitner's avatar
Joachim Breitner committed
609
610
611
612
613
614
615
616
617
618
619
620
621
def check_errmsg(needle):
    """Replace stderr with a message saying whether `needle` occurred in it."""
    def norm(str):
        found = needle in str
        return ("%s contained in -ddump-simpl\n" % needle) if found \
               else ("%s not contained in -ddump-simpl\n" % needle)
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    """Keep only the stderr lines matching regex `needle`."""
    def norm(str):
        return "".join(l for l in str.splitlines(True) if re.search(needle, l))
    return normalise_errmsg_fun(norm)

622
623
624
625
626
627
def normalise_whitespace_fun(f):
    """Install `f` as this test's whitespace normaliser."""
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f

628
629
630
631
632
633
634
635
636
637
638
def normalise_version_(*pkgs):
    """Return a normaliser replacing '<pkg>-<version>' with '<pkg>-<VERSION>'
    for each of the given package names."""
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normalise_version__(str):
        return re.sub(pattern, '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version(*pkgs):
    """Normalise package versions in both stdout and stderr."""
    def normalise_version__(name, opts):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__
639

thomie's avatar
thomie committed
640
641
642
643
def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    def fix_drive(s):
        return re.sub(r'[A-Z]:\\', r'C:\\', s)
    _normalise_fun(name, opts, fix_drive)

644
645
646
647
648
649
650
def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True

651
652
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))

    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # `collections.Iterable` was removed in Python 3.10; the ABC
            # lives in `collections.abc`.
            if (isinstance(el, collections.abc.Iterable)
                and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        # Bind f and fn as defaults so each wrapper captures its own pair.
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn

682
683
684
# ----
# Function for composing two opt-fns together

685
def executeSetups(fs, name, opts):
    """Apply a setup function, or recursively each element of a (possibly
    nested) list of setup functions, to (name, opts)."""
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for setup in fs:
            executeSetups(setup, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)
693

694
695
696
# -----------------------------------------------------------------------------
# The current directory of tests

697
698
def newTestDir(tempdir, dir):
    """Reset the directory-default settings for the tests that follow."""
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = (lambda name, opts, tempdir=tempdir, dir=dir:
                        _newTestDir(name, opts, tempdir, dir))
704

705
706
# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'
707
708

def _newTestDir(name, opts, tempdir, dir):
    """Fill in the directory-dependent fields of `opts` for one test."""
    opts.srcdir = os.path.join(os.getcwd(), dir)
    # Strip any '..' components so the testdir stays inside tempdir.
    sanitised = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.testdir = os.path.join(tempdir, sanitised, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

717
718
parallelTests = []
aloneTests = []
719
allTestNames = set([])
720

721
# Run one test, either on a worker thread (bounded by pool_sema, i.e. at
# most config.threads tests at once) or inline on the calling thread.
def runTest(watcher, opts, name, func, args):
    if config.use_threads:
        # Block until a pool slot is free; the matching release happens
        # in test_common_thread's finally block.
        pool_sema.acquire()
        # NOTE: this local 't' shadows the module-level summary 't'
        # imported from testglobals.
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)
731

732
# name  :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    """Entry point used by .T files to register one test.

    Validates the name, applies the directory defaults plus the per-test
    `setup` to a private copy of the default options, then queues the test
    to run either alone or in parallel.
    """
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    # Directory-wide settings run first, then this test's own setup.
    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
768

769
if config.use_threads:
    # Worker-thread wrapper around test_common_work: guarantees the pool
    # slot acquired in runTest is released even if the test raises.
    def test_common_thread(watcher, name, opts, func, args):
            try:
                test_common_work(watcher, name, opts, func, args)
            finally:
                pool_sema.release()
775

776
777
778
779
780
781
782
783
784
def get_package_cache_timestamp():
    """Return the mtime of the package-db cache file, or 0.0 when it is
    unconfigured or cannot be stat'ed."""
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        # Only absorb filesystem errors; a bare `except:` would also
        # swallow KeyboardInterrupt/SystemExit.
        except OSError:
            return 0.0

Edward Z. Yang's avatar
Edward Z. Yang committed
785
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
786

787
def test_common_work(watcher, name, opts, func, args):
    """Run one registered test in every applicable way.

    Determines the candidate ways from the test function, filters them by
    the test's options and the global config, stages the test's source
    files, runs each remaining way, and cleans up.  `watcher` is always
    notified on exit, even on unhandled exceptions.
    """
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        # Snapshot the package-db cache mtime so we can detect a test that
        # mutates the package database (checked again at the end).
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        # A way survives when the test isn't skipped, matches only_ways /
        # cmdline_ways / omit_ways, and respects the perf-test filters.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and not f == name and
                          not f.endswith(testdir_suffix) and
                          not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                # Ctrl-C: stop scheduling further work, don't fail the test.
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # A changed mtime means this test wrote to the shared package db.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()
Ian Lynagh's avatar
Ian Lynagh committed
893

894
895
896
def do_test(name, way, func, args, files):
    opts = getTestOpts()

897
898
    full_name = name + '(' + way + ')'

899
    progress_args = [ full_name, t.total_tests, len(allTestNames),
900
901
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
902
903
904
905
906
907
908
909
         len(t.framework_failures)]]
    if_verbose(2, "=====> {0} {1} of {2} {3}".format(*progress_args))

    # Update terminal title
    # useful progress indicator even when make test VERBOSE=1
    if config.supports_colors:
        print("\033]0;{0} {1} of {2} {3}\007".format(*progress_args), end="")
        sys.stdout.flush()
910
911
912
913

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
914
    os.makedirs(opts.testdir)
915
916
917
918
919
920
921
922
923

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
924
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
925
        if os.path.isfile(src):
926
            link_or_copy_file(src, dst)
927
        elif os.path.isdir(src):
928
929
            if os.path.exists(dst):
                shutil.rmtree(dst)
930
            os.mkdir(dst)
931
932
            lndir(src, dst)
        else:
933
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
934
935
936
937
938
939
940
941
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

Ben Gamari's avatar
Ben Gamari committed
942
    if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
943
944
945
946
947
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
948
            with io.open(src_makefile, 'r', encoding='utf8') as src:
949
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
950
                with io.open(dst_makefile, 'w', encoding='utf8') as dst:
951
                    dst.write(makefile)
952

953
    if opts.pre_cmd:
954
955
956
957
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

958
959
        # If user used expect_broken then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
960
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
961
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
962

963
    result = func(*[name,way] + args)
964

965
966
    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)
967

968
969
    try:
        passFail = result['passFail']
970
    except (KeyError, TypeError):
971
        passFail = 'No passFail found'
972

973
974
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

975
976
    if passFail == 'pass':
        if _expect_pass(way):
977
            t.expected_passes.append(TestResult(directory, name, "", way))
978
            t.n_expected_passes += 1
979
980
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
981
            t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))