    The actual launching of all cases in the Cases dictionary. Note that here
    only the PBS files are written and not the actual htc files.

    Parameters
    ----------

    cases : dict
        Dictionary with the case name as key and another dictionary as value.
        The latter holds all the tag/value pairs used in the respective
        simulation.

    verbose : boolean, default=False
        Print more status messages when True.

    runmethod : {'local' (default), 'local-ram', 'local-script',
                 'linux-script', 'windows-script', 'jess', 'gorm', 'none'}
        Specify how/what to run where. For 'local', each case in cases is
        run locally via python directly. If set to 'local-script' or
        'linux-script', a shell script is written to run all cases locally
        sequentially. If set to 'jess' or 'gorm', PBS scripts are written
        for the respective cluster.
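
    Examples
    --------
    A minimal usage sketch, assuming ``cases`` holds the case dictionaries
    created beforehand and all other keyword arguments keep their defaults::

        launch(cases, runmethod='linux-script')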
    """

    random_case = list(cases.keys())[0]
    sim_id = cases[random_case]['[sim_id]']
    pbs_out_dir = cases[random_case]['[pbs_out_dir]']

    if runmethod == 'local-script' or runmethod == 'linux-script':
        local_shell_script(cases, sim_id)
    elif runmethod == 'windows-script':
        local_windows_script(cases, sim_id, nr_cpus=windows_nr_cpus)
    elif runmethod in ['jess','gorm']:
        # create the pbs object
        pbs = PBS(cases, server=runmethod, short_job_names=short_job_names,
                  pbs_fname_appendix=pbs_fname_appendix, qsub=qsub)
        pbs.wine_appendix = wine_appendix
        pbs.copyback_turb = copyback_turb
        pbs.verbose = verbose
        pbs.pbs_out_dir = pbs_out_dir
        pbs.create()
    elif runmethod == 'local':
        cases = run_local(cases, silent=silent, check_log=check_log)
    elif runmethod == 'local-ram':
        cases = run_local_ram(cases, check_log=check_log)
    elif runmethod == 'none':
        pass
    else:
        msg = ('unsupported runmethod, valid options: local, local-ram, '
               'local-script, linux-script, windows-script, jess, gorm '
               'or none')
        raise ValueError(msg)

def post_launch(cases, save_iter=False):
    """
    Do some basics checks: do all launched cases have a result and LOG file
    and are there any errors in the LOG files?

    Parameters
    ----------

    cases : str or dict
        If a string, it is interpreted as the path to a pickled cases
        dictionary and loaded from file. Otherwise it is used as the cases
        dictionary directly.
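
    Examples
    --------
    A minimal sketch, loading a previously pickled cases dictionary from
    file (hypothetical path)::

        cases_fail = post_launch('path/to/cases.pkl', save_iter=False)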
    """

    # TODO: finish support for default location of the cases and file name
    # two scenarios: either pass the cases and get the post processing path
    # from there, or pass the sim_id and load the cases from the default
    # location
    # in case of run_local, do not check PBS!

    # in case it is a path, load the cases
    if isinstance(cases, str):
        cases = load_pickled_file(cases)

    # save output to a textfile and print to screen at the same time
    LOG = Log()
    LOG.print_logging = True

    # load one case dictionary from the cases to get data that is the same
    # over all simulations in the cases
    try:
        master = list(cases.keys())[0]
    except IndexError:
        print('there are no cases, aborting...')
        return None
    post_dir = cases[master]['[post_dir]']
    sim_id = cases[master]['[sim_id]']
    run_dir = cases[master]['[run_dir]']
    log_dir = cases[master]['[log_dir]']

    # check for how many of the created cases there are actually result and
    # log files
    pbs = PBS(cases)
    pbs.cases = cases
    cases_fail = pbs.check_results(cases)

    # add the failed cases to the LOG:
    LOG.add(['number of failed cases: ' + str(len(cases_fail))])
    LOG.add(list(cases_fail))
    # for k in cases_fail:
    #     print(k)

    # initiate the object to check the log files
    errorlogs = ErrorLogs(cases=cases)
    LOG.add(['checking ' + str(len(cases)) + ' LOG files...'])
    nr = 1
    nr_tot = len(cases)

    tmp = list(cases.keys())[0]
    print('checking logs, path (from a random item in cases):')
    print(os.path.join(run_dir, log_dir))

    for k in sorted(cases.keys()):
        # a case might not have a result file, but a log file might still exist
        if k.endswith('.htc'):
            kk = k[:-4] + '.log'
        else:
            kk = k + '.log'
        # note that if errorlogs.PathToLogs is a file, it will only check that
        # file. If it is a directory, it will check all that is in the dir
        run_dir = cases[k]['[run_dir]']
        log_dir = cases[k]['[log_dir]']
        errorlogs.PathToLogs = os.path.join(run_dir, log_dir, kk)
        try:
            errorlogs.check(save_iter=save_iter)
            print('checking logfile progress: ' + str(nr) + '/' + str(nr_tot))
        except IOError:
            print('           no logfile for:  %s' % (errorlogs.PathToLogs))
        except Exception as e:
            print('  log analysis failed for: %s' % kk)
            print(e)
        nr += 1

        # if the simulation did not end correctly, put it on the fail list
        try:
            if not errorlogs.MsgListLog2[kk][1]:
                cases_fail[k] = cases[k]
        except KeyError:
            pass

    # now see how many cases resulted in an error and add to the general LOG
    # determine how long the first case name is
    try:
        spacing = len(list(errorlogs.MsgListLog2.keys())[0]) + 9
    except Exception:
        print('nr of OK cases: %i' % (len(cases) - len(cases_fail)))
        raise
    LOG.add(['display log check'.ljust(spacing) + 'found_error?'.ljust(15) + \
            'exit_correctly?'])
    for k in errorlogs.MsgListLog2:
        LOG.add([k.ljust(spacing)+str(errorlogs.MsgListLog2[k][0]).ljust(15)+\
            str(errorlogs.MsgListLog2[k][1]) ])
    # save the extended (.csv format) errorlog list?
    # but put in one level up, so in the logfiles folder directly
    errorlogs.ResultFile = sim_id + '_ErrorLog.csv'
    # save the log file analysis in the run_dir instead of the log_dir
    errorlogs.PathToLogs = run_dir  # + log_dir
    errorlogs.save()

    # save the error LOG list, this is redundant, since it already exists in
    # the general LOG file (but only as a print, not the python variable)
    tmp = os.path.join(post_dir, sim_id + '_MsgListLog2')
    save_pickle(tmp, errorlogs.MsgListLog2)

    # save the list of failed cases
    save_pickle(os.path.join(post_dir, sim_id + '_fail.pkl'), cases_fail)

    return cases_fail

def logcheck_case(errorlogs, cases, case, silent=False):
    """
    Check logfile of a single case
    ==============================

    Given the cases and a case name, check that single case for errors in
    its logfile.
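
    Example (a minimal sketch, assuming ``cases`` was loaded beforehand)::

        errorlogs = ErrorLogs(cases=cases)
        case = list(cases.keys())[0]
        errorlogs = logcheck_case(errorlogs, cases, case)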

    """

    #post_dir = cases[case]['[post_dir]']
    #sim_id = cases[case]['[sim_id]']
    run_dir = cases[case]['[run_dir]']
    log_dir = cases[case]['[log_dir]']
    if case.endswith('.htc'):
        caselog = case[:-4] + '.log'
    else:
        caselog = case + '.log'
    errorlogs.PathToLogs = os.path.join(run_dir, log_dir, caselog)
    errorlogs.check()

    # in case we find an error, abort or not?
    errors = errorlogs.MsgListLog2[caselog][0]
    exitcorrect = errorlogs.MsgListLog2[caselog][1]
    if errors:
        # print all error messages
        #logs.MsgListLog : [ [case, line nr, error1, line nr, error2, ....], ]
        # difficult: MsgListLog is not a dict!!
        #raise UserWarning, 'HAWC2 simulation has errors in logfile, abort!'
        #warnings.warn('HAWC2 simulation has errors in logfile!')
        logging.warning('HAWC2 simulation has errors in logfile!')
    elif not exitcorrect:
        #raise UserWarning, 'HAWC2 simulation did not end correctly, abort!'
        #warnings.warn('HAWC2 simulation did not end correctly!')
        logging.warning('HAWC2 simulation did not end correctly!')

    # no need to do that, aborts on failure anyway and OK log check will be
    # printed in run_local when also printing how long it took to check
    #if not silent:
        #print 'log checks ok'
        #print '   found error: %s' % errorlogs.MsgListLog2[caselog][0]
        #print 'exit correctly: %s' % errorlogs.MsgListLog2[caselog][1]

    return errorlogs

    ## save the extended (.csv format) errorlog list?
    ## but put in one level up, so in the logfiles folder directly
    #errorlogs.ResultFile = sim_id + '_ErrorLog.csv'
    ## use the model path of the last encountered case in cases
    #errorlogs.PathToLogs = run_dir + log_dir
    #errorlogs.save()


class Log(object):
    """
    Class for convinient logging. Create an instance and add lines to the
    logfile as a list with the function add.
    The added items will be printed if
        self.print_logging = True. Default value is False

    Create the instance, add with .add('lines') (lines=list), save with
    .save(target), print(current log to screen with .printLog()
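
    Example (hypothetical target path)::

        LOG = Log()
        LOG.print_logging = True
        LOG.add(['first line', 'second line'])
        LOG.save('/tmp/example.log')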
    """
    def __init__(self):
        self.log = []
        # option, should the lines added to the log be printed as well?
        self.print_logging = False
        self.file_mode = 'a'

    def add(self, lines):
        # the input is a list, where each entry is considered as a new line
        for k in lines:
            self.log.append(k)
            if self.print_logging:
                print(k)

    def save(self, target):
        # treat every item in the log list as a new line
        with open(target, self.file_mode) as f:
            for k in self.log:
                f.write(k + '\n')
        # and empty the log again
        self.log = []

    def printscreen(self):
        for k in self.log:
            print(k)

class HtcMaster(object):
    """
    Create htc files from a master (template) htc file: all [tag]s in the
    master file are replaced with the case specific values set in self.tags.
    """

    def __init__(self, verbose=False, silent=False):
        """
        """

        # TODO: make HtcMaster callable, so that when called you actually
        # set a value for a certain tag or add a new one. In doing so,
        # you can actually warn when you are overwriting a tag, or when
        # a different tag has the same name, etc

        # create a dictionary with the tag name as key and its default as value
        self.tags = dict()

        # should we print where the file is written?
        self.verbose = verbose
        self.silent = silent

        # following tags are required
        #---------------------------------------------------------------------
        self.tags['[case_id]'] = None

        self.tags['[master_htc_file]'] = None
        self.tags['[master_htc_dir]'] = None
        # path to model zip file, needs to be accessible from the server,
        # relative to the directory where the pbs files are launched on the
        # server. Suggestion is to always place the zip file in the model
        # folder, so only the zip file name has to be defined
        self.tags['[model_zip]'] = None

        # path to HAWTOPT blade result file: quasi/res/blade.dat
        self.tags['[blade_hawtopt_dir]'] = None
        self.tags['[blade_hawtopt]'] = None
        self.tags['[zaxis_fact]'] = 1.0
        # TODO: rename to execution dir, that description fits much better!
        self.tags['[run_dir]'] = None
        #self.tags['[run_dir]'] = '/home/dave/tmp/'

        # following dirs are relative to the run_dir!!
        # they indicate the location of the SAVED (!!) results, they can be
        # different from the execution dirs on the node which are set in PBS
        self.tags['[hawc2_exe]'] = 'hawc2mb.exe'
        self.tags['[data_dir]'] = 'data/'
        self.tags['[res_dir]'] = 'results/'
        self.tags['[iter_dir]'] = 'iter/'
        self.tags['[log_dir]'] = 'logfiles/'
        self.tags['[turb_dir]'] = 'turb/'
        self.tags['[meand_dir]'] = None
        self.tags['[turb_db_dir]'] = None
        self.tags['[wake_db_dir]'] = None
        self.tags['[meand_db_dir]'] = None
        self.tags['[control_dir]'] = 'control/'
        self.tags['[externalforce]'] = 'externalforce/'
        self.tags['[animation_dir]'] = 'animation/'
        self.tags['[eigenfreq_dir]'] = 'eigenfreq/'
        self.tags['[wake_dir]'] = 'wake/'
        self.tags['[meander_dir]'] = 'meander/'
        self.tags['[htc_dir]'] = 'htc/'
        self.tags['[mooring_dir]'] = 'mooring/'
        self.tags['[hydro_dir]'] = 'htc_hydro/'
        self.tags['[pbs_out_dir]'] = 'pbs_out/'
        self.tags['[turb_base_name]'] = None
        self.tags['[wake_base_name]'] = None
        self.tags['[meand_base_name]'] = None
        self.tags['[zip_root_files]'] = []

        self.tags['[fname_source]'] = []
        self.tags['[fname_default_target]'] = []
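        # hypothetical example: copy a case specific source file onto the
        # generic name the master htc file refers to (see copy_model_data()
        # and create_model_zip()):
        # self.tags['[fname_source]'] = ['data/wsp10_shear02.dat']
        # self.tags['[fname_default_target]'] = ['data/wind_input.dat']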

        self.tags['[eigen_analysis]'] = False

        self.tags['[pbs_queue_command]'] = '#PBS -q workq'
        # the express queue has 2 thyra nodes with max walltime of 1h
#        self.tags['[pbs_queue_command]'] = '#PBS -q xpresq'
        # walltime should have following format: hh:mm:ss
        self.tags['[walltime]'] = '04:00:00'

#        self.queue = Queue.Queue()

        self.output_dirs = ['[res_dir]', '[log_dir]', '[turb_dir]',
                            '[case_id]', '[wake_dir]', '[animation_dir]',
                            '[meand_dir]', '[eigenfreq_dir]']

    def create_run_dir(self):
        """
        If non-existent, create run_dir and all required model sub directories
        """

        dirkeys = ['[data_dir]', '[htc_dir]', '[res_dir]', '[log_dir]',
                   '[eigenfreq_dir]', '[animation_dir]', '[turb_dir]',
                   '[wake_dir]', '[meander_dir]', '[opt_dir]', '[control_dir]',
                   '[mooring_dir]', '[hydro_dir]', '[externalforce]']

        # create all the necessary directories
        for dirkey in dirkeys:
            if self.tags[dirkey]:
                path = os.path.join(self.tags['[run_dir]'], self.tags[dirkey])
                if not os.path.exists(path):
                    os.makedirs(path)

    # TODO: copy_model_data and create_model_zip should be the same.
    def copy_model_data(self):
        """

        Copy the model data to the execution folder

        """

        # in case we are running local and the model dir is the server dir
        # we do not need to copy the data files, they are already on location
        data_local = os.path.join(self.tags['[model_dir_local]'],
                                  self.tags['[data_dir]'])
        data_run = os.path.join(self.tags['[run_dir]'], self.tags['[data_dir]'])
        if not data_local == data_run:

            # copy root files
            model_root = self.tags['[model_dir_local]']
            run_root = self.tags['[run_dir]']
            for fname in self.tags['[zip_root_files]']:
                shutil.copy2(model_root + fname, run_root + fname)

            # copy special files with changing file names
            if '[ESYSMooring_init_fname]' in self.tags:
                if self.tags['[ESYSMooring_init_fname]'] is not None:
                    fname_source = self.tags['[ESYSMooring_init_fname]']
                    fname_target = 'ESYSMooring_init.dat'
                    shutil.copy2(model_root + fname_source,
                                 run_root + fname_target)

            # copy the master file into the htc/_master dir
            src = os.path.join(self.tags['[master_htc_dir]'],
                               self.tags['[master_htc_file]'])
            # FIXME: htc_dir can contain the DLC folder name
            dst = os.path.join(self.tags['[run_dir]'], 'htc', '_master')
            if not os.path.exists(dst):
                os.makedirs(dst)
            shutil.copy2(src, dst)

            # copy all content of the following dirs
            dirs = [self.tags['[control_dir]'], self.tags['[hydro_dir]'],
                    self.tags['[mooring_dir]'], self.tags['[externalforce]'],
                    self.tags['[data_dir]'], 'htc/DLCs/']
            plocal = self.tags['[model_dir_local]']
            prun = self.tags['[run_dir]']

            # copy all files present in the specified folders
            for path in dirs:
                if not path:
                    continue
                elif not os.path.exists(os.path.join(plocal, path)):
                    continue
                for root, dirs, files in os.walk(os.path.join(plocal, path)):
                    for file_name in files:
                        src = os.path.join(root, file_name)
                        dst = root.replace(os.path.abspath(plocal),
                                           os.path.abspath(prun))
                        if not os.path.exists(dst):
                            os.makedirs(dst)
                        dst = os.path.join(dst, file_name)
                        shutil.copy2(src, dst)

            # and last copies: the files with generic input names
            if not isinstance(self.tags['[fname_source]'], list):
                raise ValueError('[fname_source] needs to be a list')
            if not isinstance(self.tags['[fname_default_target]'], list):
                raise ValueError('[fname_default_target] needs to be a list')
            len1 = len(self.tags['[fname_source]'])
            len2 = len(self.tags['[fname_default_target]'])
            if len1 != len2:
                raise ValueError('[fname_source] and [fname_default_target] '
                                 'need to have the same number of items')
            for i in range(len1):
                src = os.path.join(plocal, self.tags['[fname_source]'][i])
                dst = os.path.join(prun, self.tags['[fname_default_target]'][i])
                if not os.path.exists(os.path.dirname(dst)):
                    os.makedirs(os.path.dirname(dst))
                shutil.copy2(src, dst)

    # TODO: copy_model_data and create_model_zip should be the same.
    def create_model_zip(self):
        """

        Create the model zip file based on the master tags file settings.
        """

        # FIXME: all directories should be called through their appropriate tag!

        #model_dir = HOME_DIR + 'PhD/Projects/Hawc2Models/'+MODEL+'/'
        model_dir_server = self.tags['[run_dir]']
        model_dir_local = self.tags['[model_dir_local]']

        # ---------------------------------------------------------------------
        # create the zipfile object locally
        zf = zipfile.ZipFile(model_dir_local + self.tags['[model_zip]'],'w')

        # empty folders, they'll hold the outputs
        # zf.write(source, target in zip, )
        # TODO: use user defined directories here and in PBS
        # note that they need to be same as defined in the PBS script. We
        # manually set these up instead of just copying the original.

#        animation_dir = self.tags['[animation_dir]']
#        eigenfreq_dir = self.tags['[eigenfreq_dir]']
#        logfiles_dir = self.tags['[log_dir]']
#        results_dir = self.tags['[res_dir]']
#        htc_dir = self.tags['[htc_dir]']
        htcmaster = self.tags['[master_htc_file]']

        control_dir = self.tags['[control_dir]']
        htcmaster_dir = self.tags['[master_htc_dir]']
        data_dir = self.tags['[data_dir]']
        turb_dir = self.tags['[turb_dir]']
        wake_dir = self.tags['[wake_dir]']
        meander_dir = self.tags['[meander_dir]']
        mooring_dir = self.tags['[mooring_dir]']
        hydro_dir = self.tags['[hydro_dir]']
        extforce = self.tags['[externalforce]']
        # result dirs are not required, HAWC2 will create them
        dirs = [control_dir, data_dir, extforce, turb_dir, wake_dir,
                 meander_dir, mooring_dir, hydro_dir]
        for zipdir in dirs:
            if zipdir:
                zf.write('.', zipdir + '.', zipfile.ZIP_DEFLATED)
        zf.write('.', 'htc/_master/.', zipfile.ZIP_DEFLATED)

        # if any, add files that should be added to the root of the zip file
        for file_name in self.tags['[zip_root_files]']:
            zf.write(model_dir_local+file_name, file_name, zipfile.ZIP_DEFLATED)

        if '[ESYSMooring_init_fname]' in self.tags:
            if self.tags['[ESYSMooring_init_fname]'] is not None:
                fname_source = self.tags['[ESYSMooring_init_fname]']
                fname_target = 'ESYSMooring_init.dat'
                zf.write(model_dir_local + fname_source, fname_target,
                         zipfile.ZIP_DEFLATED)

        # the master file
        src = os.path.join(htcmaster_dir, htcmaster)
        dst = os.path.join('htc', '_master', htcmaster)
        zf.write(src, dst, zipfile.ZIP_DEFLATED)

        # manually add all that resides in control, mooring and hydro
        paths = [control_dir, mooring_dir, hydro_dir, extforce, data_dir]
        for target_path in paths:
            if not target_path:
                continue
            path_src = os.path.join(model_dir_local, target_path)
            for root, dirs, files in os.walk(path_src):
                for file_name in files:
                    # print('adding', file_name)
                    src = os.path.join(root, file_name)
                    # the zip file only contains the relative paths
                    rel_dst = root.replace(os.path.abspath(model_dir_local), '')
                    if os.path.isabs(rel_dst):
                        rel_dst = rel_dst[1:]
                    rel_dst = os.path.join(rel_dst, file_name)
                    zf.write(src, rel_dst, zipfile.ZIP_DEFLATED)

        # and last copies: the files with generic input names
        if not isinstance(self.tags['[fname_source]'], list):
            raise ValueError('[fname_source] needs to be a list')
        if not isinstance(self.tags['[fname_default_target]'], list):
            raise ValueError('[fname_default_target] needs to be a list')
        len1 = len(self.tags['[fname_source]'])
        len2 = len(self.tags['[fname_default_target]'])
        if len1 != len2:
            raise ValueError('[fname_source] and [fname_default_target] '
                             'need to have the same number of items')
        for i in range(len1):
            src = os.path.join(model_dir_local, self.tags['[fname_source]'][i])
            # the zip file only contains the relative paths
            rel_dst = self.tags['[fname_default_target]'][i]
            # we can not have an absolute path here, make sure it isn't
            if os.path.isabs(rel_dst):
                rel_dst = rel_dst[1:]
            zf.write(src, rel_dst, zipfile.ZIP_DEFLATED)

        # and close again
        zf.close()

        # ---------------------------------------------------------------------
        # copy zip file to the server, this will be used on the nodes
        src = model_dir_local  + self.tags['[model_zip]']
        dst = model_dir_server + self.tags['[model_zip]']

        # in case we are running local and the model dir is the server dir
        # we do not need to copy the zip file, it is already on location
        if not src == dst:
            shutil.copy2(src, dst)

        ## copy to zip data file to sim_id htc folder on the server dir
        ## so we now have exactly all data to relaunch any htc file later
        #dst  = model_dir_server + self.tags['[htc_dir]']
        #dst += self.tags['[model_zip]']
        #shutil.copy2(src, dst)

    def _sweep_tags(self):
        """
        The original way with all tags in the htc file for each blade node
        """
        # set the correct sweep curve; these values are used by the
        # [sweep_curve_def] expression that is eval'd below
        a = self.tags['[sweep_amp]']
        b = self.tags['[sweep_exp]']
        z0 = self.tags['[sweep_curve_z0]']
        ze = self.tags['[sweep_curve_ze]']
        nr = self.tags['[nr_nodes_blade]']
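        # a hypothetical example of what [sweep_curve_def] could hold, with
        # a, b, z0, ze and z in scope for the eval():
        # self.tags['[sweep_curve_def]'] = 'a*((z - z0)/(ze - z0))**b'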
        # format for the x values in the htc file
        ff = ' 1.03f'
        for zz in range(nr):
            it_nosweep = '[x'+str(zz+1)+'-nosweep]'
            item = '[x'+str(zz+1)+']'
            z = self.tags['[z'+str(zz+1)+']']
            if z >= z0:
                curve = eval(self.tags['[sweep_curve_def]'])
                # new swept position = original + sweep curve
                self.tags[item]=format(self.tags[it_nosweep]+curve,ff)
            else:
                self.tags[item]=format(self.tags[it_nosweep], ff)

    def _staircase_windramp(self, nr_steps, wind_step, ramptime, septime):
        """Create a stair case wind ramp


        """

        pass

    def _all_in_one_blade_tag(self, radius_new=None):
        """
        Create htc input based on a HAWTOPT blade result file

        Automatically get the number of nodes correct in master.tags based
        on the number of blade nodes

        WARNING: initial x position of the half chord point is assumed to be
        zero

        zaxis_fact : float, default=1.0 --> is a member of the default tags
            Factor for the htc z-axis coordinates. The htc z axis is mapped to
            the HAWTOPT radius. If the blade radius develops in negative z
            direction, set to -1

        Parameters
        ----------

        radius_new : ndarray(n), default=None
            z coordinates of the nodes. If None, a linear distribution is
            used and the tag [nr_nodes_blade] sets the number of nodes


        """
        # TODO: implement support for x position to be other than zero

        # TODO: This is not a good place, should live somewhere else. Or
        # reconsider inputs etc so there is more freedom in changing the
        # location of the nodes, set initial x position of the blade etc

        # and save under tag [blade_htc_node_input] in htc input format

        nr_nodes = self.tags['[nr_nodes_blade]']

        blade = self.tags['[blade_hawtopt]']
        # in the htc file, blade root =0 and not blade hub radius
        blade[:,0] = blade[:,0] - blade[0,0]

        if radius_new is None:
            # interpolate to the specified number of nodes
            radius_new = np.linspace(blade[0,0], blade[-1,0], nr_nodes)

        # Data checks on radius_new
        elif not isinstance(radius_new, np.ndarray):
            raise ValueError('radius_new has to be either NoneType or ndarray')
        else:
            if not len(radius_new.shape) == 1:
                raise ValueError('radius_new has to be 1D')
            elif not len(radius_new) == nr_nodes:
                msg = 'radius_new has to have ' + str(nr_nodes) + ' elements'
                raise ValueError(msg)

        # save the nodal positions in the tag cloud
        self.tags['[blade_nodes_z_positions]'] = radius_new

        # make sure the last point of radius_new is just slightly smaller
        # than the last point of the low resolution radius
        radius_new[-1] = blade[-1,0]-0.00000001
        twist_new = interpolate.griddata(blade[:,0], blade[:,2], radius_new)
        # blade_new is the htc node input part:
        # sec 1   x     y     z   twist;
        blade_new = np.zeros((len(radius_new), 4))
        blade_new[:,2] = radius_new*self.tags['[zaxis_fact]']
        # twist angle remains the same in either case (standard/ojf rotation)
        blade_new[:,3] = twist_new*-1.

        # set the correct sweep curve; these values are used by the
        # [sweep_curve_def] expression that is eval'd below
        a = self.tags['[sweep_amp]']
        b = self.tags['[sweep_exp]']
        z0 = self.tags['[sweep_curve_z0]']
        ze = self.tags['[sweep_curve_ze]']
        tmp = 'nsec ' + str(nr_nodes) + ';'
        for k in range(nr_nodes):
            tmp += '\n'
            i = k+1
            z = blade_new[k,2]
            y = blade_new[k,1]
            twist = blade_new[k,3]
            # x position, sweeping?
            if z >= z0:
                x = eval(self.tags['[sweep_curve_def]'])
            else:
                x = 0.0

            # the node number
            tmp += '        sec ' + format(i, '2.0f')
            tmp += format(x, ' 11.03f')
            tmp += format(y, ' 11.03f')
            tmp += format(z, ' 11.03f')
            tmp += format(twist, ' 11.03f')
            tmp += ' ;'

        self.tags['[blade_htc_node_input]'] = tmp

        # and create the ae file
        #5	Blade Radius [m] 	Chord[m]  T/C[%]  Set no. of pc file
        #1 25 some comments
        #0.000     0.100    21.000   1
        nr_points = blade.shape[0]
        tmp2 = '1  Blade Radius [m] Chord [m] T/C [%] pc file set nr\n'
        tmp2 += '1  %i auto generated by _all_in_one_blade_tag()' % nr_points

        for k in range(nr_points):
            tmp2 += '\n'
            tmp2 += '%9.3f %9.3f %9.3f' % (blade[k,0], blade[k,1], blade[k,3])
            tmp2 += ' %4i' % (k+1)
        # end with newline
        tmp2 += '\n'

        # TODO: finish writing file, implement proper handling of hawtopt path
        # and save the file
        #if self.tags['aefile']
        #write_file(file_path, tmp2, 'w')

    def loadmaster(self):
        """
        Load the master file, path to master file is defined in
        __init__(): target, master. Additionally, find all the tags in the
        master file. Note that tags [] in the label and comment sections are
        ignored.

        All the tags that are found in the master file are saved in the
        self.tags_in_master dictionary, with the line numbers in a list as
        values:
        tags_in_master[tagname] = [line nr occurrence 1, line nr occurrence 2, ]
        note that tagname includes the []
        """

        # what is faster, load the file in one string and do replace()?
        # or the check error log approach?
        fpath  = os.path.join(self.tags['[master_htc_dir]'],
                              self.tags['[master_htc_file]'])
        # load the file:
        if not self.silent:
            print('loading master: ' + fpath)
        with open(fpath, 'r') as f:
            lines = f.readlines()

        # regex for finding all tags in a line
        regex = re.compile(r'(\[.*?\])')
        self.tags_in_master = {}

        # convert to string:
        self.master_str = ''
        for i, line in enumerate(lines):
            # are there any tags on this line? Ignore comment AND label section
            tags = regex.findall(line.split(';')[0].split('#')[0])
            for tag in tags:
                try:
                    self.tags_in_master[tag].append(i)
                except KeyError:
                    self.tags_in_master[tag] = [i]
            # save for later
            self.master_str += line

    def createcase_check(self, htc_dict_repo, \
                            tmp_dir='/tmp/HawcPyTmp/', write_htc=True):
        """
        Check if a certain case name already exists in a specified htc_dict.
        If true, return a message and do not create the case. It can be that
        either the case name is a duplicate and should be named differently,
        or that the simulation is a duplicate and it shouldn't be repeated.
        """

        # is the [case_id] tag unique, given the htc_dict_repo?
        if self.verbose:
            print('checking if following case is in htc_dict_repo: ')
            print(self.tags['[case_id]'] + '.htc')

        if self.tags['[case_id]'] + '.htc' in htc_dict_repo:
            # if the new case_id already exists in the htc_dict_repo
            # do not add it again!
            # print('case_id key is not unique in the given htc_dict_repo!')
            raise UserWarning('case_id key is not unique in the given '
                              'htc_dict_repo!')
        else:
            htc = self.createcase(tmp_dir=tmp_dir, write_htc=write_htc)
            return htc

    def createcase(self, tmp_dir='/tmp/HawcPyTmp/', write_htc=True):
        """
        replace all the tags from the master file and save the new htc file
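
        A sketch of the replacement rules (hypothetical tag values)::

            # {'[windspeed]': 10} : 'wsp [windspeed] ;' -> 'wsp 10 ;'
            # a True boolean tag  : replaced by '' (htc line stays active)
            # a False boolean tag : replaced by ';' (htc line gets commented)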
        """

        htc = self.master_str

        # and now replace all the tags in the htc master file
        # when iterating over a dict, it will give the key, given in the
        # corresponding format (string keys as strings, int keys as ints...)
        for k in self.tags:
            # TODO: give error if a default is not defined, like null
            # if it is a boolean, replace with ; or blank
            if isinstance(self.tags[k], bool):
                if self.tags[k]:
                    # we have a boolean that is True, switch it on
                    value = ''
                else:
                    value = ';'
            else:
                value = self.tags[k]
            # if string is not found, it will do nothing
            htc = htc.replace(str(k), str(value))

        # and save the case htc file:
        cname = self.tags['[case_id]'] + '.htc'

        htc_target = os.path.join(self.tags['[run_dir]'], self.tags['[htc_dir]'])
        if not self.silent:
            print('htc will be written to: ')
            print('  ' + htc_target)
            print('  ' + cname)

        # and write the htc file to the temp dir first
        if write_htc:
            self.write_htc(cname, htc, htc_target)
#            thread = threading.Thread(target=self.write_htc,
#                                      args=(cname, htc, htc_target))
#            thread.daemon = True
#            thread.start()
        # save all the tags for debugging purposes
        if self.verbose:
            tmp = ''
            for key in sorted(self.tags.keys()):
                value = self.tags[key]
                rpl = (key.rjust(25), str(value).ljust(70),
                       type(key).__name__.ljust(10), type(value).__name__)
                tmp += '%s -- %s -- %s -- %s\n' % rpl
            write_file(htc_target + cname + '.tags', tmp, 'w')

        # return the used tags, some parameters can be used later, such as the
        # turbulence name in the pbs script
        # return as a dictionary, to be used in htc_dict
        # return a copy of the tags, otherwise you will not catch changes
        # made to the different tags in your sim series
        return {cname : copy.copy(self.tags)}

    def write_htc(self, cname, htc, htc_target):
        # create subfolder if necessary
        if not os.path.exists(htc_target):
            os.makedirs(htc_target)
        write_file(htc_target + cname, htc, 'w')
        # write_file(tmp_dir + case, htc, 'w')

    def lower_case_output(self):
        """
        force lower case tags on output files since HAWC2 will force them to
        lower case anyway
        """

        for key in self.output_dirs:
            if isinstance(self.tags[key], str):
                self.tags[key] = self.tags[key].lower()


class PBS(object):
    """
    The parts of this class that write the actual pbs script (the functions
    create(), starting() and ending()) are based on the MS Excel macro
    written by Torben J. Larsen.

    Input is a list with htc file names, and a dict with the other paths,
    such as the turbulence file and folder, the htc folder and others.
    """

    def __init__(self, cases, server='gorm', qsub='time',
                 pbs_fname_appendix=True, short_job_names=True):
        """
        Define the settings here. This should be done outside, but how?
        In a text file, a parameters list, or first create the object and
        then set the non-standard values?

        where htc_dict is a dictionary with
            [key=case name, value=used_tags_dict]

        where the tags are as outputted by MasterFile (a dict with the
        chosen options)

        For gorm, maxcpu is set to 1, do not change otherwise you might need to
        change the scratch dir handling.

        qsub : str
            'time' or 'depend'. For 'time', each job needs to get a start
            time, which has to be set by replacing [start_time]. For
            'depend', a job dependency chain is established via [nodeps]
            and [job_id]. When it is neither of the above, no tag has to be
            replaced and the pbs file can be submitted as is. Use qsub=None
            in combination with the launch.Scheduler.

        short_job_names : boolean, default=True
            How should the job be named (relevant for the PBS queueing system)?
            When True, it will be named like HAWC2_123456. With False, the
            case_id will be used as job name.
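
        Example
        -------
        A minimal sketch, mirroring how launch() drives this class::

            pbs = PBS(cases, server='gorm', qsub='time')
            pbs.copyback_turb = True
            pbs.create()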

        """
        self.server = server
        self.verbose = True

#        if server == 'thyra':
#            self.maxcpu = 4
#            self.secperiter = 0.020
        if server == 'gorm':
            self.maxcpu = 1
            self.secperiter = 0.012
        elif server == 'jess':
            self.maxcpu = 1
            self.secperiter = 0.012
        else:
            raise UserWarning('server support only for jess or gorm')

        # the output channels come with a price tag. Each time step
        # will have a penalty depending on the number of output channels

        self.iterperstep = 8.0 # average nr of iterations per time step
        # lead time: account for time losses when starting a simulation,
        # copying the turbulence data, generating the turbulence
        self.tlead = 5.0*60.0
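        # a rough per case walltime estimate combines these (a sketch, the
        # actual calculation is assembled in create()):
        # walltime ~ nr_time_steps*iterperstep*secperiter + tlead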

        # use pbs job name as prefix in the pbs file name
        self.pbs_fname_appendix = pbs_fname_appendix
        self.short_job_names = short_job_names
        # pbs job name prefix
        self.pref = 'HAWC2_'
        # the actual script starts empty
        self.pbs = ''

        # FIXME: this goes wrong when Morten does it directly on the cluster
        # the resulting PBS script has too many slashes !
        self.wine = 'time WINEARCH=win32 WINEPREFIX=~/.wine32 wine'
        # in case you want to redirect stdout to /dev/nul
#        self.wine_appendix = '> /dev/null 2>&1'
        self.wine_appendix = ''
        self.wine_dir = '/home/dave/.wine32/drive_c/bin'
        # /dev/shm should be the RAM of the cluster
#        self.node_run_root = '/dev/shm'
        self.node_run_root = '/scratch'

        self.cases = cases

        # location of the output messages .err and .out created by the node
        self.pbs_out_dir = 'pbs_out/'
        self.pbs_in_dir = 'pbs_in/'

        # for the start number, take hour/minute combo
        d = datetime.datetime.today()
        tmp = int( str(d.hour)+format(d.minute, '02.0f') )*100
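        # e.g. at 13:45 this evaluates to int('1345')*100 = 134500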
        self.pbs_start_number = tmp
        self.qsub = qsub

#        if quemethod == 'time':
#            self.que_jobdeps = False
#        elif type(quemethod).__name__ == 'int':
#            nr_cpus = quemethod
#            self.que_jobdeps = True
#            nr_jobs = len(cases)
#            jobs_per_cpu = int(math.ceil(float(nr_jobs)/float(nr_cpus)))
#            # precalculate all the job ids
#            self.jobid_deps = []
#            self.jobid_list = []
#            count2 = self.pbs_start_number
#            for cpu in range(nr_cpus):
#                self.jobid_list.extend(range(count2, count2+jobs_per_cpu))
#                # the first job to start does not have a dependency
#                self.jobid_deps.append(None)
#                self.jobid_deps.extend(range(count2, count2+jobs_per_cpu-1))
#                count2 += jobs_per_cpu

        self.copyback_turb = True
        self.copyback_fnames = []
        self.copyback_fnames_rename = []
        self.copyto_generic = []
        self.copyto_fname = []

    def create(self):
        """
        Main loop for creating the pbs scripts, based on cases, which
        contains the case name as key and the tag dictionary as value
        """

        # dynamically set walltime based on the number of time steps
        # for thyra, make a list so we base the walltime on the slowest case
        self.nr_time_steps = []
        self.duration = []
        self.t0 = []
        # '[time_stop]' '[dt_sim]'

        # REMARK: this is not really consistent with how the result and log
        # file dirs are allowed to change for each individual case...
        # first check if the pbs_out_dir exists, this dir is considered to be
        # the same for all cases present in cases
        # self.tags['[run_dir]']
        case0 = list(self.cases.keys())[0]
        path = self.cases[case0]['[run_dir]'] + self.pbs_out_dir
        if not os.path.exists(path):
            os.makedirs(path)

        # create pbs_in base dir
        path = self.cases[case0]['[run_dir]'] + self.pbs_in_dir
        if not os.path.exists(path):
            os.makedirs(path)

        # number the pbs jobs:
        count2 = self.pbs_start_number
        # cpu count starts at one
        count1 = 1
        # scan through all the cases
        i, i_tot = 1, len(self.cases)
        ended = True

        for case in self.cases:

            # get a shorter version for the current cases tag_dict: