diff --git a/wetb/prepost/DataChecks.py b/wetb/prepost/DataChecks.py
new file mode 100644
index 0000000000000000000000000000000000000000..7447bc753f466a8c0dfaa3e7c6870733ac71904d
--- /dev/null
+++ b/wetb/prepost/DataChecks.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Mar  5 16:00:02 2012
+
+@author: dave
+"""
+
+# time and data should be 1 dimensional arrays
+def array_1d(array):
+    """
+    Check if the given array has only one dimension. The following shapes
+    will return True:
+        (x,), (x,1) and (1,x)
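+
+    Illustrative doctest-style example (assumes a numpy ndarray as input):
+
+    >>> import numpy as np
+    >>> array_1d(np.arange(10))
+    True
+    >>> array_1d(np.arange(10).reshape(10, 1))
+    True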
+    """
+    if not len(array.shape) == 1:
+        # in case it has (samples,1) or (1,samples) as dimensions
+        if len(array.shape) == 2:
+            if (array.shape[0] == 1) or (array.shape[1] == 1):
+                return True
+        # anything other than 1D, (x,1) or (1,x) is not accepted
+        raise ValueError('only 1D arrays are accepted')
+
+    return True
diff --git a/wetb/prepost/Simulations.py b/wetb/prepost/Simulations.py
new file mode 100755
index 0000000000000000000000000000000000000000..fc0e4fe31ba33441f6a8a9c53c9d9dd6b117d14e
--- /dev/null
+++ b/wetb/prepost/Simulations.py
@@ -0,0 +1,5174 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  1 15:16:34 2011
+
+@author: dave
+__author__ = "David Verelst <dave@dtu.dk>"
+__license__ = "GPL-2+"
+"""
+
+from __future__ import division
+from __future__ import print_function
+#print(*objects, sep=' ', end='\n', file=sys.stdout)
+
+# standard python library
+import os
+import subprocess as sproc
+import copy
+import zipfile
+import shutil
+import datetime
+import math
+import pickle
+import re
+# what is actually the difference between warnings and logging.warn?
+# for which context is which better?
+#import warnings
+import logging
+from operator import itemgetter
+from time import time
+#import Queue
+#import threading
+
+# numpy and scipy only used in HtcMaster._all_in_one_blade_tag
+import numpy as np
+import scipy
+import scipy.interpolate as interpolate
+#import matplotlib.pyplot as plt
+import pandas as pd
+import tables as tbl
+
+# custom libraries
+import misc
+import windIO
+import prepost
+try:
+    import fatigue_tools.dlc_fatigue as dlc_ft
+except ImportError:
+    print('can not import fatigue_tools.dlc_fatigue')
+
+def load_pickled_file(source):
+    FILE = open(source, 'rb')
+    result = pickle.load(FILE)
+    FILE.close()
+    return result
+
+def save_pickle(source, variable):
+    FILE = open(source, 'wb')
+    pickle.dump(variable, FILE, protocol=2)
+    FILE.close()
+
+def write_file(file_path, file_contents, mode):
+    """
+    INPUT:
+        file_path    : path/to/file/name.csv
+        file_contents: the file contents as a single string
+        mode         : reading (r), writing (w), append (a),...
+    """
+
+    FILE = open(file_path, mode)
+    FILE.write(file_contents)
+    FILE.close()
+
+def create_multiloop_list(iter_dict, debug=False):
+    """
+    Create a list based on multiple nested loops
+    ============================================
+
+    Consider the following example
+
+    >>> for v in range(V_start, V_end, V_delta):
+    ...     for y in range(y_start, y_end, y_delta):
+    ...         for c in range(c_start, c_end, c_delta):
+    ...             print v, y, c
+
+    Could be replaced by a list with all these combinations. In order to
+    replicate this with create_multiloop_list, iter_dict should have
+    the following structure
+
+    >>> iter_dict = dict()
+    >>> iter_dict['v'] = range(V_start, V_end, V_delta)
+    >>> iter_dict['y'] = range(y_start, y_end, y_delta)
+    >>> iter_dict['c'] = range(c_start, c_end, c_delta)
+    >>> iter_list = create_multiloop_list(iter_dict)
+    >>> for case in iter_list:
+    ...     print case['v'], case['y'], case['c']
+
+    Parameters
+    ----------
+
+    iter_dict : dictionary
+        Key holds a valid tag as used in HtcMaster.tags. The corresponding
+        value should be a list of values to be considered.
+
+    Output
+    ------
+
+    iter_list : list
+        List containing dictionaries. Each entry is a combination of the
+        given iter_dict keys.
+
+    Example
+    -------
+
+    >>> iter_dict={'[wind]':[5,6,7],'[coning]':[0,-5,-10]}
+    >>> create_multiloop_list(iter_dict)
+    [{'[wind]': 5, '[coning]': 0},
+     {'[wind]': 5, '[coning]': -5},
+     {'[wind]': 5, '[coning]': -10},
+     {'[wind]': 6, '[coning]': 0},
+     {'[wind]': 6, '[coning]': -5},
+     {'[wind]': 6, '[coning]': -10},
+     {'[wind]': 7, '[coning]': 0},
+     {'[wind]': 7, '[coning]': -5},
+     {'[wind]': 7, '[coning]': -10}]
+    """
+
+    iter_list = []
+
+    # fix the order of the keys
+    key_order = iter_dict.keys()
+    nr_keys = len(key_order)
+    nr_values,indices = [],[]
+    # determine how many items on each key
+    for key in key_order:
+        # each value needs to be an iterable! len() will fail if it isn't
+        # count how many values there are for each key
+        if not isinstance(iter_dict[key], list):
+            print('%s does not hold a list' % key)
+            raise ValueError('Each value in iter_dict has to be a list!')
+        nr_values.append(len(iter_dict[key]))
+        # create an initial indices list
+        indices.append(0)
+
+    if debug: print(nr_values, indices)
+
+    go_on = True
+    # keep track on which index you are counting, start at the back
+    loopkey = nr_keys -1
+    cc = 0
+    while go_on:
+        if debug: print(indices)
+
+        # Each entry on the list is a dictionary with the parameter combination
+        iter_list.append(dict())
+
+        # save all the different combination into one list
+        for keyi in range(len(key_order)):
+            key = key_order[keyi]
+            # add the current combination of values as one dictionary
+            iter_list[cc][key] = iter_dict[key][indices[keyi]]
+
+        # +1 on the indices of the last entry, the overflow principle
+        indices[loopkey] += 1
+
+        # cycle backwards through all dimensions and propagate the +1 if the
+        # current dimension is full. Hence overflow.
+        for k in range(loopkey,-1,-1):
+            # if the current dimension is over its max, set to zero and change
+            # the dimension of the next. Remember we are going backwards
+            if not indices[k] < nr_values[k] and k > 0:
+                # +1 on the index of the previous dimension
+                indices[k-1] += 1
+                # set current loopkey index back to zero
+                indices[k] = 0
+                # if the previous dimension is not on max, break out
+                if indices[k-1] < nr_values[k-1]:
+                    break
+            # if we are on the last dimension, break out if that is also on max
+            elif k == 0 and not indices[k] < nr_values[k]:
+                if debug: print(cc)
+                go_on = False
+
+        # fail safe exit mechanism...
+        if cc > 20000:
+            raise UserWarning('multiloop_list has already '+str(cc)+' items..')
+
+        cc += 1
+
+    return iter_list
+
+def local_shell_script(htc_dict, sim_id):
+    """
+    """
+    shellscript = ''
+    breakline = '"' + '*'*80 + '"'
+    nr_cases = len(htc_dict)
+    nr = 1
+    for case in htc_dict:
+        shellscript += 'echo ""' + '\n'
+        shellscript += 'echo ' + breakline + '\n' + 'echo '
+        shellscript += '" ===> Progress:'+str(nr)+'/'+str(nr_cases)+'"\n'
+        # get a shorter version for the current cases tag_dict:
+        scriptpath = os.path.join(htc_dict[case]['[run_dir]'], 'runall.sh')
+        try:
+            hawc2_exe = htc_dict[case]['[hawc2_exe]']
+        except KeyError:
+            hawc2_exe = 'hawc2mb.exe'
+        htc_dir = htc_dict[case]['[htc_dir]']
+        # WINEDEBUG=-all suppresses all wine debug/warning messages
+        wine = 'WINEARCH=win32 WINEPREFIX=~/.wine32 wine'
+        htc_target = os.path.join(htc_dir, case)
+        shellscript += '%s %s %s \n' % (wine, hawc2_exe, htc_target)
+        shellscript += 'echo ' + breakline + '\n'
+        nr+=1
+
+    write_file(scriptpath, shellscript, 'w')
+    print('\nrun local shell script written to:')
+    print(scriptpath)
+
+def local_windows_script(cases, sim_id, nr_cpus=2):
+    """
+    """
+
+    tot_cases = len(cases)
+    i_script = 1
+    i_case_script = 1
+    cases_per_script = int(math.ceil(float(tot_cases)/float(nr_cpus)))
+    # header of the new script, each process has its own copy
+    header = ''
+    header += 'rem\nrem\n'
+    header += 'mkdir _%i_\n'
+    # copy the data folder in case it holds a lot of .dat files
+    header += 'robocopy .\data .\_%i_\data /e \n'
+    # do not copy the following stuff
+    exc_file_pat = ['*.log', '*.dat', '*.sel', '*.xls*', '*.bat']
+    exc_dir_pat = ['_*_', 'data']
+    header += 'robocopy .\ .\_%i_ /e '
+    header += (' /xf ' + ' /xf '.join(exc_file_pat))
+    header += (' /xd ' + ' /xd '.join(exc_dir_pat))
+    header += '\n'
+    header += 'cd _%i_\n'
+    header += 'rem\nrem\n'
+    footer = ''
+    footer += 'rem\nrem\n'
+    footer += 'cd ..\n'
+    footer += 'robocopy .\_%i_\ /e .\ /move\n'
+    footer += 'rem\nrem\n'
+    shellscript = header % (i_script, i_script, i_script, i_script)
+
+    stop = False
+
+    for i_case, (cname, case) in enumerate(cases.iteritems()):
+#    for i_case, case in enumerate(sorted(cases.keys())):
+
+        shellscript += 'rem\nrem\n'
+        shellscript += 'rem ===> Progress: %3i / %3i\n' % (i_case+1, tot_cases)
+        # copy turbulence from data base, if applicable
+        if case['[turb_db_dir]'] is not None:
+            # we are one dir up in cpu exe dir
+            turb = case['[turb_base_name]'] + '*.bin'
+            dbdir = os.path.join('./../', case['[turb_db_dir]'], turb)
+            dbdir = dbdir.replace('/', '\\')
+            rpl = (dbdir, case['[turb_dir]'].replace('/', '\\'))
+            shellscript += 'copy %s %s\n' % rpl
+
+        # get a shorter version for the current cases tag_dict:
+        scriptpath = '%srunall-%i.bat' % (case['[run_dir]'], i_script)
+        htcpath = case['[htc_dir]'][:-1].replace('/', '\\') # ditch the /
+        try:
+            hawc2_exe = case['[hawc2_exe]']
+        except KeyError:
+            hawc2_exe = 'hawc2mb.exe'
+        rpl = (hawc2_exe.replace('/', '\\'), htcpath, cname.replace('/', '\\'))
+        shellscript += "%s .\\%s\\%s\n" % rpl
+        # copy back to the data base directory if they do not exist there
+        # remove turbulence file again, if copied from data base
+        if case['[turb_db_dir]'] is not None:
+            # copy back if it does not exist in the data base
+            # IF EXIST "c:\test\file.ext"  (move /y "C:\test\file.ext" "C:\quality\" )
+            turbu = case['[turb_base_name]'] + 'u.bin'
+            turbv = case['[turb_base_name]'] + 'v.bin'
+            turbw = case['[turb_base_name]'] + 'w.bin'
+            dbdir = os.path.join('./../', case['[turb_db_dir]'])
+            for tu in (turbu, turbv, turbw):
+                tu_db = os.path.join(dbdir, tu).replace('/', '\\')
+                tu_run = os.path.join(case['[turb_dir]'], tu).replace('/', '\\')
+                rpl = (tu_db, tu_run, dbdir.replace('/', '\\'))
+                shellscript += 'IF NOT EXIST "%s" move /y "%s" "%s"\n' % rpl
+            # remove turbulence from run dir
+            allturb = os.path.join(case['[turb_dir]'], '*.*')
+            allturb = allturb.replace('/', '\\')
+            # do not prompt for delete confirmation: /Q
+            shellscript += 'del /Q "%s"\n' % allturb
+
+        if i_case_script >= cases_per_script:
+            # footer: copy all files back
+            shellscript += footer % i_script
+            stop = True
+            write_file(scriptpath, shellscript, 'w')
+            print('\nrun local shell script written to:')
+            print(scriptpath)
+
+            # header of the new script, each process has its own copy
+            # but only if there are actually jobs left
+            if i_case+1 < tot_cases:
+                i_script += 1
+                i_case_script = 1
+                shellscript = header % (i_script, i_script, i_script, i_script)
+                stop = False
+        else:
+            i_case_script += 1
+
+    # we might have missed the footer of a partial script
+    if not stop:
+        shellscript += footer % i_script
+        write_file(scriptpath, shellscript, 'w')
+        print('\nrun local shell script written to:')
+        print(scriptpath)
+
+def run_local_ram(cases, check_log=True):
+
+    ram_root = '/tmp/HAWC2/'
+
+    if not os.path.exists(ram_root):
+        os.makedirs(ram_root)
+
+    print('copying data from run_dir to RAM...', end='')
+
+    # first copy everything to RAM
+    for ii, case in enumerate(cases):
+        # all tags for the current case
+        tags = cases[case]
+        run_dir = copy.copy(tags['[run_dir]'])
+        run_dir_ram = ram_root + tags['[sim_id]']
+        if not os.path.exists(run_dir_ram):
+            os.makedirs(run_dir_ram)
+        # and also change the run dir so we can launch it easily
+        tags['[run_dir]'] = run_dir_ram + '/'
+        for root, dirs, files in os.walk(run_dir):
+            run_dir_base = os.path.commonprefix([root, run_dir])
+            cdir = root.replace(run_dir_base, '')
+            dstbase = os.path.join(run_dir_ram, cdir)
+            if not os.path.exists(dstbase):
+                os.makedirs(dstbase)
+            for fname in files:
+                src = os.path.join(root, fname)
+                dst = os.path.join(dstbase, fname)
+                shutil.copy2(src, dst)
+
+    print('done')
+
+    # launch from RAM
+    run_local(cases, check_log=check_log)
+    # change run_dir back to original
+    for ii, case in enumerate(cases):
+        tags = cases[case]
+        tags['[run_dir]'] = run_dir
+
+    print('copying data from RAM back to run_dir')
+    print('run_dir: %s' % run_dir)
+
+    # and copy everything back
+    for root, dirs, files in os.walk(run_dir_ram):
+        run_dir_base = os.path.commonprefix([root, run_dir_ram])
+        cdir = root.replace(run_dir_base, '')
+        # in case it is the same level as run_dir
+        if len(cdir) == 0:
+            dstbase = run_dir
+        # join doesn't work if cdir has a leading / so drop it
+        elif cdir[0] == '/':
+            dstbase = os.path.join(run_dir, cdir[1:])
+        else:
+            dstbase = os.path.join(run_dir, cdir)
+        for fname in files:
+            src = os.path.join(root, fname)
+            dst = os.path.join(dstbase, fname)
+            if not os.path.exists(dstbase):
+                os.makedirs(dstbase)
+            try:
+                shutil.copy2(src, dst)
+            except Exception as e:
+                print('src:', src)
+                print('dst:', dst)
+                print(e)
+                print()
+                pass
+
+    print('...done')
+
+    return cases
+
+
+def run_local(cases, silent=False, check_log=True):
+    """
+    Run all HAWC2 simulations locally from cases
+    ===============================================
+
+    Run all cases present in a cases dict locally and wait until HAWC2 is done.
+
+    In verbose mode, each HAWC2 simulation is also timed
+
+    Parameters
+    ----------
+
+    cases : dict{ case : dict{tag : value} }
+        Dictionary where each case is a key and its value a dictionary holding
+        all the tags/value pairs as used for that case
+
+    check_log : boolean, default=True
+        Check the log file immediately after execution of the HAWC2 case
+
+    silent : boolean, default=False
+        When False, useful information will be printed and the HAWC2
+        simulation time will be calculated from the Python perspective. The
+        silent variable is also passed on to logcheck_case
+
+    Returns
+    -------
+
+    cases : dict{ case : dict{tag : value} }
+        The cases dict updated with the STDOUT of each HAWC2 simulation
+
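+    Example
+    -------
+
+    A minimal sketch, assuming the cases dict was created earlier by
+    prepare_launch and pickled under the (hypothetical) post_dir path:
+
+    >>> cases = load_pickled_file('path/to/post_dir/sim_id.pkl')
+    >>> cases = run_local(cases, silent=False, check_log=True)
+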
+    """
+
+    # remember the current working directory
+    cwd = os.getcwd()
+    nr = len(cases)
+    if not silent:
+        print('')
+        print('='*79)
+        print('Be advised, launching %i HAWC2 simulation(s) sequentially' % nr)
+        print('run dir: %s' % cases[cases.keys()[0]]['[run_dir]'])
+        print('')
+
+    if check_log:
+        errorlogs = ErrorLogs(silent=silent)
+
+    for ii, case in enumerate(cases):
+        # all tags for the current case
+        tags = cases[case]
+        # for backward compatibility assume default HAWC2 executable
+        try:
+            hawc2_exe = tags['[hawc2_exe]']
+        except KeyError:
+            hawc2_exe = 'hawc2-latest'
+        # TODO: if a turbulence data base is set, copy the files from there
+
+        # the launch command
+        cmd  = 'WINEDEBUG=-all WINEARCH=win32 WINEPREFIX=~/.wine32 wine'
+        cmd += " %s %s%s" % (hawc2_exe, tags['[htc_dir]'], case)
+        # remove any escaping in tags and case for security reasons
+        cmd = cmd.replace('\\','')
+        # browse to the correct launch path for the HAWC2 simulation
+        os.chdir(tags['[run_dir]'])
+        # create the required directories
+        dirkeys = ['[data_dir]', '[htc_dir]', '[res_dir]', '[log_dir]',
+                   '[eigenfreq_dir]', '[animation_dir]', '[turb_dir]',
+                   '[wake_dir]', '[meander_dir]', '[opt_dir]', '[control_dir]',
+                   '[mooring_dir]', '[hydro_dir]', '[externalforce]']
+        for dirkey in dirkeys:
+            if tags[dirkey]:
+                if not os.path.exists(tags[dirkey]):
+                    os.makedirs(tags[dirkey])
+
+        if not silent:
+            start = time()
+            progress = '%4i/%i  : %s%s' % (ii+1, nr, tags['[htc_dir]'], case)
+            print('*'*75)
+            print(progress)
+
+        # and launch the HAWC2 simulation
+        p = sproc.Popen(cmd,stdout=sproc.PIPE,stderr=sproc.STDOUT,shell=True)
+
+        # p.wait() will lock the current shell until p is done
+        # p.stdout.readlines() checks if there is any output, but also locks
+        # the thread if nothing comes back
+        # save the output that HAWC2 sends to the shell to the cases
+        # note that this is a list, each item holding a line
+        cases[case]['sim_STDOUT'] = p.stdout.readlines()
+        # wait until HAWC2 finished doing its magic
+        p.wait()
+
+        if not silent:
+            # print the simulation command line output
+            print(' ' + '-'*75)
+            print(''.join(cases[case]['sim_STDOUT']))
+            print(' ' + '-'*75)
+            # calculation time
+            stp = time() - start
+            stpmin = stp/60.
+            print('HAWC2 execution time: %8.2f sec (%8.2f min)' % (stp,stpmin))
+
+        # were there any errors in the output? If yes, flag the case as failed
+        cases[case]['[hawc2_sim_ok]'] = True
+        for k in cases[case]['sim_STDOUT']:
+            kstart = k[:14]
+            if kstart in [' *** ERROR ***', 'forrtl: severe']:
+                cases[case]['[hawc2_sim_ok]'] = False
+                #raise UserWarning('Found error in HAWC2 STDOUT')
+                break
+
+        # check the log file straight away if required
+        if check_log:
+            start = time()
+            errorlogs = logcheck_case(errorlogs, cases, case, silent=silent)
+            stop = time() - start
+            if case.endswith('.htc'):
+                kk = case[:-4] + '.log'
+            else:
+                kk = case + '.log'
+            errors = errorlogs.MsgListLog2[kk][0]
+            exitok = errorlogs.MsgListLog2[kk][1]
+            if not silent:
+                print('log checks took %5.2f sec' % stop)
+                print('    found error: ', errors)
+                print(' exit correctly: ', exitok)
+                print('*'*75)
+                print()
+            # also save in cases
+            if not errors and exitok:
+                cases[case]['[hawc2_sim_ok]'] = True
+            else:
+                cases[case]['[hawc2_sim_ok]'] = False
+
+    if check_log:
+        # take the last case to determine sim_id, run_dir and log_dir
+        sim_id = cases[case]['[sim_id]']
+        run_dir = cases[case]['[run_dir]']
+        log_dir = cases[case]['[log_dir]']
+        # save the extended (.csv format) errorlog list?
+        # but put in one level up, so in the logfiles folder directly
+        errorlogs.ResultFile = sim_id + '_ErrorLog.csv'
+        # use the model path of the last encountered case in cases
+        errorlogs.PathToLogs = os.path.join(run_dir, log_dir)
+        errorlogs.save()
+
+    # just in case, change back to the original working directory
+    os.chdir(cwd)
+    if not silent:
+        print('\nHAWC2 has done all of its sequential magic!')
+        print('='*79)
+        print('')
+
+    return cases
+
+
+def prepare_launch(iter_dict, opt_tags, master, variable_tag_func,
+                write_htc=True, runmethod='local', verbose=False,
+                copyback_turb=True, msg='', silent=False, check_log=True,
+                update_cases=False, ignore_non_unique=False, wine_appendix='',
+                run_only_new=False, windows_nr_cpus=2, qsub='',
+                pbs_fname_appendix=True, short_job_names=True,
+                update_model_data=True):
+    """
+    Create the htc files, pbs scripts and replace the tags in master file
+    =====================================================================
+
+    Do not use any uppercase letters in the filenames, since HAWC2 will
+    convert all of them to lower case results file names (.sel, .dat, .log)
+
+    create sub folders according to sim_id, in order to not create one
+    folder for the htc, results, logfiles which grows very large in due
+    time!!
+
+    opt_tags is a list of dictionaries of tags:
+        [ {tag1=12,tag2=23,..},{tag1=11, tag2=33, tag9=5,...},...]
+    for each wind, yaw and coning combi, each tag dictionary in the list
+    will be set.
+
+    Make sure to always define all dictionary keys in each list, otherwise
+    the value of the first appearance will remain set for the remaining
+    simulations in the list.
+    For instance, in the example above, if tag9=5 is not set for subsequent
+    lists, tag9 will keep the value 5 for those subsequent sets.
+
+    The tags for each case are consequently set in the following order (or
+    precedence):
+        * master
+        * opt_tags
+        * iter_dict
+        * variable_tag_func
+
+    Parameters
+    ----------
+
+    iter_dict : dict
+
+    opt_tags : list
+
+    master : HtcMaster object
+
+    variable_tag_func : function object
+
+    write_htc : boolean, default=True
+
+    verbose : boolean, default=False
+
+    runmethod : {'local' (default),'jess','gorm','local-script','none',
+                 'windows-script','local-ram'}
+        Specify how/what to run where. For local, each case in cases is
+        run locally via python directly. If set to 'local-script' a shell
+        script is written to run all cases locally sequentially. If set to
+        'jess' or 'gorm', PBS scripts are written for the respective server.
+
+    msg : str, default=''
+        A descriptive message of the simulation series is saved at
+        "post_dir + master.tags['[sim_id]'] + '_tags.txt'". Additionally, this
+        tagfile also holds the opt_tags and iter_dict values.
+
+    update_cases : boolean, default=False
+        If True, a current cases dictionary can be updated with new simulations
+
+    qsub : str, default=''
+        Valid options are 'time' (use with launch), 'depend' (use with launch.py
+        --depend) or '' (use with launch.py).
+        Empty string means there are no tags placed in the pbs file, and
+        consequently the pbs file can be submitted as is. When using
+        qsub='time', a start time option is inserted with a start time tag
+        that has to be set at launch time. With 'depend', a job_id dependency
+        line is added, and when launching the job this dependency needs to be
+        specified.
+
+    update_model_data : default=True
+        If set to False, the zip file will not be created, and the data files
+        are not copied to the run_dir. Use this when only updating the htc
+        files.
+
+    Returns
+    -------
+
+    cases : dict{ case : dict{tag : value} }
+        Dictionary where each case is a key and its value a dictionary holding
+        all the tags/value pairs as used for that case
+
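+    Example
+    -------
+
+    A minimal sketch, assuming master is a configured HtcMaster instance and
+    vartag_func a user supplied variable_tag_func (both hypothetical here);
+    the tag names follow the create_multiloop_list example:
+
+    >>> iter_dict = {'[wind]' : [5, 6, 7], '[coning]' : [0, -5, -10]}
+    >>> opt_tags = []
+    >>> cases = prepare_launch(iter_dict, opt_tags, master, vartag_func,
+    ...                        runmethod='none', write_htc=True)
+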
+    """
+
+    post_dir = master.tags['[post_dir]']
+    fpath_post_base = os.path.join(post_dir, master.tags['[sim_id]'])
+    # either take a currently existing cases dictionary, or create a new one
+    if update_cases:
+        try:
+            FILE = open(fpath_post_base + '.pkl', 'rb')
+            cases = pickle.load(FILE)
+            FILE.close()
+            print('updating cases for %s' % master.tags['[sim_id]'])
+        except IOError:
+            print(79*'=')
+            print("failed to load cases dict for updating simd_id at:")
+            print(fpath_post_base + '.pkl')
+            print(79*'=')
+            cases = {}
+        # but only run the new cases
+        cases_to_run = {}
+    else:
+        cases = {}
+
+    # if empty, just create a dummy item so we get into the loops
+    if len(iter_dict) == 0:
+        iter_dict = {'__dummy__': [0]}
+    combi_list = create_multiloop_list(iter_dict)
+
+    # load the master htc file as a string under the master.tags
+    master.loadmaster()
+    # save a copy of the default values
+    mastertags_default = copy.copy(master.tags)
+
+    # if opt_tags is empty, sim_total would wrongly result in zero
+    if len(opt_tags) > 0:
+        sim_total = len(combi_list)*len(opt_tags)
+    else:
+        sim_total = len(combi_list)
+        # if no opt_tags specified, create an empty dummy tag
+        opt_tags = [dict({'__DUMMY_TAG__' : 0})]
+    sim_nr = 0
+
+    # make sure all the required directories are in place at run_dir
+#    master.create_run_dir()
+#    master.init_multithreads()
+
+    # cycle through all the combinations
+    for it in combi_list:
+        for ot in opt_tags:
+            sim_nr += 1
+            # starting point should always be the default values. This is
+            # important when a previous case had a certain tag defined, and in
+            # the next case it is absent.
+            master.tags = mastertags_default.copy()
+            # update the tags from the opt_tags list
+            if not '__DUMMY_TAG__' in ot:
+                master.tags.update(ot)
+            # update the tags set in the combi_list
+            master.tags.update(it)
+            # force lower case values as defined in output_dirs
+            master.lower_case_output()
+            # -----------------------------------------------------------
+            # start variable tags update
+            if variable_tag_func is not None:
+                master = variable_tag_func(master)
+            # end variable tags
+            # -----------------------------------------------------------
+            if not silent:
+                print('htc progress: ' + format(sim_nr, '3.0f') + '/' + \
+                       format(sim_total, '3.0f'))
+
+            if verbose:
+                print('===master.tags===\n', master.tags)
+
+            # returns a dictionary with all the tags used for this
+            # specific case
+            htc = master.createcase(write_htc=write_htc)
+            master.create_run_dir()
+            #htc=master.createcase_check(cases_repo,write_htc=write_htc)
+
+            # make sure the current cases is unique!
+            if not ignore_non_unique:
+                if htc.keys()[0] in cases:
+                    msg = 'non unique case in cases: %s' % htc.keys()[0]
+                    raise KeyError(msg)
+
+            # save in the big cases. Note that values() gives a copy!
+            cases[htc.keys()[0]] = htc.values()[0]
+            # if we have an update scenario, keep track of the cases we want
+            # to run again. This prevents us from running all cases on every
+            # update
+            if run_only_new:
+                cases_to_run[htc.keys()[0]] = htc.values()[0]
+
+            if verbose:
+                print('created cases for: %s.htc\n' % master.tags['[case_id]'])
+
+#    print(master.queue.get())
+
+    # only copy data and create zip after all htc files have been created.
+    # Note that createcase could also create other input files
+    # create the execution folder structure and copy all data to it
+    if update_model_data:
+        master.copy_model_data()
+        # create the zip file
+        master.create_model_zip()
+
+    # create directory if post_dir does not exist
+    try:
+        os.mkdir(post_dir)
+    except OSError:
+        pass
+    FILE = open(fpath_post_base + '.pkl', 'wb')
+    pickle.dump(cases, FILE, protocol=2)
+    FILE.close()
+
+    if not silent:
+        print('\ncases saved at:')
+        print(fpath_post_base + '.pkl')
+
+    # also save the iter_dict and opt_tags in a text file for easy reference
+    # or quick checks on what each sim_id actually contains
+    # sort the taglist for convenient reading/comparing
+    tagfile = msg + '\n\n'
+    tagfile += '='*79 + '\n'
+    tagfile += 'iter_dict\n'.rjust(30)
+    tagfile += '='*79 + '\n'
+    iter_dict_list = sorted(iter_dict.iteritems(), key=itemgetter(0))
+    for k in iter_dict_list:
+        tagfile += str(k[0]).rjust(30) + ' : ' + str(k[1]).ljust(20) + '\n'
+
+    tagfile += '\n'
+    tagfile += '='*79 + '\n'
+    tagfile += 'opt_tags\n'.rjust(30)
+    tagfile += '='*79 + '\n'
+    for k in opt_tags:
+        tagfile += '\n'
+        tagfile += '-'*79 + '\n'
+        tagfile += 'opt_tags set\n'.rjust(30)
+        tagfile += '-'*79 + '\n'
+        opt_dict = sorted(k.iteritems(), key=itemgetter(0), reverse=False)
+        for kk in opt_dict:
+            tagfile += str(kk[0]).rjust(30)+' : '+str(kk[1]).ljust(20) + '\n'
+    if update_cases:
+        mode = 'a'
+    else:
+        mode = 'w'
+    write_file(fpath_post_base + '_tags.txt', tagfile, mode)
+
+    if run_only_new:
+        cases = cases_to_run
+
+    launch(cases, runmethod=runmethod, verbose=verbose, check_log=check_log,
+           copyback_turb=copyback_turb, qsub=qsub, wine_appendix=wine_appendix,
+           windows_nr_cpus=windows_nr_cpus, short_job_names=short_job_names,
+           pbs_fname_appendix=pbs_fname_appendix)
+
+    return cases
+
+def prepare_relaunch(cases, runmethod='gorm', verbose=False, write_htc=True,
+                     copyback_turb=True, silent=False, check_log=True):
+    """
+    Instead of redoing everything, we now only recreate the htc files for the
+    cases in the given cases dict. Nothing else changes. The data and zip
+    files are not updated, the convenience tagfile is not recreated. However,
+    the saved (pickled) cases dict corresponding to the sim_id is updated!
+
+    This method is useful to correct mistakes made for some cases.
+
+    It is advised not to change the case_id or sim_id of the cases.
+    """
+
+    # initiate the HtcMaster object, load the master file
+    master = HtcMaster()
+    # for invariant tags, load random case. Necessary before we can load
+    # the master file, otherwise we don't know which master to load
+    master.tags = cases[cases.keys()[0]]
+    master.loadmaster()
+
+    # load the original cases dict
+    post_dir = master.tags['[post_dir]']
+    FILE = open(post_dir + master.tags['[sim_id]'] + '.pkl', 'rb')
+    cases_orig = pickle.load(FILE)
+    FILE.close()
+
+    sim_nr = 0
+    sim_total = len(cases)
+    for case, casedict in cases.iteritems():
+        sim_nr += 1
+
+        # set all the tags in the HtcMaster file
+        master.tags = casedict
+        # returns a dictionary with all the tags used for this
+        # specific case
+        htc = master.createcase(write_htc=write_htc)
+        #htc=master.createcase_check(cases_repo,write_htc=write_htc)
+
+        if not silent:
+            print('htc progress: ' + format(sim_nr, '3.0f') + '/' + \
+                   format(sim_total, '3.0f'))
+
+        if verbose:
+            print('===master.tags===\n', master.tags)
+
+        # make sure the current case already exists, otherwise we are not
+        # relaunching!
+        if case not in cases_orig:
+            msg = 'relaunch only works for existing cases: %s' % case
+            raise KeyError(msg)
+
+        # save in the big cases. Note that values() gives a copy!
+        # remark, what about the copying done at the end of master.createcase?
+        # is that redundant then?
+        cases[htc.keys()[0]] = htc.values()[0]
+
+        if verbose:
+            print('created cases for: %s.htc\n' % master.tags['[case_id]'])
+
+    launch(cases, runmethod=runmethod, verbose=verbose, check_log=check_log,
+           copyback_turb=copyback_turb, silent=silent)
+
+    # update the original file: overwrite the newly set cases
+    FILE = open(post_dir + master.tags['[sim_id]'] + '.pkl', 'wb')
+    cases_orig.update(cases)
+    pickle.dump(cases_orig, FILE, protocol=2)
+    FILE.close()
+
+def prepare_launch_cases(cases, runmethod='gorm', verbose=False,write_htc=True,
+                         copyback_turb=True, silent=False, check_log=True,
+                         variable_tag_func=None, sim_id_new=None):
+    """
+    Same as prepare_launch, but now the input is just a cases object (cao).
+    If relaunching some earlier defined simulations, make sure to at least
+    rename the sim_id, otherwise it could become messy: things end up in the
+    same folder, the sim_id post file gets overwritten, ...
+
+    In case you do not use a variable_tag_func, make sure all your tags are
+    defined in cases. First and foremost, this means that the case_id does not
+    get updated to have a new sim_id, the paths are not updated, etc.
+
+    When given a variable_tag_func, make sure it is properly
+    defined: do not base a variable tag's value on itself to avoid value chains
+
+    The master htc file will be loaded and all tags defined in the cases dict
+    will be applied to it as is.
+    """
+
+    # initiate the HtcMaster object, load the master file
+    master = HtcMaster()
+    # for invariant tags, load random case. Necessary before we can load
+    # the master file, otherwise we don't know which master to load
+    master.tags = cases[cases.keys()[0]]
+    # load the master htc file as a string under the master.tags
+    master.loadmaster()
+    # create the execution folder structure and copy all data to it
+    # but reset to the correct launch dirs first
+    sim_id = master.tags['[sim_id]']
+    if runmethod in ['local', 'local-script', 'none']:
+        path = '/home/dave/PhD_data/HAWC2_results/ojf_post/%s/' % sim_id
+        master.tags['[run_dir]'] = path
+    elif runmethod == 'jess':
+        master.tags['[run_dir]'] = '/mnt/jess/HAWC2/ojf_post/%s/' % sim_id
+    elif runmethod == 'gorm':
+        master.tags['[run_dir]'] = '/mnt/gorm/HAWC2/ojf_post/%s/' % sim_id
+    else:
+        msg = ('unsupported runmethod, options: none, local, local-script, '
+               'jess, gorm')
+        raise ValueError(msg)
+
+    master.create_run_dir()
+    master.copy_model_data()
+    # create the zip file
+    master.create_model_zip()
+
+    sim_nr = 0
+    sim_total = len(cases)
+
+    # for safety, create a new cases dict. At the end of the ride both cases
+    # and cases_new should be identical!
+    cases_new = {}
+
+    # cycle through all the combinations
+    for case, casedict in cases.iteritems():
+        sim_nr += 1
+
+        sim_id = casedict['[sim_id]']
+        # reset the launch dirs
+        if runmethod in ['local', 'local-script', 'none']:
+            path = '/home/dave/PhD_data/HAWC2_results/ojf_post/%s/' % sim_id
+            casedict['[run_dir]'] = path
+        elif runmethod == 'thyra':
+            casedict['[run_dir]'] = '/mnt/thyra/HAWC2/ojf_post/%s/' % sim_id
+        elif runmethod == 'gorm':
+            casedict['[run_dir]'] = '/mnt/gorm/HAWC2/ojf_post/%s/' % sim_id
+        else:
+            msg = ('unsupported runmethod, options: none, local, '
+                   'local-script, thyra, gorm')
+            raise ValueError(msg)
+
+        # -----------------------------------------------------------
+        # set all the tags in the HtcMaster file
+        master.tags = casedict
+        # apply the variable tags if applicable
+        if variable_tag_func:
+            master = variable_tag_func(master)
+        elif sim_id_new:
+            # TODO: finish this
+            # replace all the sim_id occurences with the updated one
+            # this means also the case_id tag changes!
+            pass
+        # -----------------------------------------------------------
+
+        # returns a dictionary with all the tags used for this specific case
+        htc = master.createcase(write_htc=write_htc)
+
+        if not silent:
+            print('htc progress: ' + format(sim_nr, '3.0f') + '/' + \
+                   format(sim_total, '3.0f'))
+
+        if verbose:
+            print('===master.tags===\n', master.tags)
+
+        # make sure the current cases is unique!
+        if htc.keys()[0] in cases_new:
+            msg = 'non unique case in cases: %s' % htc.keys()[0]
+            raise KeyError(msg)
+        # save in the big cases. Note that values() gives a copy!
+        # remark, what about the copying done at the end of master.createcase?
+        # is that redundant then?
+        cases_new[htc.keys()[0]] = htc.values()[0]
+
+        if verbose:
+            print('created cases for: %s.htc\n' % master.tags['[case_id]'])
+
+    post_dir = master.tags['[post_dir]']
+
+    # create directory if post_dir does not exists
+    try:
+        os.mkdir(post_dir)
+    except OSError:
+        pass
+    FILE = open(post_dir + master.tags['[sim_id]'] + '.pkl', 'wb')
+    pickle.dump(cases_new, FILE, protocol=2)
+    FILE.close()
+
+    if not silent:
+        print('\ncases saved at:')
+        print(post_dir + master.tags['[sim_id]'] + '.pkl')
+
+    launch(cases_new, runmethod=runmethod, verbose=verbose,
+           copyback_turb=copyback_turb, check_log=check_log)
+
+    return cases_new
+
+
+
+def launch(cases, runmethod='local', verbose=False, copyback_turb=True,
+           silent=False, check_log=True, windows_nr_cpus=2, qsub='time',
+           wine_appendix='', pbs_fname_appendix=True, short_job_names=True):
+    """
+    The actual launching of all cases in the Cases dictionary. Note that here
+    only the PBS files are written and not the actual htc files.
+
+    Parameters
+    ----------
+
+    cases : dict
+        Dictionary with the case name as key and another dictionary as value.
+        The latter holds all the tag/value pairs used in the respective
+        simulation.
+
+    verbose : boolean, default=False
+
+    runmethod : {'local' (default),'jess','gorm','linux-script','none',
+                 'windows-script','local-ram'}
+        Specify how/what to run where. For local, each case in cases is
+        run locally via python directly. If set to 'linux-script' a shell
+        script is written to run all cases locally sequentially. If set to
+        'jess' or 'gorm', PBS scripts are written for the respective server.
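+
+    Example
+    -------
+
+    A minimal sketch, assuming cases holds valid tag/value dictionaries as
+    created by prepare_launch:
+
+    >>> launch(cases, runmethod='linux-script', check_log=True)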
+    """
+
+    random_case = cases.keys()[0]
+    sim_id = cases[random_case]['[sim_id]']
+    pbs_out_dir = cases[random_case]['[pbs_out_dir]']
+
+    if runmethod == 'local-script' or runmethod == 'linux-script':
+        local_shell_script(cases, sim_id)
+    elif runmethod == 'windows-script':
+        local_windows_script(cases, sim_id, nr_cpus=windows_nr_cpus)
+    elif runmethod in ['jess','gorm']:
+        # create the pbs object
+        pbs = PBS(cases, server=runmethod, short_job_names=short_job_names,
+                  pbs_fname_appendix=pbs_fname_appendix, qsub=qsub)
+        pbs.wine_appendix = wine_appendix
+        pbs.copyback_turb = copyback_turb
+        pbs.verbose = verbose
+        pbs.pbs_out_dir = pbs_out_dir
+        pbs.create()
+    elif runmethod == 'local':
+        cases = run_local(cases, silent=silent, check_log=check_log)
+    elif runmethod =='local-ram':
+        cases = run_local_ram(cases, check_log=check_log)
+    elif runmethod == 'none':
+        pass
+    else:
+        msg = ('unsupported runmethod, valid options: local, local-script, '
+               'windows-script, local-ram, jess, gorm or none')
+        raise ValueError(msg)
+
+def post_launch(cases, save_iter=False):
+    """
+    Do some basics checks: do all launched cases have a result and LOG file
+    and are there any errors in the LOG files?
+
+    Parameters
+    ----------
+
+    cases : either a string (path to file) or the cases itself
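+
+    Example
+    -------
+
+    A minimal sketch; either pass the cases dict directly or the path to its
+    pickled version (the path shown is hypothetical):
+
+    >>> cases_fail = post_launch(cases)
+    >>> cases_fail = post_launch('path/to/post_dir/sim_id.pkl')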
+    """
+
+    # TODO: finish support for default location of the cases and file name
+    # two scenarios: either pass on a cases dict and get the post processing
+    # path from there, or pass on the sim_id and load the cases from the
+    # default location
+    # in case run_local, do not check PBS!
+
+    # in case it is a path, load the cases
+    if type(cases).__name__ == 'str':
+        cases = load_pickled_file(cases)
+
+    # saving output to textfile and print to screen at the same time
+    LOG = Log()
+    LOG.print_logging = True
+
+    # load one case dictionary from the cases to get data that is the same
+    # over all simulations in the cases
+    try:
+        master = cases.keys()[0]
+    except IndexError:
+        print('there are no cases, aborting...')
+        return None
+    post_dir = cases[master]['[post_dir]']
+    sim_id = cases[master]['[sim_id]']
+    run_dir = cases[master]['[run_dir]']
+    log_dir = cases[master]['[log_dir]']
+
+    # for how many of the created cases are there actually result, log files
+    pbs = PBS(cases)
+    pbs.cases = cases
+    cases_fail = pbs.check_results(cases)
+
+    # add the failed cases to the LOG:
+    LOG.add(['number of failed cases: ' + str(len(cases_fail))])
+    LOG.add(list(cases_fail))
+    # for k in cases_fail:
+    #    print(k
+
+    # initiate the object to check the log files
+    errorlogs = ErrorLogs(cases=cases)
+    LOG.add(['checking ' + str(len(cases)) + ' LOG files...'])
+    nr = 1
+    nr_tot = len(cases)
+
+    tmp = cases.keys()[0]
+    print('checking logs, path (from a random item in cases):')
+    print(os.path.join(run_dir, log_dir))
+
+    for k in sorted(cases.keys()):
+        # a case could not have a result, but a log file might still exist
+        if k.endswith('.htc'):
+            kk = k[:-4] + '.log'
+        else:
+            kk = k + '.log'
+        # note that if errorlogs.PathToLogs is a file, it will only check that
+        # file. If it is a directory, it will check all that is in the dir
+        run_dir = cases[k]['[run_dir]']
+        log_dir = cases[k]['[log_dir]']
+        errorlogs.PathToLogs = os.path.join(run_dir, log_dir, kk)
+        try:
+            errorlogs.check(save_iter=save_iter)
+            print('checking logfile progress: ' + str(nr) + '/' + str(nr_tot))
+        except IOError:
+            print('           no logfile for:  %s' % (errorlogs.PathToLogs))
+        except Exception as e:
+            print('  log analysis failed for: %s' % kk)
+            print(e)
+        nr += 1
+
+        # if simulation did not end correctly, put it on the fail list
+        try:
+            if not errorlogs.MsgListLog2[kk][1]:
+                cases_fail[k] = cases[k]
+        except KeyError:
+            pass
+
+    # now see how many cases resulted in an error and add to the general LOG
+    # determine how long the first case name is
+    try:
+        spacing = len(errorlogs.MsgListLog2.keys()[0]) + 9
+    except Exception as e:
+        print('nr of OK cases: %i' % (len(cases) - len(cases_fail)))
+        raise(e)
+    LOG.add(['display log check'.ljust(spacing) + 'found_error?'.ljust(15) + \
+            'exit_correctly?'])
+    for k in errorlogs.MsgListLog2:
+        LOG.add([k.ljust(spacing)+str(errorlogs.MsgListLog2[k][0]).ljust(15)+\
+            str(errorlogs.MsgListLog2[k][1]) ])
+    # save the extended (.csv format) errorlog list?
+    # but put in one level up, so in the logfiles folder directly
+    errorlogs.ResultFile = sim_id + '_ErrorLog.csv'
+    # save the log file analysis in the run_dir instead of the log_dir
+    errorlogs.PathToLogs = run_dir# + log_dir
+    errorlogs.save()
+
+    # save the error LOG list, this is redundant, since it already exists in
+    # the general LOG file (but only as a print, not the python variable)
+    tmp = os.path.join(post_dir, sim_id + '_MsgListLog2')
+    save_pickle(tmp, errorlogs.MsgListLog2)
+
+    # save the list of failed cases
+    save_pickle(os.path.join(post_dir, sim_id + '_fail.pkl'), cases_fail)
+
+    return cases_fail
+
+def logcheck_case(errorlogs, cases, case, silent=False):
+    """
+    Check logfile of a single case
+    ==============================
+
+    Given the cases and a case, check that single case on errors in the
+    logfile.
+
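+    A minimal sketch of how it is used from run_local (illustrative only,
+    assuming case is an htc file name that is a key in cases):
+
+    >>> errorlogs = ErrorLogs(silent=True)
+    >>> errorlogs = logcheck_case(errorlogs, cases, case, silent=True)
+    >>> errors = errorlogs.MsgListLog2[case[:-4] + '.log'][0]
+    >>> exitok = errorlogs.MsgListLog2[case[:-4] + '.log'][1]
+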
+    """
+
+    #post_dir = cases[case]['[post_dir]']
+    #sim_id = cases[case]['[sim_id]']
+    run_dir = cases[case]['[run_dir]']
+    log_dir = cases[case]['[log_dir]']
+    if case.endswith('.htc'):
+        caselog = case[:-4] + '.log'
+    else:
+        caselog = case + '.log'
+    errorlogs.PathToLogs = os.path.join(run_dir, log_dir, caselog)
+    errorlogs.check()
+
+    # in case we find an error, abort or not?
+    errors = errorlogs.MsgListLog2[caselog][0]
+    exitcorrect = errorlogs.MsgListLog2[caselog][1]
+    if errors:
+        # print all error messages
+        #logs.MsgListLog : [ [case, line nr, error1, line nr, error2, ....], ]
+        # difficult: MsgListLog is not a dict!!
+        #raise UserWarning, 'HAWC2 simulation has errors in logfile, abort!'
+        #warnings.warn('HAWC2 simulation has errors in logfile!')
+        logging.warn('HAWC2 simulation has errors in logfile!')
+    elif not exitcorrect:
+        #raise UserWarning, 'HAWC2 simulation did not ended correctly, abort!'
+        #warnings.warn('HAWC2 simulation did not ended correctly!')
+        logging.warn('HAWC2 simulation did not end correctly!')
+
+    # no need to do that, aborts on failure anyway and OK log check will be
+    # printed in run_local when also printing how long it took to check
+    #if not silent:
+        #print 'log checks ok'
+        #print '   found error: %s' % errorlogs.MsgListLog2[caselog][0]
+        #print 'exit correctly: %s' % errorlogs.MsgListLog2[caselog][1]
+
+    return errorlogs
+
+    ## save the extended (.csv format) errorlog list?
+    ## but put in one level up, so in the logfiles folder directly
+    #errorlogs.ResultFile = sim_id + '_ErrorLog.csv'
+    ## use the model path of the last encoutered case in cases
+    #errorlogs.PathToLogs = run_dir + log_dir
+    #errorlogs.save()
+
+
+class Log:
+    """
+    Class for convenient logging. Create an instance and add lines to the
+    logfile as a list with the method add.
+    The added items will also be printed to screen if
+        self.print_logging = True. Default value is False
+
+    Create the instance, add with .add(lines) (lines is a list), save with
+    .save(target), print the current log to screen with .printscreen()
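+
+    Illustrative use (the target path is hypothetical):
+
+    >>> LOG = Log()
+    >>> LOG.print_logging = True
+    >>> LOG.add(['number of failed cases: 0'])
+    >>> LOG.save('path/to/logfile.txt')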
+    """
+    def __init__(self):
+        self.log = []
+        # option, should the lines added to the log be printed as well?
+        self.print_logging = False
+        self.file_mode = 'a'
+
+    def add(self, lines):
+        # the input is a list, where each entry is considered as a new line
+        for k in lines:
+            self.log.append(k)
+            if self.print_logging:
+                print(k)
+
+    def save(self, target):
+        # treat every item in the log list as a new line
+        FILE = open(target, self.file_mode)
+        for k in self.log:
+            FILE.write(k + '\n')
+        FILE.close()
+        # and empty the log again
+        self.log = []
+
+    def printscreen(self):
+        for k in self.log:
+            print(k)
+
+class HtcMaster:
+    """
+    """
+
+    def __init__(self, verbose=False, silent=False):
+        """
+        """
+
+        # TODO: make HtcMaster callable, so that when called you actually
+        # set a value for a certain tag or add a new one. In doing so,
+        # you can actually warn when you are overwriting a tag, or when
+        # a different tag has the same name, etc
+
+        # create a dictionary with the tag name as key and its default value
+        self.tags = dict()
+
+        # should we print where the file is written?
+        self.verbose = verbose
+        self.silent = silent
+
+        # following tags are required
+        #---------------------------------------------------------------------
+        self.tags['[case_id]'] = None
+
+        self.tags['[master_htc_file]'] = None
+        self.tags['[master_htc_dir]'] = None
+        # path to model zip file, needs to be accessible from the server,
+        # relative to the directory where the pbs files are launched on the
+        # server. Suggestion is to always place the zip file in the model
+        # folder, so only the zip file name has to be defined
+        self.tags['[model_zip]'] = None
+
+        # path to HAWTOPT blade result file: quasi/res/blade.dat
+        self.tags['[blade_hawtopt_dir]'] = None
+        self.tags['[blade_hawtopt]'] = None
+        self.tags['[zaxis_fact]'] = 1.0
+        # TODO: rename to execution dir, that description fits much better!
+        self.tags['[run_dir]'] = None
+        #self.tags['[run_dir]'] = '/home/dave/tmp/'
+
+        # following dirs are relative to the run_dir!!
+        # they indicate the location of the SAVED (!!) results, they can be
+        # different from the execution dirs on the node which are set in PBS
+        self.tags['[hawc2_exe]'] = 'hawc2mb.exe'
+        self.tags['[data_dir]'] = 'data/'
+        self.tags['[res_dir]'] = 'results/'
+        self.tags['[iter_dir]'] = 'iter/'
+        self.tags['[log_dir]'] = 'logfiles/'
+        self.tags['[turb_dir]'] = 'turb/'
+        self.tags['[wake_dir]'] = None
+        self.tags['[meand_dir]'] = None
+        self.tags['[turb_db_dir]'] = None
+        self.tags['[wake_db_dir]'] = None
+        self.tags['[meand_db_dir]'] = None
+        self.tags['[control_dir]'] = 'control/'
+        self.tags['[externalforce]'] = 'externalforce/'
+        self.tags['[animation_dir]'] = 'animation/'
+        self.tags['[eigenfreq_dir]'] = 'eigenfreq/'
+        self.tags['[wake_dir]'] = 'wake/'
+        self.tags['[meander_dir]'] = 'meander/'
+        self.tags['[htc_dir]'] = 'htc/'
+        self.tags['[mooring_dir]'] = 'mooring/'
+        self.tags['[hydro_dir]'] = 'htc_hydro/'
+        self.tags['[pbs_out_dir]'] = 'pbs_out/'
+        self.tags['[turb_base_name]'] = 'turb_'
+        self.tags['[wake_base_name]'] = 'turb_'
+        self.tags['[meand_base_name]'] = 'turb_'
+        self.tags['[zip_root_files]'] = []
+
+        self.tags['[fname_source]'] = []
+        self.tags['[fname_default_target]'] = []
+
+        self.tags['[eigen_analysis]'] = False
+
+        self.tags['[pbs_queue_command]'] = '#PBS -q workq'
+        # the express queue has 2 thyra nodes with max walltime of 1h
+#        self.tags['[pbs_queue_command]'] = '#PBS -q xpresq'
+        # walltime should have following format: hh:mm:ss
+        self.tags['[walltime]'] = '04:00:00'
+
+#        self.queue = Queue.Queue()
+
+        self.output_dirs = ['[res_dir]', '[log_dir]', '[turb_base_name]',
+                            '[case_id]', '[wake_base_name]', '[animation_dir]',
+                            '[meand_base_name]', '[eigenfreq_dir]']
+
+    def create_run_dir(self):
+        """
+        If non existent, create run_dir and all required model sub directories
+        """
+
+        dirkeys = ['[data_dir]', '[htc_dir]', '[res_dir]', '[log_dir]',
+                   '[eigenfreq_dir]', '[animation_dir]', '[turb_dir]',
+                   '[wake_dir]', '[meander_dir]', '[opt_dir]', '[control_dir]',
+                   '[mooring_dir]', '[hydro_dir]', '[externalforce]']
+
+        # create all the necessary directories
+        for dirkey in dirkeys:
+            if self.tags[dirkey]:
+                path = os.path.join(self.tags['[run_dir]'], self.tags[dirkey])
+                if not os.path.exists(path):
+                    os.makedirs(path)
+
+    # TODO: copy_model_data and create_model_zip should be the same.
+    def copy_model_data(self):
+        """
+
+        Copy the model data to the execution folder
+
+        """
+
+        # in case we are running local and the model dir is the server dir
+        # we do not need to copy the data files, they are already on location
+        data_local = os.path.join(self.tags['[model_dir_local]'],
+                                  self.tags['[data_dir]'])
+        data_run = os.path.join(self.tags['[run_dir]'], self.tags['[data_dir]'])
+        if not data_local == data_run:
+
+            # copy root files
+            model_root = self.tags['[model_dir_local]']
+            run_root = self.tags['[run_dir]']
+            for fname in self.tags['[zip_root_files]']:
+                shutil.copy2(model_root + fname, run_root + fname)
+
+            # copy special files with changing file names
+            if '[ESYSMooring_init_fname]' in self.tags:
+                if self.tags['[ESYSMooring_init_fname]'] is not None:
+                    fname_source = self.tags['[ESYSMooring_init_fname]']
+                    fname_target = 'ESYSMooring_init.dat'
+                    shutil.copy2(model_root + fname_source,
+                                 run_root + fname_target)
+
+            # copy the master file into the htc/_master dir
+            src = os.path.join(self.tags['[master_htc_dir]'],
+                               self.tags['[master_htc_file]'])
+            # FIXME: htc_dir can contain the DLC folder name
+            dst = os.path.join(self.tags['[run_dir]'], 'htc', '_master')
+            if not os.path.exists(dst):
+                os.makedirs(dst)
+            shutil.copy2(src, dst)
+
+            # copy all content of the following dirs
+            dirs = [self.tags['[control_dir]'], self.tags['[hydro_dir]'],
+                    self.tags['[mooring_dir]'], self.tags['[externalforce]'],
+                    self.tags['[data_dir]'], 'htc/DLCs/']
+            plocal = self.tags['[model_dir_local]']
+            prun = self.tags['[run_dir]']
+
+            # copy all files present in the specified folders
+            for path in dirs:
+                if not path:
+                    continue
+                elif not os.path.exists(os.path.join(plocal, path)):
+                    continue
+                for root, dirs, files in os.walk(os.path.join(plocal, path)):
+                    for file_name in files:
+                        src = os.path.join(root, file_name)
+                        dst = root.replace(os.path.abspath(plocal),
+                                           os.path.abspath(prun))
+                        if not os.path.exists(dst):
+                            os.makedirs(dst)
+                        dst = os.path.join(dst, file_name)
+                        shutil.copy2(src, dst)
+
+            # and last copies: the files with generic input names
+            if not isinstance(self.tags['[fname_source]'], list):
+                raise ValueError('[fname_source] needs to be a list')
+            if not isinstance(self.tags['[fname_default_target]'], list):
+                raise ValueError('[fname_default_target] needs to be a list')
+            len1 = len(self.tags['[fname_source]'])
+            len2 = len(self.tags['[fname_default_target]'])
+            if len1 != len2:
+                raise ValueError('[fname_source] and [fname_default_target] '
+                                 'need to have the same number of items')
+            for i in range(len1):
+                src = os.path.join(plocal, self.tags['[fname_source]'][i])
+                dst = os.path.join(prun, self.tags['[fname_default_target]'][i])
+                if not os.path.exists(os.path.dirname(dst)):
+                    os.makedirs(os.path.dirname(dst))
+                shutil.copy2(src, dst)
+
+    # TODO: copy_model_data and create_model_zip should be the same.
+    def create_model_zip(self):
+        """
+
+        Create the model zip file based on the master tags file settings.
+
+        The zip file contents are based on the directory tags set in
+        self.tags; this method takes no arguments.
+
+
+        """
+
+        # FIXME: all directories should be called through their appropriate tag!
+
+        #model_dir = HOME_DIR + 'PhD/Projects/Hawc2Models/'+MODEL+'/'
+        model_dir_server = self.tags['[run_dir]']
+        model_dir_local = self.tags['[model_dir_local]']
+
+        # ---------------------------------------------------------------------
+        # create the zipfile object locally
+        zf = zipfile.ZipFile(model_dir_local + self.tags['[model_zip]'],'w')
+
+        # empty folders, they'll hold the outputs
+        # zf.write(source, target in zip, )
+        # TODO: use user defined directories here and in PBS
+        # note that they need to be same as defined in the PBS script. We
+        # manually set these up instead of just copying the original.
+
+#        animation_dir = self.tags['[animation_dir]']
+#        eigenfreq_dir = self.tags['[eigenfreq_dir]']
+#        logfiles_dir = self.tags['[log_dir]']
+#        results_dir = self.tags['[res_dir]']
+#        htc_dir = self.tags['[htc_dir]']
+        htcmaster = self.tags['[master_htc_file]']
+
+        control_dir = self.tags['[control_dir]']
+        htcmaster_dir = self.tags['[master_htc_dir]']
+        data_dir = self.tags['[data_dir]']
+        turb_dir = self.tags['[turb_dir]']
+        wake_dir = self.tags['[wake_dir]']
+        meander_dir = self.tags['[meander_dir]']
+        mooring_dir = self.tags['[mooring_dir]']
+        hydro_dir = self.tags['[hydro_dir]']
+        extforce = self.tags['[externalforce]']
+        # result dirs are not required, HAWC2 will create them
+        dirs = [control_dir, data_dir, extforce, turb_dir, wake_dir,
+                 meander_dir, mooring_dir, hydro_dir]
+        for zipdir in dirs:
+            if zipdir:
+                zf.write('.', zipdir + '.', zipfile.ZIP_DEFLATED)
+        zf.write('.', 'htc/_master/.', zipfile.ZIP_DEFLATED)
+
+        # if any, add files that should be added to the root of the zip file
+        for file_name in self.tags['[zip_root_files]']:
+            zf.write(model_dir_local+file_name, file_name, zipfile.ZIP_DEFLATED)
+
+        if '[ESYSMooring_init_fname]' in self.tags:
+            if self.tags['[ESYSMooring_init_fname]'] is not None:
+                fname_source = self.tags['[ESYSMooring_init_fname]']
+                fname_target = 'ESYSMooring_init.dat'
+                zf.write(model_dir_local + fname_source, fname_target,
+                         zipfile.ZIP_DEFLATED)
+
+        # the master file
+        src = os.path.join(htcmaster_dir, htcmaster)
+        dst = os.path.join('htc', '_master', htcmaster)
+        zf.write(src, dst, zipfile.ZIP_DEFLATED)
+
+        # manually add all that resides in control, mooring and hydro
+        paths = [control_dir, mooring_dir, hydro_dir, extforce, data_dir]
+        for target_path in paths:
+            if not target_path:
+                continue
+            path_src = os.path.join(model_dir_local, target_path)
+            for root, dirs, files in os.walk(path_src):
+                for file_name in files:
+                    #print 'adding', file_name
+                    src = os.path.join(root, file_name)
+                    # the zip file only contains the relative paths
+                    rel_dst = root.replace(os.path.abspath(model_dir_local), '')
+                    if os.path.isabs(rel_dst):
+                        rel_dst = rel_dst[1:]
+                    rel_dst = os.path.join(rel_dst, file_name)
+                    zf.write(src, rel_dst, zipfile.ZIP_DEFLATED)
+
+        # and last copies: the files with generic input names
+        if not isinstance(self.tags['[fname_source]'], list):
+            raise ValueError('[fname_source] needs to be a list')
+        if not isinstance(self.tags['[fname_default_target]'], list):
+            raise ValueError('[fname_default_target] needs to be a list')
+        len1 = len(self.tags['[fname_source]'])
+        len2 = len(self.tags['[fname_default_target]'])
+        if len1 != len2:
+            raise ValueError('[fname_source] and [fname_default_target] '
+                             'need to have the same number of items')
+        for i in range(len1):
+            src = os.path.join(model_dir_local, self.tags['[fname_source]'][i])
+            # the zip file only contains the relative paths
+            rel_dst = self.tags['[fname_default_target]'][i]
+            # we can not have an absolute path here, make sure it isn't
+            if os.path.isabs(rel_dst):
+                rel_dst = rel_dst[1:]
+            zf.write(src, rel_dst, zipfile.ZIP_DEFLATED)
+
+        # and close again
+        zf.close()
+
+        # ---------------------------------------------------------------------
+        # copy zip file to the server, this will be used on the nodes
+        src = model_dir_local  + self.tags['[model_zip]']
+        dst = model_dir_server + self.tags['[model_zip]']
+
+        # in case we are running local and the model dir is the server dir
+        # we do not need to copy the zip file, it is already on location
+        if not src == dst:
+            shutil.copy2(src, dst)
+
+        ## copy to zip data file to sim_id htc folder on the server dir
+        ## so we now have exactly all data to relaunch any htc file later
+        #dst  = model_dir_server + self.tags['[htc_dir]']
+        #dst += self.tags['[model_zip]']
+        #shutil.copy2(src, dst)
+
+    def _sweep_tags(self):
+        """
+        The original way with all tags in the htc file for each blade node
+        """
+        # set the correct sweep curve, these values are used
+        a = self.tags['[sweep_amp]']
+        b = self.tags['[sweep_exp]']
+        z0 = self.tags['[sweep_curve_z0]']
+        ze = self.tags['[sweep_curve_ze]']
+        nr = self.tags['[nr_nodes_blade]']
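+        # [sweep_curve_def] is expected to hold a python expression that is
+        # eval()-ed below with a, b, z0, ze and z in scope, for example
+        # (illustrative only): 'a*math.pow((z-z0)/(ze-z0), b)'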
+        # format for the x values in the htc file
+        ff = ' 1.03f'
+        for zz in range(nr):
+            it_nosweep = '[x'+str(zz+1)+'-nosweep]'
+            item = '[x'+str(zz+1)+']'
+            z = self.tags['[z'+str(zz+1)+']']
+            if z >= z0:
+                curve = eval(self.tags['[sweep_curve_def]'])
+                # new swept position = original + sweep curve
+                self.tags[item] = format(self.tags[it_nosweep] + curve, ff)
+            else:
+                self.tags[item] = format(self.tags[it_nosweep], ff)
+
+    def _staircase_windramp(self, nr_steps, wind_step, ramptime, septime):
+        """Create a stair case wind ramp
+
+
+        """
+
+        pass
+
+    def _all_in_one_blade_tag(self, radius_new=None):
+        """
+        Create htc input based on a HAWTOPT blade result file
+
+        Automatically set the correct number of nodes in master.tags based
+        on the number of blade nodes
+
+        WARNING: initial x position of the half chord point is assumed to be
+        zero
+
+        zaxis_fact : int, default=1.0 --> is member of default tags
+            Factor for the htc z-axis coordinates. The htc z axis is mapped to
+            the HAWTOPT radius. If the blade radius develops in negative z
+            direction, set to -1
+
+        Parameters
+        ----------
+
+        radius_new : ndarray(n), default=None
+            z coordinates of the nodes. If None, a linear distribution is
+            used and the tag [nr_nodes_blade] sets the number of nodes
+
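+        A rough usage sketch (the relevant tags such as [blade_hawtopt],
+        [nr_nodes_blade], [zaxis_fact] and the sweep curve tags are assumed
+        to be set already; the value shown below is illustrative):
+
+        >>> master.tags['[nr_nodes_blade]'] = 19
+        >>> master._all_in_one_blade_tag()
+        >>> htc_input = master.tags['[blade_htc_node_input]']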
+
+        """
+        # TODO: implement support for x position to be other than zero
+
+        # TODO: This is not a good place, should live somewhere else. Or
+        # reconsider inputs etc so there is more freedom in changing the
+        # location of the nodes, set initial x position of the blade etc
+
+        # and save under tag [blade_htc_node_input] in htc input format
+
+        nr_nodes = self.tags['[nr_nodes_blade]']
+
+        blade = self.tags['[blade_hawtopt]']
+        # in the htc file, blade root =0 and not blade hub radius
+        blade[:,0] = blade[:,0] - blade[0,0]
+
+        if type(radius_new).__name__ == 'NoneType':
+            # interpolate to the specified number of nodes
+            radius_new = np.linspace(blade[0,0], blade[-1,0], nr_nodes)
+
+        # Data checks on radius_new
+        elif not type(radius_new).__name__ == 'ndarray':
+            raise ValueError('radius_new has to be either NoneType or ndarray')
+        else:
+            if not len(radius_new.shape) == 1:
+                raise ValueError('radius_new has to be 1D')
+            elif not len(radius_new) == nr_nodes:
+                msg = 'radius_new has to have ' + str(nr_nodes) + ' elements'
+                raise ValueError(msg)
+
+        # save the nodal positions in the tag cloud
+        self.tags['[blade_nodes_z_positions]'] = radius_new
+
+        # make sure radius_new is just slightly smaller than the low res radius
+        radius_new[-1] = blade[-1,0]-0.00000001
+        twist_new = interpolate.griddata(blade[:,0], blade[:,2], radius_new)
+        # blade_new is the htc node input part:
+        # sec 1   x     y     z   twist;
+        blade_new = np.zeros((len(radius_new),4))
+        blade_new[:,2] = radius_new*self.tags['[zaxis_fact]']
+        # twist angle remains the same in either case (standard/ojf rotation)
+        blade_new[:,3] = twist_new*-1.
+
+        # set the correct sweep curve, these values are used
+        a = self.tags['[sweep_amp]']
+        b = self.tags['[sweep_exp]']
+        z0 = self.tags['[sweep_curve_z0]']
+        ze = self.tags['[sweep_curve_ze]']
+        tmp = 'nsec ' + str(nr_nodes) + ';'
+        for k in range(nr_nodes):
+            tmp += '\n'
+            i = k+1
+            z = blade_new[k,2]
+            y = blade_new[k,1]
+            twist = blade_new[k,3]
+            # x position, sweeping?
+            if z >= z0:
+                x = eval(self.tags['[sweep_curve_def]'])
+            else:
+                x = 0.0
+
+            # the node number
+            tmp += '        sec ' + format(i, '2.0f')
+            tmp += format(x, ' 11.03f')
+            tmp += format(y, ' 11.03f')
+            tmp += format(z, ' 11.03f')
+            tmp += format(twist, ' 11.03f')
+            tmp += ' ;'
+
+        self.tags['[blade_htc_node_input]'] = tmp
+
+        # and create the ae file
+        #5	Blade Radius [m] 	Chord[m]  T/C[%]  Set no. of pc file
+        #1 25 some comments
+        #0.000     0.100    21.000   1
+        nr_points = blade.shape[0]
+        tmp2 = '1  Blade Radius [m] Chord [m] T/C [%] pc file set nr\n'
+        tmp2 += '1  %i auto generated by _all_in_one_blade_tag()' % nr_points
+
+        for k in range(nr_points):
+            tmp2 += '\n'
+            tmp2 += '%9.3f %9.3f %9.3f' % (blade[k,0], blade[k,1], blade[k,3])
+            tmp2 += ' %4i' % (k+1)
+        # end with newline
+        tmp2 += '\n'
+
+        # TODO: finish writing file, implement proper handling of hawtopt path
+        # and save the file
+        #if self.tags['aefile']
+        #write_file(file_path, tmp2, 'w')
+
+    def loadmaster(self):
+        """
+        Load the master file, path to master file is defined in
+        __init__(): target, master. Additionally, find all the tags in the
+        master file. Note that tags [] in the label and comment sections are
+        ignored.
+
+        All the tags that are found in the master file are saved in the
+        self.tags_in_master dictionary, with the line numbers in a list as
+        values:
+        tags_in_master[tagname] = [line nr occurrence 1, line nr occurrence 2, ]
+        note that tagname includes the []
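+
+        For example, if the tag [t0] occurs on (zero based) lines 10 and 12
+        of the master file, tags_in_master['[t0]'] holds [10, 12] (line
+        numbers shown here are illustrative).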
+        """
+
+        # what is faster, load the file in one string and do replace()?
+        # or the check error log approach?
+        fpath  = os.path.join(self.tags['[master_htc_dir]'],
+                              self.tags['[master_htc_file]'])
+        # load the file:
+        if not self.silent:
+            print('loading master: ' + fpath)
+        FILE = open(fpath, 'r')
+        lines = FILE.readlines()
+        FILE.close()
+
+        # regex for finding all tags in a line
+        regex = re.compile('(\\[.*?\\])')
+        self.tags_in_master = {}
+
+        # convert to string:
+        self.master_str = ''
+        for i, line in enumerate(lines):
+            # are there any tags on this line? Ignore comment AND label section
+            tags = regex.findall(line.split(';')[0].split('#')[0])
+            for tag in tags:
+                try:
+                    self.tags_in_master[tag].append(i)
+                except KeyError:
+                    self.tags_in_master[tag] = [i]
+            # safe for later
+            self.master_str += line
+
+    def createcase_check(self, htc_dict_repo, \
+                            tmp_dir='/tmp/HawcPyTmp/', write_htc=True):
+        """
+        Check if a certain case name already exists in a specified htc_dict.
+        If true, return a message and do not create the case. It can be that
+        either the case name is a duplicate and should be named differently,
+        or that the simulation is a duplicate and it shouldn't be repeated.
+        """
+
+        # is the [case_id] tag unique, given the htc_dict_repo?
+        if self.verbose:
+            print('checking if following case is in htc_dict_repo: ')
+            print(self.tags['[case_id]'] + '.htc')
+
+        if self.tags['[case_id]'] + '.htc' in htc_dict_repo:
+            # if the new case_id already exists in the htc_dict_repo
+            # do not add it again!
+            # print('case_id key is not unique in the given htc_dict_repo!'
+            raise UserWarning('case_id key is not unique in the given '
+                              'htc_dict_repo!')
+        else:
+            htc = self.createcase(tmp_dir=tmp_dir, write_htc=write_htc)
+            return htc
+
+    def createcase(self, tmp_dir='/tmp/HawcPyTmp/', write_htc=True):
+        """
+        replace all the tags from the master file and save the new htc file
+        """
+
+        htc = self.master_str
+
+        # and now replace all the tags in the htc master file
+        # when iterating over a dict, it will give the key, given in the
+        # corresponding format (string keys as strings, int keys as ints...)
+        for k in self.tags:
+            # TODO: give error if a default is not defined, like null
+            # if it is a boolean, replace with ; or blank
+            if isinstance(self.tags[k], bool):
+                if self.tags[k]:
+                    # we have a boolean that is True, switch it on
+                    value = ''
+                else:
+                    value = ';'
+            else:
+                value = self.tags[k]
+            # if string is not found, it will do nothing
+            htc = htc.replace(str(k), str(value))
+
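+        # For example, a boolean tag [windramp] set to False would turn the
+        # master line "[windramp] wind_ramp_abs 10 20 0 2" into
+        # "; wind_ramp_abs 10 20 0 2" (commented out), while True removes the
+        # tag and keeps the line active. Tag and line are illustrative only.
+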
+        # and save the case htc file:
+        cname = self.tags['[case_id]'] + '.htc'
+
+        htc_target = os.path.join(self.tags['[run_dir]'], self.tags['[htc_dir]'])
+        if not self.silent:
+            print('htc will be written to: ')
+            print('  ' + htc_target)
+            print('  ' + cname)
+
+        # and write the htc file to the temp dir first
+        if write_htc:
+            self.write_htc(cname, htc, htc_target)
+#            thread = threading.Thread(target=self.write_htc,
+#                                      args=(cname, htc, htc_target))
+#            thread.daemon = True
+#            thread.start()
+        # save all the tags for debugging purposes
+        if self.verbose:
+            tmp = ''
+            for key in sorted(self.tags.keys()):
+                value = self.tags[key]
+                rpl = (key.rjust(25), str(value).ljust(70),
+                       type(key).__name__.ljust(10), type(value).__name__)
+                tmp += '%s -- %s -- %s -- %s\n' % rpl
+            write_file(htc_target + cname + '.tags', tmp, 'w')
+
+        # return the used tags, some parameters can be used later, such as the
+        # turbulence name in the pbs script
+        # return as a dictionary, to be used in htc_dict
+        # return a copy of the tags, otherwise you will not catch changes
+        # made to the different tags in your sim series
+        return {cname : copy.copy(self.tags)}
+
+    def write_htc(self, cname, htc, htc_target):
+        # create subfolder if necessary
+        if not os.path.exists(htc_target):
+            os.makedirs(htc_target)
+        write_file(htc_target + cname, htc, 'w')
+        # write_file(tmp_dir + case, htc, 'w')
+
+    def lower_case_output(self):
+        """
+        force lower case tags on output files since HAWC2 will force them to
+        lower case anyway
+        """
+
+        for key in self.output_dirs:
+            if isinstance(self.tags[key], str):
+                self.tags[key] = self.tags[key].lower()
+
+
+class PBS:
+    """
+    The part of this class where the actual pbs script is written (the
+    functions create(), starting() and ending()) is based on the MS Excel
+    macro written by Torben J. Larsen
+
+    input a list with htc file names, and a dict with the other paths,
+    such as the turbulence file and folder, htc folder and others
+    """
+
+    def __init__(self, cases, server='gorm', qsub='time',
+                 pbs_fname_appendix=True, short_job_names=True):
+        """
+        Define the settings here. This should be done outside, but how?
+        In a text file, a parameters list, or first create the object and
+        then set the non-standard values??
+
+        where htc_dict is a dictionary with
+            [key=case name, value=used_tags_dict]
+
+        where the tags are as output by MasterFile (a dict with the chosen options)
+
+        For gorm, maxcpu is set to 1, do not change otherwise you might need to
+        change the scratch dir handling.
+
+        qsub : str
+            time, or depend. For time each job will need to get a start
+            time, which will have to be set by replacing [start_time].
+            For depend a job dependency chain will have to be established.
+            This will be set via [nodeps] and [job_id]
+            When None, neither of them is specified, and consequently the
+            pbs file can be submitted without replacing any tag. Use
+            qsub=None in combination with the launch.Scheduler
+
+        short_job_names : boolean, default=True
+            How should the job be named (relevant for the PBS queueing system)?
+            When True, it will be named like HAWC2_123456. With False, the
+            case_id will be used as job name.
+
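+        A minimal usage sketch (cases is a dict of case_id.htc keys with the
+        tag dicts as returned by HtcMaster.createcase(); the arguments shown
+        are illustrative):
+
+        >>> pbs = PBS(cases, server='gorm', qsub='time')
+        >>> pbs.create()
+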
+        """
+        self.server = server
+        self.verbose = True
+
+#        if server == 'thyra':
+#            self.maxcpu = 4
+#            self.secperiter = 0.020
+        if server == 'gorm':
+            self.maxcpu = 1
+            self.secperiter = 0.012
+        elif server == 'jess':
+            self.maxcpu = 1
+            self.secperiter = 0.012
+        else:
+            raise UserWarning('server support only for jess or gorm')
+
+        # the output channels come with a price tag. Each time step
+        # will have a penalty depending on the number of output channels
+
+        self.iterperstep = 8.0 # average nr of iterations per time step
+        # lead time: account for time losses when starting a simulation,
+        # copying the turbulence data, generating the turbulence
+        self.tlead = 5.0*60.0
+
+        # use pbs job name as prefix in the pbs file name
+        self.pbs_fname_appendix = pbs_fname_appendix
+        self.short_job_names = short_job_names
+        # pbs job name prefix
+        self.pref = 'HAWC2_'
+        # the actual script starts empty
+        self.pbs = ''
+
+        # FIXME: this goes wrong when Morten does it directly on the cluster
+        # the resulting PBS script has too many slashes !
+        self.wine = 'time WINEARCH=win32 WINEPREFIX=~/.wine32 wine'
+        # in case you want to redirect stdout to /dev/null
+#        self.wine_appendix = '> /dev/null 2>&1'
+        self.wine_appendix = ''
+        self.wine_dir = '/home/dave/.wine32/drive_c/bin'
+        # /dev/shm should be the RAM of the cluster
+#        self.node_run_root = '/dev/shm'
+        self.node_run_root = '/scratch'
+
+        self.cases = cases
+
+        # location of the output messages .err and .out created by the node
+        self.pbs_out_dir = 'pbs_out/'
+        self.pbs_in_dir = 'pbs_in/'
+
+        # for the start number, take hour/minute combo
+        d = datetime.datetime.today()
+        tmp = int( str(d.hour)+format(d.minute, '02.0f') )*100
+        self.pbs_start_number = tmp
+        self.qsub = qsub
+
+#        if quemethod == 'time':
+#            self.que_jobdeps = False
+#        elif type(quemethod).__name__ == 'int':
+#            nr_cpus = quemethod
+#            self.que_jobdeps = True
+#            nr_jobs = len(cases)
+#            jobs_per_cpu = int(math.ceil(float(nr_jobs)/float(nr_cpus)))
+#            # precalculate all the job ids
+#            self.jobid_deps = []
+#            self.jobid_list = []
+#            count2 = self.pbs_start_number
+#            for cpu in range(nr_cpus):
+#                self.jobid_list.extend(range(count2, count2+jobs_per_cpu))
+#                # the first job to start does not have a dependency
+#                self.jobid_deps.append(None)
+#                self.jobid_deps.extend(range(count2, count2+jobs_per_cpu-1))
+#                count2 += jobs_per_cpu
+
+        self.copyback_turb = True
+        self.copyback_files = []
+        self.copyback_frename = []
+        self.copyto_generic = []
+        self.copyto_files = []
+
+    def create(self):
+        """
+        Main loop for creating the pbs scripts, based on cases, which
+        contains the case name as key and tag dictionary as value
+        """
+
+        # dynamically set walltime based on the number of time steps
+        # for thyra, make a list so we base the walltime on the slowest case
+        self.nr_time_steps = []
+        self.duration = []
+        self.t0 = []
+        # '[time_stop]' '[dt_sim]'
+
+        # REMARK: this is not really consistent with how the result and log file
+        # dirs are allowed to change for each individual case...
+        # first check if the pbs_out_dir exists, this dir is considered to be
+        # the same for all cases present in cases
+        # self.tags['[run_dir]']
+        case0 = self.cases.keys()[0]
+        path = self.cases[case0]['[run_dir]'] + self.pbs_out_dir
+        if not os.path.exists(path):
+            os.makedirs(path)
+
+        # create pbs_in base dir
+        path = self.cases[case0]['[run_dir]'] + self.pbs_in_dir
+        if not os.path.exists(path):
+            os.makedirs(path)
+
+        # number the pbs jobs:
+        count2 = self.pbs_start_number
+        # cpu counter within a node starts at 1
+        count1 = 1
+        # scan through all the cases
+        i, i_tot = 1, len(self.cases)
+        ended = True
+
+        for case in self.cases:
+
+            # get a shorter version for the current cases tag_dict:
+            tag_dict = self.cases[case]
+
+            # group all values loaded from the tag_dict here, to keep overview
+            # the directories to SAVE the results/logs/turb files
+            # load all relevant dir settings: the result/logfile/turbulence/zip
+            # they are now also available for starting() and ending() parts
+            hawc2_exe = tag_dict['[hawc2_exe]']
+            self.results_dir = tag_dict['[res_dir]']
+            self.eigenfreq_dir = tag_dict['[eigenfreq_dir]']
+            self.logs_dir = tag_dict['[log_dir]']
+            self.animation_dir = tag_dict['[animation_dir]']
+            self.TurbDirName = tag_dict['[turb_dir]']
+            self.TurbDb = tag_dict['[turb_db_dir]']
+            self.wakeDb = tag_dict['[wake_db_dir]']
+            self.meandDb = tag_dict['[meand_db_dir]']
+            self.WakeDirName = tag_dict['[wake_dir]']
+            self.MeanderDirName = tag_dict['[meander_dir]']
+            self.ModelZipFile = tag_dict['[model_zip]']
+            self.htc_dir = tag_dict['[htc_dir]']
+            self.hydro_dir = tag_dict['[hydro_dir]']
+            self.mooring_dir = tag_dict['[mooring_dir]']
+            self.model_path = tag_dict['[run_dir]']
+            self.turb_base_name = tag_dict['[turb_base_name]']
+            self.wake_base_name = tag_dict['[wake_base_name]']
+            self.meand_base_name = tag_dict['[meand_base_name]']
+            self.pbs_queue_command = tag_dict['[pbs_queue_command]']
+            self.walltime = tag_dict['[walltime]']
+            self.dyn_walltime = tag_dict['[auto_walltime]']
+
+            # create the pbs_out_dir if necessary
+            try:
+                path = tag_dict['[run_dir]'] + tag_dict['[pbs_out_dir]']
+                if not os.path.exists(path):
+                    os.makedirs(path)
+                self.pbs_out_dir = tag_dict['[pbs_out_dir]']
+            except:
+                pass
+
+            # create pbs_in subdirectories if necessary
+            try:
+                path = tag_dict['[run_dir]'] + tag_dict['[pbs_in_dir]']
+                if not os.path.exists(path):
+                    os.makedirs(path)
+                self.pbs_in_dir = tag_dict['[pbs_in_dir]']
+            except:
+                pass
+
+            try:
+                self.copyback_files = tag_dict['[copyback_files]']
+                self.copyback_frename = tag_dict['[copyback_frename]']
+            except KeyError:
+                pass
+
+            try:
+                self.copyto_generic = tag_dict['[copyto_generic]']
+                self.copyto_files = tag_dict['[copyto_files]']
+            except KeyError:
+                pass
+
+            # related to the dynamically setting the walltime
+            duration = float(tag_dict['[time_stop]'])
+            dt = float(tag_dict['[dt_sim]'])
+            self.nr_time_steps.append(duration/dt)
+            self.duration.append(float(tag_dict['[duration]']))
+            self.t0.append(float(tag_dict['[t0]']))
+
+            if self.verbose:
+                print('htc_dir in pbs.create:')
+                print(self.htc_dir)
+                print(self.model_path)
+
+            # we only start a new case, if we have something that ended before
+            # the very first case has to start with starting
+            if ended:
+                count1 = 1
+
+#                # when jobs depend on other jobs (constant node loading)
+#                if self.que_jobdeps:
+#                    jobid = self.pref + str(self.jobid_list[i-1])
+#                    jobid_dep = self.pref + str(self.jobid_deps[i-1])
+#                else:
+#                    jobid = self.pref + str(count2)
+#                    jobid_dep = None
+                if self.short_job_names:
+                    jobid = self.pref + str(count2)
+                else:
+                    jobid = tag_dict['[case_id]']
+                if self.pbs_fname_appendix and self.short_job_names:
+                    # define the path for the new pbs script
+                    pbs_in_fname = '%s_%s.p' % (tag_dict['[case_id]'], jobid)
+                else:
+                    pbs_in_fname = '%s.p' % (tag_dict['[case_id]'])
+                pbs_path = self.model_path + self.pbs_in_dir + pbs_in_fname
+                # Start a new pbs script, we only need the tag_dict here
+                self.starting(tag_dict, jobid)
+                ended = False
+
+            # -----------------------------------------------------------------
+            # WRITING THE ACTUAL JOB PARAMETERS
+
+            # output the current scratch directory
+            self.pbs += "pwd\n"
+            # zip file has been copied to the node before (in start_pbs())
+            # unzip now in the node
+            self.pbs += "/usr/bin/unzip " + self.ModelZipFile + '\n'
+            # create all directories, especially relevant if there are case
+            # dependent sub directories that are not present in the ZIP file
+            self.pbs += "mkdir -p " + self.htc_dir + '\n'
+            self.pbs += "mkdir -p " + self.results_dir + '\n'
+            self.pbs += "mkdir -p " + self.logs_dir + '\n'
+            self.pbs += "mkdir -p " + self.TurbDirName + '\n'
+            if self.WakeDirName:
+                self.pbs += "mkdir -p " + self.WakeDirName + '\n'
+            if self.MeanderDirName:
+                self.pbs += "mkdir -p " + self.MeanderDirName + '\n'
+            if self.hydro_dir:
+                self.pbs += "mkdir -p " + self.hydro_dir + '\n'
+            # create the eigen analysis dir just in case that is necessary
+            if self.eigenfreq_dir:
+                self.pbs += 'mkdir -p %s \n' % self.eigenfreq_dir
+
+            # and copy the htc file to the node
+            self.pbs += "cp -R $PBS_O_WORKDIR/" + self.htc_dir \
+                + case +" ./" + self.htc_dir + '\n'
+
+            # if there is a turbulence file data base dir, copy from there
+            if self.TurbDb:
+                tmp = (self.TurbDb, self.turb_base_name, self.TurbDirName)
+                self.pbs += "cp -R $PBS_O_WORKDIR/%s%s*.bin %s \n" % tmp
+            else:
+                # turbulence files basenames are defined for the case
+                self.pbs += "cp -R $PBS_O_WORKDIR/" + self.TurbDirName + \
+                    self.turb_base_name + "*.bin ./"+self.TurbDirName + '\n'
+
+            if self.wakeDb and self.WakeDirName:
+                tmp = (self.wakeDb, self.wake_base_name, self.WakeDirName)
+                self.pbs += "cp -R $PBS_O_WORKDIR/%s%s*.bin %s \n" % tmp
+            elif self.WakeDirName:
+                self.pbs += "cp -R $PBS_O_WORKDIR/" + self.WakeDirName + \
+                    self.wake_base_name + "*.bin ./"+self.WakeDirName + '\n'
+
+            if self.meandDb and self.MeanderDirName:
+                tmp = (self.meandDb, self.meand_base_name, self.MeanderDirName)
+                self.pbs += "cp -R $PBS_O_WORKDIR/%s%s*.bin %s \n" % tmp
+            elif self.MeanderDirName:
+                self.pbs += "cp -R $PBS_O_WORKDIR/" + self.MeanderDirName + \
+                    self.meand_base_name + "*.bin ./"+self.MeanderDirName + '\n'
+
+            # copy and rename input files with given versioned name to the
+            # required non unique generic version
+            for fname, fgen in zip(self.copyto_files, self.copyto_generic):
+                self.pbs += "cp -R $PBS_O_WORKDIR/%s ./%s \n" % (fname, fgen)
+
+            # the hawc2 execution commands via wine
+            param = (self.wine, hawc2_exe, self.htc_dir+case, self.wine_appendix)
+            self.pbs += "%s %s ./%s %s &\n" % param
+
+            #self.pbs += "wine get_mac_adresses" + '\n'
+            # self.pbs += "cp -R ./*.mac  $PBS_O_WORKDIR/." + '\n'
+            # -----------------------------------------------------------------
+
+            # and we end when the cpu's per node are full
+            if int(count1/self.maxcpu) == 1:
+                # write the end part of the pbs script
+                self.ending(pbs_path)
+                ended = True
+                # print progress:
+                replace = ((i/self.maxcpu), (i_tot/self.maxcpu), self.walltime)
+                print('pbs script %3i/%i walltime=%s' % replace)
+
+            count2 += 1
+            i += 1
+            # the next cpu
+            count1 += 1
+
+        # it could be that the last node was not fully loaded. In that case
+        # we have not had a successful ending, and we still need to finish
+        if not ended:
+            # write the end part of the pbs script
+            self.ending(pbs_path)
+            # progress printing
+            replace = ( (i/self.maxcpu), (i_tot/self.maxcpu), self.walltime )
+            print('pbs script %3i/%i walltime=%s, partially loaded' % replace)
+#            print 'pbs progress, script '+format(i/self.maxcpu,'2.0f')\
+#                + '/' + format(i_tot/self.maxcpu, '2.0f') \
+#                + ' partially loaded...'
+
+    def starting(self, tag_dict, jobid):
+        """
+        First part of the pbs script
+        """
+
+        # a new clean pbs script!
+        self.pbs = ''
+        self.pbs += "### Standard Output" + ' \n'
+
+        case_id = tag_dict['[case_id]']
+
+        # PBS job name
+        self.pbs += "#PBS -N %s \n" % (jobid)
+        self.pbs += "#PBS -o ./" + self.pbs_out_dir + case_id + ".out" + '\n'
+        # self.pbs += "#PBS -o ./pbs_out/" + jobid + ".out" + '\n'
+        self.pbs += "### Standard Error" + ' \n'
+        self.pbs += "#PBS -e ./" + self.pbs_out_dir + case_id + ".err" + '\n'
+        # self.pbs += "#PBS -e ./pbs_out/" + jobid + ".err" + '\n'
+        self.pbs += '#PBS -W umask=003\n'
+        self.pbs += "### Maximum wallclock time format HOURS:MINUTES:SECONDS\n"
+#        self.pbs += "#PBS -l walltime=" + self.walltime + '\n'
+        self.pbs += "#PBS -l walltime=[walltime]\n"
+        if self.qsub == 'time':
+            self.pbs += "#PBS -a [start_time]" + '\n'
+        elif self.qsub == 'depend':
+            # set job dependencies, job_id refers to PBS job_id, which is only
+            # assigned to a job at the moment it gets qsubbed into the que
+            self.pbs += "[nodeps]PBS -W depend=afterany:[job_id]\n"
+
+#        if self.que_jobdeps:
+#            self.pbs += "#PBS -W depend=afterany:%s\n" % jobid_dep
+#        else:
+#            self.pbs += "#PBS -a [start_time]" + '\n'
+
+        # in case of gorm, we need to make it work correctly. Now each job
+        # has a different scratch dir. If we set maxcpu to 12 they all have
+        # the same scratch dir. In that case something should be done
+        # differently
+
+        # specify the number of nodes and cpu's per node required
+        if self.maxcpu > 1:
+            # Number of nodes and cpus per node (ppn)
+            lnodes = int(math.ceil(len(self.cases)/float(self.maxcpu)))
+            lnodes = 1
+            self.pbs += "#PBS -l nodes=%i:ppn=%i\n" % (lnodes, self.maxcpu)
+        else:
+            self.pbs += "#PBS -l nodes=1:ppn=1\n"
+            # Number of nodes and cpus per node (ppn)
+
+        self.pbs += "### Queue name" + '\n'
+        # queue names for Thyra are as follows:
+        # short walltime queue (shorter than an hour): '#PBS -q xpresq'
+        # or otherwise for longer jobs: '#PBS -q workq'
+        self.pbs += self.pbs_queue_command + '\n'
+
+        self.pbs += "### Create scratch directory and copy data to it \n"
+        # output the current directory
+        self.pbs += "cd $PBS_O_WORKDIR" + '\n'
+        self.pbs += 'echo "current working dir (pwd):"\n'
+        self.pbs += "pwd \n"
+        # The batch system on Gorm allows more than one job per node.
+        # Because of this the scratch directory name includes both the
+        # user name and the job ID, that is /scratch/$USER/$PBS_JOBID
+        # if not scratch, make the dir
+        if self.node_run_root != '/scratch':
+            self.pbs += 'mkdir -p %s/$USER\n' % self.node_run_root
+            self.pbs += 'mkdir -p %s/$USER/$PBS_JOBID\n' % self.node_run_root
+
+        # copy the zip files to the scratch dir on the node
+        self.pbs += "cp -R ./" + self.ModelZipFile + \
+            ' %s/$USER/$PBS_JOBID\n' % (self.node_run_root)
+
+        self.pbs += '\n\n'
+        self.pbs += 'echo ""\n'
+        self.pbs += 'echo "Execute commands on scratch nodes"\n'
+        self.pbs += 'cd %s/$USER/$PBS_JOBID\n' % self.node_run_root
+#        # also copy all the HAWC2 exe's to the scratch dir
+#        self.pbs += "cp -R %s/* ./\n" % self.wine_dir
+#        # custom name hawc2 exe
+#        self.h2_new = tag_dict['[hawc2_exe]'] + '-' + jobid + '.exe'
+#        self.pbs += "mv %s.exe %s\n" % (tag_dict['[hawc2_exe]'], self.h2_new)
+
+    def ending(self, pbs_path):
+        """
+        Last part of the pbs script, including the command to write the
+        script to disc. COPY BACK: from the node to $PBS_O_WORKDIR.
+        """
+
+        self.pbs += "### wait for jobs to finish \n"
+        self.pbs += "wait\n"
+        self.pbs += 'echo ""\n'
+        self.pbs += 'echo "Copy back from scratch directory" \n'
+        for i in range(1,self.maxcpu+1,1):
+
+            # navigate to the cpu dir on the node
+            # The batch system on Gorm allows more than one job per node.
+            # Because of this the scratch directory name includes both the
+            # user name and the job ID, that is /scratch/$USER/$PBS_JOBID
+            # NB! This is different from Thyra!
+            self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
+
+            # create the log, res etc dirs in case they do not exist
+            self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.results_dir + "\n"
+            self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.logs_dir + "\n"
+            if self.animation_dir:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.animation_dir + "\n"
+            if self.copyback_turb and self.TurbDb:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.TurbDb + "\n"
+            elif self.copyback_turb:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.TurbDirName + "\n"
+            if self.copyback_turb and self.wakeDb:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.wakeDb + "\n"
+            elif self.WakeDirName:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.WakeDirName + "\n"
+            if self.copyback_turb and self.meandDb:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.meandDb + "\n"
+            elif self.MeanderDirName:
+                self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.MeanderDirName + "\n"
+
+            # and copy the results and log files from the node to the
+            # thyra home dir
+            self.pbs += "cp -R " + self.results_dir + \
+                ". $PBS_O_WORKDIR/" + self.results_dir + ".\n"
+            self.pbs += "cp -R " + self.logs_dir + \
+                ". $PBS_O_WORKDIR/" + self.logs_dir + ".\n"
+            if self.animation_dir:
+                self.pbs += "cp -R " + self.animation_dir + \
+                    ". $PBS_O_WORKDIR/" + self.animation_dir + ".\n"
+
+            if self.eigenfreq_dir:
+                # just in case the eig dir has subdirs for the results, only
+                # select the base path and cp -r will take care of the rest
+                p1 = self.eigenfreq_dir.split('/')[0]
+                self.pbs += "cp -R %s/. $PBS_O_WORKDIR/%s/. \n" % (p1, p1)
+                # for eigen analysis with floater, modes are in root
+                eig_dir_sys = '%ssystem/' % self.eigenfreq_dir
+                self.pbs += 'mkdir -p $PBS_O_WORKDIR/%s \n' % eig_dir_sys
+                self.pbs += "cp -R mode* $PBS_O_WORKDIR/%s. \n" % eig_dir_sys
+
+            # only copy the turbulence files back if they do not exist
+            # for all *.bin files on the node
+            cmd = 'for i in `ls *.bin`; do  if [ -e $PBS_O_WORKDIR/%s$i ]; '
+            cmd += 'then echo "$i exists no copyback"; else echo "$i copyback"; '
+            cmd += 'cp $i $PBS_O_WORKDIR/%s; fi; done\n'
+            # copy back turbulence file?
+            # browse to the node turb dir
+            self.pbs += '\necho ""\n'
+            self.pbs += 'echo "COPY BACK TURB IF APPLICABLE"\n'
+            if self.TurbDirName:
+                self.pbs += 'cd %s\n' % self.TurbDirName
+            if self.copyback_turb and self.TurbDb:
+                tmp = (self.TurbDb, self.TurbDb)
+                self.pbs += cmd % tmp
+            elif self.copyback_turb:
+                tmp = (self.TurbDirName, self.TurbDirName)
+                self.pbs += cmd % tmp
+            if self.TurbDirName:
+                # and back to normal model root
+                self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
+
+            if self.WakeDirName:
+                self.pbs += 'cd %s\n' % self.WakeDirName
+            if self.copyback_turb and self.wakeDb:
+                tmp = (self.wakeDb, self.wakeDb)
+                self.pbs += cmd % tmp
+            elif self.copyback_turb and self.WakeDirName:
+                tmp = (self.WakeDirName, self.WakeDirName)
+                self.pbs += cmd % tmp
+            if self.WakeDirName:
+                # and back to normal model root
+                self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
+
+            if self.MeanderDirName:
+                self.pbs += 'cd %s\n' % self.MeanderDirName
+            if self.copyback_turb and self.meandDb:
+                tmp = (self.meandDb, self.meandDb)
+                self.pbs += cmd % tmp
+            elif self.copyback_turb and self.MeanderDirName:
+                tmp = (self.MeanderDirName, self.MeanderDirName)
+                self.pbs += cmd % tmp
+            if self.MeanderDirName:
+                # and back to normal model root
+                self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
+            self.pbs += 'echo "END COPY BACK TURB"\n'
+            self.pbs += 'echo ""\n\n'
+
+            # copy back any other kind of file specified
+            if len(self.copyback_frename) == 0:
+                self.copyback_frename = self.copyback_files
+            for fname, fnew in zip(self.copyback_files, self.copyback_frename):
+                self.pbs += "cp -R %s $PBS_O_WORKDIR/%s \n" % (fname, fnew)
+
+            # check what is left
+            self.pbs += 'echo ""\n'
+            self.pbs += 'echo "following files are on the node (find .):"\n'
+            self.pbs += 'find .\n'
+
+#            # and delete it all (but that is not allowed)
+#            self.pbs += 'cd ..\n'
+#            self.pbs += 'ls -lah\n'
+#            self.pbs += 'echo $PBS_JOBID\n'
+#            self.pbs += 'rm -r $PBS_JOBID \n'
+
+            # Delete the batch file at the end. However, is this possible since
+            # the batch file is still open at this point????
+            # self.pbs += "rm "
+
+        # base walltime on the longest simulation in the batch
+        nr_time_steps = max(self.nr_time_steps)
+        # TODO: take into account the difference between time steps with
+        # and without output. This penalty also depends on the number of
+        # channels outputted. So from 0 until t0 we have no penalty,
+        # from t0 until t0+duration we have the output penalty.
+
+        # always a predefined lead time to account for startup losses
+        tmax = int(nr_time_steps*self.secperiter*self.iterperstep + self.tlead)
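+        # Worked example with illustrative numbers: a 600 s simulation at
+        # dt=0.01 s has 60000 time steps, so the estimate becomes
+        # 60000*0.012*8.0 + 300 = 6060 s, or a walltime of roughly 01:41:00.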
+        if self.dyn_walltime:
+            # format tmax (seconds) as HH:MM:SS; use UTC so the local
+            # timezone offset does not end up in the walltime
+            dt_seconds = datetime.datetime.utcfromtimestamp(tmax)
+            self.walltime = dt_seconds.strftime('%H:%M:%S')
+            self.pbs = self.pbs.replace('[walltime]', self.walltime)
+        else:
+            self.pbs = self.pbs.replace('[walltime]', self.walltime)
+        # and reset the nr_time_steps list for the next pbs job file
+        self.nr_time_steps = []
+        self.t0 = []
+        self.duration = []
+
+        # TODO: add logfile checking support directly here. In that way each
+        # node will do the logfile checking and statistics calculations right
+        # after the simulation. Figure out a way how to merge the data from
+        # all the different cases afterwards
+
+        self.pbs += "exit\n"
+
+        if self.verbose:
+            print('writing pbs script to path: ' + pbs_path)
+
+        # and write the script to a file:
+        write_file(pbs_path, self.pbs, 'w')
+        # make the string empty again, for memory
+        self.pbs = ''
+
+    def check_results(self, cases):
+        """
+        Cross-check if all simulations in the list have returned results.
+        Combine with ErrorLogs to identify which errors occur where.
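+
+        A minimal usage sketch (cases as used throughout this module):
+
+        >>> cases_fail = pbs.check_results(cases)
+        >>> # cases_fail is empty when all log and result files are present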
+        """
+
+        cases_fail = {}
+
+        print('checking if all log and result files are present...', end='')
+
+        # check for each case if we have results and a log file
+        for cname, case in cases.iteritems():
+            run_dir = case['[run_dir]']
+            res_dir = case['[res_dir]']
+            log_dir = case['[log_dir]']
+            cname_ = cname.replace('.htc', '')
+            f_log = os.path.join(run_dir, log_dir, cname_)
+            f_res = os.path.join(run_dir, res_dir, cname_)
+            if not os.path.exists(f_log + '.log'):
+                cases_fail[cname] = copy.copy(cases[cname])
+                continue
+            try:
+                size_sel = os.stat(f_res + '.sel').st_size
+                size_dat = os.stat(f_res + '.dat').st_size
+            except OSError:
+                size_sel = 0
+                size_dat = 0
+            if size_sel < 5 or size_dat < 5:
+                cases_fail[cname] = copy.copy(cases[cname])
+
+        print('done!')
+
+        # length will be zero if there are no failures
+        return cases_fail
+
+# TODO: rewrite the error log analysis to something better. Take different
+# approach: start from the case and see if the results are present. Then we
+# also have the tags_dict available when log-checking a certain case
+class ErrorLogs:
+    """
+    Analyse all HAWC2 log files in any given directory
+    ==================================================
+
+    Usage:
+    logs = ErrorLogs()
+    logs.MsgList    : list with the messages to be checked. Add more if required
+    logs.ResultFile : name of the result file (default is ErrorLog.csv)
+    logs.PathToLogs : specify the directory where the log files reside,
+                        the ResultFile will be saved in the same directory.
+                        It is also possible to give the path of a specific
+                        file, the logfile will not be saved in this case. Save
+                        when all required messages are analysed with save()
+    logs.check() to analyse all the logfiles and create the ResultFile
+    logs.save() to save after single file analysis
+
+    logs.MsgListLog : [ [case, line nr, error1, line nr, error2, ....], [], ...]
+    holding the error messages, empty if no error message is found. It
+    will survive as long as the logs object exists. Keep in mind that
+    processing many log files with many error types (as defined in MsgList)
+    might lead to an increase in memory usage.
+
+    logs.MsgListLog2 : dict(key=case, value=[found_error, exit_correct])
+        where found_error and exit_correct are booleans. found_error just
+        indicates whether or not any error message has been found
+
+    All files in the specified folder (PathToLogs) will be evaluated.
+    When any item present in MsgList occurs, the line number of the first
+    occurrence will be displayed in the ResultFile.
+    If more messages are required, add them to the MsgList
+    """
+
+    # TODO: move to the HAWC2 plugin for cases
+
+    def __init__(self, silent=False, cases=None):
+
+        self.silent = silent
+        # specify folder which contains the log files
+        self.PathToLogs = ''
+        self.ResultFile = 'ErrorLog.csv'
+
+        self.cases = cases
+
+        # the total message list log:
+        self.MsgListLog = []
+        # a smaller version, just indication if there are errors:
+        self.MsgListLog2 = dict()
+
+        # specify which message to look for. The number tracks the order.
+        # this makes it easier to view afterwards in a spreadsheet:
+        # every error will have its own column
+
+        # error messages that appear during initialisation
+        self.err_init = {}
+        self.err_init[' *** ERROR *** Error in com'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR ***  in command '] = len(self.err_init.keys())
+        #  *** WARNING *** A comma "," is written within the command line
+        self.err_init[' *** WARNING *** A comma ",'] = len(self.err_init.keys())
+        #  *** ERROR *** Not correct number of parameters
+        self.err_init[' *** ERROR *** Not correct '] = len(self.err_init.keys())
+        #  *** INFO *** End of file reached
+        self.err_init[' *** INFO *** End of file r'] = len(self.err_init.keys())
+        #  *** ERROR *** No line termination in command line
+        self.err_init[' *** ERROR *** No line term'] = len(self.err_init.keys())
+        #  *** ERROR *** MATRIX IS NOT DEFINITE
+        self.err_init[' *** ERROR *** MATRIX IS NO'] = len(self.err_init.keys())
+        #  *** ERROR *** There are unused relative
+        self.err_init[' *** ERROR *** There are un'] = len(self.err_init.keys())
+        #  *** ERROR *** Error finding body based
+        self.err_init[' *** ERROR *** Error findin'] = len(self.err_init.keys())
+        #  *** ERROR *** In body actions
+        self.err_init[' *** ERROR *** In body acti'] = len(self.err_init.keys())
+        #  *** ERROR *** Command unknown
+        self.err_init[' *** ERROR *** Command unkn'] = len(self.err_init.keys())
+        #  *** ERROR *** ERROR - More bodies than elements on main_body: tower
+        self.err_init[' *** ERROR *** ERROR - More'] = len(self.err_init.keys())
+        #  *** ERROR *** The program will stop
+        self.err_init[' *** ERROR *** The program '] = len(self.err_init.keys())
+        #  *** ERROR *** Unknown begin command in topologi.
+        self.err_init[' *** ERROR *** Unknown begi'] = len(self.err_init.keys())
+        #  *** ERROR *** Not all needed topologi main body commands present
+        self.err_init[' *** ERROR *** Not all need'] = len(self.err_init.keys())
+        #  *** ERROR ***  opening timoschenko data file
+        self.err_init[' *** ERROR ***  opening tim'] = len(self.err_init.keys())
+        #  *** ERROR *** Error opening AE data file
+        self.err_init[' *** ERROR *** Error openin'] = len(self.err_init.keys())
+        #  *** ERROR *** Requested blade _ae set number not found in _ae file
+        self.err_init[' *** ERROR *** Requested bl'] = len(self.err_init.keys())
+        #  Error opening PC data file
+        self.err_init[' Error opening PC data file'] = len(self.err_init.keys())
+        #  *** ERROR *** error reading mann turbulence
+        self.err_init[' *** ERROR *** error readin'] = len(self.err_init.keys())
+        #  *** INFO *** The DLL subroutine
+        self.err_init[' *** INFO *** The DLL subro'] = len(self.err_init.keys())
+        #  ** WARNING: FROM ESYS ELASTICBAR: No keyword
+        self.err_init[' ** WARNING: FROM ESYS ELAS'] = len(self.err_init.keys())
+        #  *** ERROR *** DLL ./control/killtrans.dll could not be loaded - error!
+        self.err_init[' *** ERROR *** DLL'] = len(self.err_init.keys())
+        # *** ERROR *** The DLL subroutine
+        self.err_init[' *** ERROR *** The DLL subr'] = len(self.err_init.keys())
+        # *** WARNING *** Shear center x location not in elastic center, set to zero
+        self.err_init[' *** WARNING *** Shear cent'] = len(self.err_init.keys())
+        self.err_init[' *** WARNING ***'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR ***'] = len(self.err_init.keys())
+        self.err_init[' WARNING'] = len(self.err_init.keys())
+        self.err_init[' ERROR'] = len(self.err_init.keys())
+
+        # error messages that appear during simulation
+        self.err_sim = {}
+        #  *** ERROR *** Wind speed requested inside
+        self.err_sim[' *** ERROR *** Wind speed r'] = len(self.err_sim.keys())
+        #  Maximum iterations exceeded at time step:
+        self.err_sim[' Maximum iterations exceede'] = len(self.err_sim.keys())
+        #  Solver seems not to converge:
+        self.err_sim[' Solver seems not to conver'] = len(self.err_sim.keys())
+        #  *** ERROR *** Out of x bounds:
+        self.err_sim[' *** ERROR *** Out of x bou'] = len(self.err_sim.keys())
+        #  *** ERROR *** Out of limits in user defined shear field - limit value used
+        self.err_sim[' *** ERROR *** Out of limit'] = len(self.err_sim.keys())
+
+        # TODO: error message from a non existing channel output/input
+        # add more messages if required...
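+        # A sketch of how an extra (hypothetical) message could be watched;
+        # note that only the first 27 characters of a log line are matched:
+        # self.err_sim[' *** ERROR *** NaN detected'] = len(self.err_sim.keys())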
+
+        self.init_cols = len(self.err_init.keys())
+        self.sim_cols = len(self.err_sim.keys())
+
+    # TODO: save this not a csv text string but a df_dict, and save as excel
+    # and DataFrame!
+    def check(self, appendlog=False, save_iter=False):
+
+        # MsgListLog = []
+
+        # load all the files in the given path
+        FileList = []
+        for files in os.walk(self.PathToLogs):
+            FileList.append(files)
+
+        # if, instead of a directory, a file path is given,
+        # the generated FileList will be empty!
+        try:
+            NrFiles = len(FileList[0][2])
+        # input was a single file:
+        except IndexError:
+            NrFiles = 1
+            # simulate one entry on FileList[0][2], give it the file name
+            # and save the directory in self.PathToLogs
+            tmp = self.PathToLogs.split(os.path.sep)[-1]
+            # cut out the file name from the directory
+            self.PathToLogs = self.PathToLogs.replace(tmp, '')
+            FileList.append([ [],[],[tmp] ])
+            single_file = True
+        i=1
+
+        # walk through the files present in the folder path
+        for fname in FileList[0][2]:
+            fname_lower = fname.lower()
+            # progress indicator
+            if NrFiles > 1:
+                if not self.silent:
+                    print('progress: ' + str(i) + '/' + str(NrFiles))
+
+            # open the current log file
+            f_log = os.path.join(self.PathToLogs, str(fname_lower))
+            with open(f_log, 'r') as f:
+                lines = f.readlines()
+
+            # keep track of the messages already found in this file
+            tempLog = []
+            tempLog.append(fname)
+            exit_correct, found_error = False, False
+            # create empty list item for the different messages and line
+            # number. Include one column for non identified messages
+            for j in range(self.init_cols + self.sim_cols + 1):
+                tempLog.append('')
+                tempLog.append('')
+
+            # if there is a cases object, see how many time steps we expect
+            if self.cases is not None:
+                case = self.cases[fname.replace('.log', '.htc')]
+                dt = float(case['[dt_sim]'])
+                time_steps = float(case['[time_stop]']) / dt
+                iterations = np.ndarray((int(time_steps)+1, 3), dtype=np.float32)
+            else:
+                iterations = np.ndarray( (len(lines),3), dtype=np.float32 )
+                dt = False
+            iterations[:,0:2] = -1
+            iterations[:,2] = 0
+
+            # keep track of the time_step number
+            time_step, init_block = -1, True
+            # check for messages in the current line
+            # for speed: delete from message watch list if message is found
+            for j, line in enumerate(lines):
+                # all id's of errors are 27 characters long
+                msg = line[:27]
+
+                # keep track of the number of iterations
+                if line[:12] == ' Global time':
+                    time_step += 1
+                    iterations[time_step,0] = float(line[14:40])
+                    iterations[time_step,1] = int(line[-6:-2])
+                    # time step is the first time stamp
+                    if not dt:
+                        dt = float(line[15:40])
+                    # no need to look for messages if global time is mentioned
+                    continue
+
+                elif line[:20] == ' Starting simulation':
+                    init_block = False
+
+                elif init_block:
+                    # if string is shorter, we just get a shorter string.
+                    # checking presence in dict is faster compared to checking
+                    # the length of the string
+                    if msg in self.err_init:
+                        col_nr = self.err_init[msg]
+                        # 2nd item is the column position of the message
+                        tempLog[2*(col_nr+1)] = line[:-2]
+                        # line number of the message
+                        tempLog[2*col_nr+1] += '%i, ' % j
+                        found_error = True
+
+                # find errors that can occur during simulation
+                elif msg in self.err_sim:
+                    col_nr = self.err_sim[msg] + self.init_cols
+                    # 2nd item is the column position of the message
+                    tempLog[2*(col_nr+1)] = line[:-2]
+                    # in case stuff already goes wrong on the first time step
+                    if time_step == -1:
+                        time_step = 0
+                    # line number of the message
+                    tempLog[2*col_nr+1] += '%i, ' % time_step
+                    found_error = True
+                    iterations[time_step,2] = 1
+
+                # last resort: catch any other unidentified ERROR or WARNING
+                elif line[:10] == ' *** ERROR' or line[:10]==' ** WARNING':
+                    tempLog[-2] = line[:-2]
+                    # line number of the message
+                    tempLog[-1] = j
+                    found_error = True
+                    # in case stuff already goes wrong on the first time step
+                    if time_step == -1:
+                        time_step = 0
+                    iterations[time_step,2] = 1
+
+            # simulation and simulation output time
+            if self.cases is not None:
+                t_stop = float(case['[time_stop]'])
+                duration = float(case['[duration]'])
+            else:
+                t_stop = -1
+                duration = -1
+
+            # see if the last line holds the sim time
+            if line[:15] ==  ' Elapsed time :':
+                exit_correct = True
+                elapsed_time = float(line[15:-3])
+                tempLog.append( elapsed_time )
+            # in some cases, Elapsed time is not given, and the last message
+            # might be: " Closing of external type2 DLL"
+            elif line[:20] == ' Closing of external':
+                exit_correct = True
+                elapsed_time = iterations[time_step,0]
+                tempLog.append( elapsed_time )
+            elif np.allclose(iterations[time_step,0], t_stop):
+                exit_correct = True
+                elapsed_time = iterations[time_step,0]
+                tempLog.append( elapsed_time )
+            else:
+                elapsed_time = -1
+                tempLog.append('')
+
+            # give the last recorded time step
+            tempLog.append('%1.11f' % iterations[time_step,0])
+
+            # simulation and simulation output time
+            tempLog.append('%1.01f' % t_stop)
+            tempLog.append('%1.04f' % (t_stop/elapsed_time))
+            tempLog.append('%1.01f' % duration)
+
+            # as last element, add the total number of iterations
+            itertotal = np.nansum(iterations[:,1])
+            tempLog.append('%i' % itertotal)
+
+            # the delta t used for the simulation
+            if dt:
+                tempLog.append('%1.7f' % dt)
+            else:
+                tempLog.append('failed to find dt')
+
+            # number of time steps
+            tempLog.append('%i' % len(iterations) )
+
+            # if the simulation didn't end correctly, the elapsed_time doesn't
+            # exist. Add the average and maximum nr of iterations per step
+            # or, if only the structural and eigen analysis is done, we have 0
+            try:
+                ratio = float(elapsed_time)/float(itertotal)
+                tempLog.append('%1.6f' % ratio)
+            except (UnboundLocalError, ZeroDivisionError, ValueError) as e:
+                tempLog.append('')
+            # when there are no time steps (structural analysis only)
+            try:
+                tempLog.append('%1.2f' % iterations[:,1].mean() )
+                tempLog.append('%1.2f' % iterations[:,1].max() )
+            except ValueError:
+                tempLog.append('')
+                tempLog.append('')
+
+            # save the iterations in the results folder
+            if save_iter:
+                fiter = fname.replace('.log', '.iter')
+                fmt = ['%12.06f', '%4i', '%4i']
+                if self.cases is not None:
+                    fpath = os.path.join(case['[run_dir]'], case['[iter_dir]'])
+                    # os.makedirs creates intermediate directories as needed
+                    if not os.path.exists(fpath):
+                        os.makedirs(fpath)
+                    np.savetxt(os.path.join(fpath, fiter), iterations,
+                               fmt=fmt)
+                else:
+                    np.savetxt(os.path.join(self.PathToLogs, fiter), iterations,
+                               fmt=fmt)
+
+            # append the messages found in the current file to the overview log
+            self.MsgListLog.append(tempLog)
+            self.MsgListLog2[fname] = [found_error, exit_correct]
+            i += 1
+
+#            # if no messages are found for the current file, than say so:
+#            if len(MsgList2) == len(self.MsgList):
+#                tempLog[-1] = 'NO MESSAGES FOUND'
+
+        # if we have only one file, don't save the log file to disk. It is
+        # expected that if we analyse many different single files, this will
+        # cause a slower script
+        if single_file:
+            # now we make it available over the object to save and let it grow
+            # over many analysis
+            # self.MsgListLog = copy.copy(MsgListLog)
+            pass
+        else:
+            self.save(appendlog=appendlog)
+
+    def save(self, appendlog=False):
+
+        # write the results in a file, start with a header
+        contents = 'file name;' + 'lnr;msg;'*(self.init_cols)
+        contents += 'iter_nr;msg;'*(self.sim_cols)
+        contents += 'lnr;msg;'
+        # and add headers for elapsed time, nr of iterations, and sec/iteration
+        contents += 'Elapsed time;last time step;Simulation time;'
+        contents += 'real sim time;Sim output time;'
+        contents += 'total iterations;dt;nr time steps;'
+        contents += 'seconds/iteration;average iterations/time step;'
+        contents += 'maximum iterations/time step;\n'
+        for k in self.MsgListLog:
+            for n in k:
+                contents = contents + str(n) + ';'
+            # at the end of each line, new line symbol
+            contents = contents + '\n'
+
+        # write csv file to disk, append to facilitate more logfile analysis
+        fname = os.path.join(self.PathToLogs, str(self.ResultFile))
+        if not self.silent:
+            print('Error log analysis saved at:')
+            print(fname)
+        if appendlog:
+            mode = 'a'
+        else:
+            mode = 'w'
+        with open(fname, mode) as f:
+            f.write(contents)
+
+
+class ModelData:
+    """
+    Second generation ModelData class. The HawcPy version is crappy, buggy
+    and not of much use in the optimisation context.
+    """
+    class st_headers:
+        """
+        Indices to the respective parameters in the HAWC2 st data file
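+
+        For example, the mass per unit length column of a subset data array
+        can be obtained as (illustrative): st_arr[:, ModelData.st_headers.m]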
+        """
+        r     = 0
+        m     = 1
+        x_cg  = 2
+        y_cg  = 3
+        ri_x  = 4
+        ri_y  = 5
+        x_sh  = 6
+        y_sh  = 7
+        E     = 8
+        G     = 9
+        Ixx   = 10
+        Iyy   = 11
+        I_p   = 12
+        k_x   = 13
+        k_y   = 14
+        A     = 15
+        pitch = 16
+        x_e   = 17
+        y_e   = 18
+
+    def __init__(self, verbose=False, silent=False):
+        self.verbose = verbose
+        self.silent = silent
+        # define the column width for printing
+        self.col_width = 13
+        # formatting and precision
+        self.float_hi = 9999.9999
+        self.float_lo =  0.01
+        self.prec_float = ' 9.05f'
+        self.prec_exp =   ' 8.04e'
+        self.prec_loss = 0.01
+
+        #0 1  2    3    4    5    6    7   8 9 10   11
+        #r m x_cg y_cg ri_x ri_y x_sh y_sh E G I_x  I_y
+        #12    13  14  15  16  17  18
+        #I_p/K k_x k_y A pitch x_e y_e
+        # 19 cols
+        self.st_column_header_list = ['r', 'm', 'x_cg', 'y_cg', 'ri_x', \
+            'ri_y', 'x_sh', 'y_sh', 'E', 'G', 'I_x', 'I_y', 'J', 'k_x', \
+            'k_y', 'A', 'pitch', 'x_e', 'y_e']
+
+        self.st_column_header_list_latex = ['r','m','x_{cg}','y_{cg}','ri_x',\
+            'ri_y', 'x_{sh}','y_{sh}','E', 'G', 'I_x', 'I_y', 'J', 'k_x', \
+            'k_y', 'A', 'pitch', 'x_e', 'y_e']
+
+        # make the column header
+        self.column_header_line = 19 * self.col_width * '=' + '\n'
+        for k in self.st_column_header_list:
+            self.column_header_line += k.rjust(self.col_width)
+        self.column_header_line += '\n' + (19 * self.col_width * '=') + '\n'
+
+    def fromline(self, line, separator=' '):
+        # TODO: move this to the global function space (dav-general-module)
+        """
+        split a line, but ignore any blank spaces and return a list with only
+        the values, not empty places
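+
+        Example (illustrative):
+
+        >>> md = ModelData(silent=True)
+        >>> md.fromline(' 10.0\t 2.5   0.33 ')
+        ['10.0', '2.5', '0.33']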
+        """
+        # remove all tabs, new lines and carriage returns (\t, \n, \r)
+        line = line.replace('\t',' ').replace('\n','').replace('\r','')
+        # strip leading and trailing spaces
+        line = line.strip()
+        line = line.split(separator)
+        values = []
+        for k in range(len(line)):
+            if len(line[k]) > 0: #and k == item_nr:
+                values.append(line[k])
+                # break
+
+        return values
+
+    def load_st(self, file_path, file_name):
+        """
+        Now a better format: st_dict has the following key/value pairs
+            'nset'      : total number of sets in the file (int).
+                          This should be recalculated every time a new file
+                          is written.
+            '007-000-0' : set number line in one piece
+            '007-001-a' : comments for set-subset nr 07-01 (str)
+            '007-001-b' : subset nr and number of data points, should be
+                          recalculated every time a new file is generated
+            '007-001-d' : data for set-subset nr 07-01 (ndarray(n,19))
+
+        NOTE: currently only subset comments are considered; it is not clear
+        how set comments are treated.
+
+        st_dict makes it easy to rewrite the same file. For easy access to
+        the comments we additionally keep st_comments.
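+
+        Example (illustrative; path and file name are hypothetical):
+
+        >>> md = ModelData(silent=True)
+        >>> md.load_st('/path/to/model/data/', 'blade.st')
+        >>> st_arr = md.st_dict['007-001-d']       # data of set 7, subset 1
+        >>> comment = md.st_comments['007-001-b']  # its subset comment line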
+        """
+
+        # TODO: store this in an HDF5 format! This is perfect for that.
+
+        # read all the lines of the file into memory
+        self.st_path, self.st_file = file_path, file_name
+        FILE = open(os.path.join(file_path, file_name))
+        lines = FILE.readlines()
+        FILE.close()
+
+        subset = False
+        st_dict = dict()
+        st_comments = dict()
+        for i, line in enumerate(lines):
+
+            # convert line to list space seperated list
+            line_list = self.fromline(line)
+
+            # see if the first character is marking something
+            if i == 0:
+                # it is possible that the NSET line is not defined
+                parts = line.split(' ')
+                try:
+                    for k in xrange(10):
+                        parts.remove(' ') # throws error when can't find
+                except ValueError:
+                    pass
+                # we don't care what is on the nset line, just capture if
+                # there are any comments lines
+                set_nr = 0
+                subset_nr = 0
+                st_dict['000-000-0'] = line
+
+            # marks the start of a set
+            if line[0] == '#':
+                #sett = True
+                # first character is the #, the rest is the number
+                set_nr = int(line_list[0][1:])
+                st_dict['%03i-000-0' % set_nr] = line
+                # and reset subset nr to zero now
+                subset_nr = 0
+                subset_nr_track = 0
+                # and comments only format, back to one string
+                st_comments['%03i-000-0' % set_nr] = ' '.join(line_list[1:])
+
+            # marks the start of a subset
+            elif line[0] == '$':
+                subset_nr_track += 1
+                subset = True
+                subset_nr = int(line_list[0][1:])
+                # and comments only format, back to one string
+                setid = '%03i-%03i-b' % (set_nr, subset_nr)
+                st_comments[setid] = ' '.join(line_list[2:])
+
+                # check if the number read corresponds to tracking
+                if subset_nr != subset_nr_track:
+                    msg = 'subset_nr and subset_nr_track do not match'
+                    raise UserWarning(msg)
+
+                nr_points = int(line_list[1])
+                st_dict[setid] = line
+                # prepare read data points
+                sub_set_arr = np.zeros((nr_points,19), dtype=np.float64)
+                # keep track of where we are on the data array, initialize
+                # to 0 for starters
+                point = 0
+
+            # in case we are not in subset mode, we only have comments left
+            elif not subset:
+                # FIXME: how are we dealing with set comments now?
+                # subset comments are coming before the actual subset
+                # so we account them to one set later than we are now
+                #if subset_nr > 0 :
+                key = '%03i-%03i-a' % (set_nr, subset_nr+1)
+                # in case it is not the first comment line
+                if key in st_dict:
+                    st_dict[key] += line
+                else:
+                    st_dict[key] = line
+                ## otherwise we have the set comments
+                #else:
+                    #key = '%03i-%03i-a' % (set_nr, subset_nr)
+                    ## in case it is not the first comment line
+                    #if st_dict.has_key(key): st_dict[key] += line
+                    #else: st_dict[key]  = line
+
+            # in case we have the data points: only accept lines that hold
+            # all 19 columns
+            elif len(line_list)==19 and subset:
+                # we can store it in the array
+                sub_set_arr[point,:] = line_list
+                # on the last entry:
+                if point == nr_points-1:
+                    # save to the dict:
+                    st_dict['%03i-%03i-d' % (set_nr, subset_nr)]= sub_set_arr
+                    # and indicate we're done subsetting, next we can have
+                    # either set or subset comments
+                    subset = False
+                point += 1
+
+            #else:
+                #msg='error in st format: don't know where to put current line'
+                #raise UserWarning, msg
+
+        self.st_dict = st_dict
+        self.st_comments = st_comments
+
+    def _format_nr(self, number):
+        """
+        Automatically format the number
+
+        prec_loss : float, default=0.01
+            acceptable precision loss expressed in %
+
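+        Examples (assuming the default precision settings from __init__):
+
+        >>> md = ModelData(silent=True)
+        >>> md._format_nr(12.3)
+        ' 12.30000'
+        >>> md._format_nr(1.2e-08)
+        ' 1.2000e-08'
+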
+        """
+
+        # the formatting of the number
+        numabs = abs(number)
+        # just a float precision defined in self.prec_float
+        if (numabs < self.float_hi and numabs > self.float_lo):
+            numfor = format(number, self.prec_float)
+        # if it is zero, just simply print as 0.0
+        elif number == 0.0:
+            numfor = format(number, ' 1.1f')
+        # exponential notation, precision defined in self.prec_exp
+        else:
+            numfor = format(number, self.prec_exp)
+
+        try:
+            loss = 100.0*abs(1 - (float(numfor)/number))
+        except ZeroDivisionError:
+            if abs(float(numfor)) > 0.00000001:
+                msg = 'precision loss, from %1.10f to %s' \
+                            % (number, numfor.strip())
+                raise ValueError(msg)
+            else:
+                loss = 0
+        if loss > self.prec_loss:
+            msg = 'precision loss, from %1.10f to %s (%f pc)' \
+                        % (number, numfor.strip(), loss)
+            raise ValueError(msg)
+
+        return numfor
+
+    def write_st(self, file_path, file_name, print_header=False):
+        """
+        prec_loss : float, default=0.01
+            acceptable precision loss expressed in %
+        """
+        # TODO: implement all the tests when writing on nset, number of data
+        # points, subsetnumber sequence etc
+
+        content = ''
+
+        # sort the key list
+        keysort = sorted(self.st_dict.keys())
+
+        for key in keysort:
+
+            # in case we are just printing what was recorded before
+            if not key.endswith('d'):
+                content += self.st_dict[key]
+            # else we have an array
+            else:
+                # cycle through data points and print them orderly: control
+                # precision depending on the number, keep spacing constant
+                # so it is easy to read the textfile
+                for m in range(self.st_dict[key].shape[0]):
+                    for n in range(self.st_dict[key].shape[1]):
+                        # TODO: check what we lose here?
+                        # the array holds np.float64, but then it will not
+                        # work with format()
+                        number = float(self.st_dict[key][m,n])
+                        numfor = self._format_nr(number)
+                        content += numfor.rjust(self.col_width)
+                    content += '\n'
+
+                if print_header:
+                    content += self.column_header_line
+
+        # and write file to disk again
+        with open(file_path + file_name, 'w') as FILE:
+            FILE.write(content)
+        if not self.silent:
+            print('st file written:', file_path + file_name)
+
+    def write_latex(self, fpath, selection=[]):
+        """
+        Write a table in Latex format based on the data in the st file.
+
+        selection : list
+            [ [setnr, subsetnr, table caption], [setnr, subsetnr, caption],...]
+            if not specified, all subsets will be plotted
+
+        """
+
+        cols_p1 = ['r [m]', 'm [kg/m]', 'm(ri{_x})^2 [kgNm^2]',
+                   'm(ri{_y})^2 [kgNm^2]', 'EI_x [Nm^2]', 'EI_y [Nm^2]',
+                   'EA [N]', 'GJ [\\frac{Nm^2}{rad}]']
+
+        cols_p2 = ['r [m]', 'x_cg [m]', 'y_cg [m]', 'x_sh [m]', 'y_sh [m]',
+                'x_e [m]', 'y_e [m]', 'k_x [-]', 'k_y [-]', 'pitch [deg]']
+
+        if len(selection) < 1:
+            for key in self.st_dict.keys():
+                # but now only take the ones that hold data
+                if key[-1] == 'd':
+                    selection.append([int(key[:3]), int(key[4:7]), ''])
+
+        for i,j, caption in selection:
+            # get the data
+            try:
+                # set comment should be the name of the body
+                set_comment = self.st_comments['%03i-000-0' % (i)]
+#                subset_comment = self.st_comments['%03i-%03i-b' % (i,j)]
+                st_arr = self.st_dict['%03i-%03i-d' % (i,j)]
+            except AttributeError:
+                msg = 'ModelData object md is not loaded properly'
+                raise AttributeError(msg)
+
+            # build the latex table header
+#            textable = u"\\begin{table}[b!]\n"
+#            textable += u"\\begin{center}\n"
+            textable_p1 = u"\\centering\n"
+            textable_p1 += u"\\begin{tabular}"
+            # configure the column properties
+            tmp = [u'C{2.0 cm}' for k in cols_p1]
+            tmp = u"|".join(tmp)
+            textable_p1 += u'{|' + tmp + u'|}'
+            textable_p1 += u'\hline\n'
+            # add formula mode for the headers
+            tmp = []
+            for k in cols_p1:
+                k1, k2 = k.split(' ')
+                tmp.append(u'$%s$ $%s$' % (k1,k2) )
+#            tmp = [u'$%s$' % k for k in cols_p1]
+            textable_p1 += u' & '.join(tmp)
+            textable_p1 += u'\\\\ \n'
+            textable_p1 += u'\hline\n'
+
+            textable_p2 = u"\\centering\n"
+            textable_p2 += u"\\begin{tabular}"
+            # configure the column properties
+            tmp = [u'C{1.5 cm}' for k in cols_p2]
+            tmp = u"|".join(tmp)
+            textable_p2 += u'{|' + tmp + u'|}'
+            textable_p2 += u'\hline\n'
+            # add formula mode for the headers
+            tmp = []
+            for k in cols_p2:
+                k1, k2 = k.split(' ')
+                tmp.append(u'$%s$ $%s$' % (k1,k2) )
+#            tmp = [u'$%s$ $%s$' % (k1, k2) for k in cols_p2]
+            # hack: spread the last element over two lines
+#            tmp[-1] = '$pitch$ $[deg]$'
+            textable_p2 += u' & '.join(tmp)
+            textable_p2 += u'\\\\ \n'
+            textable_p2 += u'\hline\n'
+
+            for row in xrange(st_arr.shape[0]):
+                r    = st_arr[row, self.st_headers.r]
+                m    = st_arr[row,self.st_headers.m]
+                x_cg = st_arr[row,self.st_headers.x_cg]
+                y_cg = st_arr[row,self.st_headers.y_cg]
+                ri_x = st_arr[row,self.st_headers.ri_x]
+                ri_y = st_arr[row,self.st_headers.ri_y]
+                x_sh = st_arr[row,self.st_headers.x_sh]
+                y_sh = st_arr[row,self.st_headers.y_sh]
+                E    = st_arr[row,self.st_headers.E]
+                G    = st_arr[row,self.st_headers.G]
+                Ixx  = st_arr[row,self.st_headers.Ixx]
+                Iyy  = st_arr[row,self.st_headers.Iyy]
+                I_p  = st_arr[row,self.st_headers.I_p]
+                k_x  = st_arr[row,self.st_headers.k_x]
+                k_y  = st_arr[row,self.st_headers.k_y]
+                A    = st_arr[row,self.st_headers.A]
+                pitch = st_arr[row,self.st_headers.pitch]
+                x_e   = st_arr[row,self.st_headers.x_e]
+                y_e   = st_arr[row,self.st_headers.y_e]
+                # WARNING: same order as the labels defined in variable "cols"!
+                p1 = [r, m, m*ri_x*ri_x, m*ri_y*ri_y, E*Ixx, E*Iyy, E*A,I_p*G]
+                p2 = [r, x_cg, y_cg, x_sh, y_sh, x_e, y_e, k_x, k_y, pitch]
+
+                textable_p1 += u" & ".join([self._format_nr(k) for k in p1])
+                textable_p1 += u'\\\\ \n'
+
+                textable_p2 += u" & ".join([self._format_nr(k) for k in p2])
+                textable_p2 += u'\\\\ \n'
+
+            # default caption
+            if caption == '':
+                caption = 'HAWC2 cross sectional parameters for body: %s' % set_comment
+
+            textable_p1 += u"\hline\n"
+            textable_p1 += u"\end{tabular}\n"
+            textable_p1 += u"\caption{%s}\n" % caption
+#            textable += u"\end{center}\n"
+#            textable += u"\end{table}\n"
+
+            fname = '%s-%s-%03i-%03i_p1' % (self.st_file, set_comment, i, j)
+            fname = fname.replace('.', '') + '.tex'
+            with open(fpath + fname, 'w') as f:
+                f.write(textable_p1)
+
+            textable_p2 += u"\hline\n"
+            textable_p2 += u"\end{tabular}\n"
+            textable_p2 += u"\caption{%s}\n" % caption
+#            textable += u"\end{center}\n"
+#            textable += u"\end{table}\n"
+
+            fname = '%s-%s-%03i-%03i_p2' % (self.st_file, set_comment, i, j)
+            fname = fname.replace('.', '') + '.tex'
+            with open(fpath + fname, 'w') as f:
+                f.write(textable_p2)
+
+
+class WeibullParameters(object):
+
+    def __init__(self):
+        self.Vin = 4.
+        self.Vr = 12.
+        self.Vout = 26.
+        self.Vref = 50.
+        self.Vstep = 2.
+        self.shape_k = 2.
+
+
+# FIXME: Cases has a memory leak somewhere, this whole thing needs to be
+# reconsidered and rely on a DataFrame instead of a dict!
+class Cases:
+    """
+    Class for the old htc_dict
+    ==========================
+
+    Formerly known as htc_dict: a dictionary with on the key a case identifier
+    (case name) and the value is a dictionary holding all the different tags
+    and value pairs which define the case
+
+    TODO:
+
+    define a public API so that plugins can be exposed in a standardized way
+    using predefined variables:
+
+    * pandas DataFrame backend instead of a dictionary
+
+    * generic, so not bound to HAWC2. Goal: manage a lot of simulations
+      and their corresponding inputs/outputs
+
+    * integration with OpenMDAO?
+
+    * case id (hash)
+
+    * case name (which is typically created with variable_tag_name method)
+
+    * results
+
+    * inputs
+
+    * outputs
+
+    variable tags that have a dictionary mirror for database-like searching
+
+    launch, post_launch, prepare_(re)launch should be methods of this class
+    or inherit from Cases
+
+    Create a method to add and remove cases from the pool so you can perform
+    some analysis on them. Maybe make a GUI that presents a list with the
+    current cases in the pool and then checkboxes to remove them.
+
+    Remove the HAWC2 specific parts to a HAWC2 plugin. The HAWC2 plugin will
+    inherit from Cases. Proposed class names: HAWC2Cases, XFOILCases
+
+    Rename cases to pool? A pool contains several cases, mixing several
+    sim_id's?
+
+    create a unique case ID based on the hash value of all the tag+values?
+    """
+
+    # TODO: add a method that can reload a certain case_dict, you change
+    # some parameters for each case (or some) and than launch again
+
+    #def __init__(self, post_dir, sim_id, resdir=False):
+    def __init__(self, *args, **kwargs):
+        """
+        Load the cases dictionary if post_dir and sim_id are given; otherwise
+        the single argument is taken to be a cases dictionary.
+
+        Parameters
+        ----------
+
+        cases : dict
+            The cases dictionary in case there is only one argument
+
+        post_dir : str
+            When using two arguments
+
+        sim_id : str or list
+            When using two arguments
+
+        resdir : str, default=False
+
+        loadstats : boolean, default=False
+
+        rem_failed : boolean, default=True
+
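+        Examples (illustrative; path and sim_id are hypothetical)
+        ----------------------------------------------------------
+
+        >>> cc = Cases('/path/to/post_dir/', 'sim_id_01', rem_failed=True)
+        >>> cc = Cases(cases_dict)
+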
+        """
+
+        resdir = kwargs.get('resdir', False)
+        self.loadstats = kwargs.get('loadstats', False)
+        self.rem_failed = kwargs.get('rem_failed', True)
+        self.config = kwargs.get('config', {})
+        print(self.config)
+        # determine the input argument scenario
+        if len(args) == 1:
+            if type(args[0]).__name__ == 'dict':
+                self.cases = args[0]
+                sim_id = False
+            else:
+                raise ValueError('One argument input should be a cases dict')
+        elif len(args) == 2:
+            self.post_dir = args[0]
+            sim_id = args[1]
+        else:
+            raise ValueError('Only one or two arguments are allowed.')
+
+        # if sim_id is a list, then merge all sim_id's of that list
+        if type(sim_id).__name__ == 'list':
+            # stats, dynprop and fail are empty dictionaries if they do not
+            # exist
+            self.merge_sim_ids(sim_id)
+            # and define a new sim_id based on all items from the list
+            self.sim_id = '_'.join(sim_id)
+        # in case we still need to load the cases dict
+        elif type(sim_id).__name__ == 'str':
+            self.sim_id = sim_id
+            self._get_cases_dict(self.post_dir, sim_id)
+            # load the statistics if applicable
+            if self.loadstats:
+                self.stats_df, self.Leq_df, self.AEP_df = self.load_stats()
+
+        # change the results directory if applicable
+        if resdir:
+            self.change_results_dir(resdir)
+
+#        # try to load failed cases and remove them
+#        try:
+#            self.load_failed(sim_id)
+#            self.remove_failed()
+#        except IOError:
+#            pass
+
+        #return self.cases
+
+    def select(self, search_keyval=False, search_key=False):
+        """
+        Select only a sub set of the cases
+
+        Select either search_keyval or search_key. Using both is not supported
+        yet. Run select twice to achieve the same effect. If both are False,
+        cases will be emptied!
+
+        Parameters
+        ----------
+
+        search_keyval : dictionary, default=False
+            Keys are the column names. If the values match the ones in the
+            database, the respective row gets selected. Each tag is hence
+            a unique row identifier
+
+        search_key : dict, default=False
+            The key is the string that should either be inclusive (value TRUE)
+            or exclusive (value FALSE) in the case key
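+
+        Example (illustrative tag name)
+        -------------------------------
+
+        >>> # keep only the cases that have [windspeed] equal to 10
+        >>> cc.select(search_keyval={'[windspeed]':10})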
+        """
+
+        db = misc.DictDB(self.cases)
+        if search_keyval:
+            db.search(search_keyval)
+        elif search_key:
+            db.search_key(search_key)
+        else:
+            db.dict_sel = {}
+        # and remove all keys that are not in the list
+        remove = set(self.cases) - set(db.dict_sel)
+        for k in remove:
+            self.cases.pop(k)
+
+    def launch(self, runmethod='local', verbose=False, copyback_turb=True,
+           silent=False, check_log=True):
+        """
+        Launch all cases
+        """
+
+        launch(self.cases, runmethod=runmethod, verbose=verbose, silent=silent,
+               check_log=check_log, copyback_turb=copyback_turb)
+
+    def post_launch(self, save_iter=False):
+        """
+        Post Launching Maintenance
+
+        check the logs files and make sure result files are present and
+        accounted for.
+        """
+        # TODO: integrate global post_launch in here
+        self.cases_fail = post_launch(self.cases, save_iter=save_iter)
+
+        if self.rem_failed:
+            self.remove_failed()
+
+    def load_case(self, case):
+        try:
+            iterations = self.load_iterations(case)
+        except IOError:
+            iterations = None
+        res = self.load_result_file(case)
+        return res, iterations
+
+    def load_iterations(self, case):
+
+        fp = os.path.join(case['[run_dir]'], case['[iter_dir]'],
+                          case['[case_id]'])
+        return np.loadtxt(fp + '.iter')
+
+    # TODO: HAWC2 result file reading should be moved to Simulations
+    # and we should also switch to faster HAWC2 reading!
+    def load_result_file(self, case, _slice=False):
+        """
+        Set the correct HAWC2 channels
+
+        Parameters
+        ----------
+
+        case : dict
+            a case dictionary holding all the tags set for this specific
+            HAWC2 simulation
+
+        Returns
+        -------
+
+        res : object
+            A HawcPy LoadResults instance with attributes such as sig, ch_dict,
+            and much much more
+
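+        Example (illustrative)
+        ----------------------
+
+        >>> cname = list(cc.cases)[0]
+        >>> res = cc.load_result_file(cc.cases[cname])
+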
+        """
+
+        respath = os.path.join(case['[run_dir]'], case['[res_dir]'])
+        resfile = case['[case_id]']
+        self.res = windIO.LoadResults(respath, resfile)
+        if not _slice:
+            _slice = np.r_[0:len(self.res.sig)]
+        self.time = self.res.sig[_slice,0]
+        self.sig = self.res.sig[_slice,:]
+        self.case = case
+
+        return self.res
+
+    def load_struct_results(self, case, max_modes=500, nrmodes=1000):
+        """
+        Load the structural analysis result files
+        """
+        fpath = os.path.join(case['[run_dir]'], case['[eigenfreq_dir]'])
+
+        # BEAM OUTPUT
+        fname = '%s_beam_output.txt' % case['[case_id]']
+        beam = None
+
+        # BODY OUTPUT
+        fname = '%s_body_output.txt' % case['[case_id]']
+        body = None
+
+        # EIGEN BODY
+        fname = '%s_eigen_body.txt' % case['[case_id]']
+        try:
+            eigen_body, rs2 = windIO.ReadEigenBody(fpath, fname, debug=False,
+                                              nrmodes=nrmodes)
+        except Exception as e:
+            eigen_body = None
+            print('failed to load eigen_body')
+            print(e)
+
+        # EIGEN STRUCT
+        fname = '%s_eigen_struct.txt' % case['[case_id]']
+        try:
+            eigen_struct = windIO.ReadEigenStructure(fpath, fname, debug=False,
+                                                     max_modes=max_modes)
+        except Exception as e:
+            eigen_struct = None
+            print('failed to load eigen_struct')
+            print(e)
+
+        # STRUCT INERTIA
+        fname = '%s_struct_inertia.txt' % case['[case_id]']
+        struct_inertia = None
+
+        return beam, body, eigen_body, eigen_struct, struct_inertia
+
+    def change_results_dir(self, forcedir, post_dir=False):
+        """
+        If the post processing concerns simulations run on thyra/gorm and
+        downloaded locally, change the path to the results accordingly.
+
+        """
+        for case in self.cases:
+            self.cases[case]['[run_dir]'] = forcedir
+            if post_dir:
+                self.cases[case]['[post_dir]'] = post_dir
+
+        #return cases
+
+    def force_lower_case_id(self):
+        tmp_cases = {}
+        for cname, case in self.cases.iteritems():
+            tmp_cases[cname.lower()] = case.copy()
+        self.cases = tmp_cases
+
+    def _get_cases_dict(self, post_dir, sim_id):
+        """
+        Load the pickled dictionary containing all the cases and their
+        respective tags. The result is stored in self.cases; failed cases
+        are moved to self.cases_fail (when rem_failed is True).
+        """
+        self.cases = load_pickled_file(os.path.join(post_dir, sim_id + '.pkl'))
+        self.cases_fail = {}
+
+        self.force_lower_case_id()
+
+        if self.rem_failed:
+            try:
+                self.load_failed(sim_id)
+                # ditch all the failed cases out of the htc_dict otherwise
+                #  we will have fails when reading the results data files
+                self.remove_failed()
+            except IOError:
+                print("couldn't find pickled failed dictionary")
+
+        return
+
+    def cases2df(self):
+        """Convert the cases dict to a DataFrame and save as excel sheet"""
+
+        tag_set = []
+
+        # maybe some cases have tags that others don't, create a set with
+        # all the tags that occur
+        for cname, tags in self.cases.iteritems():
+            tag_set.extend(tags.keys())
+        # also add cname as a tag
+        tag_set.append('cname')
+        # only unique tags
+        tag_set = set(tag_set)
+        # and build the df_dict with all the tags
+        df_dict = {tag:[] for tag in tag_set}
+
+        for cname, tags in self.cases.iteritems():
+            current_tags = set(tags.keys())
+            for tag, value in tags.iteritems():
+                df_dict[tag].append(value)
+            # and the missing ones
+            for tag in (tag_set - current_tags):
+                df_dict[tag].append('')
+
+        df_dict2 = misc.df_dict_check_datatypes(df_dict)
+
+        return pd.DataFrame(df_dict2)
+
+    def merge_sim_ids(self, sim_id_list, silent=False):
+        """
+        Load and merge cases, fail, dynprop and stats for a list of sim_id's
+        =====================================================================
+
+        For all sim_id's in sim_id_list the cases, stats, fail and dynprop
+        dictionaries are loaded. If one of them doesn't exist, an empty
+        dictionary is returned.
+
+        Currently, there is no warning given when a certain case will be
+        overwritten upon merging.
+
+        """
+
+        cases_merged = {}
+        cases_fail_merged = {}
+
+        for ii, sim_id in enumerate(sim_id_list):
+
+            # TODO: give a warning if we have double entries or not?
+            self.sim_id = sim_id
+            self._get_cases_dict(self.post_dir, sim_id)
+            cases_fail_merged.update(self.cases_fail)
+
+            # and copy to htc_dict_merged. Note that non unique keys will be
+            # overwritten: each case has to have a unique name!
+            cases_merged.update(self.cases)
+
+            # merge the statistics if applicable
+            # self.stats_dict[channels] = df
+            if self.loadstats:
+                if ii == 0:
+                    self.stats_df, self.Leq_df, self.AEP_df = self.load_stats()
+                else:
+                    tmp1, tmp2, tmp3 = self.load_stats()
+                    self.stats_df = self.stats_df.append(tmp1)
+                    self.Leq_df = self.Leq_df.append(tmp2)
+                    self.AEP_df = self.AEP_df.append(tmp3)
+
+        self.cases = cases_merged
+        self.cases_fail = cases_fail_merged
+
+    def printall(self, scenario, figpath=''):
+        """
+        For all the cases, get the average value of a certain channel
+        """
+        self.figpath = figpath
+
+        # plot for each case the dashboard
+        for k in self.cases:
+
+            if scenario == 'blade_deflection':
+                self.blade_deflection(self.cases[k], self.figpath)
+
+    def diff(self, refcase_dict, cases):
+        """
+        See which tags change over the given cases of the simulation object
+        """
+
+        # there is only one case allowed in refcase dict
+        if not len(refcase_dict) == 1:
+            raise ValueError('Only one case allowed in refcase dict')
+
+        # take an arbitrary case as baseline for comparison
+        refcase = refcase_dict[refcase_dict.keys()[0]]
+        #reftags = sim_dict[refcase]
+
+        diffdict = dict()
+        adddict = dict()
+        remdict = dict()
+        print()
+        print('*'*80)
+        print('comparing %i cases' % len(cases))
+        print('*'*80)
+        print()
+        # compare each case with the refcase and see if there are any diffs
+        for case in sorted(cases.keys()):
+            dd = misc.DictDiff(refcase, cases[case])
+            diffdict[case] = dd.changed()
+            adddict[case] = dd.added()
+            remdict[case] = dd.removed()
+            print('')
+            print('='*80)
+            print(case)
+            print('='*80)
+            for tag in sorted(diffdict[case]):
+                print(tag.rjust(20),':', cases[case][tag])
+
+        return diffdict, adddict, remdict
+
+    def blade_deflection(self, case, **kwargs):
+        """
+        """
+
+        # read the HAWC2 result file
+        self.load_result_file(case)
+
+        # select all the y deflection channels
+        db = misc.DictDB(self.res.ch_dict)
+
+        db.search({'sensortype' : 'state pos', 'component' : 'z'})
+        # sort the keys and save the mean values to an array/list
+        chiz, zvals = [], []
+        for key in sorted(db.dict_sel.keys()):
+            zvals.append(-self.sig[:,db.dict_sel[key]['chi']].mean())
+            chiz.append(db.dict_sel[key]['chi'])
+
+        db.search({'sensortype' : 'state pos', 'component' : 'y'})
+        # sort the keys and save the mean values to an array/list
+        chiy, yvals = [], []
+        for key in sorted(db.dict_sel.keys()):
+            yvals.append(self.sig[:,db.dict_sel[key]['chi']].mean())
+            chiy.append(db.dict_sel[key]['chi'])
+
+        return np.array(zvals), np.array(yvals)
+
+    def remove_failed(self):
+
+        # don't do anything if there is nothing defined
+        if self.cases_fail is None:
+            print('no failed cases to remove')
+            return
+
+        # ditch all the failed cases out of the htc_dict
+        # otherwise we will have fails when reading the results data files
+        for k in self.cases_fail:
+            try:
+                self.cases_fail[k] = copy.copy(self.cases[k])
+                del self.cases[k]
+                print('removed from htc_dict due to error: ' + k)
+            except KeyError:
+                print('WARNING: failed case does not occur in cases')
+                print('   ', k)
+
+    def load_failed(self, sim_id):
+
+        fname = os.path.join(self.post_dir, sim_id + '_fail.pkl')
+        FILE = open(fname, 'rb')
+        self.cases_fail = pickle.load(FILE)
+        FILE.close()
+
+    def load_stats(self, **kwargs):
+        """
+        Load an existing statistics file
+
+        Parameters
+        ----------
+
+        post_dir : str, default=self.post_dir
+
+        sim_id : str, default=self.sim_id
+
+        fpath : str, default=sim_id
+
+        leq : bool, default=False
+
+        columns : list, default=None
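+
+        Example (illustrative)
+        ----------------------
+
+        >>> stats_df, Leq_df, AEP_df = cc.load_stats(leq=True)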
+        """
+        post_dir = kwargs.get('post_dir', self.post_dir)
+        sim_id = kwargs.get('sim_id', self.sim_id)
+        fpath = os.path.join(post_dir, sim_id)
+        Leq_df = kwargs.get('leq', False)
+        columns = kwargs.get('columns', None)
+
+        try:
+            stats_df = pd.read_hdf(fpath + '_statistics.h5', 'table',
+                                   columns=columns)
+#            FILE = open(post_dir + sim_id + '_statistics.pkl', 'rb')
+#            stats_dict = pickle.load(FILE)
+#            FILE.close()
+        except IOError:
+            stats_df = None
+            print('NO STATS FOUND FOR', sim_id)
+
+        try:
+            AEP_df = pd.read_hdf(fpath + '_AEP.h5', 'table')
+        except IOError:
+            AEP_df = None
+            print('NO AEP FOUND FOR', sim_id)
+
+        if Leq_df:
+            try:
+                Leq_df = pd.read_hdf(fpath + '_Leq.h5', 'table')
+            except IOError:
+                Leq_df = None
+                print('NO Leq FOUND FOR', sim_id)
+
+        return stats_df, Leq_df, AEP_df
+
+    def statistics(self, new_sim_id=False, silent=False, ch_sel=None,
+                   tags=['[turb_seed]','[windspeed]'], calc_mech_power=False,
+                   save=True, m=[3, 4, 6, 8, 10, 12], neq=None, no_bins=46,
+                   ch_fatigue={}, update=False, add_sensor=None,
+                   chs_resultant=[], i0=0, i1=-1, saveinterval=1000,
+                   csv=True, suffix=None, fatigue_cycles=False, A=None,
+                   ch_wind=None, save_new_sigs=False):
+        """
+        Calculate statistics and save them in a pandas dataframe. The
+        statistics file is also saved to disk every saveinterval cases.
+
+        Parameters
+        ----------
+
+        ch_sel : list, default=None
+            If defined, only add defined channels to the output data frame.
+            The list should contain valid channel names as defined in ch_dict.
+
+        tags : list, default=['[turb_seed]','[windspeed]']
+            Select which tag values from cases should be included in the
+            dataframes. This will help in selecting and identifying the
+            different cases.
+
+        ch_fatigue : list, default=[]
+            Valid ch_dict channel names for which the equivalent fatigue load
+            needs to be calculated. When set to None, ch_fatigue = ch_sel,
+            and hence all channels will have a fatigue analysis.
+
+        fatigue_cycles : Boolean, default=False
+            If True, the cycle matrix, or sum( n_i*S_i^m ), is calculated. If
+            set to False, the 1Hz equivalent load is calculated.
+
+        chs_resultant
+
+        add_sensor
+
+        calc_mech_power
+
+        saveinterval : int, default=1000
+            When processing a large number of cases, the statistics file
+            will be saved every saveinterval-ed case
+
+        update : boolean, default=False
+            Update an existing DataFrame instead of overwriting one. When
+            the number of cases is larger than saveinterval, the statistics
+            file will be updated every saveinterval-ed case
+
+        suffix : boolean or str, default=None
+            When True, the statistics data file will be appended with a suffix
+            that corresponds to the index of the last case added (up to but
+            excluding, much like range()). When a string, that suffix will be
+            added to the file name. Set to True when a large number of cases
+            is being considered in order to avoid excessively large DataFrames.
+
+        csv : boolean, default=True
+            In addition to a h5 file, save the statistics also in csv format.
+
+        Returns
+        -------
+
+        dfs : dict
+            Dictionary of dataframes, where the key is the channel name of
+            the output (that was optionally defined in ch_sel), and the value
+            is the dataframe containing the statistical values for all the
+            different selected cases.
+
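+        Example (illustrative; the fatigue channel name is hypothetical)
+        -----------------------------------------------------------------
+
+        >>> dfs = cc.statistics(tags=['[windspeed]'], m=[3, 10], neq=600.0,
+        ...           ch_fatigue=['blade1-blade1-node-001-momentvec-x'])
+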
+        """
+
+        def add_df_row(df_dict, **kwargs):
+            """
+            add a new channel to the df_dict format of ch_df
+            """
+            for col, value in kwargs.iteritems():
+                df_dict[col].append(value)
+            for col in (self.res.cols - set(kwargs.keys())):
+                df_dict[col].append('')
+            return df_dict
+
+        # in case the output changes, remember the original ch_sel
+        if ch_sel is not None:
+            ch_sel_init = list(ch_sel)
+        else:
+            ch_sel_init = None
+
+        if ch_fatigue is None:
+            ch_fatigue_init = None
+        else:
+            ch_fatigue_init = ch_fatigue
+
+        # TODO: should the default tags not be all the tags in the cases dict?
+        tag_default = ['[case_id]', '[sim_id]']
+        tag_chan = 'channel'
+        # merge default with other tags
+        for tag in tag_default:
+            if tag not in tags:
+                tags.append(tag)
+
+        # tags have to be unique; a tag appearing twice would break the
+        # DataFrame creation
+        if len(tags) != len(set(tags)):
+            raise ValueError('tags can only contain unique entries')
+
+        # get some basic parameters required to calculate statistics
+        try:
+            case = self.cases.keys()[0]
+        except IndexError:
+            print('no cases to select so no statistics, aborting ...')
+            return None
+
+        post_dir = self.cases[case]['[post_dir]']
+        if not new_sim_id:
+            # select the sim_id from a random case
+            sim_id = self.cases[case]['[sim_id]']
+        else:
+            sim_id = new_sim_id
+
+        if not silent:
+            nrcases = len(self.cases)
+            print('='*79)
+            print('statistics for %s, nr cases: %i' % (sim_id, nrcases))
+
+        df_dict = None
+        add_stats = True
+
+        for ii, (cname, case) in enumerate(self.cases.iteritems()):
+
+            # build the basic df_dict if not defined
+            if df_dict is None:
+                # the dictionary that will be used to create a pandas dataframe
+                df_dict = { tag:[] for tag in tags }
+                df_dict[tag_chan] = []
+                # add more columns that will help with IDing the channel
+                df_dict['channel_name'] = []
+                df_dict['channel_units'] = []
+                df_dict['channel_nr'] = []
+                df_dict['channel_desc'] = []
+                add_stats = True
+
+            if not silent:
+                pc = '%6.2f' % (float(ii)*100.0/float(nrcases))
+                pc += ' %'
+                print('stats progress: %4i/%i %s' % (ii, nrcases, pc))
+
+            # make sure the selected tags exist
+            if len(set(tags) - set(case)) > 0:
+                raise KeyError('    not all selected tags exist in cases')
+
+            self.load_result_file(case)
+            ch_dict_new = {}
+            # this is really messy: we are now also using the channel
+            # DataFrame structure in parallel
+            ch_df_new = {col:[] for col in self.res.cols}
+            ch_df_new['ch_name'] = []
+            # calculate the statistics values
+#            stats = self.res.calc_stats(self.sig, i0=i0, i1=i1)
+            i_new_chans = self.sig.shape[1] # self.Nch
+            sig_size = self.res.N  # len(self.sig[i0:i1,0])
+            new_sigs = np.ndarray((sig_size, 0))
+
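+            # add_sensor is expected to be a dict of the form (as used below):
+            # {'ch1_name':.., 'ch2_name':.., 'ch_name_add':.., 'factor':..,
+            #  'operator':'*' or '/'}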
+            if add_sensor is not None:
+                chi1 = self.res.ch_dict[add_sensor['ch1_name']]['chi']
+                chi2 = self.res.ch_dict[add_sensor['ch2_name']]['chi']
+                name = add_sensor['ch_name_add']
+                factor = add_sensor['factor']
+                operator = add_sensor['operator']
+
+                p1 = self.sig[:,chi1]
+                p2 = self.sig[:,chi2]
+                sig_add = np.ndarray((len(p1), 1))
+                if operator == '*':
+                    sig_add[:,0] = p1*p2*factor
+                elif operator == '/':
+                    sig_add[:,0] = factor*p1/p2
+                else:
+                    raise ValueError('Operator needs to be either * or /')
+#                add_stats = self.res.calc_stats(sig_add)
+#                add_stats_i = stats['max'].shape[0]
+                # add a new channel description for the mechanical power
+                ch_dict_new[name] = {}
+                ch_dict_new[name]['chi'] = i_new_chans
+                ch_df_new = add_df_row(ch_df_new, chi=i_new_chans,
+                                       ch_name=name)
+                i_new_chans += 1
+                new_sigs = np.append(new_sigs, sig_add, axis=1)
+#                # and append to all the statistics types
+#                for key, stats_arr in stats.iteritems():
+#                    stats[key] = np.append(stats_arr, add_stats[key])
+
+            # calculate the resultants
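+            # chs_resultant is a list of channel-name groups, e.g.
+            # [['ch-x', 'ch-y']] (illustrative names); each group is combined
+            # into a single resultant signal with a combined channel name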
+            sig_resultants = np.ndarray((sig_size, len(chs_resultant)))
+            inc = []
+            for j, chs in enumerate(chs_resultant):
+                sig_res = np.ndarray((sig_size, len(chs)))
+                lab = ''
+                no_channel = False
+                for i, ch in enumerate(chs):
+                    # if the channel does not exist, flag it as missing
+                    try:
+                        chi = self.res.ch_dict[ch]['chi']
+                        sig_res[:,i] = self.sig[:,chi]
+                        no_channel = False
+                    except KeyError:
+                        no_channel = True
+                    lab += ch.split('-')[-1]
+                name = '-'.join(ch.split('-')[:-1] + [lab])
+                # when one of the components does not exist, we can not
+                # calculate the resultant!
+                if no_channel:
+                    rpl = (name, cname)
+                    print('    missing channel, no resultant for: %s, %s' % rpl)
+                    continue
+                inc.append(j)
+                sig_resultants[:,j] = np.sqrt((sig_res*sig_res).sum(axis=1))
+#                resultant = np.sqrt(sig_resultants[:,j].reshape(self.res.N, 1))
+#                add_stats = self.res.calc_stats(resultant)
+#                add_stats_i = stats['max'].shape[0]
+                # add a new channel description for this resultant
+                ch_dict_new[name] = {}
+                ch_dict_new[name]['chi'] = i_new_chans
+                ch_df_new = add_df_row(ch_df_new, chi=i_new_chans,
+                                       ch_name=name)
+                i_new_chans += 1
+                # and append to all the statistics types
+#                for key, stats_arr in stats.iteritems():
+#                    stats[key] = np.append(stats_arr, add_stats[key])
+            if len(chs_resultant) > 0:
+                # but only take the channels that were not missing
+                new_sigs = np.append(new_sigs, sig_resultants[:,inc], axis=1)
+
+            # calculate mechanical power first before deriving statistics
+            # from it
+            if calc_mech_power:
+                name = 'stats-shaft-power'
+                sig_pmech = np.ndarray((sig_size, 1))
+                sig_pmech[:,0] = self.shaft_power()
+#                P_mech_stats = self.res.calc_stats(sig_pmech)
+#                mech_stats_i = stats['max'].shape[0]
+                # add a new channel description for the mechanical power
+                ch_dict_new[name] = {}
+                ch_dict_new[name]['chi'] = i_new_chans
+                ch_df_new = add_df_row(ch_df_new, chi=i_new_chans,
+                                       ch_name=name)
+                i_new_chans += 1
+                new_sigs = np.append(new_sigs, sig_pmech, axis=1)
+
+                # and C_p_mech
+                if A is not None:
+                    name = 'stats-cp-mech'
+                    if ch_wind is None:
+                        chiwind = self.res.ch_dict[self.find_windchan_hub()]['chi']
+                    else:
+                        chiwind = self.res.ch_dict[ch_wind]['chi']
+                    wind = self.res.sig[:,chiwind]
+                    cp = np.ndarray((sig_size, 1))
+                    cp[:,0] = self.cp(-sig_pmech[:,0], wind, A)
+                    # add a new channel description for the mechanical power
+                    ch_dict_new[name] = {}
+                    ch_dict_new[name]['chi'] = i_new_chans
+                    ch_df_new = add_df_row(ch_df_new, chi=i_new_chans,
+                                           ch_name=name)
+                    i_new_chans += 1
+                    new_sigs = np.append(new_sigs, cp, axis=1)
+
+                    try:
+                        nn_shaft = self.config.get('nn_shaft', 4)
+
+                        chan_t = 'shaft_nonrotate-shaft-node-%3.3i-forcevec-z'%nn_shaft
+                        i = self.res.ch_dict[chan_t]['chi']
+                        thrust = self.res.sig[:,i]
+                        name = 'stats-ct'
+                        ct = np.ndarray((sig_size, 1))
+                        ct[:,0] = self.ct(thrust, wind, A)
+                        ch_dict_new[name] = {}
+                        ch_dict_new[name]['chi'] = i_new_chans
+                        ch_df_new = add_df_row(ch_df_new, chi=i_new_chans,
+                                               ch_name=name)
+                        i_new_chans += 1
+                        new_sigs = np.append(new_sigs, ct, axis=1)
+                    except KeyError:
+                        print('    can not calculate CT')
+
+                # and append to all the statistics types
+#                for key, stats_arr in stats.iteritems():
+#                    stats[key] = np.append(stats_arr, P_mech_stats[key])
+
+            if save_new_sigs and new_sigs.shape[1] > 0:
+                chis, keys = [], []
+                for key, value in ch_dict_new.iteritems():
+                    chis.append(value['chi'])
+                    keys.append(key)
+                # sort on channel number, so it agrees with the new_sigs array
+                isort = np.array(chis).argsort()
+                keys = np.array(keys)[isort].tolist()
+                df_new_sigs = pd.DataFrame(new_sigs, columns=keys)
+                respath = os.path.join(case['[run_dir]'], case['[res_dir]'])
+                resfile = case['[case_id]']
+                fname = os.path.join(respath, resfile + '_postres.h5')
+                print('    saving post-processed res: %s...' % fname, end='')
+                df_new_sigs.to_hdf(fname, 'table', mode='w', format='table',
+                                   complevel=9, complib='blosc')
+                print('done!')
+                del df_new_sigs
+
+            ch_dict = self.res.ch_dict.copy()
+            ch_dict.update(ch_dict_new)
+
+#            ch_df = pd.concat([self.res.ch_df, pd.DataFrame(ch_df_new)])
+
+            # put all the extra channels into the results if we want to also
+            # be able to calculate the fatigue loads on them.
+            self.sig = np.append(self.sig, new_sigs, axis=1)
+
+            # calculate the statistics values
+            stats = self.res.calc_stats(self.sig, i0=i0, i1=i1)
+
+            # Because each channel is a new row, it doesn't matter how many
+            # data channels each case has, and this approach does not break
+            # when different cases have a different number of output channels.
+            # By default, just take all channels in the result file.
+            if ch_sel_init is None:
+                ch_sel = ch_dict.keys()
+#                ch_sel = ch_df.ch_name.tolist()
+#                ch_sel = [str(k) for k in ch_sel]
+                print('    selecting all channels for statistics')
+
+            # calculate the fatigue properties from selected channels
+            fatigue, tags_fatigue = {}, []
+            if ch_fatigue_init is None:
+                ch_fatigue = ch_sel
+                print('    selecting all channels for fatigue')
+            else:
+                ch_fatigue = ch_fatigue_init
+
+            for ch_id in ch_fatigue:
+                chi = ch_dict[ch_id]['chi']
+                signal = self.sig[:,chi]
+                if neq is None:
+                    neq = float(case['[duration]'])
+                if not fatigue_cycles:
+                    eq = self.res.calc_fatigue(signal, no_bins=no_bins,
+                                               neq=neq, m=m)
+                else:
+                    eq = self.res.cycle_matrix(signal, no_bins=no_bins, m=m)
+                fatigue[ch_id] = {}
+                # when calc_fatigue succeeds, we should have as many items
+                # as in m
+                if len(eq) == len(m):
+                    for eq_, m_ in zip(eq, m):
+                        fatigue[ch_id]['m=%2.01f' % m_] = eq_
+                # when it fails, we get an empty list back
+                else:
+                    for m_ in m:
+                        fatigue[ch_id]['m=%2.01f' % m_] = np.nan
+
+            # build the fatigue tags
+            for m_ in m:
+                tag = 'm=%2.01f' % m_
+                tags_fatigue.append(tag)
+
+            # -----------------------------------------------------------------
+            # define the pandas data frame dict on first run
+            # -----------------------------------------------------------------
+            # Only build the ch_sel collection once. By definition, the
+            # statistics, fatigue and htc tags will not change
+            if add_stats:
+                # statistical parameters
+                for statparam in stats.keys():
+                    df_dict[statparam] = []
+#                # additional tags
+#                for tag in tags:
+#                    df_dict[tag] = []
+                # fatigue data
+                for tag in tags_fatigue:
+                    df_dict[tag] = []
+                add_stats = False
+
+            for ch_id in ch_sel:
+
+                chi = ch_dict[ch_id]['chi']
+                # ch_name is not unique anymore, this doesn't work obviously!
+                # use the channel index instead, that is unique
+#                chi = ch_df[ch_df.ch_name==ch_id].chi.values[0]
+
+                # sig_stat = [(0=value,1=index),statistic parameter, channel]
+                # stat params = 0 max, 1 min, 2 mean, 3 std, 4 range, 5 abs max
+                # note that min, mean, std, and range are not relevant for index
+                # values. Set to zero there.
+
+                # -------------------------------------------------------------
+                # Fill in all the values for the current data entry
+                # -------------------------------------------------------------
+
+                # the auxiliary columns
+                try:
+                    name = self.res.ch_details[chi,0]
+                    unit = self.res.ch_details[chi,1]
+                    desc = self.res.ch_details[chi,2]
+                # the new channels from new_sigs are not in here
+                except (IndexError, AttributeError) as e:
+                    name = ch_id
+                    desc = ''
+                    unit = ''
+                df_dict['channel_name'].append(name)
+                df_dict['channel_units'].append(unit)
+                df_dict['channel_desc'].append(desc)
+                df_dict['channel_nr'].append(chi)
+
+                # each df line is a channel of case that needs to be id-eed
+                df_dict[tag_chan].append(ch_id)
+
+                # for all the statistics keys, save the values for the
+                # current channel
+                for statparam in stats.keys():
+                    df_dict[statparam].append(stats[statparam][chi])
+                # and save the tags from the input htc file in order to
+                # label each different case properly
+                for tag in tags:
+                    df_dict[tag].append(case[tag])
+                # append any fatigue channels if applicable, otherwise nan
+                if ch_id in fatigue:
+                    for m_fatigue, eq_ in fatigue[ch_id].iteritems():
+                        df_dict[m_fatigue].append(eq_)
+                else:
+                    for tag in tags_fatigue:
+                        # TODO: or should this be NaN?
+                        df_dict[tag].append(np.nan)
+            # when dealing with a lot of cases, save the stats data at
+            # intermediate points to avoid memory issues
+            if math.fmod(ii+1, saveinterval) == 0.0:
+                df_dict2 = self._df_dict_check_datatypes(df_dict)
+                # convert, save/update
+                if isinstance(suffix, str):
+                    ext = suffix
+                elif suffix is True:
+                    ext = '_%06i' % (ii+1)
+                else:
+                    ext = ''
+#                dfs = self._df_dict_save(df_dict2, post_dir, sim_id, save=save,
+#                                         update=update, csv=csv, suffix=ext)
+                # TODO: test this first
+                fname = os.path.join(post_dir, sim_id + '_statistics' + ext)
+                dfs = misc.dict2df(df_dict2, fname, save=save, update=update,
+                                   csv=csv, check_datatypes=False)
+
+                df_dict2 = None
+                df_dict = None
+                add_stats = True
+
+        # only save again when there is actual data in df_dict
+        if df_dict is not None:
+            # make consistent data types
+            df_dict2 = self._df_dict_check_datatypes(df_dict)
+            # convert, save/update
+            if isinstance(suffix, str):
+                ext = suffix
+            elif suffix is True:
+                ext = '_%06i' % ii
+            else:
+                ext = ''
+#            dfs = self._df_dict_save(df_dict2, post_dir, sim_id, save=save,
+#                                     update=update, csv=csv, suffix=ext)
+            # TODO: test this first
+            fname = os.path.join(post_dir, sim_id + '_statistics' + ext)
+            dfs = misc.dict2df(df_dict2, fname, save=save, update=update,
+                               csv=csv, check_datatypes=False)
+
+        return dfs
+
+    def _add2newsigs(self, ch_dict, name, i_new_chans, new_sigs, addendum):
+
+        ch_dict[name] = {}
+        ch_dict[name]['chi'] = i_new_chans
+        i_new_chans += 1
+        return ch_dict, np.append(new_sigs, addendum, axis=1)
+
+    # TODO: use the version in misc instead.
+    def _df_dict_save(self, df_dict2, post_dir, sim_id, save=True,
+                      update=False, csv=True, suffix=None):
+        """
+        Convert the df_dict to df and save/update.
+
+        DEPRECATED, use misc.dict2df instead
+        """
+        if isinstance(suffix, str):
+            fpath = os.path.join(post_dir, sim_id + '_statistics' + suffix)
+        else:
+            fpath = os.path.join(post_dir, sim_id + '_statistics')
+
+        # in case converting to dataframe fails, fall back
+        try:
+            dfs = pd.DataFrame(df_dict2)
+        except Exception as e:
+
+            FILE = open(fpath + '.pkl', 'wb')
+            pickle.dump(df_dict2, FILE, protocol=2)
+            FILE.close()
+            # check what went wrong
+            misc.check_df_dict(df_dict2)
+            print('failed to convert to data frame, saved as dict')
+            raise(e)
+
+#        # apply categoricals to objects
+#        for column_name, column_dtype in dfs.dtypes.iteritems():
+#            # applying categoricals mostly makes sense for objects
+#            # we ignore all others
+#            if column_dtype.name == 'object':
+#                dfs[column_name] = dfs[column_name].astype('category')
+
+        # and save/update the statistics database
+        if save:
+            if update:
+                print('updating statistics: %s ...' % (post_dir + sim_id), end='')
+                try:
+                    dfs.to_hdf('%s.h5' % fpath, 'table', mode='r+', append=True,
+                               format='table', complevel=9, complib='blosc')
+                except IOError:
+                    print('Can not update, file does not exist. Saving instead'
+                          '...', end='')
+                    dfs.to_hdf('%s.h5' % fpath, 'table', mode='w',
+                               format='table', complevel=9, complib='blosc')
+            else:
+                print('saving statistics: %s ...' % (post_dir + sim_id), end='')
+                if csv:
+                    dfs.to_csv('%s.csv' % fpath)
+                dfs.to_hdf('%s.h5' % fpath, 'table', mode='w',
+                           format='table', complevel=9, complib='blosc')
+
+            print('DONE!!\n')
+
+        return dfs
+
+    # TODO: use the version in misc instead.
+    def _df_dict_check_datatypes(self, df_dict):
+        """
+        There might be a mix of strings and numbers in a column now; see if
+        we can cast each column to a single data type.
+        Nasty hack: because of the unicode -> string conversion we might not
+        overwrite the same key in the dict.
+
+        DEPRECATED, use misc.df_dict_check_datatypes instead
+        """
+        # FIXME: this approach will result in twice the memory usage though...
+        # we can not pop/delete items from a dict while iterating over it
+        df_dict2 = {}
+        for colkey, col in df_dict.iteritems():
+            # if we have a list, convert to string
+            if type(col[0]).__name__ == 'list':
+                for ii, item in enumerate(col):
+                    col[ii] = '**'.join(item)
+            # if we already have an array (statistics) or a list of numbers
+            # do not try to cast into another data type, because downcasting
+            # in that case will not raise any exception
+            elif type(col[0]).__name__[:3] in ['flo', 'int', 'nda']:
+                df_dict2[str(colkey)] = np.array(col)
+                continue
+            # in case we have unicodes instead of strings, we need to convert
+            # to strings otherwise the saved .h5 file will have pickled elements
+            try:
+                df_dict2[str(colkey)] = np.array(col, dtype=np.int32)
+            except OverflowError:
+                try:
+                    df_dict2[str(colkey)] = np.array(col, dtype=np.int64)
+                except OverflowError:
+                    df_dict2[str(colkey)] = np.array(col, dtype=np.float64)
+            except ValueError:
+                try:
+                    df_dict2[str(colkey)] = np.array(col, dtype=np.float64)
+                except ValueError:
+                    df_dict2[str(colkey)] = np.array(col, dtype=np.str)
+            except TypeError:
+                # in all other cases, make sure we have converted them to
+                # strings and NOT unicode
+                df_dict2[str(colkey)] = np.array(col, dtype=np.str)
+            except Exception as e:
+                print('failed to convert column %s to single data type' % colkey)
+                raise(e)
+        return df_dict2
+
+    def fatigue_lifetime(self, dfs, neq, res_dir='res/', fh_lst=None,
+                         dlc_folder="dlc%s_iec61400-1ed3/", extra_cols=[],
+                         dlc_name="dlc%s_wsp%02d_wdir%03d_s*.sel", save=False,
+                         update=False, csv=False, new_sim_id=False, years=20.):
+        """
+        Calculate the fatigue over a selection of cases and indicate how many
+        hours each case contributes to the total life time.
+
+        This approach can only work reliably if the common DLC folder
+        structure is followed.
+
+        Parameters
+        ----------
+
+        dfs : DataFrame
+            Statistics Pandas DataFrame. When extra_cols is not defined, it
+            should only hold the results of one standard organized DLC (one
+            turbine, one inflow case).
+
+        neq : float
+            Reference number of cycles. Usually, neq is either set to 10e6,
+            10e7 or 10e8.
+
+        res_dir : str, default='res/'
+            Base directory of the results. Results would be located in
+            res/dlc_folder/*.sel
+
+        dlc_folder : str, default="dlc%s_iec61400-1ed3/"
+            String with the DLC subfolder names. One string substitution is
+            required (%s), and should represent the DLC number (without comma
+            or point). Not relevant when fh_lst is defined.
+
+        dlc_name : str, default="dlc%s_wsp%02d_wdir%03d_s*.sel"
+            String with the DLC names. One string, and two integer substitutions
+            are required (%s, %02d, %03d), indicating the DLC number (e.g. '12'),
+            the windspeed (e.g. int(6)), and wind speed direction (e.g. int(10))
+            respectively. Notice that different seed numbers are covered with the
+            wildcard *. Not relevant when fh_lst is defined.
+
+        extra_cols : list, default=[]
+            The included columns are the material constants, and each row is
+            a channel. When multiple DLC cases are included in dfs, the user
+            has to define additional columns in order to distinguish between
+            the DLC cases.
+
+        fh_lst : list, default=None
+            Number of hours for each case over its life time. Format:
+            [(filename, hours),...] where filename is the name of the file
+            (can be a full path, but only the base name is considered), hours
+            is the number of hours over the life time. When fh_lst is set,
+            dlc_folder and dlc_name are not used.
+
+        years : float, default=20
+            Total life time expressed in years.
+
+        Returns
+        -------
+
+        df_Leq : DataFrame
+            Pandas DataFrame with the life time equivalent load for the given
+            neq, all the channels, and a range of material parameters m.
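+
+        Example
+        -------
+
+        A minimal, illustrative sketch of the lifetime equivalent load
+        formula applied below (the numbers are made up; R holds the per-case
+        values stored under the 'm=...' columns of dfs, hours the lifetime
+        hours per case):
+
+        >>> import numpy as np
+        >>> R = np.array([1.2e12, 3.4e12])
+        >>> hours = np.array([3000.0, 2500.0])
+        >>> m, neq = 10.0, 1e7
+        >>> Leq = ((R * hours).sum() / neq)**(1.0/m)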
+        """
+
+        print('Calculating life time fatigue load')
+
+        # get some basic parameters required to calculate statistics
+        try:
+            case = self.cases.keys()[0]
+        except IndexError:
+            print('no cases to select so no statistics, aborting ...')
+            return None
+        post_dir = self.cases[case]['[post_dir]']
+        if not new_sim_id:
+            # select the sim_id from a random case
+            sim_id = self.cases[case]['[sim_id]']
+        else:
+            sim_id = new_sim_id
+        # we assume the run_dir (root) is the same everywhere
+        run_dir = self.cases[self.cases.keys()[0]]['[run_dir]']
+        path = os.path.join(run_dir, res_dir)
+
+        if fh_lst is None:
+            wb = WeibullParameters()
+            if 'Weibull' in self.config.keys():
+                for key in self.config['Weibull'].keys():
+                    setattr(wb, key, self.config['Weibull'][key])
+
+            dlc_dict = dlc_ft.dlc_dict(Vin=wb.Vin, Vr=wb.Vr, Vout=wb.Vout,
+                                       Vref=wb.Vref, Vstep=wb.Vstep,
+                                       shape_k=wb.shape_k)
+            fh_lst = dlc_ft.file_hour_lst(path, dlc_dict, dlc_folder=dlc_folder,
+                                          dlc_name=dlc_name, years=years)
+        # now we have a full path to the result files, but we only need
+        # the case_id to identify the corresponding entry in the statistics
+        # DataFrame (excluding the .sel extension)
+        case_ids = [os.path.basename(k[0].replace('.sel', '')) for k in fh_lst]
+        hours = [k[1] for k in fh_lst]
+
+        # ---------------------------------------------------------------------
+        # column definitions
+        # ---------------------------------------------------------------------
+        # available material constants
+        ms, cols = [], []
+        for key in dfs.keys():
+            if key[:2] == 'm=':
+                ms.append(key)
+        # when multiple DLC cases are included, add extra cols to identify each
+        # DLC group.
+        extra_cols.append('channel')
+        cols = copy.copy(ms)
+        cols.extend(extra_cols)
+        # ---------------------------------------------------------------------
+
+        # Build the DataFrame; we do not have a unique channel index
+        dict_Leq = {col:[] for col in cols}
+        # index on case_id on the original DataFrame so we can select accordingly
+        dfs = dfs.set_index('[case_id]')
+        # select for each channel all the cases
+        for grname, gr in dfs.groupby(dfs.channel):
+            # if one m has any nan's, assume none of them are good and throw
+            # away
+#            if np.isnan(gr[ms[0]].values).any():
+#                sel_rows.pop(grname)
+#                continue
+            # select the cases in the same order as the corresponding hours
+            try:
+                sel_sort = gr.loc[case_ids]
+            except KeyError:
+                print('    ignore sensor for Leq:', grname)
+                continue
+            for col in extra_cols:
+                # at this stage we should only have one DLC set, so the
+                # values of the identifier columns should be unique
+                val_unique = sel_sort[col].unique()
+                if len(val_unique) > 1:
+                    print('found %i sets instead of 1:' % len(val_unique))
+                    print(val_unique)
+                    raise ValueError('For Leq load, the given DataFrame can '
+                                     'only hold one complete DLC set.')
+                # values of the identifier columns for each case. We do this
+                # in case the original dfs holds multiple DLC cases.
+                dict_Leq[col].append(sel_sort[col].unique()[0])
+            for m in ms:
+                # sel_sort[m] holds the cycle_matrices for each of the DLC
+                # cases: such all the different wind speeds for dlc1.2
+                R_eq = (sel_sort[m].values*np.array(hours)).sum()
+                # the effective Leq for each of the material constants
+                dict_Leq[m].append(math.pow(R_eq/neq, 1.0/float(m[2:])))
+                # the following is twice as slow:
+                # [i*j for (i,j) in zip(sel_sort[m].values.tolist(),hours)]
+
+#        collens = misc.check_df_dict(dict_Leq)
+        # make consistent data types, and convert to DataFrame
+        fname = os.path.join(post_dir, sim_id + '_Leq')
+        df_Leq = misc.dict2df(dict_Leq, fname, save=save, update=update,
+                              csv=csv, check_datatypes=True)
+
+        # only keep the ones that do not have nan's (only works with index)
+        return df_Leq
+
+    def AEP(self, dfs, fh_lst=None, ch_powe=None, extra_cols=[], update=False,
+            res_dir='res/', dlc_folder="dlc%s_iec61400-1ed3/", csv=False,
+            dlc_name="dlc%s_wsp%02d_wdir%03d_s*.sel", new_sim_id=False,
+            save=False):
+
+        """
+        Calculate the Annual Energy Production (AEP) for DLC1.2 cases.
+
+        Parameters
+        ----------
+
+        dfs : DataFrame
+            Statistics Pandas DataFrame. When extra_cols is not defined, it
+            should only hold the results of one standard organized DLC (one
+            turbine, one inflow case).
+
+        fh_lst : list, default=None
+            Number of hours for each case over its life time. Format:
+            [(filename, hours),...] where filename is the name of the file
+            (can be a full path, but only the base name is considered), hours
+            is the number of hours over the life time. When fh_lst is set,
+            dlc_folder and dlc_name are not used.
+
+        ch_powe : string, default=None
+            Name of the electrical power output channel. When None, the
+            default output channel of the DTU Wind Energy controller,
+            'DLL-2-inpvec-2', is used.
+
+        extra_cols : list, default=[]
+            The included column is just the AEP, and each row is
+            a channel. When multiple DLC cases are included in dfs, the user
+            has to define additional columns in order to distinguish between
+            the DLC cases.
+
+        res_dir : str, default='res/'
+            Base directory of the results. Results would be located in
+            res/dlc_folder/*.sel
+
+        dlc_folder : str, default="dlc%s_iec61400-1ed3/"
+            String with the DLC subfolder names. One string substitution is
+            required (%s), and should represent the DLC number (without comma
+            or point). Not relevant when fh_lst is defined.
+
+        dlc_name : str, default="dlc%s_wsp%02d_wdir%03d_s*.sel"
+            String with the DLC names. One string, and two integer substitutions
+            are required (%s, %02d, %03d), indicating the DLC number (e.g. '12'),
+            the windspeed (e.g. int(6)), and wind speed direction (e.g. int(10))
+            respectively. Notice that different seed numbers are covered with the
+            wildcard *. Not relevant when fh_lst is defined.
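+
+        Example
+        -------
+
+        A minimal, illustrative sketch of the AEP summation performed below
+        (made-up numbers; p_mean is the mean electrical power per dlc12 case,
+        hours the number of hours per year attributed to that case):
+
+        >>> import numpy as np
+        >>> p_mean = np.array([1500.0, 3000.0, 5000.0])
+        >>> hours = np.array([800.0, 700.0, 300.0])
+        >>> AEP = (p_mean * hours).sum()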
+        """
+
+        # get some basic parameters required to calculate statistics
+        try:
+            case = self.cases.keys()[0]
+        except IndexError:
+            print('no cases to select so no statistics, aborting ...')
+            return None
+        post_dir = self.cases[case]['[post_dir]']
+        if not new_sim_id:
+            # select the sim_id from a random case
+            sim_id = self.cases[case]['[sim_id]']
+        else:
+            sim_id = new_sim_id
+        # we assume the run_dir (root) is the same everywhere
+        run_dir = self.cases[self.cases.keys()[0]]['[run_dir]']
+        path = os.path.join(run_dir, res_dir)
+
+        if fh_lst is None:
+            wb = WeibullParameters()
+            if 'Weibull' in self.config.keys():
+                for key in self.config['Weibull'].keys():
+                    setattr(wb, key, self.config['Weibull'][key])
+            dlc_dict = dlc_ft.dlc_dict(Vin=wb.Vin, Vr=wb.Vr, Vout=wb.Vout,
+                                       Vref=wb.Vref, Vstep=wb.Vstep,
+                                       shape_k=wb.shape_k)
+            fh_lst = dlc_ft.file_hour_lst(path, dlc_dict, dlc_folder=dlc_folder,
+                                          dlc_name=dlc_name, years=1.0)
+        # now we have a full path to the result files, but we only need
+        # the case_id to identify the corresponding entry in the statistics
+        # DataFrame (excluding the .sel extension)
+        def basename(k):
+            return os.path.basename(k[0].replace('.sel', ''))
+        fh_lst_basename = [(basename(k), k[1]) for k in fh_lst]
+        # only take dlc12 for power production
+        case_ids = [k[0] for k in fh_lst_basename if k[0][:5]=='dlc12']
+        hours = [k[1] for k in fh_lst_basename if k[0][:5]=='dlc12']
+
+        # the default electrical power channel name from DTU Wind controller
+        if ch_powe is None:
+            ch_powe = 'DLL-2-inpvec-2'
+        # and select only the power channels
+        dfs_powe = dfs[dfs.channel==ch_powe]
+
+        # by default we have AEP as a column
+        cols = ['AEP']
+        cols.extend(extra_cols)
+        # Build the DataFrame; we do not have a unique channel index
+        dict_AEP = {col:[] for col in cols}
+        # index on case_id on the original DataFrame so we can select accordingly
+        dfs_powe = dfs_powe.set_index('[case_id]')
+
+        # select the cases in the same order as the corresponding hours
+        sel_sort = dfs_powe.loc[case_ids]
+        for col in extra_cols:
+            # at this stage we should only have one DLC set, so the values
+            # of the identifier columns should be unique
+            val_unique = sel_sort[col].unique()
+            if len(val_unique) > 1:
+                print('found %i sets instead of 1:' % len(val_unique))
+                print(val_unique)
+                raise ValueError('For AEP, the given DataFrame can only hold '
+                                 'one complete DLC set. Make sure to identify '
+                                 'the proper extra_cols to identify the '
+                                 'different DLC sets.')
+            # values of the identifier columns for each case. We do this
+            # in case the original dfs holds multiple DLC cases.
+            dict_AEP[col].append(sel_sort[col].unique()[0])
+
+        # and the AEP: mean power of each case times its hours per year
+#        duration = sel_sort['[duration]'].values
+#        power_mean = sel_sort['mean'].values
+        AEP = (sel_sort['mean'].values * np.array(hours)).sum()
+        dict_AEP['AEP'].append(AEP)
+
+        # make consistent data types, and convert to DataFrame
+        fname = os.path.join(post_dir, sim_id + '_AEP')
+        df_AEP = misc.dict2df(dict_AEP, fname, update=update, csv=csv,
+                              save=save, check_datatypes=True)
+
+        return df_AEP
+
+    def stats2dataframe(self, ch_sel=None, tags=['[turb_seed]','[windspeed]']):
+        """
+        Convert the archaic statistics dictionary of a group of cases to
+        a more convenient pandas dataframe format.
+
+        DEPRECATED, use statistics instead!!
+
+        Parameters
+        ----------
+
+        ch_sel : dict, default=None
+            Map short names to the channel id's defined in ch_dict in order to
+            have more human readable column names in the pandas dataframe. By
+            default, if ch_sel is None, a dataframe for each channel in the
+            ch_dict (so in the HAWC2 output) will be created. When ch_sel is
+            defined, only those channels are considered.
+            ch_sel[short name] = full ch_dict identifier
+
+        tags : list, default=['[turb_seed]','[windspeed]']
+            Select which tag values from cases should be included in the
+            dataframes. This will help in selecting and identifying the
+            different cases.
+
+        Returns
+        -------
+
+        dfs : dict
+            Dictionary of dataframes, where the key is the channel name of
+            the output (that was optionally defined in ch_sel), and the value
+            is the dataframe containing the statistical values for all the
+            different selected cases.
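+
+        Example
+        -------
+
+        Illustrative only (cc is assumed to be a Cases instance with
+        stats_dict loaded, and the channel names are hypothetical but should
+        exist in ch_dict):
+
+        >>> ch_sel = {'P_e' : 'DLL-2-inpvec-2',
+        ...           'tower base' : 'tower-tower-node-001-momentvec-x'}
+        >>> dfs = cc.stats2dataframe(ch_sel=ch_sel,
+        ...                          tags=['[turb_seed]', '[windspeed]'])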
+        """
+
+        df_dict = {}
+
+        for cname, case in self.cases.iteritems():
+
+            # make sure the selected tags exist
+            if len(tags) != len(set(case) & set(tags)):
+                raise KeyError('not all selected tags exist in cases')
+
+            sig_stats = self.stats_dict[cname]['sig_stats']
+            ch_dict = self.stats_dict[cname]['ch_dict']
+
+            if ch_sel is None:
+                ch_sel = { i : i for i in ch_dict.keys() }
+
+            for ch_short, ch_name in ch_sel.iteritems():
+
+                chi = ch_dict[ch_name]['chi']
+                # sig_stat = [(0=value,1=index),statistic parameter, channel]
+                # stat params = 0 max, 1 min, 2 mean, 3 std, 4 range, 5 abs max
+                # note that min, mean, std, and range are not relevant for index
+                # values. Set to zero there.
+                try:
+                    df_dict[ch_short]['case name'].append(cname)
+                    df_dict[ch_short]['max'].append(   sig_stats[0,0,chi])
+                    df_dict[ch_short]['min'].append(   sig_stats[0,1,chi])
+                    df_dict[ch_short]['mean'].append(  sig_stats[0,2,chi])
+                    df_dict[ch_short]['std'].append(   sig_stats[0,3,chi])
+                    df_dict[ch_short]['range'].append( sig_stats[0,4,chi])
+                    df_dict[ch_short]['absmax'].append(sig_stats[0,5,chi])
+                    for tag in tags:
+                        df_dict[ch_short][tag].append(case[tag])
+                except KeyError:
+                    df_dict[ch_short] = {'case name' : [cname]}
+                    df_dict[ch_short]['max']    = [sig_stats[0,0,chi]]
+                    df_dict[ch_short]['min']    = [sig_stats[0,1,chi]]
+                    df_dict[ch_short]['mean']   = [sig_stats[0,2,chi]]
+                    df_dict[ch_short]['std']    = [sig_stats[0,3,chi]]
+                    df_dict[ch_short]['range']  = [sig_stats[0,4,chi]]
+                    df_dict[ch_short]['absmax'] = [sig_stats[0,5,chi]]
+                    for tag in tags:
+                        df_dict[ch_short][tag] = [ case[tag] ]
+
+        # and create for each channel a dataframe
+        dfs = {}
+        for ch_short, df_values in df_dict.iteritems():
+            dfs[ch_short] = pd.DataFrame(df_values)
+
+        return dfs
+
+    def load_azimuth(self, azi, load, sectors=360):
+        """
+        Establish load dependency on rotor azimuth angle
+        """
+
+        # sort on azimuth angle
+        isort = np.argsort(azi)
+        azi = azi[isort]
+        load = load[isort]
+
+        azi_sel = np.linspace(0, 360, num=sectors)
+        load_sel = np.interp(azi_sel, azi, load)
+
+        return azi_sel, load_sel
+
+    def find_windchan_hub(self):
+        """
+        """
+        # if we sort we'll get the largest absolute coordinate last
+        for ch in sorted(self.res.ch_dict.keys()):
+            if ch[:29] == 'windspeed-global-Vy-0.00-0.00':
+                chan_found = ch
+        return chan_found
+
+    def ct(self, thrust, wind, A, rho=1.225):
+        return thrust / (0.5 * rho * A * wind * wind)
+
+    def cp(self, power, wind, A, rho=1.225):
+        return power / (0.5 * rho * A * wind * wind * wind)
+
+    def shaft_power(self):
+        """
+        Return the mechanical shaft power based on the shaft torsional loading
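+
+        As an illustrative sketch of the relation used below (made-up values;
+        shaft torsion moment times the rotor speed converted to rad/s):
+
+        >>> import numpy as np
+        >>> rpm = np.array([12.0, 12.1])
+        >>> torque = np.array([4000.0, 4100.0])
+        >>> p_mech = torque * rpm * np.pi / 30.0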
+        """
+        try:
+            i = self.res.ch_dict['bearing-shaft_rot-angle_speed-rpm']['chi']
+            rads = self.res.sig[:,i]*np.pi/30.0
+        except KeyError:
+            try:
+                i = self.res.ch_dict['bearing-shaft_rot-angle_speed-rads']['chi']
+                rads = self.res.sig[:,i]
+            except KeyError:
+                i = self.res.ch_dict['Omega']['chi']
+                rads = self.res.sig[:,i]
+        try:
+            nn_shaft = self.config['nn_shaft']
+        except (KeyError, AttributeError):
+            nn_shaft = 4
+        itorque = self.res.ch_dict['shaft-shaft-node-%3.3i-momentvec-z'%nn_shaft]['chi']
+        torque = self.res.sig[:,itorque]
+
+        return torque*rads
+
+    def calc_torque_const(self, save=False, name='ojf'):
+        """
+        If we have constant RPM over the simulation, calculate the torque
+        constant. The current loaded HAWC2 case is considered. Consequently,
+        first load a result file with load_result_file
+
+        Parameters
+        ----------
+
+        save : boolean, default=False
+
+        name : str, default='ojf'
+            File name of the torque constant result. Defaults to using the
+            ojf case name. If set to 'hawc2', the case_id is used instead. In
+            both cases the file name will be extended with '.kgen'.
+
+        Returns
+        -------
+
+        [windspeed, rpm, K] : list
+
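+        Example
+        -------
+
+        Illustrative sketch of the estimate used below, K = -mean(Q/omega),
+        with made-up shaft torque (kNm, hence the factor 1000) and rotor
+        speed (rad/s) series:
+
+        >>> import numpy as np
+        >>> q_knm = np.array([-30.0, -31.0, -29.5])
+        >>> omega = np.array([1.2, 1.21, 1.19])
+        >>> K = -np.mean(q_knm*1000.0/omega)
+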
+        """
+        # make sure the results have been loaded previously
+        try:
+            # get the relevant index to the wanted channels
+            # tag: coord-bodyname-pos-sensortype-component
+            tag = 'bearing-shaft_nacelle-angle_speed-rpm'
+            irpm = self.res.ch_dict[tag]['chi']
+            chi_rads = self.res.ch_dict['Omega']['chi']
+            tag = 'shaft-shaft-node-001-momentvec-z'
+            chi_q = self.res.ch_dict[tag]['chi']
+        except AttributeError:
+            msg = 'load results first with Cases.load_result_file()'
+            raise ValueError(msg)
+
+#        if not self.case['[fix_rpm]']:
+#            print
+#            return
+
+        windspeed = self.case['[windspeed]']
+        rpm = self.res.sig[:,irpm].mean()
+        # and get the average rotor torque applied to maintain
+        # constant rotor speed
+        K = -np.mean(self.res.sig[:,chi_q]*1000./self.res.sig[:,chi_rads])
+
+        result = np.array([windspeed, rpm, K])
+
+        # optionally, save the values and give the case name as file name
+        if save:
+            fpath = self.case['[post_dir]'] + 'torque_constant/'
+            if name == 'hawc2':
+                fname = self.case['[case_id]'] + '.kgen'
+            elif name == 'ojf':
+                fname = self.case['[ojf_case]'] + '.kgen'
+            else:
+                raise ValueError('name should be either ojf or hawc2')
+            # create the torque_constant dir if it doesn't exist
+            try:
+                os.mkdir(fpath)
+            except OSError:
+                pass
+
+#            print('gen K saving at:', fpath+fname)
+            np.savetxt(fpath+fname, result, header='windspeed, rpm, K')
+
+        return result
+
+    def compute_envelope(self, sig, ch_list):
+        """
+        Compute for each channel group in ch_list the load envelope: the
+        convex hull of the signals of the first two channels, with the
+        values of any remaining channels in the group appended at the hull
+        vertices.
+        """
+        envelope = {}
+        for ch in ch_list:
+            chi0 = self.res.ch_dict[ch[0]]['chi']
+            chi1 = self.res.ch_dict[ch[1]]['chi']
+            s0 = np.array(sig[:, chi0]).reshape(-1, 1)
+            s1 = np.array(sig[:, chi1]).reshape(-1, 1)
+            cloud =  np.append(s0, s1, axis=1)
+            hull = scipy.spatial.ConvexHull(cloud)
+            closed_contour = np.append(cloud[hull.vertices,:],
+                                       cloud[hull.vertices[0],:].reshape(1,2),
+                                       axis=0)
+            for ich in range(2, len(ch)):
+                chix = self.res.ch_dict[ch[ich]]['chi']
+                s0 = np.array(sig[hull.vertices, chix]).reshape(-1, 1)
+                s1 = np.array(sig[hull.vertices[0], chix]).reshape(-1, 1)
+                s0 = np.append(s0, s1, axis=0)
+                closed_contour = np.append(closed_contour, s0, axis=1)
+            envelope[ch[0]] = closed_contour
+        return envelope
+
+    def envelope(self, silent=False, ch_list=[], append=''):
+        """
+        Calculate envelopes and save them in a table.
+
+        Parameters
+        ----------
+
+        silent : boolean, default=False
+            Suppress progress output when True.
+
+        ch_list : list, default=[]
+            List of channel groups, one group per envelope: the first two
+            channels span the envelope plane, any remaining channels are
+            evaluated at the envelope vertices.
+
+        append : str, default=''
+            Text appended to the file name of the resulting HDF5 table.
+
+        Returns
+        -------
+
+        None. The envelopes are written to an HDF5 file (one group per case,
+        one table per channel group) in the [post_dir] folder.
+        """
+        # get some basic parameters required to calculate statistics
+        try:
+            case = self.cases.keys()[0]
+        except IndexError:
+            print('no cases to select so no statistics, aborting ...')
+            return None
+
+        post_dir = self.cases[case]['[post_dir]']
+        sim_id = self.cases[case]['[sim_id]']
+
+        if not silent:
+            nrcases = len(self.cases)
+            print('='*79)
+            print('envelope for %s, nr cases: %i' % (sim_id, nrcases))
+
+        fname = os.path.join(post_dir, sim_id + '_envelope' + append + '.h5')
+        h5f = tbl.openFile(fname, mode="w", title=str(sim_id),
+                           filters=tbl.Filters(complevel=9))
+
+        # Create a new group under "/" (root)
+        for ii, (cname, case) in enumerate(self.cases.iteritems()):
+
+            groupname = str(cname[:-4])
+            groupname = groupname.replace('-', '_')
+            h5f.createGroup("/", groupname)
+            ctab = getattr(h5f.root, groupname)
+
+            if not silent:
+                pc = '%6.2f' % (float(ii)*100.0/float(nrcases))
+                pc += ' %'
+                print('envelope progress: %4i/%i %s' % (ii, nrcases, pc))
+
+            self.load_result_file(case)
+
+            envelope = self.compute_envelope(self.sig, ch_list)
+
+            for ch_id in ch_list:
+                h5f.createTable(ctab, str(ch_id[0].replace('-', '_')),
+                                EnvelopeClass.section,
+                                title=str(ch_id[0].replace('-', '_')))
+                csv_table = getattr(ctab, str(ch_id[0].replace('-', '_')))
+                tablerow = csv_table.row
+                for row in envelope[ch_id[0]]:
+                    tablerow['Mx'] = float(row[0])
+                    tablerow['My'] = float(row[1])
+                    if len(row)>2:
+                        tablerow['Mz'] = float(row[2])
+                        if len(row)>3:
+                            tablerow['Fx'] = float(row[3])
+                            tablerow['Fy'] = float(row[4])
+                            tablerow['Fz'] = float(row[5])
+                        else:
+                            tablerow['Fx'] = 0.0
+                            tablerow['Fy'] = 0.0
+                            tablerow['Fz'] = 0.0
+                    else:
+                        tablerow['Mz'] = 0.0
+                        tablerow['Fx'] = 0.0
+                        tablerow['Fy'] = 0.0
+                        tablerow['Fz'] = 0.0
+                    tablerow.append()
+                csv_table.flush()
+        h5f.close()
+
+
+class EnvelopeClass:
+    """
+    Class with the definition of the table for the envelope results
+    """
+    class section(tbl.IsDescription):
+
+        Mx = tbl.Float32Col()
+        My = tbl.Float32Col()
+        Mz = tbl.Float32Col()
+        Fx = tbl.Float32Col()
+        Fy = tbl.Float32Col()
+        Fz = tbl.Float32Col()
+
+
+# TODO: implement this
+class Results():
+    """
+    Move all Hawc2io to here? NO: this should be the wrapper, to interface
+    the htc_dict with the io functions
+
+    There should be a bare metal module/class for those who only want basic
+    python support for HAWC2 result files and/or launching simulations.
+
+    How to properly design this module? Change each class into a module? Or
+    leave like this?
+    """
+
+    # OK, for now use this to do operations on HAWC2 results files
+
+    def __init__(self):
+        """
+        """
+        pass
+
+    def m_equiv(self, st_arr, load, pos):
+        r"""Centrifugal corrected equivalent moment
+
+        Convert beam loading into a single equivalent bending moment. Note that
+        this is dependent on the location in the cross section, due to the
+        way we measure the strain on the blade and how the calibration of
+        those sensors was done.
+
+        .. math::
+
+            \epsilon = \frac{M_{x_{equiv}}y}{EI_{xx}} = \frac{M_x y}{EI_{xx}}
+            + \frac{M_y x}{EI_{yy}} + \frac{F_z}{EA}
+
+            M_{x_{equiv}} = M_x + \frac{I_{xx}}{I_{yy}} M_y \frac{x}{y}
+            + \frac{I_{xx}}{Ay} F_z
+
+        Parameters
+        ----------
+
+        st_arr : np.ndarray(19)
+            Only one line of the st_arr is allowed and it should correspond
+            to the correct radial position of the strain gauge.
+
+        load : list(6)
+            list containing the load time series of following components
+            .. math:: load = F_x, F_y, F_z, M_x, M_y, M_z
+            and where each component is an ndarray(m)
+
+        pos : np.ndarray(2)
+            x,y position wrt neutral axis in the cross section for which the
+            equivalent load should be calculated
+
+        Returns
+        -------
+
+        m_eq : ndarray(m)
+            Equivalent load, see main title
+
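+        Example
+        -------
+
+        A small numerical sketch of the formula above, with made-up section
+        properties and loads:
+
+        >>> A, I_xx, I_yy = 0.01, 2.0e-4, 8.0e-4
+        >>> x, y = 0.05, 0.20
+        >>> F_z, M_x, M_y = 10.0, 50.0, 20.0
+        >>> M_x_equiv = M_x + (I_xx/I_yy)*M_y*(x/y) + F_z*I_xx/(A*y)
+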
+        """
+
+        F_z = load[2]
+        M_x = load[3]
+        M_y = load[4]
+
+        x, y = pos[0], pos[1]
+
+        A = st_arr[ModelData.st_headers.A]
+        I_xx = st_arr[ModelData.st_headers.Ixx]
+        I_yy = st_arr[ModelData.st_headers.Iyy]
+
+        M_x_equiv = M_x + ( (I_xx/I_yy)*M_y*(x/y) ) + ( F_z*I_xx/(A*y) )
+        # or ignore edgewise moment
+        #M_x_equiv = M_x + ( F_z*I_xx/(A*y) )
+
+        return M_x_equiv
+
+
+class ManTurb64(object):
+    """
+    alfaeps, L, gamma, seed, nr_u, nr_v, nr_w, du, dv, dw, high_freq_comp
+    mann_turb_x64.exe fname 1.0 29.4 3.0 1209 256 32 32 2.0 5 5 true
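+
+    As an illustrative sketch (the parameter values follow the example line
+    above; the output file name is hypothetical), the command string could be
+    assembled as:
+
+    >>> args = ['turb_s1209', 1.0, 29.4, 3.0, 1209, 256, 32, 32, 2.0, 5, 5,
+    ...         'true']
+    >>> cmd = 'mann_turb_x64.exe ' + ' '.join([str(k) for k in args])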
+    """
+
+    def __init__(self):
+        self.man64_exe = 'mann_turb_x64.exe'
+        self.wine = 'WINEARCH=win64 WINEPREFIX=~/.wine64 wine'
+
+    def run(self):
+        pass
+
+    def gen_pbs(self, cases):
+
+        case0 = cases[cases.keys()[0]]
+        pbs = prepost.PBSScript()
+        # make sure the path's end with a trailing separator
+        pbs.pbsworkdir = os.path.join(case0['[run_dir]'], '')
+        pbs.path_pbs_e = os.path.join(case0['[pbs_out_dir]'], '')
+        pbs.path_pbs_o = os.path.join(case0['[pbs_out_dir]'], '')
+        pbs.path_pbs_i = os.path.join(case0['[pbs_in_dir]'], '')
+        pbs.check_dirs()
+        for cname, case in cases.iteritems():
+            base = case['[case_id]']
+            pbs.path_pbs_e = os.path.join(case['[pbs_out_dir]'], base + '.err')
+            pbs.path_pbs_o = os.path.join(case['[pbs_out_dir]'], base + '.out')
+            pbs.path_pbs_i = os.path.join(case['[pbs_in_dir]'], base + '.pbs')
+
+            pbs.execute()
+            pbs.create()
+
+
+def eigenbody(cases, debug=False):
+    """
+    Read HAWC2 body eigenanalysis result file
+    =========================================
+
+    This is basically a cases convenience wrapper around windIO.ReadEigenBody
+
+    Parameters
+    ----------
+
+    cases : dict{ case : dict{tag : value} }
+        Dictionary where each case is a key and its value a dictionary
+        holding all the tags/value pairs as used for that case.
+
+    Returns
+    -------
+
+    cases : dict{ case : dict{tag : value} }
+        Dictionary where each case is a key and its value a dictionary
+        holding all the tags/value pairs as used for that case. For each
+        case, it is updated with the results, results2 of the eigenvalue
+        analysis performed for each body using the following respective
+        tags: [eigen_body_results] and [eigen_body_results2].
+
+    """
+
+    #Body data for body number : 3 with the name :nacelle
+    #Results:         fd [Hz]       fn [Hz]       log.decr [%]
+    #Mode nr:  1:   1.45388E-21    1.74896E-03    6.28319E+02
+
+    for case in cases:
+        # tags for the current case
+        tags = cases[case]
+        file_path = os.path.join(tags['[run_dir]'], tags['[eigenfreq_dir]'])
+        # FIXME: do not assume anything about the file name here, should be
+        # fully defined in the tags/dataframe
+        file_name = tags['[case_id]'] + '_body_eigen'
+        # and load the eigenfrequency body results
+        results, results2 = windIO.ReadEigenBody(file_path, file_name,
+                                                  nrmodes=10)
+        # add them to the htc_dict
+        cases[case]['[eigen_body_results]'] = results
+        cases[case]['[eigen_body_results2]'] = results2
+
+    return cases
+
+def eigenstructure(cases, debug=False):
+    """
+    Read HAWC2 structure eigenanalysis result file
+    ==============================================
+
+    This is basically a cases convenience wrapper around
+    windIO.ReadEigenStructure
+
+    Parameters
+    ----------
+
+    cases : dict{ case : dict{tag : value} }
+        Dictionary where each case is a key and its value a dictionary
+        holding all the tags/value pairs as used for that case.
+
+    Returns
+    -------
+
+    cases : dict{ case : dict{tag : value} }
+        Dictionary where each case is a key and its value a dictionary
+        holding all the tags/value pairs as used for that case. For each
+        case, it is updated with the modes_arr of the eigenvalue
+        analysis performed for the structure.
+        The modes array (ndarray(3,n)) holds fd, fn and damping.
+    """
+
+    for case in cases:
+        # tags for the current case
+        tags = cases[case]
+        file_path = os.path.join(tags['[run_dir]'], tags['[eigenfreq_dir]'])
+        # FIXME: do not assume anything about the file name here, should be
+        # fully defined in the tags/dataframe
+        file_name = tags['[case_id]'] + '_strc_eigen'
+        # and load the eigenfrequency structure results
+        modes = windIO.ReadEigenStructure(file_path, file_name, max_modes=500)
+        # add them to the htc_dict
+        cases[case]['[eigen_structure]'] = modes
+
+    return cases
+
+if __name__ == '__main__':
+    pass
+
diff --git a/wetb/prepost/dlcdefs.py b/wetb/prepost/dlcdefs.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec98c70381849c4737054c8740272565013f2ba9
--- /dev/null
+++ b/wetb/prepost/dlcdefs.py
@@ -0,0 +1,392 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Nov  5 14:01:25 2014
+
+@author: dave
+"""
+from __future__ import division
+from __future__ import print_function
+
+import os
+import unittest
+
+import pandas as pd
+
+import misc
+
+def casedict2xlsx():
+    """
+    Convert a full Cases.cases dict to Excel spreadsheets
+    """
+
+
+def configure_dirs(verbose=False):
+    """
+    Automatically configure required directories to launch simulations
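+
+    A minimal sketch of the path convention assumed here (the directory
+    names are hypothetical): the project name and sim_id are taken from the
+    last two elements of the current working directory:
+
+    >>> P_RUN = '/home/user/demo_project/sim_a0'
+    >>> P_RUN.split('/')[-2], P_RUN.split('/')[-1]
+    ('demo_project', 'sim_a0')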
+    """
+
+    P_RUN = os.getcwd()
+    p_run_root = os.sep.join(P_RUN.split(os.sep)[:-2])
+    # MODEL SOURCES, exchange file sources
+    P_SOURCE = P_RUN
+    # Project name, sim_id: derive from folder name
+    PROJECT = P_RUN.split(os.sep)[-2]
+    sim_id = P_RUN.split(os.sep)[-1]
+
+    master = find_master_file(P_SOURCE)
+    if master is None:
+        raise ValueError('Could not find master file in htc/_master')
+    MASTERFILE = master
+    P_MASTERFILE = os.path.join(P_SOURCE, 'htc%s_master%s' % (os.sep, os.sep))
+    POST_DIR = os.path.join(p_run_root, PROJECT, 'python-prepost-data%s' % os.sep)
+
+    if verbose:
+        print('='*79)
+        print('POST_DIR: %s' % POST_DIR)
+        print('   P_RUN: %s' % P_RUN)
+        print('P_SOURCE: %s' % P_SOURCE)
+        print(' PROJECT: %s' % PROJECT)
+        print('  sim_id: %s' % sim_id)
+        print('  master: %s' % MASTERFILE)
+        print('='*79)
+
+    return P_RUN, P_SOURCE, PROJECT, sim_id, P_MASTERFILE, MASTERFILE, POST_DIR
+
+
+def find_master_file(proot, htc_dir='htc', master_dir='_master',
+                     master_contains='_master_'):
+    """
+    Find the master file name. It is assumed that the master file is in the
+    folder _master, under htc, and contains _master_ in the file name.
+    """
+
+    for root, dirs, files in os.walk(os.path.join(proot, htc_dir, master_dir)):
+        for fname in files:
+            if fname.find(master_contains) > -1:
+                return fname
+    return None
+
+
+def variable_tag_func(master, case_id_short=False):
+    """
+    When using the Excel definitions, and the whole default setup, the
+    variable_tag_func is not required to do anything extra.
+    """
+
+    # -------------------------------------------------------------------------
+#    mt = master
+#    V = mt['windspeed']
+#    mt['duration'] = mt['time_stop'] - mt['t0']
+#    t = mt['duration']
+#    if V > abs(1e-15):
+#        b = 5.6
+#        mt['TI'] = mt['TI_ref'] * ((0.75*V) + b) / V # NTM
+#        # ETM
+#        c = 2.0
+#        V_ave = 0.2 * 50.0
+#        sigma = mt['TI_ref'] / V
+#        mt['TI'] = sigma * c * (0.072 * (V_ave / c + 3.0) * (V / c - 4.0) + 10.0)
+#    else:
+#        mt['TI'] = 0
+#
+#    mt['turb_dx'] = V*t/mt['turb_grid_x']
+#
+#    mt['turb_dy'] = (mt['rotor_diameter'] / mt['turb_grid_yz'])*1.1
+#
+#    mt['turb_dz'] = (mt['rotor_diameter'] / mt['turb_grid_yz'])*1.1
+#
+#    # check: dx spacing should be 0.1*mean_windspeed and 0.2*mean_windspeed
+#    # between 0.1 and 0.2 seconds between points
+#    if not (V*0.1 < mt['turb_dx'] < V*0.2):
+#        logging.warn('turbulence spacing dx out of bounds')
+#        print('%5.3f  %5.3f  %5.3f' % (V*0.1, mt['turb_dx'], V*0.2))
+#
+#    #mt['turb_base_name'] = 'turb_s' + str(mt['turb_seed']) + '_' + str(V)
+#    mt['turb_base_name'] = 'turb_s%i_%1.2f' % (mt['turb_seed'], V)
+    # -------------------------------------------------------------------------
+
+    return master
+
+
+def vartags_dlcs(master):
+
+    mt = master.tags
+
+    dlc_case = mt['[Case folder]']
+    mt['[data_dir]'] = 'data/'
+    mt['[res_dir]'] = 'res/%s/' % dlc_case
+    mt['[log_dir]'] = 'logfiles/%s/' % dlc_case
+    mt['[htc_dir]'] = 'htc/%s/' % dlc_case
+    mt['[case_id]'] = mt['[Case id.]']
+    mt['[time_stop]'] = mt['[time stop]']
+    mt['[turb_base_name]'] = mt['[Turb base name]']
+    mt['[DLC]'] = mt['[Case id.]'].split('_')[0][3:]
+    mt['[pbs_out_dir]'] = 'pbs_out/%s/' % dlc_case
+    mt['[pbs_in_dir]'] = 'pbs_in/%s/' % dlc_case
+    mt['[iter_dir]'] = 'iter/%s/' % dlc_case
+    if mt['[eigen_analysis]']:
+        rpl = (dlc_case, mt['[Case id.]'])
+        mt['[eigenfreq_dir]'] = 'res_eigen/%s/%s/' % rpl
+    mt['[duration]'] = str(float(mt['[time_stop]']) - float(mt['[t0]']))
+    # replace nan with empty
+    for ii, jj in mt.iteritems():
+        if jj == 'nan':
+            mt[ii] = ''
+
+    return master
+
+
+def tags_dlcs(master):
+    """
+    Initiate tags that are defined in the DLC spreadsheets
+    """
+
+    master.tags['[t0]'] = 0
+    master.tags['[time stop]'] = 0
+    master.tags['[Case folder]'] = 'test'
+    master.tags['[Case id.]'] = 'test'
+    master.tags['[Windspeed]'] = 8
+    master.tags['[wdir]'] = 0 # used for the user defined wind
+    master.tags['[wdir_rot]'] = 0 # used for the windfield rotations
+    master.tags['[tu_seed]'] = 0
+    master.tags['[tu_model]'] = 0
+    master.tags['[TI]'] = 0
+    master.tags['[Turb base name]'] = 'none'
+    master.tags['[turb_dx]'] = 1.0
+    master.tags['[shear_exp]'] = 0.2
+    master.tags['[wsp factor]'] = 1.0
+    master.tags['[gust]'] = False
+    master.tags['[gust_type]'] = ''
+    master.tags['[G_A]'] = ''
+    master.tags['[G_phi0]'] = ''
+    master.tags['[G_t0]'] = ''
+    master.tags['[G_T]'] = ''
+    master.tags['[Rotor azimuth]'] = 0
+    master.tags['[Free shaft rot]'] = ''
+    master.tags['[init_wr]'] = 0.5
+    master.tags['[Pitch 1 DLC22b]'] = 0
+    master.tags['[Rotor locked]'] = False
+    master.tags['[Time stuck DLC22b]'] = -1
+    master.tags['[Cut-in time]'] = -1
+    master.tags['[Cut-out time]'] = -1
+    master.tags['[Stop type]'] = -1
+    master.tags['[Pitvel 1]'] = 4
+    master.tags['[Pitvel 2]'] = 6
+    master.tags['[Grid loss time]'] = 1000
+    master.tags['[out_format]'] = 'hawc_binary'
+    master.tags['[Time pitch runaway]'] = 1000
+    master.tags['[Induction]'] = 1
+    master.tags['[Dyn stall]'] = 1
+
+    return master
+
+
+def tags_defaults(master):
+
+    # other required tags and their defaults
+    master.tags['[dt_sim]'] = 0.02
+    master.tags['[hawc2_exe]'] = 'hawc2-latest'
+    # folder names for the saved results, htc, data, zip files
+    # Following dirs are relative to the model_dir_server and they specify
+    # the location where the results, logfiles and animation files generated
+    # on the server should be copied to after the simulation has finished.
+    # On the node, it will try to copy the turbulence files from these dirs.
+    master.tags['[animation_dir]'] = 'animation/'
+    master.tags['[control_dir]']   = 'control/'
+    master.tags['[data_dir]']      = 'data/'
+    master.tags['[eigen_analysis]'] = False
+    master.tags['[eigenfreq_dir]'] = False
+    master.tags['[htc_dir]']       = 'htc/'
+    master.tags['[log_dir]']       = 'logfiles/'
+    master.tags['[meander_dir]']   = False
+    master.tags['[opt_dir]']       = False
+    master.tags['[pbs_out_dir]']   = 'pbs_out/'
+    master.tags['[res_dir]']       = 'res/'
+    master.tags['[iter_dir]']      = 'iter/'
+    master.tags['[turb_dir]']      = 'turb/'
+    master.tags['[turb_db_dir]']   = '../turb/'
+    master.tags['[wake_dir]']      = False
+    master.tags['[hydro_dir]']     = False
+    master.tags['[mooring_dir]']   = False
+    master.tags['[externalforce]'] = False
+    # zip_root_files is only used when copying to run_dir and creating the
+    # zip file; it is defined in the HtcMaster object
+    master.tags['[zip_root_files]'] = []
+    # only active on PBS level, so files have to be present in the run_dir
+    master.tags['[copyback_files]'] = []   # copyback_resultfile
+    master.tags['[copyback_frename]'] = [] # copyback_resultrename
+    master.tags['[copyto_files]'] = []     # copyto_inputfile
+    master.tags['[copyto_generic]'] = []   # copyto_input_required_defaultname
+    master.tags['[eigen_analysis]'] = False
+
+    # =========================================================================
+    # basic required tags by HtcMaster and PBS in order to function properly
+    # =========================================================================
+    # the express queue ('#PBS -q xpresq') has a maximum walltime of 1h
+    master.tags['[pbs_queue_command]'] = '#PBS -q workq'
+    # walltime should have following format: hh:mm:ss
+    master.tags['[walltime]'] = '04:00:00'
+    master.tags['[auto_walltime]'] = False
+
+    return master
+
+
+def excel_stabcon(proot, fext='xlsx', pignore=None, sheet=0,
+                  pinclude=None):
+    """
+    Read all MS Excel files that hold load case definitions according to
+    the team STABCON definitions. Save each case in a list according to the
+    opt_tags principles as used in Simulations.launch(). This method assumes
+    that a standard HAWC2 folder layout is used with the following folder
+    names: res, logfiles, htc, pbs_out, pbs_in, iter. Furthermore, some tags
+    are added to be compatible with the tag convention in the Simulations
+    module.
+
+
+    Parameters
+    ----------
+
+    proot : string
+        Path that will be searched recursively for Excel files containing
+        load case definitions.
+
+    fext : string, default='xlsx'
+        File extension of the Excel files that should be loaded
+
+    pignore : string, default=None
+        Specify which string can not occur in the full path of the DLC target.
+
+    pinclude : string, default=None
+        Specify which string has to occur in the full path of the DLC target.
+
+    sheet : string or int, default=0
+        Name or index of the Excel sheet to be considered. By default, the
+        first sheet (index=0) is taken.
+
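+    Example
+    -------
+
+    A minimal usage sketch (the path is hypothetical and is searched
+    recursively for spreadsheets):
+
+    >>> opt_tags = excel_stabcon('data/DLCs')
+    >>> # each element of opt_tags is a dictionary of tag/value pairs that
+    >>> # defines one case, e.g. opt_tags[0]['[case_id]']
+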
+    """
+    print('looking for DLC spreadsheet definitions at:')
+    print(proot)
+    df_list = misc.read_excel_files(proot, fext=fext, pignore=pignore,
+                                    sheet=sheet, pinclude=pinclude)
+
+    print('found %i Excel file(s), ' % len(df_list), end='')
+    k = 0
+    for df in df_list:
+        k += len(df)
+    print('in which a total of %s cases are defined.' % k)
+
+    opt_tags = []
+
+    for dlc, df in df_list.iteritems():
+        # replace ';' with False, and Nan(='') with True
+        # this is easier when testing for the presence of stuff compared
+        # to checking if a value is either True/False or ''/';'
+        # this doesn't work, it will result in 1 for True and 0 for False
+        # because the nan values have np.float dtype
+#        df.fillna(' ', inplace=True)
+#        df.replace(';', False, inplace=True)
+        # instead, convert everything to strings, this will maintain some nans
+        # as empty strings, but not all of them!
+        df2 = df.astype(str)
+        for count, row in df2.iterrows():
+            tags_dict = {}
+            # construct the dict, convert unicode keys/values to strings
+            for key, value in row.iteritems():
+                if isinstance(value, unicode):
+                    tags_dict[str(key)] = str(value)
+                else:
+                    tags_dict[str(key)] = value
+                # convert ; and empty to False/True
+#                if isinstance(tags_dict[str(key)], str):
+                if tags_dict[str(key)] == ';':
+                    tags_dict[str(key)] = False
+                elif tags_dict[str(key)] == '':
+                    tags_dict[str(key)] = True
+                elif tags_dict[str(key)].lower() == 'nan':
+                    tags_dict[str(key)] = True
+
+            tags_dict['[Case folder]'] = tags_dict['[Case folder]'].lower()
+            tags_dict['[Case id.]'] = tags_dict['[Case id.]'].lower()
+            dlc_case = tags_dict['[Case folder]']
+            tags_dict['[data_dir]'] = 'data/'
+            tags_dict['[res_dir]'] = 'res/%s/' % dlc_case
+            tags_dict['[log_dir]'] = 'logfiles/%s/' % dlc_case
+            tags_dict['[htc_dir]'] = 'htc/%s/' % dlc_case
+            tags_dict['[case_id]'] = tags_dict['[Case id.]']
+            tags_dict['[time_stop]'] = tags_dict['[time stop]']
+            tags_dict['[turb_base_name]'] = tags_dict['[Turb base name]']
+            tags_dict['[DLC]'] = tags_dict['[Case id.]'].split('_')[0][3:]
+            tags_dict['[pbs_out_dir]'] = 'pbs_out/%s/' % dlc_case
+            tags_dict['[pbs_in_dir]'] = 'pbs_in/%s/' % dlc_case
+            tags_dict['[iter_dir]'] = 'iter/%s/' % dlc_case
+            # the default spreadsheets do not define the tags related to the
+            # eigen analysis yet
+            if '[eigen_analysis]' in tags_dict and tags_dict['[eigen_analysis]']:
+                rpl = (dlc_case, tags_dict['[Case id.]'])
+                if '[eigenfreq_dir]' in tags_dict:
+                    tags_dict['[eigenfreq_dir]'] = 'res_eigen/%s/%s/' % rpl
+            t_stop = float(tags_dict['[time_stop]'])
+            t0 = float(tags_dict['[t0]'])
+            tags_dict['[duration]'] = str(t_stop - t0)
+            opt_tags.append(tags_dict.copy())
+
+    return opt_tags
+
+
+def read_tags_spreadsheet(fname):
+    """Read a spreadsheet with HAWC2 tags, make sure no 0/1/nan ends up
+    replacing the ";" or "" (empty). Do not add any other tags.
+
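+    Parameters
+    ----------
+
+    fname : str
+        Path to the Excel spreadsheet that holds one tag per column and one
+        case per row.
+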
+    Returns
+    -------
+
+    opt_tags : [{}, {}] list of dictionaries
+    """
+
+    df = pd.read_excel(fname)
+    df2 = df.astype(str)
+    opt_tags = []
+    for count, row in df2.iterrows():
+        tags_dict = {}
+        # construct the dict, convert unicode keys/values to strings
+        for key, value in row.iteritems():
+            if isinstance(value, unicode):
+                tags_dict[str(key)] = str(value)
+            else:
+                tags_dict[str(key)] = value
+            # convert ; and empty to False/True
+            if tags_dict[str(key)] == ';':
+                tags_dict[str(key)] = False
+            elif tags_dict[str(key)] == '':
+                tags_dict[str(key)] = True
+            elif tags_dict[str(key)].lower() == 'nan':
+                tags_dict[str(key)] = True
+        opt_tags.append(tags_dict.copy())
+
+    return opt_tags
+
+
+class Tests(unittest.TestCase):
+    """
+    """
+
+    def setUp(self):
+        self.fpath = 'data/DLCs'
+
+    def test_read_tag_exchange_file(self):
+
+        df_list = misc.read_excel_files(self.fpath, fext='xlsx', pignore=None,
+                                        sheet=0, pinclude=None)
+
+        df = df_list[df_list.keys()[0]]
+#        df.fillna('', inplace=True)
+#        df.replace(';', False, inplace=True)
+
+    def test_excel_stabcon(self):
+        opt_tags = excel_stabcon(self.fpath)
+
+
+if __name__ == '__main__':
+
+    unittest.main()
+
diff --git a/wetb/prepost/dlcplots.py b/wetb/prepost/dlcplots.py
new file mode 100644
index 0000000000000000000000000000000000000000..87d18cdd3c086e11c4648d6b9271d2baa69d39b5
--- /dev/null
+++ b/wetb/prepost/dlcplots.py
@@ -0,0 +1,1095 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Sep 16 10:21:11 2014
+
+@author: dave
+"""
+
+from __future__ import division
+from __future__ import print_function
+#print(*objects, sep=' ', end='\n', file=sys.stdout)
+
+import os
+import socket
+import gc
+
+import numpy as np
+
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+#from matplotlib.figure import Figure
+#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigCanvas
+#from scipy import interpolate as interp
+#from scipy.optimize import fmin_slsqp
+#from scipy.optimize import minimize
+#from scipy.interpolate import interp1d
+#import scipy.integrate as integrate
+#http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
+import pandas as pd
+
+#import openpyxl as px
+#import numpy as np
+
+#import windIO
+import mplutils
+import Simulations as sim
+import dlcdefs
+
+plt.rc('font', family='serif')
+plt.rc('xtick', labelsize=10)
+plt.rc('ytick', labelsize=10)
+plt.rc('axes', labelsize=12)
+# do not use tex on Gorm
+if not socket.gethostname()[:2] == 'g-':
+    plt.rc('text', usetex=True)
+plt.rc('legend', fontsize=11)
+plt.rc('legend', numpoints=1)
+plt.rc('legend', borderaxespad=0)
+
+# =============================================================================
+### STAT PLOTS
+# =============================================================================
+
+def plot_stats(sim_ids, post_dirs, fig_dir_base=None):
+    """
+    For each wind speed, take the max of the max.
+
+    Only one or two sim_ids are supported. When they are from different
+    models/projects, specify a different post_dir for each sim_id.
+
+    Parameters
+    ----------
+
+    sim_ids : list
+        list of sim_id's, 1 or 2
+
+    post_dirs
+        list of post_dir's, 1 or 2. If 2, should correspond to sim_ids
+
+    fig_dir_base : str, default=None
+        Base directory under which the figures are saved.
+
+    """
+
+    # if sim_id is a list, combine the two dataframes into one
+    df_stats = pd.DataFrame()
+    if type(sim_ids).__name__ == 'list':
+        for ii, sim_id in enumerate(sim_ids):
+            if isinstance(post_dirs, list):
+                post_dir = post_dirs[ii]
+            else:
+                post_dir = post_dirs
+            cc = sim.Cases(post_dir, sim_id, rem_failed=True)
+            if ii == 0:
+                df_stats, _, _ = cc.load_stats()
+            else:
+                # because there is no unique index, we will ignore it
+                df_stats2, _, _ = cc.load_stats()
+                df_stats = pd.concat([df_stats, df_stats2], ignore_index=True)
+    else:
+        sim_id = sim_ids
+        sim_ids = False
+        post_dir = post_dirs
+        cc = sim.Cases(post_dir, sim_id, rem_failed=True)
+        df_stats, _, _ = cc.load_stats()
+
+#    if force_dir:
+#        cc.change_results_dir(resdir=force_dir)
+#        for case in cc.cases:
+#            sim_id = cc.cases[case]['[post_dir]']
+#            cc.cases[case]['[post_dir]'] = post_dir
+
+#    # add DLC category
+#    f = lambda x: x.split('_')[0]
+#    df_stats['DLC'] = df_stats['[Case id.]'].map(f)
+
+#    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(12,8), num=1)
+
+    # define the number of discrete colors to sample from the color map
+    N = 22
+    # select a color map
+    cmap = mpl.cm.get_cmap('jet', N)
+    # convert to array
+    cmap_arr = cmap(np.arange(N))
+    # color=cmap_arr[icol][0:3]
+
+    # make a statistics plot for each channel
+    gb_ch = df_stats.groupby(df_stats.channel)
+
+    # channel selection
+    plot_chans = {}
+    plot_chans['DLL-2-inpvec-2'] = 'P_e'
+    plot_chans['bearing-shaft_rot-angle_speed-rpm'] = 'RPM'
+
+    plot_chans['tower-tower-node-001-momentvec-x'] = 'M_x T_B'
+    plot_chans['tower-tower-node-001-momentvec-y'] = 'M_y T_B'
+    plot_chans['tower-tower-node-001-momentvec-z'] = 'M_z T_B'
+
+    plot_chans['tower-tower-node-008-momentvec-x'] = 'M_x T_T'
+    plot_chans['tower-tower-node-008-momentvec-y'] = 'M_y T_T'
+    plot_chans['tower-tower-node-008-momentvec-z'] = 'M_z T_T'
+
+    plot_chans['shaft-shaft-node-004-momentvec-x'] = 'M_x Shaft_{MB}'
+    plot_chans['shaft-shaft-node-004-momentvec-y'] = 'M_y Shaft_{MB}'
+    plot_chans['shaft-shaft-node-004-momentvec-z'] = 'M_z Shaft_{MB}'
+
+    plot_chans['blade1-blade1-node-003-momentvec-x'] = 'M_x B1_{root}'
+    plot_chans['blade1-blade1-node-003-momentvec-y'] = 'M_y B1_{root}'
+    plot_chans['blade1-blade1-node-003-momentvec-z'] = 'M_z B1_{root}'
+    plot_chans['blade2-blade2-node-003-momentvec-x'] = 'M_x B2_{root}'
+    plot_chans['blade2-blade2-node-003-momentvec-y'] = 'M_y B2_{root}'
+    plot_chans['blade2-blade2-node-003-momentvec-z'] = 'M_z B2_{root}'
+    plot_chans['blade3-blade3-node-003-momentvec-x'] = 'M_x B3_{root}'
+    plot_chans['blade3-blade3-node-003-momentvec-y'] = 'M_y B3_{root}'
+    plot_chans['blade3-blade3-node-003-momentvec-z'] = 'M_z B3_{root}'
+
+    plot_chans['DLL-5-inpvec-1'] = 'Min tower clearance'
+
+    plot_chans['bearing-pitch1-angle-deg'] = 'B1_{pitch}'
+    plot_chans['bearing-pitch2-angle-deg'] = 'B2_{pitch}'
+    plot_chans['bearing-pitch3-angle-deg'] = 'B3_{pitch}'
+
+    plot_chans['setbeta-bladenr-1-flapnr-1'] = 'B1_{flap}'
+    plot_chans['setbeta-bladenr-2-flapnr-1'] = 'B2_{flap}'
+    plot_chans['setbeta-bladenr-3-flapnr-1'] = 'B3_{flap}'
+
+    mfcs1 = ['k', 'w']
+    mfcs2 = ['b', 'w']
+    mfcs3 = ['r', 'w']
+    stds = ['r', 'b']
+
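+    # loop over the channels; per channel, group per DLC and create one
+    # figure (plus one for the standard deviations) per channel/DLC pair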
+    for nr, (ch_name, gr_ch) in enumerate(gb_ch):
+        if ch_name not in plot_chans:
+            continue
+        for dlc_name, gr_ch_dlc in gr_ch.groupby(df_stats['[DLC]']):
+            print('start plotting:  %s %s' % (str(dlc_name).ljust(7), ch_name))
+
+            fig, axes = mplutils.make_fig(nrows=1, ncols=1, figsize=(7,5))
+            ax = axes[0,0]
+            # separate figure for the standard deviations
+            fig2, axes2 = mplutils.make_fig(nrows=1, ncols=1, figsize=(7,5))
+            ax2 = axes2[0,0]
+
+            if fig_dir_base is None and not sim_ids:
+                res_dir = gr_ch_dlc['[res_dir]'][:1].values[0]
+                run_dir = gr_ch_dlc['[run_dir]'][:1].values[0]
+                fig_dir = os.path.join(run_dir, res_dir)
+            elif fig_dir_base is None and isinstance(sim_ids, list):
+                fig_dir = os.path.join(fig_dir_base, '-'.join(sim_ids))
+            elif fig_dir_base and not sim_ids:
+                res_dir = gr_ch_dlc['[res_dir]'][:1].values[0]
+                fig_dir = os.path.join(fig_dir_base, res_dir)
+            elif sim_ids and fig_dir_base is not None:
+                # create the compare directory if not defined
+                fig_dir = fig_dir_base
+
+            # if we have a list of different cases, we also need to group those
+            # because the sim_id wasn't saved before in the data frame,
+            # we need to derive that from the run dir
+            # if there is only one run dir nothing changes
+            ii = 0
+            sid_names = []
+            for run_dir, gr_ch_dlc_sid in gr_ch_dlc.groupby(df_stats['[run_dir]']):
+                sid_name = run_dir.split('/')[-2]
+                sid_names.append(sid_name)
+                print(sid_name)
+                wind = gr_ch_dlc_sid['[Windspeed]'].values
+                dmin = gr_ch_dlc_sid['min'].values
+                dmean = gr_ch_dlc_sid['mean'].values
+                dmax = gr_ch_dlc_sid['max'].values
+                dstd = gr_ch_dlc_sid['std'].values
+                if not sim_ids:
+                    lab1 = 'mean'
+                    lab2 = 'min'
+                    lab3 = 'max'
+                    lab4 = 'std'
+                else:
+                    lab1 = 'mean %s' % sid_name
+                    lab2 = 'min %s' % sid_name
+                    lab3 = 'max %s' % sid_name
+                    lab4 = 'std %s' % sid_name
+                mfc1 = mfcs1[ii]
+                mfc2 = mfcs2[ii]
+                mfc3 = mfcs3[ii]
+                ax.plot(wind, dmean, mec='k', marker='o', mfc=mfc1, ls='',
+                        label=lab1, alpha=0.7)
+                ax.plot(wind, dmin, mec='b', marker='^', mfc=mfc2, ls='',
+                        label=lab2, alpha=0.7)
+                ax.plot(wind, dmax, mec='r', marker='v', mfc=mfc3, ls='',
+                        label=lab3, alpha=0.7)
+
+                ax2.plot(wind, dstd, mec=stds[ii], marker='s', mfc=stds[ii], ls='',
+                        label=lab4, alpha=0.7)
+
+                ii += 1
+
+#            for wind, gr_wind in  gr_ch_dlc.groupby(df_stats['[Windspeed]']):
+#                wind = gr_wind['[Windspeed]'].values
+#                dmin = gr_wind['min'].values#.mean()
+#                dmean = gr_wind['mean'].values#.mean()
+#                dmax = gr_wind['max'].values#.mean()
+##                dstd = gr_wind['std'].mean()
+#                ax.plot(wind, dmean, 'ko', label='mean', alpha=0.7)
+#                ax.plot(wind, dmin, 'b^', label='min', alpha=0.7)
+#                ax.plot(wind, dmax, 'rv', label='max', alpha=0.7)
+##                ax.errorbar(wind, dmean, c='k', ls='', marker='s', mfc='w',
+##                        label='mean and std', yerr=dstd)
+            ax.grid()
+            ax.set_xlim([3, 27])
+            leg = ax.legend(loc='best', ncol=2)
+            leg.get_frame().set_alpha(0.7)
+            ax.set_title(r'{DLC%s} $%s$' % (dlc_name, plot_chans[ch_name]))
+            ax.set_xlabel('Wind speed [m/s]')
+            fig.tight_layout()
+            fig.subplots_adjust(top=0.92)
+            if not sim_ids:
+                fig_path = os.path.join(fig_dir,
+                                        ch_name.replace(' ', '_') + '.png')
+            else:
+                sids = '_'.join(sid_names)
+#                fig_dir = run_dir.split('/')[:-1] + 'figures/'
+                fname = '%s_%s.png' % (ch_name.replace(' ', '_'), sids)
+                fig_path = os.path.join(fig_dir, 'dlc%s/' % dlc_name)
+                if not os.path.exists(fig_path):
+                    os.makedirs(fig_path)
+                fig_path = fig_path + fname
+            fig.savefig(fig_path)#.encode('latin-1')
+#            canvas.close()
+            fig.clear()
+            print('saved: %s' % fig_path)
+
+
+            ax2.grid()
+            ax2.set_xlim([3, 27])
+            leg = ax2.legend(loc='best', ncol=2)
+            leg.get_frame().set_alpha(0.7)
+            ax2.set_title(r'{DLC%s} $%s$' % (dlc_name, plot_chans[ch_name]))
+            ax2.set_xlabel('Wind speed [m/s]')
+            fig2.tight_layout()
+            fig2.subplots_adjust(top=0.92)
+            if not sim_ids:
+                fig_path = os.path.join(fig_dir,
+                                        ch_name.replace(' ', '_') + '_std.png')
+            else:
+                sids = '_'.join(sid_names)
+                fname = '%s_std_%s.png' % (ch_name.replace(' ', '_'), sids)
+                fig_path = os.path.join(fig_dir, 'dlc%s/' % dlc_name)
+                if not os.path.exists(fig_path):
+                    os.makedirs(fig_path)
+                fig_path = fig_path + fname
+            fig2.savefig(fig_path)#.encode('latin-1')
+#            canvas.close()
+            fig2.clear()
+            print('saved: %s' % fig_path)
+
+
+def plot_stats2(sim_ids, post_dirs, fig_dir_base=None, labels=None,
+                post_dir_save=False, dlc_ignore=['00']):
+    """
+    Map which channels have to be compared
+    """
+
+    plot_chans = {}
+
+    plot_chans['B1_{flap}'] = ['setbeta-bladenr-1-flapnr-1']
+    plot_chans['B2_{flap}'] = ['setbeta-bladenr-2-flapnr-1']
+    plot_chans['B3_{flap}'] = ['setbeta-bladenr-3-flapnr-1']
+    plot_chans['M_x B1_{root}'] = ['blade1-blade1-node-003-momentvec-x',
+                                   'blade1-blade1-node-004-momentvec-x']
+    plot_chans['M_y B1_{root}'] = ['blade1-blade1-node-003-momentvec-y',
+                                   'blade1-blade1-node-004-momentvec-y']
+    plot_chans['M_z B1_{root}'] = ['blade1-blade1-node-003-momentvec-z',
+                                   'blade1-blade1-node-004-momentvec-z']
+    plot_chans['B3_{pitch}'] = ['bearing-pitch3-angle-deg']
+    plot_chans['RPM'] = ['bearing-shaft_rot-angle_speed-rpm']
+    plot_chans['P_e'] = ['DLL-2-inpvec-2']
+    plot_chans['P_{mech}'] = ['stats-shaft-power']
+    plot_chans['M_x B3_{root}'] = ['blade3-blade3-node-003-momentvec-x',
+                                   'blade3-blade3-node-004-momentvec-x']
+    plot_chans['M_y B3_{root}'] = ['blade3-blade3-node-003-momentvec-y',
+                                   'blade3-blade3-node-004-momentvec-y']
+    plot_chans['M_z B3_{root}'] = ['blade3-blade3-node-003-momentvec-z',
+                                   'blade3-blade3-node-004-momentvec-z']
+    plot_chans['B2_{pitch}'] = ['bearing-pitch2-angle-deg']
+
+    plot_chans['B3 U_y'] = ['global-blade3-elem-018-zrel-1.00-State pos-y']
+    plot_chans['M_z B2_{root}'] = ['blade2-blade2-node-003-momentvec-z',
+                                   'blade2-blade2-node-004-momentvec-z']
+    plot_chans['M_x B2_{root}'] = ['blade2-blade2-node-003-momentvec-x',
+                                   'blade2-blade2-node-004-momentvec-x']
+    plot_chans['M_y B2_{root}'] = ['blade2-blade2-node-003-momentvec-y',
+                                   'blade2-blade2-node-004-momentvec-y']
+    plot_chans['B1_{pitch}'] = ['bearing-pitch1-angle-deg']
+    plot_chans['M_x T_B'] = ['tower-tower-node-001-momentvec-x']
+    plot_chans['M_y T_B'] = ['tower-tower-node-001-momentvec-y']
+    plot_chans['M_z T_B'] = ['tower-tower-node-001-momentvec-z']
+    plot_chans['tower clearance'] = ['DLL-5-inpvec-1']
+    plot_chans['M_z T_T'] = ['tower-tower-node-008-momentvec-z']
+    plot_chans['M_y Shaft_{MB}'] = ['shaft-shaft-node-004-momentvec-y']
+    plot_chans['M_x Shaft_{MB}'] = ['shaft-shaft-node-004-momentvec-x']
+    plot_chans['M_z Shaft_{MB}'] = ['shaft-shaft-node-004-momentvec-z']
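+    # a list with more than one channel name means the same sensor can occur
+    # under a different name (e.g. another node number) in other result sets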
+
+    # reduce required memory, only use following columns
+    cols = ['[run_dir]', '[DLC]', 'channel', '[res_dir]', '[Windspeed]',
+            'mean', 'max', 'min', 'std', '[wdir]']
+
+    # if sim_id is a list, combine the two dataframes into one
+    df_stats = pd.DataFrame()
+    if type(sim_ids).__name__ == 'list':
+        for ii, sim_id in enumerate(sim_ids):
+            if isinstance(post_dirs, list):
+                post_dir = post_dirs[ii]
+            else:
+                post_dir = post_dirs
+            cc = sim.Cases(post_dir, sim_id, rem_failed=True)
+            df_stats, _, _ = cc.load_stats(columns=cols, leq=False)
+            print('%s Cases loaded.' % sim_id)
+
+            # if specified, save the merged sims elsewhere
+            if post_dir_save:
+                fpath = os.path.join(post_dir_save, '-'.join(sim_ids) + '.h5')
+                try:
+                    os.makedirs(post_dir_save)
+                except OSError:
+                    pass
+            else:
+                fpath = os.path.join(post_dir, '-'.join(sim_ids) + '.h5')
+            if ii == 0:
+                # and save somewhere so we can add the second data frame on
+                # disc
+                df_stats.to_hdf(fpath, 'table', mode='w', format='table',
+                                complevel=9, complib='blosc')
+                print('%s merged stats written to: %s' % (sim_id, fpath))
+            else:
+                # instead of doing a concat in memory, add to the hdf store
+                df_stats.to_hdf(fpath, 'table', mode='r+', format='table',
+                                complevel=9, complib='blosc', append=True)
+                print('%s merging stats into:      %s' % (sim_id, fpath))
+#                df_stats = pd.concat([df_stats, df_stats2], ignore_index=True)
+#                df_stats2 = None
+            # we might run into memory issues
+            del df_stats, _, cc
+            gc.collect()
+        # and load the reduced combined set
+        print('loading merged stats:            %s' % fpath)
+        df_stats = pd.read_hdf(fpath, 'table')
+    else:
+        sim_id = sim_ids
+        sim_ids = [sim_id]
+        post_dir = post_dirs
+        cc = sim.Cases(post_dir, sim_id, rem_failed=True)
+        df_stats, _, _ = cc.load_stats(leq=False)
+
+    mfcs1 = ['k', 'w']
+    mfcs2 = ['b', 'w']
+    mfcs3 = ['r', 'w']
+    stds = ['r', 'b']
+
+    # first, take each DLC apart
+    for dlc_name, gr_dlc in df_stats.groupby(df_stats['[DLC]']):
+        # do not plot the stats for dlc00
+        if dlc_name in dlc_ignore:
+            continue
+        # cycle through all the target plot channels
+        for ch_dscr, ch_names in plot_chans.iteritems():
+            # second, group per channel. Note that when the channel names are not
+            # identical, we need to manually pick them.
+            # figure file name will be the first channel
+            if isinstance(ch_names, list):
+                df_chan = gr_dlc[gr_dlc.channel == ch_names[0]]
+                fname_base = ch_names[0].replace(' ', '_')
+                try:
+                    df2 = gr_dlc[gr_dlc.channel == ch_names[1]]
+                    df_chan = pd.concat([df_chan, df2], ignore_index=True)
+                except IndexError:
+                    pass
+            else:
+                ch_name = ch_names
+                ch_names = [ch_name]
+                df_chan = gr_dlc[gr_dlc.channel == ch_name]
+                fname_base = ch_name.replace(' ', '_')
+
+            # if not, then we are missing a channel description, or the
+            # channel is simply not available in the given result set
+#            if not len(df_chan.channel.unique()) == len(ch_names):
+#                continue
+            lens = []
+            for key, gr_ch_dlc_sid in df_chan.groupby(df_chan['[run_dir]']):
+                lens.append(len(gr_ch_dlc_sid))
+            # when the channel is simply not present
+            if len(lens) == 0:
+                continue
+            # when only one of the channels was present, but the set is still
+            # complete.
+            # FIXME: what if both channels are present?
+            if len(ch_names) > 1 and (lens[0] < 1 or lens[1] < 1):
+                continue
+
+            print('start plotting:  %s %s' % (str(dlc_name).ljust(7), ch_dscr))
+
+            fig, axes = mplutils.make_fig(nrows=1, ncols=1,
+                                           figsize=(11,7.15), dpi=120)
+            ax = axes[0,0]
+            # separate figure for the standard deviations
+            fig2, axes2 = mplutils.make_fig(nrows=1, ncols=1,
+                                             figsize=(11,7.15), dpi=120)
+            ax2 = axes2[0,0]
+
+            if fig_dir_base is None and len(sim_ids) < 2:
+                res_dir = df_chan['[res_dir]'][:1].values[0]
+                fig_dir = os.path.join(fig_dir_base, res_dir)
+            elif fig_dir_base is None and isinstance(sim_ids, list):
+                fig_dir = os.path.join(fig_dir_base, '-'.join(sim_ids))
+#            elif fig_dir_base and len(sim_ids) < 2:
+#                res_dir = df_chan['[res_dir]'][:1].values[0]
+#                fig_dir = os.path.join(fig_dir_base, res_dir)
+            elif sim_ids and fig_dir_base is not None:
+                # create the compare directory if not defined
+                fig_dir = fig_dir_base
+
+            # if we have a list of different cases, we also need to group those
+            # because the sim_id wasn't saved before in the data frame,
+            # we need to derive that from the run dir
+            # if there is only one run dir nothing changes
+            ii = 0
+            sid_names = []
+            for run_dir, gr_ch_dlc_sid in df_chan.groupby(df_chan['[run_dir]']):
+                if labels is None:
+                    sid_name = run_dir.split(os.path.sep)[-2]
+                else:
+                    sid_name = labels[ii]
+                sid_names.append(sid_name)
+                print('   sim_id/label:', sid_name)
+                # FIXME: will this go wrong in PY3?
+                if str(dlc_name) in ['61', '62']:
+                    xdata = gr_ch_dlc_sid['[wdir]'].values
+                    xlabel = 'wind direction [deg]'
+                    xlims = [0, 360]
+                else:
+                    xdata = gr_ch_dlc_sid['[Windspeed]'].values
+                    xlabel = 'Wind speed [m/s]'
+                    xlims = [3, 27]
+                dmin = gr_ch_dlc_sid['min'].values
+                dmean = gr_ch_dlc_sid['mean'].values
+                dmax = gr_ch_dlc_sid['max'].values
+                dstd = gr_ch_dlc_sid['std'].values
+                if not sim_ids:
+                    lab1 = 'mean'
+                    lab2 = 'min'
+                    lab3 = 'max'
+                    lab4 = 'std'
+                else:
+                    lab1 = 'mean %s' % sid_name
+                    lab2 = 'min %s' % sid_name
+                    lab3 = 'max %s' % sid_name
+                    lab4 = 'std %s' % sid_name
+                mfc1 = mfcs1[ii]
+                mfc2 = mfcs2[ii]
+                mfc3 = mfcs3[ii]
+                ax.errorbar(xdata, dmean, mec='k', marker='o', mfc=mfc1, ls='',
+                            label=lab1, alpha=0.7, yerr=dstd)
+                ax.plot(xdata, dmin, mec='b', marker='^', mfc=mfc2, ls='',
+                        label=lab2, alpha=0.7)
+                ax.plot(xdata, dmax, mec='r', marker='v', mfc=mfc3, ls='',
+                        label=lab3, alpha=0.7)
+
+                ax2.plot(xdata, dstd, mec=stds[ii], marker='s', mfc=stds[ii],
+                        ls='', label=lab4, alpha=0.7)
+
+                ii += 1
+
+#            for wind, gr_wind in  gr_ch_dlc.groupby(df_stats['[Windspeed]']):
+#                wind = gr_wind['[Windspeed]'].values
+#                dmin = gr_wind['min'].values#.mean()
+#                dmean = gr_wind['mean'].values#.mean()
+#                dmax = gr_wind['max'].values#.mean()
+##                dstd = gr_wind['std'].mean()
+#                ax.plot(wind, dmean, 'ko', label='mean', alpha=0.7)
+#                ax.plot(wind, dmin, 'b^', label='min', alpha=0.7)
+#                ax.plot(wind, dmax, 'rv', label='max', alpha=0.7)
+##                ax.errorbar(wind, dmean, c='k', ls='', marker='s', mfc='w',
+##                        label='mean and std', yerr=dstd)
+            ax.grid()
+            ax.set_xlim(xlims)
+            leg = ax.legend(loc='best', ncol=3)
+            leg.get_frame().set_alpha(0.7)
+            ax.set_title(r'{DLC%s} $%s$' % (dlc_name, ch_dscr))
+            ax.set_xlabel(xlabel)
+            fig.tight_layout()
+            fig.subplots_adjust(top=0.92)
+            if not sim_ids:
+                fig_path = os.path.join(fig_dir, fname_base + '.png')
+            else:
+                sids = '_'.join(sid_names)
+                fname = '%s_%s.png' % (fname_base, sids)
+                fig_path = os.path.join(fig_dir, 'dlc%s/' % dlc_name)
+                if not os.path.exists(fig_path):
+                    os.makedirs(fig_path)
+                fig_path = fig_path + fname
+            fig.savefig(fig_path)#.encode('latin-1')
+            fig.clear()
+            print('saved: %s' % fig_path)
+
+            ax2.grid()
+            ax2.set_xlim(xlims)
+            leg = ax2.legend(loc='best', ncol=3)
+            leg.get_frame().set_alpha(0.7)
+            ax2.set_title(r'{DLC%s} $%s$' % (dlc_name, ch_dscr))
+            ax2.set_xlabel(xlabel)
+            fig2.tight_layout()
+            fig2.subplots_adjust(top=0.92)
+            if not sim_ids:
+                fig_path = os.path.join(fig_dir, fname_base + '_std.png')
+            else:
+                sids = '_'.join(sid_names)
+                fname = '%s_std_%s.png' % (fname_base, sids)
+                fig_path = os.path.join(fig_dir, 'dlc%s/' % dlc_name)
+                if not os.path.exists(fig_path):
+                    os.makedirs(fig_path)
+                fig_path = fig_path + fname
+            fig2.savefig(fig_path)#.encode('latin-1')
+            fig2.clear()
+            print('saved: %s' % fig_path)
+
+
+class PlotStats(object):
+
+    def __init__(self):
+        pass
+
+    def load_stats(self, sim_ids, post_dirs, post_dir_save=False):
+
+        self.sim_ids = sim_ids
+        self.post_dirs = post_dirs
+
+        # reduce required memory, only use following columns
+        cols = ['[run_dir]', '[DLC]', 'channel', '[res_dir]', '[Windspeed]',
+                'mean', 'max', 'min', 'std', '[wdir]']
+
+        # if sim_id is a list, combine the two dataframes into one
+        df_stats = pd.DataFrame()
+        if type(sim_ids).__name__ == 'list':
+            for ii, sim_id in enumerate(sim_ids):
+                if isinstance(post_dirs, list):
+                    post_dir = post_dirs[ii]
+                else:
+                    post_dir = post_dirs
+                cc = sim.Cases(post_dir, sim_id, rem_failed=True)
+                df_stats, _, _ = cc.load_stats(columns=cols, leq=False)
+                print('%s Cases loaded.' % sim_id)
+
+                # if specified, save the merged sims elsewhere
+                if post_dir_save:
+                    fpath = os.path.join(post_dir_save, '-'.join(sim_ids) + '.h5')
+                    try:
+                        os.makedirs(post_dir_save)
+                    except OSError:
+                        pass
+                else:
+                    fpath = os.path.join(post_dir, '-'.join(sim_ids) + '.h5')
+                if ii == 0:
+                    # and save somewhere so we can add the second data frame on
+                    # disc
+                    df_stats.to_hdf(fpath, 'table', mode='w', format='table',
+                                    complevel=9, complib='blosc')
+                    print('%s merged stats written to: %s' % (sim_id, fpath))
+                else:
+                    # instead of doing a concat in memory, add to the hdf store
+                    df_stats.to_hdf(fpath, 'table', mode='r+', format='table',
+                                    complevel=9, complib='blosc', append=True)
+                    print('%s merging stats into:      %s' % (sim_id, fpath))
+                # we might run into memory issues
+                del df_stats, _, cc
+                gc.collect()
+            # and load the reduced combined set
+            print('loading merged stats:            %s' % fpath)
+            df_stats = pd.read_hdf(fpath, 'table')
+        else:
+            sim_id = sim_ids
+            sim_ids = [sim_id]
+            post_dir = post_dirs
+            cc = sim.Cases(post_dir, sim_id, rem_failed=True)
+            df_stats, _, _ = cc.load_stats(leq=False)
+
+        return df_stats
+
+    def select_extremes_blade_radial(self, df):
+        """
+        For each radial position of the blade, find the extremes
+        """
+
+        def selector(x):
+            """
+            select following channels:
+            'local-blade%1i-node-%03i-momentvec-x'
+            'blade2-blade2-node-003-momentvec-y'
+            """
+            if x[:11] == 'local-blade' and x[22:31] == 'momentvec':
+                return True
+            else:
+                return False
+
+        # find all blade local load channels
+        criteria = df.channel.map(lambda x: selector(x))
+        df = df[criteria]
+        # split channel columns so we can select channels properly
+        df = df.join(df.channel.apply(lambda x: pd.Series(x.split('-'))))
+
+        df_ext = {'dlc':[], 'case':[], 'node':[], 'max':[], 'min':[], 'comp':[]}
+
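+        # helper: append the max or min extreme of the current group (and
+        # the corresponding node, dlc, case and component) to the dict x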
+        def fillvalues(x, ii, maxmin):
+            x['node'].append(m_group[3].ix[ii])
+            x['dlc'].append(m_group['[DLC]'].ix[ii])
+            x['case'].append(m_group['[case_id]'].ix[ii])
+            x['comp'].append(m_group[5].ix[ii])
+            if maxmin == 'max':
+                x['max'].append(m_group['max'].ix[ii])
+                x['min'].append(np.nan)
+            else:
+                x['max'].append(np.nan)
+                x['min'].append(m_group['min'].ix[ii])
+            return x
+
+        # we take blade1, blade2, and blade3
+        df_b2 = df[df[0]=='local']
+#        df_b2 = df_b2[df_b2[1]=='blade2']
+        df_b2 = df_b2[df_b2[4]=='momentvec']
+#        df_b2 = df_b2[df_b2[5]=='x']
+        # make sure we only have blade1, 2 and 3
+        assert set(df_b2[1].unique()) == set(['blade3', 'blade2', 'blade1'])
+#        # number of nodes
+#        nrnodes = len(df_b2[3].unique())
+        # group by node number, and take the max
+        for nodenr, group in df_b2.groupby(df_b2[3]):
+            print(nodenr, end='   ')
+            for comp, m_group in df_b2.groupby(group[5]):
+                print(comp)
+                imax = m_group['max'].argmax()
+                imin = m_group['min'].argmin()
+                df_ext = fillvalues(df_ext, imax, 'max')
+                df_ext = fillvalues(df_ext, imin, 'min')
+
+        df_ext = pd.DataFrame(df_ext)
+        df_ext.sort(columns='node', inplace=True)
+
+        return df_ext
+
+    def plot_extremes_blade_radial(self, df_ext, fpath):
+        nrows = 2
+        ncols = 2
+        figsize = (11,7.15)
+        fig, axes = mplutils.make_fig(nrows=nrows, ncols=ncols, figsize=figsize)
+
+#        self.col = ['r', 'k']
+#        self.alf = [1.0, 0.7]
+#        self.i = 0
+
+        mx_max = df_ext['max'][df_ext.comp == 'x'].dropna()
+        mx_min = df_ext['min'][df_ext.comp == 'x'].dropna()
+        my_max = df_ext['max'][df_ext.comp == 'y'].dropna()
+        my_min = df_ext['min'][df_ext.comp == 'y'].dropna()
+#        nodes = df_ext.node.ix[mx_max.index]
+        axes[0,0].plot(mx_max, 'r^', label='$M_{x_{max}}$')
+        axes[0,1].plot(mx_min, 'bv', label='$M_{x_{min}}$')
+        axes[1,0].plot(my_max, 'r^', label='$M_{y_{max}}$')
+        axes[1,1].plot(my_min, 'bv', label='$M_{y_{min}}$')
+
+        axs = axes.ravel()
+        for ax in axs:
+            ax.grid()
+            ax.legend(loc='best')
+
+#        axs[0].set_xticklabels([])
+#        axs[1].set_xticklabels([])
+#        axs[2].set_xticklabels([])
+#        axs[-1].set_xlabel('time [s]')
+
+        fig.tight_layout()
+        fig.subplots_adjust(hspace=0.06)
+        fig.subplots_adjust(top=0.98)
+
+        fdir = os.path.dirname(fpath)
+#        fname = os.path.basename(fpath)
+
+        if not os.path.exists(fdir):
+            os.makedirs(fdir)
+        print('saving: %s ...' % fpath, end='')
+        fig.savefig(fpath)#.encode('latin-1')
+        print('done')
+        fig.clear()
+
+        # save as tables
+        df_ext.ix[mx_max.index].to_excel(fpath.replace('.png', '_mx_max.xls'))
+        df_ext.ix[mx_min.index].to_excel(fpath.replace('.png', '_mx_min.xls'))
+        df_ext.ix[my_max.index].to_excel(fpath.replace('.png', '_my_max.xls'))
+        df_ext.ix[my_min.index].to_excel(fpath.replace('.png', '_my_min.xls'))
+
+    def extract_leq_blade_radial(self, df_leq, fpath):
+
+        def selector(x):
+            """
+            select following channels:
+            'local-blade%1i-node-%03i-momentvec-x'
+            'blade2-blade2-node-003-momentvec-y'
+            """
+            if x[:11] == 'local-blade' and x[22:31] == 'momentvec':
+                return True
+            else:
+                return False
+
+        # find all blade local load channels
+        criteria = df_leq.channel.map(lambda x: selector(x))
+        df = df_leq[criteria]
+        # split channel columns so we can select channels properly
+        df = df.join(df.channel.apply(lambda x: pd.Series(x.split('-'))))
+        df.sort(columns=3, inplace=True)
+        assert set(df[1].unique()) == set(['blade3', 'blade2', 'blade1'])
+
+        leqs = df.keys()[1:10]
+        df_ext = {leq:[] for leq in leqs}
+        df_ext['node'] = []
+        df_ext['comp'] = []
+
+        for nodenr, group in df.groupby(df[3]):
+            print(nodenr, end='   ')
+            for comp, m_group in df.groupby(group[5]):
+                print(comp)
+                for leq in leqs:
+                    df_ext[leq].append(m_group[leq].max())
+                df_ext['node'].append(nodenr)
+                df_ext['comp'].append(comp)
+
+        df_ext = pd.DataFrame(df_ext)
+        df_ext.sort(columns='node', inplace=True)
+
+        df_ext[df_ext.comp=='x'].to_excel(fpath.replace('.xls', '_x.xls'))
+        df_ext[df_ext.comp=='y'].to_excel(fpath.replace('.xls', '_y.xls'))
+        df_ext[df_ext.comp=='z'].to_excel(fpath.replace('.xls', '_z.xls'))
+
+        return df_ext
+
+
+class PlotPerf(object):
+
+    def __init__(self, nrows=4, ncols=1, figsize=(14,11)):
+
+        self.fig, self.axes = mplutils.make_fig(nrows=nrows, ncols=ncols,
+                                                 figsize=figsize)
+#        self.axs = self.axes.ravel()
+        self.col = ['r', 'k']
+        self.alf = [1.0, 0.7]
+        self.i = 0
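+        # self.i tracks which of the (at most two) compared result sets is
+        # being plotted and selects the corresponding color and alpha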
+
+    def plot(self, res, label_id):
+        """
+        """
+        i = self.i
+
+        sim_id = label_id
+        time = res.sig[:,0]
+        self.t0, self.t1 = time[0], time[-1]
+
+        # find the wind speed
+        for channame, chan in res.ch_dict.iteritems():
+            if channame.startswith('windspeed-global-Vy-0.00-0.00'):
+                break
+        wind = res.sig[:,chan['chi']]
+
+        chi = res.ch_dict['bearing-shaft_rot-angle_speed-rpm']['chi']
+        rpm = res.sig[:,chi]
+
+        chi = res.ch_dict['bearing-pitch1-angle-deg']['chi']
+        pitch = res.sig[:,chi]
+
+        chi = res.ch_dict['tower-tower-node-001-momentvec-x']['chi']
+        tx = res.sig[:,chi]
+
+        chi = res.ch_dict['tower-tower-node-001-momentvec-y']['chi']
+        ty = res.sig[:,chi]
+
+        chi = res.ch_dict['DLL-2-inpvec-2']['chi']
+        power = res.sig[:,chi]
+
+        try:
+            chi = res.ch_dict['Tors_e-1-100.11']['chi']
+        except KeyError:
+            chi = res.ch_dict['Tors_e-1-86.50']['chi']
+        tors_1 = res.sig[:,chi]
+
+#        try:
+#            chi = res.ch_dict['Tors_e-1-96.22']['chi']
+#        except:
+#            chi = res.ch_dict['Tors_e-1-83.13']['chi']
+#        tors_2 = res.sig[:,chi]
+
+        try:
+            chi = res.ch_dict['Tors_e-1-84.53']['chi']
+        except KeyError:
+            chi = res.ch_dict['Tors_e-1-73.21']['chi']
+        tors_3 = res.sig[:,chi]
+
+        ax = self.axes.ravel()
+        ax[0].plot(time, wind, self.col[i]+'--', label='%s wind speed' % sim_id,
+                   alpha=self.alf[i])
+        ax[0].plot(time, pitch, self.col[i]+'-.', label='%s pitch' % sim_id,
+                   alpha=self.alf[i])
+        ax[0].plot(time, rpm, self.col[i]+'-', label='%s RPM' % sim_id,
+                   alpha=self.alf[i])
+
+        ax[1].plot(time, tx, self.col[i]+'--', label='%s Tower FA' % sim_id,
+                   alpha=self.alf[i])
+        ax[1].plot(time, ty, self.col[i]+'-', label='%s Tower SS' % sim_id,
+                   alpha=self.alf[i])
+
+        ax[2].plot(time, power/1e6, self.col[i]+'-', alpha=self.alf[i],
+                   label='%s El Power' % sim_id)
+
+        ax[3].plot(time, tors_1, self.col[i]+'--', label='%s torsion tip' % sim_id,
+                   alpha=self.alf[i])
+#        ax[3].plot(time, tors_2, self.col[i]+'-.', label='%s torsion 96 pc' % sim_id,
+#                   alpha=self.alf[i])
+#        ax[3].plot(time, tors_3, self.col[i]+'-', label='%s torsion 84 pc' % sim_id,
+#                   alpha=self.alf[i])
+
+        self.i += 1
+
+    def final(self, fig_path, fig_name):
+
+        axs = self.axes.ravel()
+
+        for ax in axs:
+            ax.set_xlim([self.t0, self.t1])
+            ax.grid()
+            ax.legend(loc='best')
+
+        axs[0].set_xticklabels([])
+        axs[1].set_xticklabels([])
+        axs[2].set_xticklabels([])
+        axs[-1].set_xlabel('time [s]')
+
+        self.fig.tight_layout()
+        self.fig.subplots_adjust(hspace=0.06)
+        self.fig.subplots_adjust(top=0.98)
+
+        if not os.path.exists(fig_path):
+            os.makedirs(fig_path)
+        fname = os.path.join(fig_path, fig_name)
+        print('saving: %s ...' % fname, end='')
+        self.fig.savefig(fname)#.encode('latin-1')
+        print('done')
+        self.fig.clear()
+
+def plot_dlc01_powercurve(sim_ids, post_dirs, run_dirs, fig_dir_base):
+    """
+    Create power curve based on steady DLC01 results
+    Use the same format as for HS2 for easy comparison!
+    """
+
+
+
+def plot_dlc00(sim_ids, post_dirs, run_dirs, fig_dir_base=None, labels=None,
+               cnames=['dlc00_stair_wsp04_25_noturb.htc',
+                       'dlc00_ramp_wsp04_25_04_noturb.htc'], figsize=(14,11)):
+    """
+    This version is an update over plot_staircase.
+    """
+
+    stairs = []
+    # if sim_id is a list, combine the two dataframes into one
+    if type(sim_ids).__name__ == 'list':
+        for ii, sim_id in enumerate(sim_ids):
+            if isinstance(post_dirs, list):
+                post_dir = post_dirs[ii]
+            else:
+                post_dir = post_dirs
+            stairs.append(sim.Cases(post_dir, sim_id, rem_failed=True))
+    else:
+        sim_id = sim_ids
+        post_dir = post_dirs
+        stairs.append(sim.Cases(post_dir, sim_id, rem_failed=True))
+
+    for cname in cnames:
+        fp = PlotPerf(figsize=figsize)
+        for i, cc in enumerate(stairs):
+            if isinstance(cname, list):
+                _cname = cname[i]
+            else:
+                _cname = cname
+            if _cname in cc.cases_fail:
+                print('no result for %s' % cc.sim_id)
+                continue
+            cc.change_results_dir(run_dirs[i])
+            try:
+                res = cc.load_result_file(cc.cases[_cname])
+            except KeyError:
+                for k in sorted(cc.cases.keys()):
+                    print(k)
+                print('-'*79)
+                print(cc.sim_id, _cname)
+                print('-'*79)
+                raise
+            if labels is not None:
+                label = labels[i]
+            else:
+                label = cc.sim_id
+            fp.plot(res, label)
+        dlcf = 'dlc' + cc.cases[_cname]['[DLC]']
+        fig_path = os.path.join(fig_dir_base, dlcf)
+        fp.final(fig_path, _cname.replace('.htc', '.png'))
+
+def plot_staircase(sim_ids, post_dirs, run_dirs, fig_dir_base=None,
+                   cname='dlc00_stair_wsp04_25_noturb.htc'):
+    """
+    Default stair and ramp names:
+
+    dlc00_stair_wsp04_25_noturb
+    dlc00_ramp_wsp04_25_04_noturb
+    """
+
+    stairs = []
+
+    col = ['r', 'k']
+    alf = [1.0, 0.7]
+
+    # if sim_id is a list, combine the two dataframes into one
+    if type(sim_ids).__name__ == 'list':
+        for ii, sim_id in enumerate(sim_ids):
+            if isinstance(post_dirs, list):
+                post_dir = post_dirs[ii]
+            else:
+                post_dir = post_dirs
+            stairs.append(sim.Cases(post_dir, sim_id, rem_failed=True))
+    else:
+        sim_id = sim_ids
+        sim_ids = [sim_id]
+        post_dir = post_dirs
+        stairs.append(sim.Cases(post_dir, sim_id, rem_failed=True))
+
+    fig, axes = mplutils.make_fig(nrows=3, ncols=1, figsize=(14,10))
+    ax = axes.ravel()
+
+    for i, cc in enumerate(stairs):
+        if cname in cc.cases_fail:
+            print('no result for %s' % cc.sim_id)
+            continue
+        cc.change_results_dir(run_dirs[i])
+        res = cc.load_result_file(cc.cases[cname])
+        respath = cc.cases[cname]['[run_dir]']
+        fname = os.path.join(respath, cname)
+        df_respost = pd.read_hdf(fname + '_postres.h5', 'table')
+        sim_id = cc.sim_id
+        time = res.sig[:,0]
+        t0, t1 = time[0], time[-1]
+
+        # find the wind speed
+        for channame, chan in res.ch_dict.iteritems():
+            if channame.startswith('windspeed-global-Vy-0.00-0.00'):
+                break
+        wind = res.sig[:,chan['chi']]
+        chi = res.ch_dict['bearing-pitch1-angle-deg']['chi']
+        pitch = res.sig[:,chi]
+
+        chi = res.ch_dict['bearing-shaft_rot-angle_speed-rpm']['chi']
+        rpm = res.sig[:,chi]
+
+        chi = res.ch_dict['bearing-pitch1-angle-deg']['chi']
+        pitch = res.sig[:,chi]
+
+        chi = res.ch_dict['tower-tower-node-001-momentvec-x']['chi']
+        tx = res.sig[:,chi]
+
+        chi = res.ch_dict['tower-tower-node-001-momentvec-y']['chi']
+        ty = res.sig[:,chi]
+
+        chi = res.ch_dict['DLL-2-inpvec-2']['chi']
+        power = res.sig[:,chi]
+
+        chi = res.ch_dict['DLL-2-inpvec-2']['chi']
+        power_mech = df_respost['stats-shaft-power']
+
+        ax[0].plot(time, wind, col[i]+'--', label='%s wind speed' % sim_id,
+                   alpha=alf[i])
+        ax[0].plot(time, pitch, col[i]+'-.', label='%s pitch' % sim_id,
+                   alpha=alf[i])
+        ax[0].plot(time, rpm, col[i]+'-', label='%s RPM' % sim_id,
+                   alpha=alf[i])
+
+        ax[1].plot(time, tx, col[i]+'--', label='%s Tower FA' % sim_id,
+                   alpha=alf[i])
+        ax[1].plot(time, ty, col[i]+'-', label='%s Tower SS' % sim_id,
+                   alpha=alf[i])
+
+        ax[2].plot(time, power/1e6, col[i]+'-', label='%s El Power' % sim_id,
+                   alpha=alf[i])
+        ax[2].plot(time, power_mech/1e3, col[i]+'-', alpha=alf[i],
+                   label='%s Mech Power' % sim_id)
+
+    ax[0].set_xlim([t0, t1])
+    ax[0].grid()
+    ax[0].legend(loc='best')
+    ax[0].set_xticklabels([])
+#    ax[0].set_xlabel('time [s]')
+
+    ax[1].set_xlim([t0, t1])
+    ax[1].grid()
+    ax[1].legend(loc='best')
+    ax[1].set_xticklabels([])
+#    ax[1].set_xlabel('time [s]')
+
+    ax[2].set_xlim([t0, t1])
+    ax[2].grid()
+    ax[2].legend(loc='best')
+    ax[2].set_xlabel('time [s]')
+
+    fig.tight_layout()
+    fig.subplots_adjust(hspace=0.06)
+    fig.subplots_adjust(top=0.92)
+
+    if not os.path.exists(fig_dir_base):
+        os.makedirs(fig_dir_base)
+    fig_path = os.path.join(fig_dir_base, '-'.join(sim_ids) + '_stair.png')
+    print('saving: %s ...' % fig_path, end='')
+    fig.savefig(fig_path)#.encode('latin-1')
+    print('done')
+    fig.clear()
+
+if __name__ == '__main__':
+
+    # auto configure directories: assume you are running in the root of the
+    # relevant HAWC2 model
+    # and assume we are in a simulation case of a certain turbine/project
+    P_RUN, P_SOURCE, PROJECT, sim_id, P_MASTERFILE, MASTERFILE, POST_DIR \
+        = dlcdefs.configure_dirs()
+
+    # -------------------------------------------------------------------------
+#    # manually configure all the dirs
+#    p_root_remote = '/mnt/hawc2sim'
+#    p_root_local = '/home/dave/DTU/Projects/AVATAR/'
+#    # project name, sim_id, master file name
+#    PROJECT = 'DTU10MW'
+#    sim_id = 'C0014'
+#    MASTERFILE = 'dtu10mw_master_C0014.htc'
+#    # MODEL SOURCES, exchange file sources
+#    P_RUN = os.path.join(p_root_remote, PROJECT, sim_id+'/')
+#    P_SOURCE = os.path.join(p_root_local, PROJECT)
+#    # location of the master file
+#    P_MASTERFILE = os.path.join(p_root_local, PROJECT, 'htc', '_master/')
+#    # location of the pre and post processing data
+#    POST_DIR = os.path.join(p_root_remote, PROJECT, 'python-prepost-data/')
+#    force_dir = P_RUN
+    # -------------------------------------------------------------------------
+
+    # PLOT STATS, when comparing cases
+    sim_ids = [sim_id]
+    run_dirs = [P_RUN]
+    figdir = os.path.join(P_RUN, '..', 'figures/%s' % sim_id)
+
+    print('='*79)
+    print('   P_RUN: %s' % P_RUN)
+    print('P_SOURCE: %s' % P_SOURCE)
+    print(' PROJECT: %s' % PROJECT)
+    print('  sim_id: %s' % sim_id)
+    print('  master: %s' % MASTERFILE)
+    print('  figdir: %s' % figdir)
+    print('='*79)
+
+    plot_stats2(sim_ids, POST_DIR, fig_dir_base=figdir)
+    plot_dlc00(sim_ids, POST_DIR, run_dirs, fig_dir_base=figdir)
diff --git a/wetb/prepost/dlctemplate.py b/wetb/prepost/dlctemplate.py
new file mode 100755
index 0000000000000000000000000000000000000000..00be5a349a5bcc2d176aac957ea2912e7414620d
--- /dev/null
+++ b/wetb/prepost/dlctemplate.py
@@ -0,0 +1,462 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Sep 18 13:00:25 2014
+
+@author: dave
+"""
+from __future__ import division
+from __future__ import print_function
+
+import os
+import socket
+from argparse import ArgumentParser
+
+#import numpy as np
+#import pandas as pd
+from matplotlib import pyplot as plt
+#import matplotlib as mpl
+
+import Simulations as sim
+#import misc
+#import windIO
+import dlcdefs
+import dlcplots
+
+plt.rc('font', family='serif')
+plt.rc('xtick', labelsize=10)
+plt.rc('ytick', labelsize=10)
+plt.rc('axes', labelsize=12)
+# on Gorm tex printing doesn't work
+if not socket.gethostname()[:2] == 'g-':
+    plt.rc('text', usetex=True)
+plt.rc('legend', fontsize=11)
+plt.rc('legend', numpoints=1)
+plt.rc('legend', borderaxespad=0)
+
+# =============================================================================
+### MODEL
+# =============================================================================
+
+def master_tags(sim_id, runmethod='local', silent=False, verbose=False):
+    """
+    Create HtcMaster() object
+    =========================
+
+    the HtcMaster contains all the settings to start creating htc files.
+    It holds the master file, server paths and more.
+
+    The master.tags dictionary holds the tags that do not vary between
+    cases. Variable tags, i.e. tags that are a function of other variables
+    or other tags, are defined in the function variable_tag_func().
+
+    It is considered good practice to define default values for all the
+    variable tags in master_tags.
+
+    Returns
+    -------
+
+    master : sim.HtcMaster
+        HtcMaster object with the default and static tags set.
+
+    """
+
+    # TODO: write a lot of logical tests for the tags!!
+    # TODO: tests to check if the dirs are set up properly (ending slashes...)
+    # FIXME: some tags are still variable! Only static tags here that do
+    # not depend on any other variable that can change
+
+    master = sim.HtcMaster(verbose=verbose, silent=silent)
+    # set the default tags
+    master = dlcdefs.tags_defaults(master)
+
+    # =========================================================================
+    # SOURCE FILES
+    # =========================================================================
+#    # TODO: move to variable_tag
+#    rpl = (p_root, project, sim_id)
+#    if runmethod in ['local', 'local-script', 'none', 'local-ram']:
+#        master.tags['[run_dir]'] = '%s/%s/%s/' % rpl
+#    elif runmethod == 'windows-script':
+#        master.tags['[run_dir]'] = '%s/%s/%s/' % rpl
+#    elif runmethod == 'gorm':
+#        master.tags['[run_dir]'] = '%s/%s/%s/' % rpl
+#    elif runmethod == 'jess':
+#        master.tags['[run_dir]'] = '%s/%s/%s/' % rpl
+#    else:
+#        msg='unsupported runmethod, options: none, local, gorm or opt'
+#        raise ValueError, msg
+
+    master.tags['[master_htc_file]'] = MASTERFILE
+    master.tags['[master_htc_dir]'] = P_MASTERFILE
+    # directory to data, htc, SOURCE DIR
+    if P_SOURCE[-1] == os.sep:
+        master.tags['[model_dir_local]']  = P_SOURCE
+    else:
+        master.tags['[model_dir_local]']  = P_SOURCE + os.sep
+    if P_RUN[-1] == os.sep:
+        master.tags['[run_dir]'] = P_RUN
+    else:
+        master.tags['[run_dir]'] = P_RUN + os.sep
+
+    master.tags['[post_dir]'] = POST_DIR
+    master.tags['[sim_id]'] = sim_id
+    # set the model_zip tag to include the sim_id
+    master.tags['[model_zip]'] = PROJECT
+    master.tags['[model_zip]'] += '_' + master.tags['[sim_id]'] + '.zip'
+    # -------------------------------------------------------------------------
+
+    return master
+
+
+def variable_tag_func(master, case_id_short=False):
+    """
+    Function which updates HtcMaster.tags and returns an HtcMaster object
+
+    Only use lower case characters for case_id since HAWC2 result and log
+    files are always in lower case characters.
+
+    BE CAREFUL: if you change a master tag that is used to dynamically
+    calculate another tag, that change will be propagated over all cases,
+    for example:
+    master.tags['tag1'] *= master.tags['tag2']*master.tags['tag3']
+    will accumulate over each new case. After 20 cases
+    master.tags['tag1'] = (master.tags['tag2']*master.tags['tag3'])^20
+    which is not what you want; instead you should do
+    master.tags['tag1'] = tag1_base*master.tags['tag2']*master.tags['tag3']
+
+    This example is based on reading the default DLC spreadsheets, and is
+    already included in the dlcdefs.excel_stabcon
+    """
+
+    mt = master.tags
+
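+    # all paths below are relative to the HAWC2 model root; each DLC case
+    # folder gets its own res/log/htc/pbs/iter sub-directory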
+    dlc_case = mt['[Case folder]']
+    mt['[data_dir]'] = 'data/'
+    mt['[res_dir]'] = 'res/%s/' % dlc_case
+    mt['[log_dir]'] = 'logfiles/%s/' % dlc_case
+    mt['[htc_dir]'] = 'htc/%s/' % dlc_case
+    mt['[case_id]'] = mt['[Case id.]']
+    mt['[time_stop]'] = mt['[time stop]']
+    mt['[turb_base_name]'] = mt['[Turb base name]']
+    mt['[DLC]'] = mt['[Case id.]'].split('_')[0][3:]
+    mt['[pbs_out_dir]'] = 'pbs_out/%s/' % dlc_case
+    mt['[pbs_in_dir]'] = 'pbs_in/%s/' % dlc_case
+    mt['[iter_dir]'] = 'iter/%s/' % dlc_case
+    if mt['[eigen_analysis]']:
+        rpl = os.path.join(dlc_case, mt['[Case id.]'])
+        mt['[eigenfreq_dir]'] = 'res_eigen/%s/' % rpl
+    mt['[duration]'] = str(float(mt['[time_stop]']) - float(mt['[t0]']))
+    # replace nan with empty
+    for ii, jj in mt.iteritems():
+        if jj == 'nan':
+            mt[ii] = ''
+
+    return master
+
+# =============================================================================
+### PRE- POST
+# =============================================================================
+
+def launch_dlcs_excel(sim_id):
+    """
+    Launch load cases defined in Excel files
+    """
+
+    iter_dict = dict()
+    iter_dict['[empty]'] = [False]
+
+    # see if a htc/DLCs dir exists
+    dlcs_dir = os.path.join(P_SOURCE, 'htc', 'DLCs')
+    if os.path.exists(dlcs_dir):
+        opt_tags = dlcdefs.excel_stabcon(dlcs_dir)
+    else:
+        opt_tags = dlcdefs.excel_stabcon(os.path.join(P_SOURCE, 'htc'))
+
+    if len(opt_tags) < 1:
+        raise ValueError('There is not a single case defined. Make sure '
+                         'the DLC spreadsheets are configured properly.')
+
+    # add all the root files, except anything with *.zip
+    f_ziproot = []
+    for (dirpath, dirnames, fnames) in os.walk(P_SOURCE):
+        # remove all zip files
+        for i, fname in enumerate(fnames):
+            if fname.endswith('.zip'):
+                fnames.pop(i)
+        f_ziproot.extend(fnames)
+        break
+    # and add those files
+    for opt in opt_tags:
+        opt['[zip_root_files]'] = f_ziproot
+
+    runmethod = 'gorm'
+#    runmethod = 'local-script'
+#    runmethod = 'windows-script'
+#    runmethod = 'jess'
+    master = master_tags(sim_id, runmethod=runmethod)
+    master.tags['[sim_id]'] = sim_id
+    master.output_dirs.append('[Case folder]')
+    master.output_dirs.append('[Case id.]')
+
+    # TODO: copy master and DLC exchange files to p_root too!!
+
+    # all tags set in master_tags will be overwritten by the values set in
+    # variable_tag_func(), iter_dict and opt_tags
+    # values set in iter_dict have precedence over opt_tags
+    # variable_tag_func() has precedence over iter_dict, which has precedence
+    # over opt_tags. So opt_tags comes last
+    # variable_tag_func is not required because everything is already done
+    # in dlcdefs.excel_stabcon
+    no_variable_tag_func = None
+    sim.prepare_launch(iter_dict, opt_tags, master, no_variable_tag_func,
+                       write_htc=True, runmethod=runmethod, verbose=False,
+                       copyback_turb=True, msg='', update_cases=False,
+                       ignore_non_unique=False, run_only_new=False,
+                       pbs_fname_appendix=False, short_job_names=False)
+
+
+def launch_param(sim_id):
+    """
+    Launch parameter variations defined according to the Simulations syntax
+    """
+    # MODEL SOURCES, exchange file sources
+#    p_local = '/mnt/vea-group/AED/STABCON/SIM/NREL5MW'
+#    p_local = '%s/%s' % (P_SOURCE, PROJECT)
+    # target run dir (is defined in the master_tags)
+#    p_root = '/mnt/gorm/HAWC2/NREL5MW'
+
+    iter_dict = dict()
+    iter_dict['[Windspeed]'] = [False]
+
+    opt_tags = []
+
+    runmethod = 'gorm'
+#    runmethod = 'local'
+#    runmethod = 'linux-script'
+#    runmethod = 'windows-script'
+#    runmethod = 'jess'
+    master = master_tags(sim_id, runmethod=runmethod)
+    master.tags['[hawc2_exe]'] = 'hawc2-latest'
+    master.tags['[sim_id]'] = sim_id
+    master.output_dirs.append('[Case folder]')
+    master.output_dirs.append('[Case id.]')
+
+    # TODO: copy master and DLC exchange files to p_root too!!
+
+    # all tags set in master_tags will be overwritten by the values set in
+    # variable_tag_func(), iter_dict and opt_tags
+    # values set in iter_dict have precedence over opt_tags
+    # variable_tag_func() has precedence over iter_dict, which has precedence
+    # over opt_tags. So opt_tags comes last
+    sim.prepare_launch(iter_dict, opt_tags, master, variable_tag_func,
+                       write_htc=True, runmethod=runmethod, verbose=False,
+                       copyback_turb=False, msg='', update_cases=False,
+                       ignore_non_unique=False, run_only_new=False,
+                       pbs_fname_appendix=False, short_job_names=False)
+
+
+def post_launch(sim_id, statistics=True, rem_failed=True, check_logs=True,
+                force_dir=False, update=False, saveinterval=2000, csv=False,
+                fatigue_cycles=False, m=[1, 3, 4, 5, 6, 8, 10, 12, 14],
+                neq=1e6, no_bins=46, years=20.0, fatigue=True, nn_twb=1,
+                nn_twt=20, nn_blr=4, A=None, save_new_sigs=False,
+                envelopeturbine=False, envelopeblade=False, save_iter=False):
+
+    # =========================================================================
+    # check logfiles, results files, pbs output files
+    # logfile analysis is written to a csv file in logfiles directory
+    # =========================================================================
+    # load the file saved in post_dir
+    config = {}
+    config['Weibull'] = {}
+    config['Weibull']['Vr'] = 11.
+    config['Weibull']['Vref'] = 50
+    config['nn_shaft'] = 4
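+    # configuration passed on to sim.Cases: Weibull wind distribution
+    # parameters and the shaft node number (presumably used for the AEP and
+    # mechanical power post-processing)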
+    cc = sim.Cases(POST_DIR, sim_id, rem_failed=rem_failed, config=config)
+    cc.force_lower_case_id()
+
+    if force_dir:
+        for case in cc.cases:
+            cc.cases[case]['[post_dir]'] = POST_DIR
+            cc.cases[case]['[run_dir]'] = force_dir
+
+    if check_logs:
+        cc.post_launch(save_iter=save_iter)
+    elif rem_failed:
+        cc.remove_failed()
+
+    # using suffix is only relevant if we have more cases than the save interval
+    if len(cc.cases) > saveinterval:
+        suffix = True
+    else:
+        suffix = False
+
+    df_stats, df_AEP, df_Leq = None, None, None
+
+    if statistics:
+        # for the default load case analysis, add mechanical power
+#        add = {'ch1_name':'shaft-shaft-node-004-momentvec-z',
+#               'ch2_name':'Omega',
+#               'ch_name_add':'mechanical-power-floater-floater-001',
+#               'factor':1.0, 'operator':'*'}
+        # for the AVATAR DLB, the following resultants are defined:
+        chs_resultant = [['tower-tower-node-%03i-momentvec-x' % nn_twb,
+                          'tower-tower-node-%03i-momentvec-y' % nn_twb],
+                         ['tower-tower-node-%03i-momentvec-x' % nn_twt,
+                          'tower-tower-node-%03i-momentvec-y' % nn_twt],
+                         ['shaft-shaft-node-004-momentvec-x',
+                          'shaft-shaft-node-004-momentvec-z'],
+                         ['shaft-shaft-node-004-momentvec-y',
+                          'shaft-shaft-node-004-momentvec-z'],
+                         ['shaft_nonrotate-shaft-node-004-momentvec-x',
+                          'shaft_nonrotate-shaft-node-004-momentvec-z'],
+                         ['shaft_nonrotate-shaft-node-004-momentvec-y',
+                          'shaft_nonrotate-shaft-node-004-momentvec-z'],
+                         ['blade1-blade1-node-%03i-momentvec-x' % nn_blr,
+                          'blade1-blade1-node-%03i-momentvec-y' % nn_blr],
+                         ['blade2-blade2-node-%03i-momentvec-x' % nn_blr,
+                          'blade2-blade2-node-%03i-momentvec-y' % nn_blr],
+                         ['blade3-blade3-node-%03i-momentvec-x' % nn_blr,
+                          'blade3-blade3-node-%03i-momentvec-y' % nn_blr],
+                         ['hub1-blade1-node-%03i-momentvec-x' % nn_blr,
+                          'hub1-blade1-node-%03i-momentvec-y' % nn_blr],
+                         ['hub2-blade2-node-%03i-momentvec-x' % nn_blr,
+                          'hub2-blade2-node-%03i-momentvec-y' % nn_blr],
+                         ['hub3-blade3-node-%03i-momentvec-x' % nn_blr,
+                          'hub3-blade3-node-%03i-momentvec-y' % nn_blr]]
+        i0, i1 = 0, -1
+
+        tags = cc.cases[cc.cases.keys()[0]].keys()
+        add = None
+        # general statistics for all channels
+        df_stats = cc.statistics(calc_mech_power=True, i0=i0, i1=i1,
+                                 tags=tags, add_sensor=add, ch_fatigue=None,
+                                 update=update, saveinterval=saveinterval,
+                                 suffix=suffix, fatigue_cycles=fatigue_cycles,
+                                 csv=csv, m=m, neq=neq, no_bins=no_bins,
+                                 chs_resultant=chs_resultant, A=A,
+                                 save_new_sigs=save_new_sigs)
+        # annual energy production
+        df_AEP = cc.AEP(df_stats, csv=csv, update=update, save=True)
+
+    if envelopeblade:
+        ch_list = []
+        for iblade in range(1, 4):
+            for i in range(1, 18):
+                rpl = (iblade, iblade, i)
+                ch_list.append(['blade%i-blade%i-node-%3.3i-momentvec-x' % rpl,
+                                'blade%i-blade%i-node-%3.3i-momentvec-y' % rpl,
+                                'blade%i-blade%i-node-%3.3i-momentvec-z' % rpl,
+                                'blade%i-blade%i-node-%3.3i-forcevec-x' % rpl,
+                                'blade%i-blade%i-node-%3.3i-forcevec-y' % rpl,
+                                'blade%i-blade%i-node-%3.3i-forcevec-z' % rpl])
+        cc.envelope(ch_list=ch_list, append='_blade')
+
+    if envelopeturbine:
+        ch_list = [['tower-tower-node-001-momentvec-x',
+                    'tower-tower-node-001-momentvec-y',
+                    'tower-tower-node-001-momentvec-z'],
+                   ['tower-tower-node-022-momentvec-x',
+                   'tower-tower-node-022-momentvec-y',
+                   'tower-tower-node-022-momentvec-z',
+                   'tower-tower-node-022-forcevec-x',
+                   'tower-tower-node-022-forcevec-y',
+                   'tower-tower-node-022-forcevec-z'],
+                   ['hub1-hub1-node-001-momentvec-x',
+                   'hub1-hub1-node-001-momentvec-y',
+                   'hub1-hub1-node-001-momentvec-z']]
+        cc.envelope(ch_list=ch_list, append='_turbine')
+    if fatigue:
+        # load the statistics in case they are missing
+        if not statistics:
+            df_stats, Leq_df, AEP_df = cc.load_stats()
+        # life time equivalent load for all channels
+        df_Leq = cc.fatigue_lifetime(df_stats, neq, csv=csv, update=update,
+                                     years=years, save=True)
+
+    return df_stats, df_AEP, df_Leq
+
+
+if __name__ == '__main__':
+
+    parser = ArgumentParser(description="pre- or post-process DLCs")
+    parser.add_argument('--prep', action='store_true', default=False,
+                        dest='prep', help='create htc, pbs, files')
+    parser.add_argument('--check_logs', action='store_true', default=False,
+                        dest='check_logs', help='check the log files')
+    parser.add_argument('--stats', action='store_true', default=False,
+                        dest='stats', help='calculate statistics')
+    parser.add_argument('--fatigue', action='store_true', default=False,
+                        dest='fatigue', help='calculate Leq for a full DLC')
+    parser.add_argument('--csv', action='store_true', default=False,
+                        dest='csv', help='Save data also as csv file')
+    parser.add_argument('--years', type=float, default=20.0, action='store',
+                        dest='years', help='Total life time in years')
+    parser.add_argument('--no_bins', type=float, default=46.0, action='store',
+                        dest='no_bins', help='Number of bins for fatigue loads')
+    parser.add_argument('--neq', type=float, default=1e6, action='store',
+                        dest='neq', help='Equivalent cycles neq')
+    parser.add_argument('--nn_twt', type=float, default=20, action='store',
+                        dest='nn_twt', help='Node number tower top')
+    parser.add_argument('--nn_blr', type=float, default=4, action='store',
+                        dest='nn_blr', help='Node number blade root')
+    parser.add_argument('--rotarea', type=float, default=4, action='store',
+                        dest='rotarea', help='Rotor area for C_T, C_P')
+    parser.add_argument('--save_new_sigs', default=False, action='store_true',
+                        dest='save_new_sigs', help='Save post-processed sigs')
+    parser.add_argument('--dlcplot', default=False, action='store_true',
+                        dest='dlcplot', help='Plot DLC load basis results')
+    parser.add_argument('--envelopeblade', default=False, action='store_true',
+                        dest='envelopeblade', help='Compute envelopeblade')
+    parser.add_argument('--envelopeturbine', default=False, action='store_true',
+                        dest='envelopeturbine', help='Compute envelopeturbine')
+    opt = parser.parse_args()
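+
+    # typical invocations from the root of the HAWC2 model directory (the
+    # script name below is illustrative; use the actual name of this file):
+    #   python dlctemplate.py --prep
+    #   python dlctemplate.py --check_logs --stats --fatigue --csv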
+
+    # auto configure directories: assume we are running from the root of the
+    # relevant HAWC2 model, in a simulation case of a certain turbine/project
+    P_RUN, P_SOURCE, PROJECT, sim_id, P_MASTERFILE, MASTERFILE, POST_DIR \
+        = dlcdefs.configure_dirs(verbose=True)
+
+    # TODO: use arguments to determine the scenario:
+    # --plots, --report, --...
+
+    # -------------------------------------------------------------------------
+#    # manually configure all the dirs
+#    p_root_remote = '/mnt/hawc2sim'
+#    p_root_local = '/home/dave/DTU/Projects/AVATAR/'
+#    # project name, sim_id, master file name
+#    PROJECT = 'DTU10MW'
+#    sim_id = 'C0014'
+#    MASTERFILE = 'dtu10mw_master_C0014.htc'
+#    # MODEL SOURCES, exchange file sources
+#    P_RUN = os.path.join(p_root_remote, PROJECT, sim_id+'/')
+#    P_SOURCE = os.path.join(p_root_local, PROJECT)
+#    # location of the master file
+#    P_MASTERFILE = os.path.join(p_root_local, PROJECT, 'htc', '_master/')
+#    # location of the pre and post processing data
+#    POST_DIR = os.path.join(p_root_remote, PROJECT, 'python-prepost-data/')
+#    force_dir = P_RUN
+#    launch_dlcs_excel(sim_id)
+#    post_launch(sim_id, check_logs=True, update=False, force_dir=force_dir,
+#                saveinterval=2000, csv=False)
+    # -------------------------------------------------------------------------
+
+    # create HTC files and PBS launch scripts (*.p)
+    if opt.prep:
+        print('Start creating all the htc files and pbs_in files...')
+        launch_dlcs_excel(sim_id)
+    # post processing: check log files, calculate statistics
+    if opt.check_logs or opt.stats or opt.fatigue or opt.envelopeblade or opt.envelopeturbine:
+        post_launch(sim_id, check_logs=opt.check_logs, update=False,
+                    force_dir=P_RUN, saveinterval=2000, csv=opt.csv,
+                    statistics=opt.stats, years=opt.years, neq=opt.neq,
+                    fatigue=opt.fatigue, fatigue_cycles=True, A=opt.rotarea,
+                    no_bins=opt.no_bins, nn_blr=opt.nn_blr, nn_twt=opt.nn_twt,
+                    save_new_sigs=opt.save_new_sigs, save_iter=False,
+                    envelopeturbine=opt.envelopeturbine,
+                    envelopeblade=opt.envelopeblade)
+    if opt.dlcplot:
+        sim_ids = [sim_id]
+        figdir = os.path.join(P_RUN, '..', 'figures/%s' % '-'.join(sim_ids))
+        dlcplots.plot_stats2(sim_ids, [POST_DIR], fig_dir_base=figdir)
diff --git a/wetb/prepost/filters.py b/wetb/prepost/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..3764aeeb725b54c029b565d440c4a24017bfa2f6
--- /dev/null
+++ b/wetb/prepost/filters.py
@@ -0,0 +1,457 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Jan 20 18:14:02 2013
+
+@author: dave
+"""
+
+import numpy as np
+import scipy as sp
+
+import DataChecks as chk
+from misc import calc_sample_rate
+import mplutils
+
+
+class Filters:
+
+    def __init__(self):
+        pass
+
+
+    def smooth(self, x, window_len=11, window='hanning'):
+        """
+        Smooth the data using a window with requested size
+        ==================================================
+
+        This method is based on the convolution of a scaled window with the
+        signal. The signal is prepared by introducing reflected copies of the
+        signal (with the window size) in both ends so that transient parts are
+        minimized in the begining and end part of the output signal.
+
+        input:
+            x: the input signal
+            window_len: the dimension of the smoothing window; should be an odd
+            integer
+            window: the type of window from 'flat', 'hanning', 'hamming',
+            'bartlett' or 'blackman'; a flat window will produce a moving
+            average smoothing.
+
+        output:
+            the smoothed signal; note that it is longer than the input by
+            window_len-1 samples due to the reflected padding
+
+        example:
+
+        >>> t = np.arange(-2, 2, 0.1)
+        >>> x = np.sin(t) + np.random.randn(len(t))*0.1
+        >>> y = Filters().smooth(x)
+
+        see also:
+
+        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
+        numpy.convolve, scipy.signal.lfilter
+
+        TODO: the window parameter could be the window itself if an array
+        instead of a string
+
+        SOURCE: http://www.scipy.org/Cookbook/SignalSmooth
+        """
+
+        if x.ndim != 1:
+            raise ValueError("smooth only accepts 1-dimensional arrays.")
+
+        if x.size < window_len:
+            msg = "Input vector needs to be bigger than window size."
+            raise ValueError(msg)
+
+        if window_len<3:
+            return x
+
+        windowlist = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
+        if window not in windowlist:
+            msg = "Window should be 'flat', 'hanning', 'hamming', 'bartlett',"
+            msg += " or 'blackman'"
+            raise ValueError(msg)
+
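+        # pad the signal with reflected copies of itself at both ends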
+        s = np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
+        #print(len(s))
+        if window == 'flat': #moving average
+            w = np.ones(window_len,'d')
+        else:
+            w = getattr(np, window)(window_len)
+
+        y = np.convolve(w/w.sum(), s, mode='valid')
+        return y
+
+    def butter(self, time, data, **kwargs):
+        """
+        Source:
+        https://azitech.wordpress.com/2011/03/15/
+        designing-a-butterworth-low-pass-filter-with-scipy/
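+
+        A minimal usage sketch (illustrative only, the values are not taken
+        from this module):
+
+        >>> filt = Filters()
+        >>> data_filt = filt.butter(time, data, cutoff_hz=2.0, sample_rate=100)
+
+        where time and data are 1D arrays, cutoff_hz is the low-pass cutoff
+        frequency in Hz, and sample_rate is optional (it is derived from time
+        when omitted).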
+        """
+
+        sample_rate = kwargs.get('sample_rate', None)
+        if not sample_rate:
+            sample_rate = calc_sample_rate(time)
+
+        # The cutoff frequency of the filter.
+        cutoff_hz = kwargs.get('cutoff_hz', 1.0)
+
+        # design filter
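+        # normalize the cutoff frequency with respect to the Nyquist frequency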
+        norm_pass = cutoff_hz/(sample_rate/2.0)
+        norm_stop = 1.5*norm_pass
+        (N, Wn) = sp.signal.buttord(wp=norm_pass, ws=norm_stop, gpass=2,
+                                    gstop=30, analog=0)
+        (b, a) = sp.signal.butter(N, Wn, btype='low', analog=0, output='ba')
+
+        # filtered output
+        #zi = signal.lfiltic(b, a, x[0:5], x[0:5])
+        #(y, zi) = signal.lfilter(b, a, x, zi=zi)
+        data_filt = sp.signal.lfilter(b, a, data)
+
+        return data_filt
+
+    def fir(self, time, data, **kwargs):
+        """
+        Based on the example from the SciPy Cookbook, see
+        http://www.scipy.org/Cookbook/FIRFilter
+
+        Parameters
+        ----------
+
+        time : ndarray(n)
+
+        data : ndarray(n)
+
+        plot : boolean, default=False
+
+        figpath : str, default=False
+
+        figfile : str, default=False
+
+        sample_rate : int, default=None
+            If None, sample rate will be calculated from the given signal
+
+        freq_trans_width : float, default=1
+            The desired width of the transition from pass to stop,
+            relative to the Nyquist rate.
+
+        ripple_db : float, default=10
+            The desired attenuation in the stop band, in dB.
+
+        cutoff_hz : float, default=10
+            Frequencies above cutoff_hz are filtered out
+
+        Returns
+        -------
+
+        filtered_x : ndarray(n)
+            filtered signal; the first N-1 samples are corrupted by the
+            initial conditions
+
+        N : int
+            order of the firwin filter
+
+        delay : float
+            phase delay due to the filtering process
+
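+        Examples
+        --------
+
+        A minimal sketch (assuming time and data are 1D arrays of equal
+        length; the keyword values are illustrative):
+
+        >>> filt = Filters()
+        >>> filtered_x, N, delay = filt.fir(time, data, cutoff_hz=5.0,
+        ...                                 ripple_db=20, freq_trans_width=0.5)
+        >>> time_good = time[N-1:] - delay
+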
+        """
+
+        plot = kwargs.get('plot', False)
+        figpath = kwargs.get('figpath', False)
+        figfile = kwargs.get('figfile', False)
+
+        sample_rate = kwargs.get('sample_rate', None)
+        # The desired width of the transition from pass to stop,
+        # relative to the Nyquist rate.
+        freq_trans_width = kwargs.get('freq_trans_width', 1)
+
+        # The desired attenuation in the stop band, in dB.
+        ripple_db = kwargs.get('ripple_db', 10)
+
+        # The cutoff frequency of the filter.
+        cutoff_hz = kwargs.get('cutoff_hz', 10)
+
+        chk.array_1d(time)
+        chk.array_1d(data)
+
+        if not sample_rate:
+            sample_rate = calc_sample_rate(time)
+
+        #------------------------------------------------
+        # Create a FIR filter and apply it to the data
+        #------------------------------------------------
+
+        # The Nyquist rate of the signal.
+        nyq_rate = sample_rate / 2.0
+
+        # The desired width of the transition from pass to stop,
+        # relative to the Nyquist rate.
+        width = freq_trans_width/nyq_rate
+
+        # Compute the order and Kaiser parameter for the FIR filter.
+        N, beta = sp.signal.kaiserord(ripple_db, width)
+
+        # Use firwin with a Kaiser window to create a lowpass FIR filter.
+        taps = sp.signal.firwin(N, cutoff_hz/nyq_rate,
+                                  window=('kaiser', beta))
+
+        # Use lfilter to filter x with the FIR filter.
+        filtered_x = sp.signal.lfilter(taps, 1.0, data)
+
+        # The phase delay of the filtered signal.
+        delay = 0.5 * (N-1) / sample_rate
+
+#        # the filtered signal, shifted to compensate for the phase delay.
+#        time_shifted = time-delay
+#        # the "good" part of the filtered signal.  The first N-1
+#        # samples are "corrupted" by the initial conditions.
+#        time_good = time[N-1:] - delay
+
+        if plot:
+            self.plot_fir(figpath, figfile, time, data, filtered_x, N, delay,
+                 sample_rate, taps, nyq_rate)
+
+        return filtered_x, N, delay
+
+
+    def plot_fir(self, figpath, figfile, time, data, filtered_x, N, delay,
+                 sample_rate, taps, nyq_rate):
+        """
+        """
+
+        #------------------------------------------------
+        # Setup the figure parameters
+        #------------------------------------------------
+
+        plot = mplutils.A4Tuned()
+        plot.setup(figpath+figfile, nr_plots=3, grandtitle=figfile,
+                         figsize_y=20, wsleft_cm=2.)
+
+        #------------------------------------------------
+        # Plot the FIR filter coefficients.
+        #------------------------------------------------
+        plot_nr = 1
+        ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, plot_nr)
+        ax1.plot(taps, 'bo-', linewidth=2)
+        ax1.set_title('Filter Coefficients (%d taps)' % N)
+        ax1.grid(True)
+
+        #------------------------------------------------
+        # Plot the magnitude response of the filter.
+        #------------------------------------------------
+
+        plot_nr += 1
+        ax2 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, plot_nr)
+
+        w, h = sp.signal.freqz(taps, worN=8000)
+        ax2.plot((w/np.pi)*nyq_rate, np.absolute(h), linewidth=2)
+        ax2.set_xlabel('Frequency (Hz)')
+        ax2.set_ylabel('Gain')
+        ax2.set_title('Frequency Response')
+        ax2.set_ylim(-0.05, 1.05)
+#        ax2.grid(True)
+
+        # in order to place the next axes inside the following figure, first
+        # determine the ax2 bounding box
+        # points: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
+        ax2box = ax2.get_window_extent().get_points()
+        # seems to be expressed in pixels so convert to relative coordinates
+#        print ax2box
+        # figure size in pixels
+        figsize_x_pix = plot.figsize_x*plot.dpi
+        figsize_y_pix = plot.figsize_y*plot.dpi
+        # ax2 box in relative coordinates
+        ax2box[:,0] = ax2box[:,0] / figsize_x_pix
+        ax2box[:,1] = ax2box[:,1] / figsize_y_pix
+#        print ax2box[0,0], ax2box[1,0], ax2box[0,1], ax2box[1,1]
+        # position the new inset box relative to the ax2 bounding box
+        left   = ax2box[0,0] + ((ax2box[1,0] - ax2box[0,0]) * 0.15)
+        bottom = ax2box[0,1] + ((ax2box[1,1] - ax2box[0,1]) * 0.30)  # x2
+        width  = (ax2box[1,0] - ax2box[0,0]) * 0.35
+        height = (ax2box[1,1] - ax2box[0,1]) * 0.6
+#        print [left, bottom, width, height]
+
+        # left inset plot.
+        # [left, bottom, width, height]
+#        ax2a = plot.fig.add_axes([0.42, 0.6, .45, .25])
+        ax2a = plot.fig.add_axes([left, bottom, width, height])
+        ax2a.plot((w/np.pi)*nyq_rate, np.absolute(h), linewidth=2)
+        ax2a.set_xlim(0,8.0)
+        ax2a.set_ylim(0.9985, 1.001)
+        ax2a.grid(True)
+
+        # right inset plot
+        left   = ax2box[0,0] + ((ax2box[1,0] - ax2box[0,0]) * 0.62)
+        bottom = ax2box[0,1] + ((ax2box[1,1] - ax2box[0,1]) * 0.30)  # x2
+        width  = (ax2box[1,0] - ax2box[0,0]) * 0.35
+        height = (ax2box[1,1] - ax2box[0,1]) * 0.6
+
+        # right inset plot axes
+#        ax2b = plot.fig.add_axes([0.42, 0.25, .45, .25])
+        ax2b = plot.fig.add_axes([left, bottom, width, height])
+        ax2b.plot((w/np.pi)*nyq_rate, np.absolute(h), linewidth=2)
+        ax2b.set_xlim(12.0, 20.0)
+        ax2b.set_ylim(0.0, 0.0025)
+        ax2b.grid(True)
+
+        #------------------------------------------------
+        # Plot the original and filtered signals.
+        #------------------------------------------------
+
+        # The phase delay of the filtered signal.
+        delay = 0.5 * (N-1) / sample_rate
+
+        plot_nr += 1
+        ax3 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, plot_nr)
+        # Plot the original signal.
+        ax3.plot(time, data, label='original signal')
+        # Plot the filtered signal, shifted to compensate for the phase delay.
+        ax3.plot(time-delay, filtered_x, 'r-', label='filtered signal')
+        # Plot just the "good" part of the filtered signal.  The first N-1
+        # samples are "corrupted" by the initial conditions.
+        ax3.plot(time[N-1:]-delay, filtered_x[N-1:], 'g', linewidth=4)
+
+        ax3.set_xlabel('t')
+        ax3.grid(True)
+
+        plot.save_fig()
+
+
+    def scipy_example(self, time, data, figpath, sample_rate=None):
+        """
+        Example from the SciPy Cookbook, see
+        http://www.scipy.org/Cookbook/FIRFilter
+        """
+
+        chk.array_1d(time)
+        chk.array_1d(data)
+
+        if not sample_rate:
+            sample_rate = calc_sample_rate(time)
+
+        #------------------------------------------------
+        # Create a FIR filter and apply it to the data
+        #------------------------------------------------
+
+        # The Nyquist rate of the signal.
+        nyq_rate = sample_rate / 2.0
+
+        # The desired width of the transition from pass to stop,
+        # relative to the Nyquist rate.  We'll design the filter
+        # with a 5 Hz transition width.
+        width = 5.0/nyq_rate
+
+        # The desired attenuation in the stop band, in dB.
+        ripple_db = 60.0
+
+        # Compute the order and Kaiser parameter for the FIR filter.
+        N, beta = sp.signal.kaiserord(ripple_db, width)
+
+        # The cutoff frequency of the filter.
+        cutoff_hz = 10.0
+
+        # Use firwin with a Kaiser window to create a lowpass FIR filter.
+        taps = sp.signal.firwin(N, cutoff_hz/nyq_rate, window=('kaiser', beta))
+
+        # Use lfilter to filter x with the FIR filter.
+        filtered_x = sp.signal.lfilter(taps, 1.0, data)
+
+        #------------------------------------------------
+        # Setup the figure parameters
+        #------------------------------------------------
+        figfile = 'filterdesign'
+
+        plot = mplutils.A4Tuned()
+        plot.setup(figpath+figfile, nr_plots=3, grandtitle=figfile,
+                         figsize_y=20, wsleft_cm=2.)
+
+        #------------------------------------------------
+        # Plot the FIR filter coefficients.
+        #------------------------------------------------
+        plot_nr = 1
+        ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, plot_nr)
+        ax1.plot(taps, 'bo-', linewidth=2)
+        ax1.set_title('Filter Coefficients (%d taps)' % N)
+        ax1.grid(True)
+
+        #------------------------------------------------
+        # Plot the magnitude response of the filter.
+        #------------------------------------------------
+
+        plot_nr += 1
+        ax2 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, plot_nr)
+
+        w, h = sp.signal.freqz(taps, worN=8000)
+        ax2.plot((w/np.pi)*nyq_rate, np.absolute(h), linewidth=2)
+        ax2.set_xlabel('Frequency (Hz)')
+        ax2.set_ylabel('Gain')
+        ax2.set_title('Frequency Response')
+        ax2.set_ylim(-0.05, 1.05)
+#        ax2.grid(True)
+
+        # in order to place the next axes inside the following figure, first
+        # determine the ax2 bounding box
+        # points: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
+        ax2box = ax2.get_window_extent().get_points()
+        # seems to be expressed in pixels so convert to relative coordinates
+#        print ax2box
+        # figure size in pixels
+        figsize_x_pix = plot.figsize_x*plot.dpi
+        figsize_y_pix = plot.figsize_y*plot.dpi
+        # ax2 box in relative coordinates
+        ax2box[:,0] = ax2box[:,0] / figsize_x_pix
+        ax2box[:,1] = ax2box[:,1] / figsize_y_pix
+#        print ax2box[0,0], ax2box[1,0], ax2box[0,1], ax2box[1,1]
+        # position the new inset box relative to the ax2 bounding box
+        left   = ax2box[0,0] + ((ax2box[1,0] - ax2box[0,0]) * 0.15)
+        bottom = ax2box[0,1] + ((ax2box[1,1] - ax2box[0,1]) * 0.30)  # x2
+        width  = (ax2box[1,0] - ax2box[0,0]) * 0.35
+        height = (ax2box[1,1] - ax2box[0,1]) * 0.6
+#        print [left, bottom, width, height]
+
+        # left inset plot.
+        # [left, bottom, width, height]
+#        ax2a = plot.fig.add_axes([0.42, 0.6, .45, .25])
+        ax2a = plot.fig.add_axes([left, bottom, width, height])
+        ax2a.plot((w/np.pi)*nyq_rate, np.absolute(h), linewidth=2)
+        ax2a.set_xlim(0,8.0)
+        ax2a.set_ylim(0.9985, 1.001)
+        ax2a.grid(True)
+
+        # right inset plot
+        left   = ax2box[0,0] + ((ax2box[1,0] - ax2box[0,0]) * 0.62)
+        bottom = ax2box[0,1] + ((ax2box[1,1] - ax2box[0,1]) * 0.30)  # x2
+        width  = (ax2box[1,0] - ax2box[0,0]) * 0.35
+        height = (ax2box[1,1] - ax2box[0,1]) * 0.6
+
+        # right inset plot axes
+#        ax2b = plot.fig.add_axes([0.42, 0.25, .45, .25])
+        ax2b = plot.fig.add_axes([left, bottom, width, height])
+        ax2b.plot((w/np.pi)*nyq_rate, np.absolute(h), linewidth=2)
+        ax2b.set_xlim(12.0, 20.0)
+        ax2b.set_ylim(0.0, 0.0025)
+        ax2b.grid(True)
+
+        #------------------------------------------------
+        # Plot the original and filtered signals.
+        #------------------------------------------------
+
+        # The phase delay of the filtered signal.
+        delay = 0.5 * (N-1) / sample_rate
+
+        plot_nr += 1
+        ax3 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, plot_nr)
+        # Plot the original signal.
+        ax3.plot(time, data, label='original signal')
+        # Plot the filtered signal, shifted to compensate for the phase delay.
+        ax3.plot(time-delay, filtered_x, 'r-', label='filtered signal')
+        # Plot just the "good" part of the filtered signal.  The first N-1
+        # samples are "corrupted" by the initial conditions.
+        ax3.plot(time[N-1:]-delay, filtered_x[N-1:], 'g', linewidth=4)
+
+        ax3.set_xlabel('t')
+        ax3.grid(True)
+
+        plot.save_fig()
\ No newline at end of file
diff --git a/wetb/prepost/h2_vs_hs2.py b/wetb/prepost/h2_vs_hs2.py
new file mode 100644
index 0000000000000000000000000000000000000000..132a325a11b62c58155364c2162f185c3738a486
--- /dev/null
+++ b/wetb/prepost/h2_vs_hs2.py
@@ -0,0 +1,1373 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Nov  2 15:23:15 2015
+
+@author: dave
+"""
+
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+import numpy as np
+#import scipy.interpolate as interpolate
+import pandas as pd
+from matplotlib import pyplot as plt
+
+import Simulations as sim
+import dlcdefs
+import hawcstab2 as hs2
+import mplutils
+
+
+class Configurations:
+    # HAWC2
+    eigenan = {'[eigen_analysis]':True, '[time stop]':0,
+               '[t0]'            :0,     '[output]'  :False}
+    control = {'[Free shaft rot]':True,  '[dll]'        :True,
+               '[fixed_op]'      :False, '[fixed_shaft]':False,
+               '[init_wr]'       :0.5,   '[pitch_bearing]':True}
+    opt_h2 = {'[output]'         :True,  '[hs2]'        :False,
+              '[hawc2_exe]'      :'hawc2-latest',
+              '[Case folder]'    :'HAWC2', '[hawc2]'    :True}
+    fix_op = {'[Free shaft rot]' :False, '[dll]'        :False,
+              '[fixed_op]'       :True,  '[fixed_shaft]':False,
+              '[init_wr]'        :0.5,   '[fixed_omega]':0.5,
+              '[pitch_bearing]'  :False}
+    # HAWCStab2
+    opt_hs2 = {'[output]'         :False, '[hs2]'        :True,
+               '[Free shaft rot]' :True,  '[dll]'        :False,
+               '[fixed_op]'       :False, '[fixed_shaft]':False,
+               '[init_wr]'        :0.5,   '[fixed_omega]':0.5,
+               '[pitch_angle]'    :0.0,   '[hawc2_exe]'  :'hs2cmd-latest',
+               '[Case folder]'    :'HAWCStab2', '[hawc2]':False,
+               '[pitch_bearing]'  :True}
+
+    # AERODYNAMIC MODELLING OPTIONS
+    aero_simple = {'[aerocalc]':1, '[Induction]':0, '[tip_loss]':0,
+                   '[Dyn stall]':0, '[t0]':100, '[time stop]':150}
+    # when induction is on, especially the low wind speeds will need more time
+    # to reach steady state in HAWC2 compared to when there is no induction.
+    aero_full = {'[aerocalc]':1, '[Induction]':1, '[tip_loss]':1,
+                 '[Dyn stall]':1, '[t0]':500, '[time stop]':550}
+
+    blade_stiff_pitchC4 = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                           '[blade_damp_z]':0.01, '[blade_set]':4,
+                           '[blade_subset]':1, '[blade_posx]':-0.75}
+
+    blade_stiff_pitchC2 = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                           '[blade_damp_z]':0.01, '[blade_set]':4,
+                           '[blade_subset]':1, '[blade_posx]':0.0}
+
+    blade_stiff_pitch3C4 = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                           '[blade_damp_z]':0.01, '[blade_set]':4,
+                           '[blade_subset]':1, '[blade_posx]':0.75}
+
+    blade_flex50_tstiff_C14 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                               '[blade_damp_z]':0.03, '[blade_set]':1,
+                               '[blade_subset]':15, '[blade_posx]':-0.75,
+                               '[blade_nbodies]': 17,
+                               '[c12]':False, '[c14]':True}
+    blade_flex50_tstiff_C12 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                               '[blade_damp_z]':0.03, '[blade_set]':1,
+                               '[blade_subset]':13, '[blade_posx]':0.0,
+                               '[blade_nbodies]': 17,
+                               '[c12]':True, '[c14]':False}
+    blade_flex50_C14 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                        '[blade_damp_z]':0.03, '[blade_set]':1,
+                        '[blade_subset]':17, '[blade_posx]':-0.75,
+                        '[blade_nbodies]': 17,
+                        '[c12]':False, '[c14]':True}
+    blade_flex50_C12 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                        '[blade_damp_z]':0.03, '[blade_set]':1,
+                        '[blade_subset]':16, '[blade_posx]':0.0,
+                        '[blade_nbodies]': 17,
+                        '[c12]':True, '[c14]':False}
+    blade_flex89_tstiff_C14 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                            '[blade_damp_z]':0.03, '[blade_set]':1,
+                            '[blade_subset]':23, '[blade_posx]':-0.75,
+                            '[blade_nbodies]': 17,
+                            '[c12]':False, '[c14]':True}
+    blade_flex89_tstiff_C12 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                            '[blade_damp_z]':0.03, '[blade_set]':1,
+                            '[blade_subset]':22, '[blade_posx]':0.0,
+                            '[blade_nbodies]': 17,
+                            '[c12]':True, '[c14]':False}
+    blade_flex89_t50_C14 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                            '[blade_damp_z]':0.03, '[blade_set]':1,
+                            '[blade_subset]':19, '[blade_posx]':-0.75,
+                            '[blade_nbodies]': 17,
+                            '[c12]':False, '[c14]':True}
+    blade_flex89_t50_C12 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                            '[blade_damp_z]':0.03, '[blade_set]':1,
+                            '[blade_subset]':18, '[blade_posx]':0.0,
+                            '[blade_nbodies]': 17,
+                            '[c12]':True, '[c14]':False}
+    blade_flex89_t50_C12_allstC14 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                            '[blade_damp_z]':0.03, '[blade_set]':1,
+                            '[blade_subset]':21, '[blade_posx]':-0.75,
+                            '[blade_nbodies]': 17,
+                            '[c12]':False, '[c14]':True}
+    blade_flex89_t50_C12_allstC12 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                            '[blade_damp_z]':0.03, '[blade_set]':1,
+                            '[blade_subset]':19, '[blade_posx]':0.0,
+                            '[blade_nbodies]': 17,
+                            '[c12]':True, '[c14]':False}
+
+    blade_flex89_t50_C12_cgshC14_eaC12 = {'[blade_damp_x]':0.03,
+                                          '[blade_damp_y]':0.03,
+                                          '[blade_damp_z]':0.03,
+                                          '[blade_set]'   :1,
+                                          '[blade_subset]':24,
+                                          '[blade_posx]'  :0.0,
+                                          '[blade_nbodies]': 17,
+                                          '[c12]':True, '[c14]':False}
+
+    blade_flex = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                  '[blade_damp_z]':0.01, '[blade_set]':1,
+                  '[blade_subset]':1, '[blade_posx]':-0.75}
+
+    blade_flex_allac = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                        '[blade_damp_z]':0.01, '[blade_set]':1,
+                        '[blade_subset]':2, '[blade_posx]':-0.75}
+
+    blade_flex_allac_11 = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                           '[blade_damp_z]':0.01, '[blade_set]':1,
+                           '[blade_subset]':7, '[blade_posx]':-0.75}
+
+    blade_flex_allac_33 = {'[blade_damp_x]':0.02, '[blade_damp_y]':0.02,
+                           '[blade_damp_z]':0.02, '[blade_set]':1,
+                           '[blade_subset]':8, '[blade_posx]':-0.75}
+
+    blade_flex_allac_50 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                           '[blade_damp_z]':0.03, '[blade_set]':1,
+                           '[blade_subset]':9, '[blade_posx]':-0.75}
+
+    blade_flex_allac_50_pitchC2 = {'[blade_damp_x]':0.03, '[blade_damp_y]':0.03,
+                                   '[blade_damp_z]':0.03, '[blade_set]':1,
+                                   '[blade_subset]':9, '[blade_posx]':0.0}
+
+    # configurations for the B-series (which has quite a few changes)
+    # B0001
+    stiff_pc14_cgsheac14 = {'[blade_damp_x]':0.01, '[blade_damp_y]':0.01,
+                            '[blade_damp_z]':0.01, '[blade_set]':1,
+                            '[blade_subset]':3,    '[blade_posx]':-0.75,
+                            '[blade_nbodies]': 17,
+                            '[st_file]'     :'blade_flex_rect.st',
+                            '[c12]':False, '[c14]':True,
+                            '[ae_tolrel]': 1e-7,
+                            '[ae_itmax]' : 2000,
+                            '[ae_1relax]': 0.2}
+    # B0002
+    flex_tstiff_pc14_cgsheac14 = {'[blade_damp_x]':0.13, '[blade_damp_y]':0.13,
+                                  '[blade_damp_z]':0.15, '[blade_set]':1,
+                                  '[blade_subset]':5,    '[blade_posx]':-0.75,
+                                  '[blade_nbodies]': 17,
+                                  '[st_file]'     :'blade_flex_rect.st',
+                                  '[c12]':False, '[c14]':True,
+                                  '[ae_tolrel]': 1e-7,
+                                  '[ae_itmax]' : 2000,
+                                  '[ae_1relax]': 0.7}
+    # B0003
+    flex_pc14_cgsheac14 = {'[blade_damp_x]':0.13, '[blade_damp_y]':0.13,
+                           '[blade_damp_z]':0.15, '[blade_set]':1,
+                           '[blade_subset]':6,    '[blade_posx]':-0.75,
+                           '[blade_nbodies]': 17,
+                           '[st_file]'     :'blade_flex_rect.st',
+                           '[c12]':False, '[c14]':True,
+                           '[ae_tolrel]': 1e-7,
+                           '[ae_itmax]' : 2000,
+                           '[ae_1relax]': 0.7}
+    # B0004
+    flex_pc12_cgsheac12 = {'[blade_damp_x]':0.15, '[blade_damp_y]':0.15,
+                           '[blade_damp_z]':0.17, '[blade_set]':1,
+                           '[blade_subset]':7,    '[blade_posx]':0.00,
+                           '[blade_nbodies]': 17,
+                           '[st_file]'     :'blade_flex_rect.st',
+                           '[c12]':True, '[c14]':False,
+                           '[ae_tolrel]': 1e-7,
+                           '[ae_itmax]' : 2000,
+                           '[ae_1relax]': 0.7}
+    # B0005, B0006
+    flex_pc12_cgshc14_eac12 = {'[blade_damp_x]':0.15, '[blade_damp_y]':0.15,
+                               '[blade_damp_z]':0.17, '[blade_set]':1,
+                               '[blade_subset]':8,    '[blade_posx]':0.00,
+                               '[blade_nbodies]': 17,
+                               '[st_file]'     :'blade_flex_rect.st',
+                               '[c12]':True, '[c14]':False,
+                               '[ae_tolrel]': 1e-7,
+                               '[ae_itmax]' : 2000,
+                               '[ae_1relax]': 0.98}
+
+
+    def __init__(self):
+        pass
+
+    def opt_tags_h2_eigenanalysis(self, basename):
+        """Return opt_tags suitable for a standstill HAWC2 eigen analysis.
+        """
+        opt_tags = [self.opt_h2.copy()]
+        opt_tags[0].update(self.eigenan.copy())
+        opt_tags[0]['[Case id.]'] = '%s_hawc2_eigenanalysis' % basename
+        opt_tags[0]['[blade_damp_x]'] = 0.0
+        opt_tags[0]['[blade_damp_y]'] = 0.0
+        opt_tags[0]['[blade_damp_z]'] = 0.0
+        opt_tags[0]['[blade_nbodies]'] = 1
+        opt_tags[0]['[Windspeed]'] = 0.0
+        opt_tags[0]['[init_wr]'] = 0.0
+        opt_tags[0]['[operational_data]'] = 'case-turbine2-empty.opt'
+
+        return opt_tags
+
+    def opt_tags_hs_structure_body_eigen(self, basename):
+        """Return opt_tags suitable for a standstill HAWCStab2 body eigen
+        analysis, at 0 RPM.
+        """
+        opt_tags = [self.opt_hs2.copy()]
+        opt_tags[0]['[Case id.]'] = '%s_hawc2_eigenanalysis' % basename
+        opt_tags[0]['[blade_damp_x]'] = 0.0
+        opt_tags[0]['[blade_damp_y]'] = 0.0
+        opt_tags[0]['[blade_damp_z]'] = 0.0
+        opt_tags[0]['[blade_nbodies]'] = 1
+        opt_tags[0]['[Windspeed]'] = 0.0
+        opt_tags[0]['[init_wr]'] = 0.0
+        opt_tags[0]['[fixed_omega]'] = 0.0
+        opt_tags[0]['[operational_data]'] = 'case-turbine2-empty.opt'
+
+        return opt_tags
+
+    def opt_tags_hs2(self, basename):
+
+        opt_tags = [self.opt_hs2.copy()]
+        opt_tags[0]['[Case id.]'] = '%s_hawcstab2' % basename
+        return opt_tags
+
+    def set_hs2opdata(self, master, basename):
+        """Load the HS2 operational data file and create opt_tags for HAWC2
+        cases.
+
+        Returns
+        -------
+        opt_tags : list of dicts
+        """
+        fpath = os.path.join(master.tags['[data_dir]'],
+                             master.tags['[operational_data]'])
+        hs2_res = hs2.results()
+        hs2_res.load_operation(fpath)
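+        # convert rotor speed from rpm to rad/s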
+        omegas = hs2_res.operation.rotorspeed_rpm.values*np.pi/30.0
+        winds = hs2_res.operation.windspeed.values
+        pitchs = hs2_res.operation.pitch_deg.values
+
+        return self.set_opdata(winds, pitchs, omegas, basename=basename)
+
+    def set_opdata(self, winds, pitchs, omegas, basename=None):
+        """Return opt_tags for HAWC2 based on an HAWCStab2 operational data
+        file.
+
+        Parameters
+        ----------
+
+        winds : ndarray(n)
+            wind speed for given operating point [m/s]
+
+        pitchs : ndarray(n)
+            pitch angle at given operating point [deg]
+
+        omegas : ndarray(n)
+            rotor speed at given operating point [rad/s]
+
+        basename : str, default=None
+            If not None, the [Case id.] tag is composed out of the basename,
+            wind speed, pitch angle and rotor speed. If set to None, the
+            [Case id.] tag is not set.
+
+        Returns
+        -------
+        opt_tags : list of dicts
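+
+        Examples
+        --------
+
+        A minimal sketch (the operating points below are illustrative only):
+
+        >>> conf = Configurations()
+        >>> winds = np.array([6.0, 10.0])    # wind speeds [m/s]
+        >>> pitchs = np.array([0.0, 5.0])    # pitch angles [deg]
+        >>> omegas = np.array([0.6, 1.0])    # rotor speeds [rad/s]
+        >>> opt_tags = conf.set_opdata(winds, pitchs, omegas, basename='case')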
+        """
+
+        # the HAWC2 cases
+        opt_tags = []
+        for wind, pitch, omega in zip(winds, pitchs, omegas):
+            opt_dict = {}
+            opt_dict.update(self.opt_h2.copy())
+            opt_dict.update(self.fix_op.copy())
+            rpl = (basename, wind, pitch, omega)
+            if basename is not None:
+                tmp = '%s_%02.0fms_%04.01fdeg_%04.02frads_hawc2' % rpl
+                opt_dict['[Case id.]'] = tmp
+            opt_dict['[Windspeed]'] = wind
+            opt_dict['[pitch_angle]'] = pitch
+            opt_dict['[fixed_omega]'] = omega
+            opt_dict['[init_wr]'] = omega
+#            opt_dict['[t0]'] = int(2000.0/opt_dict['[Windspeed]']) # or 2000?
+#            opt_dict['[time stop]'] = opt_dict['[t0]']+100
+#            opt_dict['[time_stop]'] = opt_dict['[t0]']+100
+            opt_tags.append(opt_dict.copy())
+        return opt_tags
+
+
+class Sims(object):
+
+    def __init__(self, sim_id, P_MASTERFILE, MASTERFILE, P_SOURCE, P_RUN,
+                 PROJECT, POST_DIR):
+        """
+        Create HtcMaster() object
+        =========================
+
+        The HtcMaster object contains all the settings needed to start
+        creating htc files. It holds the master file, server paths and more.
+
+        The master.tags dictionary holds the tags that do not vary between
+        cases. Variable tags, i.e. tags that are a function of other variables
+        or other tags, are defined in the function variable_tag_func().
+
+        It is considered good practice to define the default values for all
+        the variable tags in the master tags.
+
+        Members
+        -------
+
+        Returns
+        -------
+
+        """
+
+        self.sim_id = sim_id
+        self.P_MASTERFILE = P_MASTERFILE
+        self.MASTERFILE = MASTERFILE
+        self.P_SOURCE = P_SOURCE
+        self.P_RUN = P_RUN
+        self.PROJECT = PROJECT
+        self.POST_DIR = POST_DIR
+
+        # TODO: write a lot of logical tests for the tags!!
+        # TODO: tests to check if the dirs are set up properly (ending slashes)
+        # FIXME: some tags are still variable! Only static tags here that do
+        # not depend on any other variable that can change
+        self.master = sim.HtcMaster()
+        self.set_tag_defaults()
+
+    def _var_tag_func(self, master, case_id_short=False):
+        """
+        Function which updates HtcMaster.tags and returns an HtcMaster object
+
+        Only use lower case characters for case_id since a hawc2 result and
+        logfile are always in lower case characters. Simulations.prepare_launch
+        will force the value of the tags as defined in master.output_dirs
+        to lower case.
+
+        BE CAREFUL: if you change a master tag that is used to dynamically
+        calculate another tag, that change will be propagated over all cases,
+        for example:
+        master.tags['tag1'] *= master.tags['tag2']*master.tags['tag3']
+        will accumulate over each new case. After 20 cases
+        master.tags['tag1'] = (master.tags['tag2']*master.tags['tag3'])^20
+        which is not wanted, instead you should do
+        master.tags['tag1'] = tag1_base*master.tags['tag2']*master.tags['tag3']
+
+        """
+
+        mt = master.tags
+
+        dlc_case = mt['[Case folder]']
+        mt['[data_dir]'] = 'data/'
+        mt['[res_dir]'] = 'res/%s/' % dlc_case
+        mt['[log_dir]'] = 'logfiles/%s/' % dlc_case
+        mt['[htc_dir]'] = 'htc/%s/' % dlc_case
+        mt['[case_id]'] = mt['[Case id.]']
+        mt['[DLC]'] = dlc_case
+        mt['[pbs_out_dir]'] = 'pbs_out/%s/' % dlc_case
+        mt['[pbs_in_dir]'] = 'pbs_in/%s/' % dlc_case
+        mt['[iter_dir]'] = 'iter/%s/' % dlc_case
+
+        if mt['[eigen_analysis]']:
+            rpl = os.path.join(dlc_case, mt['[Case id.]'])
+            mt['[eigenfreq_dir]'] = 'res_eigen/%s/' % rpl
+
+        # for HAWCStab2 certain things have to be done differently
+        if mt['[hs2]']:
+            mt['[htc_dir]'] = ''
+            mt['[t0]'] = 0
+            mt['[time stop]'] = 1
+            mt['[hawc2]'] = False
+            mt['[output]'] = False
+            mt['[copyback_files]'] = ['./*.ind', './*.pwr', './*.log',
+                                      './*.cmb', './*.bea']
+            mt['[copyback_frename]'] = [mt['[res_dir]'], mt['[res_dir]'],
+                                        mt['[log_dir]'], mt['[res_dir]'],
+                                        mt['[res_dir]']]
+            if mt['[hs2_bladedeform_switch]']:
+                mt['[hs2_bladedeform]'] = 'bladedeform'
+            else:
+                mt['[hs2_bladedeform]'] = 'nobladedeform'
+
+            if int(mt['[tip_loss]']) == 1:
+                mt['[hs2_tipcorrect]'] = 'tipcorrect'
+            else:
+                mt['[hs2_tipcorrect]'] = 'notipcorrect'
+
+            if int(mt['[Induction]']) == 1:
+                mt['[hs2_induction]'] = 'induction'
+            else:
+                mt['[hs2_induction]'] = 'noinduction'
+
+            if mt['[hs2_gradients_switch]']:
+                mt['[hs2_gradients]'] = 'gradients'
+            else:
+                mt['[hs2_gradients]'] = 'nogradients'
+
+        mt['[windspeed]'] = mt['[Windspeed]']
+        mt['[time_stop]'] = mt['[time stop]']
+        mt['[duration]'] = str(float(mt['[time_stop]']) - float(mt['[t0]']))
+
+        return master
+
+    def _set_path_auto_config(self, verbose=True):
+        """
+        Auto configure directories: assume we are running from the root of
+        the relevant HAWC2 model, in a simulation case of a certain
+        turbine/project.
+        """
+        (self.P_RUN, self.P_SOURCE, self.PROJECT,
+             self.sim_id, self.P_MASTERFILE,
+             self.MASTERFILE, self.POST_DIR) = dlcdefs.configure_dirs(verbose=verbose)
+
+    def _set_path_config(self, runmethod='here'):
+        """
+        Set the path configuration into the tags
+        """
+
+        self.runmethod = runmethod
+
+        if runmethod == 'here':
+            self._set_path_auto_config()
+        elif runmethod in ['local', 'local-script', 'none', 'local-ram']:
+            self.p_root = '/home/dave/SimResults/h2_vs_hs2/'
+        elif runmethod == 'windows-script':
+            self.p_root = '/mnt/D16731/dave/Documents/_SimResults'
+        elif runmethod == 'gorm':
+            self.p_root = '/mnt/hawc2sim/h2_vs_hs2'
+        elif runmethod == 'jess':
+            self.p_root = '/mnt/hawc2sim/h2_vs_hs2'
+        else:
+            msg = 'unsupported runmethod, valid options: here, local, '
+            msg += 'local-script, none, local-ram, windows-script, gorm, jess'
+            raise ValueError(msg)
+
+        if not runmethod == 'here':
+            self.P_RUN = os.path.join(self.p_root, self.PROJECT, self.sim_id)
+
+        self.master.tags['[master_htc_file]'] = self.MASTERFILE
+        self.master.tags['[master_htc_dir]'] = self.P_MASTERFILE
+        # directory to data, htc, SOURCE DIR
+        if self.P_SOURCE[-1] == os.sep:
+            self.master.tags['[model_dir_local]']  = self.P_SOURCE
+        else:
+            self.master.tags['[model_dir_local]']  = self.P_SOURCE + os.sep
+        if self.P_RUN[-1] == os.sep:
+            self.master.tags['[run_dir]'] = self.P_RUN
+        else:
+            self.master.tags['[run_dir]'] = self.P_RUN + os.sep
+
+        self.master.tags['[post_dir]'] = self.POST_DIR
+        self.master.tags['[sim_id]'] = self.sim_id
+        # set the model_zip tag to include the sim_id
+        rpl = (self.PROJECT, self.master.tags['[sim_id]'])
+        self.master.tags['[model_zip]'] = '%s_%s.zip' % rpl
+
+    def set_tag_defaults(self):
+        """
+        Set the default values of the required master tags
+        """
+        mt = self.master.tags
+
+        # other required tags and their defaults
+        mt['[dt_sim]'] = 0.01
+        mt['[hawc2_exe]'] = 'hawc2-latest'
+        # convergence_limits  0.001  0.005  0.005 ;
+        # critical one, residual on the forces: 0.0001 = 1e-4
+        mt['[epsresq]'] = '1.0' # default=10.0
+        # increment residual
+        mt['[epsresd]'] = '0.5' # default= 1.0
+        # constraint equation residual
+        mt['[epsresg]'] = '1e-8' # default= 1e-7
+        # folder names for the saved results, htc, data, zip files
+        # The following dirs are relative to the model_dir_server and specify
+        # where the results, logfiles and animation files that were created
+        # on the server should be copied to after the simulation has finished.
+        # On the node, it will try to copy the turbulence files from these dirs.
+        mt['[animation_dir]'] = 'animation/'
+        mt['[control_dir]']   = 'control/'
+        mt['[data_dir]']      = 'data/'
+        mt['[eigen_analysis]'] = False
+        mt['[eigenfreq_dir]'] = False
+        mt['[htc_dir]']       = 'htc/'
+        mt['[log_dir]']       = 'logfiles/'
+        mt['[meander_dir]']   = False
+        mt['[opt_dir]']       = False
+        mt['[pbs_out_dir]']   = 'pbs_out/'
+        mt['[res_dir]']       = 'res/'
+        mt['[iter_dir]']      = 'iter/'
+        mt['[turb_dir]']      = 'turb/'
+        mt['[turb_db_dir]']   = '../turb/'
+        mt['[wake_dir]']      = False
+        mt['[hydro_dir]']     = False
+        mt['[mooring_dir]']   = False
+        mt['[externalforce]'] = False
+        mt['[Case folder]']   = 'NoCaseFolder'
+        # zip_root_files is only used when copying to run_dir and creating the
+        # zip file, as defined in the HtcMaster object
+        mt['[zip_root_files]'] = []
+        # only active on PBS level, so files have to be present in the run_dir
+        mt['[copyback_files]'] = []   # copyback_resultfile
+        mt['[copyback_frename]'] = [] # copyback_resultrename
+        mt['[copyto_files]'] = []     # copyto_inputfile
+        mt['[copyto_generic]'] = []   # copyto_input_required_defaultname
+
+        # In master file tags within the HAWC2 vs HAWCStab2 context
+        mt['[hawc2]'] = False
+        mt['[output]'] = False
+        mt['[eigen_analysis]'] = False
+        mt['[system_eigen_analysis]'] = False
+        mt['[operational_data]'] = 'case_name.opt'
+
+        mt['[gravity]'] = 0.0
+        mt['[shaft_tilt]'] = 0.0 # 5.0
+        mt['[coning]'] = 0.0 # 2.5
+        mt['[Windspeed]'] = 1.0
+        mt['[wtilt]'] = 0.0
+        mt['[wdir]'] = 0.0
+        mt['[aerocalc]'] = 1
+        mt['[Induction]'] = 0
+        mt['[tip_loss]'] = 0
+        mt['[Dyn stall]'] = 0
+        mt['[tu_model]'] = 0
+        mt['[shear_exp]'] = 0
+        mt['[tower_shadow]'] = 0
+        mt['[TI]'] = 1
+        mt['[fixed_omega]'] = 1.0
+        mt['[init_wr]'] = 0
+        mt['[pc_file_name]'] = 'hawc_pc.mhh'
+        mt['[ae_file_name]'] = 'hawc2_ae.mhh'
+        mt['[nr_ae_sections]'] = 30
+        mt['[use_nr_ae_sections]'] = True
+        mt['[use_ae_distrb_file]'] = False
+        mt['[ae_set_nr]'] = 1
+        # tors_e output depends on the pitch axis configuration
+        mt['[c12]'] = False
+        mt['[c14]'] = False
+
+        mt['[t0]'] = 500
+        mt['[time stop]'] = 600
+
+        mt['[hs2]'] = False
+        mt['[nr_blade_modes_hs2]'] = 10
+        mt['[stab_analysis]'] = False
+        mt['[steady_states]'] = True
+        mt['[hs2_bladedeform_switch]'] = True
+        mt['[hs2_gradients_switch]'] = False
+        # by default take the stiff set
+        mt['[st_file]'] = 'hawc2_st.mhh'
+        mt['[tower_set]'] = 4 # 1
+        mt['[shaft_set]'] = 4 # 2
+        mt['[blade_set]'] = 4 # 3
+        mt['[tower_subset]'] = 1
+        mt['[shaft_subset]'] = 1
+        mt['[blade_subset]'] = 1
+        mt['[blade_nbodies]'] = 1
+        mt['[blade_posx]'] = -0.75
+        mt['[blade_damp_x]'] = 0.01
+        mt['[blade_damp_y]'] = 0.01
+        mt['[blade_damp_z]'] = 0.01
+        # HAWCStab2 convergence criteria
+        mt['[bem_tol]'] = 1e-12
+        mt['[bem_itmax]'] = 10000
+        mt['[bem_1relax]'] = 0.02
+        mt['[ae_tolrel]'] = 1e-7
+        mt['[ae_itmax]'] = 2000
+        mt['[ae_1relax]'] = 0.5
+        mt['[tol_7]'] = 10
+        mt['[tol_8]'] = 5
+        mt['[tol_9]'] = 1e-8
+
+        # =========================================================================
+        # basic required tags by HtcMaster and PBS in order to function properly
+        # =========================================================================
+        # the express queue ('#PBS -q xpresq') has a maximum walltime of 1h
+        mt['[pbs_queue_command]'] = '#PBS -q workq'
+        # walltime should have following format: hh:mm:ss
+        mt['[walltime]'] = '04:00:00'
+        mt['[auto_walltime]'] = False
+
+    def get_dlc_casedefs(self):
+        """
+        Create iter_dict and opt_tags based on spreadsheets
+        """
+
+        iter_dict = dict()
+        iter_dict['[empty]'] = [False]
+
+        # see if a htc/DLCs dir exists
+        dlcs_dir = os.path.join(self.P_SOURCE, 'htc', 'DLCs')
+        if os.path.exists(dlcs_dir):
+            opt_tags = dlcdefs.excel_stabcon(dlcs_dir)
+        else:
+            opt_tags = dlcdefs.excel_stabcon(os.path.join(self.P_SOURCE, 'htc'))
+
+        if len(opt_tags) < 1:
+            raise ValueError('Not a single case is defined. Make sure the '
+                             'DLC spreadsheets are configured properly.')
+
+        # add all the root files, except anything with *.zip
+        f_ziproot = []
+        for (dirpath, dirnames, fnames) in os.walk(self.P_SOURCE):
+            # remove all zip files
+            for i, fname in enumerate(fnames):
+                if fname.endswith('.zip'):
+                    fnames.pop(i)
+            f_ziproot.extend(fnames)
+            break
+        # and add those files
+        for opt in opt_tags:
+            opt['[zip_root_files]'] = f_ziproot
+
+        self.master.output_dirs.append('[Case folder]')
+        self.master.output_dirs.append('[Case id.]')
+
+        return iter_dict, opt_tags
+
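+    # A typical workflow sketch (illustrative; it assumes the path constants
+    # passed to Sims() are already defined):
+    #   sims = Sims(sim_id, P_MASTERFILE, MASTERFILE, P_SOURCE, P_RUN,
+    #               PROJECT, POST_DIR)
+    #   sims._set_path_config(runmethod='gorm')
+    #   iter_dict, opt_tags = sims.get_dlc_casedefs()
+    #   sims.create_inputs(iter_dict, opt_tags)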
+    def create_inputs(self, iter_dict, opt_tags):
+
+        sim.prepare_launch(iter_dict, opt_tags, self.master, self._var_tag_func,
+                           write_htc=True, runmethod=self.runmethod, verbose=False,
+                           copyback_turb=False, msg='', update_cases=False,
+                           ignore_non_unique=False, run_only_new=False,
+                           pbs_fname_appendix=False, short_job_names=False)
+
+    def get_control_tuning(self, fpath):
+        """
+        Read a HAWCStab2 controller tuning file and return as tags
+        """
+        tuning = hs2.hs2_control_tuning()
+        tuning.read_parameters(fpath)
+
+        tune_tags = {}
+
+        tune_tags['[pi_gen_reg1.K]'] = tuning.pi_gen_reg1.K
+
+        tune_tags['[pi_gen_reg2.I]'] = tuning.pi_gen_reg2.I
+        tune_tags['[pi_gen_reg2.Kp]'] = tuning.pi_gen_reg2.Kp
+        tune_tags['[pi_gen_reg2.Ki]'] = tuning.pi_gen_reg2.Ki
+
+        tune_tags['[pi_pitch_reg3.Kp]'] = tuning.pi_pitch_reg3.Kp
+        tune_tags['[pi_pitch_reg3.Ki]'] = tuning.pi_pitch_reg3.Ki
+        tune_tags['[pi_pitch_reg3.K1]'] = tuning.pi_pitch_reg3.K1
+        tune_tags['[pi_pitch_reg3.K2]'] = tuning.pi_pitch_reg3.K2
+
+        tune_tags['[aero_damp.Kp2]'] = tuning.aero_damp.Kp2
+        tune_tags['[aero_damp.Ko1]'] = tuning.aero_damp.Ko1
+        tune_tags['[aero_damp.Ko2]'] = tuning.aero_damp.Ko2
+
+        return tune_tags
+
+    def post_processing(self, statistics=True, resdir=None):
+        """
+        Parameters
+        ----------
+
+        resdir : str, default=None
+            Defaults to reading the results from the [run_dir] tag.
+            Force to any other directory using this variable. You can also use
+            the presets as defined for runmethod in _set_path_config.
+        """
+
+        post_dir = self.POST_DIR
+
+        # =========================================================================
+        # check logfiles, results files, pbs output files
+        # logfile analysis is written to a csv file in logfiles directory
+        # =========================================================================
+        # load the file saved in post_dir
+        cc = sim.Cases(post_dir, self.sim_id, rem_failed=False)
+
+        if resdir is None:
+            # we keep the run_dir as defined during launch
+            run_root = None
+        elif resdir in ['local', 'local-script', 'none', 'local-ram']:
+            run_root = '/home/dave/SimResults'
+        elif resdir == 'windows-script':
+            run_root = '/mnt/D16731/dave/Documents/_SimResults'
+        elif resdir == 'gorm':
+            run_root = '/mnt/hawc2sim/h2_vs_hs2'
+        elif resdir == 'jess':
+            run_root = '/mnt/hawc2sim/h2_vs_hs2'
+        else:
+            run_root = None
+            cc.change_results_dir(resdir)
+
+        if isinstance(run_root, str):
+            forcedir = os.path.join(run_root, self.PROJECT, self.sim_id)
+            cc.change_results_dir(forcedir)
+
+        cc.post_launch()
+        cc.remove_failed()
+
+        if statistics:
+            tags=['[windspeed]']
+            stats_df = cc.statistics(calc_mech_power=False, ch_fatigue=[],
+                                     tags=tags, update=False)
+            ftarget = os.path.join(self.POST_DIR, '%s_statistics.xlsx')
+            stats_df.to_excel(ftarget % self.sim_id)
+
+
+class MappingsH2HS2(object):
+
+    def __init__(self, chord_length=3.0):
+        """
+        """
+        self.hs2_res = hs2.results()
+        self.chord_length = chord_length
+
+    def powercurve(self, h2_df_stats, fname_hs):
+
+        self._powercurve_h2(h2_df_stats)
+        self._powercurve_hs2(fname_hs)
+
+    def _powercurve_h2(self, df_stats):
+
+        mappings = {'Ae rot. power' : 'P_aero',
+                    'Ae rot. thrust': 'T_aero',
+                    'Vrel-1-39.03'  : 'vrel_39',
+                    'Omega'         : 'rotorspeed',
+                    'tower-tower-node-010-forcevec-y' : 'T_towertop',
+                    'tower-shaft-node-003-forcevec-y' : 'T_shafttip'}
+
+        df_stats.sort_values('[windspeed]', inplace=True)
+        df_mean = pd.DataFrame()
+        df_std = pd.DataFrame()
+
+        for key, value in mappings.iteritems():
+            tmp = df_stats[df_stats['channel']==key]
+            df_mean[value] = tmp['mean'].values.copy()
+            df_std[value] = tmp['std'].values.copy()
+
+        # also add the wind speed
+        df_mean['windspeed'] = tmp['[windspeed]'].values.copy()
+        df_std['windspeed'] = tmp['[windspeed]'].values.copy()
+
+        self.pwr_h2_mean = df_mean
+        self.pwr_h2_std = df_std
+        self.h2_df_stats = df_stats
+
+    def _powercurve_hs2(self, fname):
+
+        mappings = {u'P [kW]'  :'P_aero',
+                    u'T [kN]'  :'T_aero',
+                    u'V [m/s]' :'windspeed'}
+
+        df_pwr, units = self.hs2_res.load_pwr_df(fname)
+
+        self.pwr_hs = pd.DataFrame()
+        for key, value in mappings.iteritems():
+            self.pwr_hs[value] = df_pwr[key].values.copy()
+
+    def blade_distribution(self, fname_h2, fname_hs2, h2_df_stats=None,
+                           fname_h2_tors=None):
+
+        self.hs2_res.load_ind(fname_hs2)
+        self.h2_res = sim.windIO.ReadOutputAtTime(fname_h2)
+        self._distribution_hs2()
+        self._distribution_h2()
+        if h2_df_stats is not None:
+            self.h2_df_stats = h2_df_stats
+            if fname_h2_tors is not None:
+                self.distribution_torsion_h2(fname_h2_tors)
+
+    def _distribution_hs2(self):
+        """Read a HAWCStab2 *.ind file (blade distribution loading)
+        """
+
+        mapping_hs2 =  {u's [m]'       :'curved_s',
+                        u'CL0 [-]'     :'Cl',
+                        u'CD0 [-]'     :'Cd',
+                        u'CT [-]'      :'Ct',
+                        u'CP [-]'      :'Cp',
+                        u'A [-]'       :'ax_ind',
+                        u'AP [-]'      :'tan_ind',
+                        u'U0 [m/s]'    :'vrel',
+                        u'PHI0 [rad]'  :'inflow_angle',
+                        u'ALPHA0 [rad]':'AoA',
+                        u'X_AC0 [m]'   :'pos_x',
+                        u'Y_AC0 [m]'   :'pos_y',
+                        u'Z_AC0 [m]'   :'pos_z',
+                        u'UX0 [m]'     :'def_x',
+                        u'UY0 [m]'     :'def_y',
+                        u'Tors. [rad]' :'torsion',
+                        u'Twist[rad]'  :'twist',
+                        u'V_a [m/s]'   :'ax_ind_vel',
+                        u'V_t [m/s]'   :'tan_ind_vel',
+                        u'FX0 [N/m]'   :'F_x',
+                        u'FY0 [N/m]'   :'F_y',
+                        u'M0 [Nm/m]'   :'M'}
+
+        try:
+            hs2_cols = [k for k in mapping_hs2]
+            # select only the HS channels that will be used for the mapping
+            std_cols = [mapping_hs2[k] for k in hs2_cols]
+            self.hs_aero = self.hs2_res.ind.df_data[hs2_cols].copy()
+        except KeyError:
+            # some results have been created with older HAWCStab2 that did not
+            # include CT and CP columns
+            mapping_hs2.pop(u'CT [-]')
+            mapping_hs2.pop(u'CP [-]')
+            hs2_cols = [k for k in mapping_hs2]
+            std_cols = [mapping_hs2[k] for k in hs2_cols]
+            # select only the HS channels that will be used for the mapping
+            self.hs_aero = self.hs2_res.ind.df_data[hs2_cols].copy()
+
+        # change column names to the standard form that is shared with H2
+        self.hs_aero.columns = std_cols
+        self.hs_aero['AoA'] *= (180.0/np.pi)
+        self.hs_aero['inflow_angle'] *= (180.0/np.pi)
+        self.hs_aero['torsion'] *= (180.0/np.pi)
+#        self.hs_aero['pos_x'] = (-1.0) # self.chord_length / 4.0
+
+    def _distribution_h2(self):
+        mapping_h2 =  { u'Radius_s'  :'curved_s',
+                        u'Cl'        :'Cl',
+                        u'Cd'        :'Cd',
+                        u'Ct_local'  :'Ct',
+                        u'Cq_local'  :'Cq',
+                        u'Induc_RPy' :'ax_ind_vel',
+                        u'Induc_RPx' :'tan_ind_vel',
+                        u'Vrel'      :'vrel',
+                        u'Inflow_ang':'inflow_angle',
+                        u'alfa'      :'AoA',
+                        u'pos_RP_x'  :'pos_x',
+                        u'pos_RP_y'  :'pos_y',
+                        u'pos_RP_z'  :'pos_z',
+                        u'Secfrc_RPx':'F_x',
+                        u'Secfrc_RPy':'F_y',
+                        u'Secmom_RPz':'M'}
+        h2_cols = [k for k in mapping_h2]
+        std_cols = [mapping_h2[k] for k in h2_cols]
+
+        # select only the h2 channels that will be used for the mapping
+        h2_aero = self.h2_res[h2_cols].copy()
+        # change column names to the standard form that is shared with HS
+        h2_aero.columns = std_cols
+        h2_aero['def_x'] = self.h2_res['Pos_B_x'] - self.h2_res['Inipos_x_x']
+        h2_aero['def_y'] = self.h2_res['Pos_B_y'] - self.h2_res['Inipos_y_y']
+        h2_aero['def_z'] = self.h2_res['Pos_B_z'] - self.h2_res['Inipos_z_z']
+        h2_aero['ax_ind_vel'] *= (-1.0)
+        h2_aero['pos_x'] += (self.chord_length / 2.0)
+        h2_aero['F_x'] *= (1e3)
+        h2_aero['F_y'] *= (1e3)
+        h2_aero['M'] *= (1e3)
+#        # HAWC2 includes root and tip nodes, while HAWCStab2 doesn't. Remove them
+#        h2_aero = h2_aero[1:-1]
+        self.h2_aero = h2_aero
+
+    def distribution_torsion_h2(self, fname_h2):
+        """Determine torsion distribution from the HAWC2 result statistics.
+        tors_e is in degrees.
+        """
+        if not hasattr(self, 'h2_aero'):
+            raise UserWarning('first run blade_distribution')
+
+        # load the HAWC2 .sel file for the channels
+        fpath = os.path.dirname(fname_h2)
+        fname = os.path.basename(fname_h2)
+        res = sim.windIO.LoadResults(fpath, fname, readdata=False)
+        sel = res.ch_df[res.ch_df.sensortype == 'Tors_e'].copy()
+        sel.sort_values(['radius'], inplace=True)
+        self.h2_aero['Radius_s_tors'] = sel.radius.values.copy()
+        self.h2_aero['tors_e'] = sel.radius.values.copy()
+        tors_e_channels = sel.ch_name.tolist()
+
+        # find the current case in the statistics DataFrame
+        case = fname.replace('.htc', '')
+        df_case = self.h2_df_stats[self.h2_df_stats['[case_id]']==case].copy()
+        # and select all the torsion channels
+        df_tors_e = df_case[df_case.channel.isin(tors_e_channels)].copy()
+        # join the stats with the channel descriptions DataFrames, have the
+        # same name on the joining column
+        df_tors_e.set_index('channel', inplace=True)
+        sel.set_index('ch_name', inplace=True)
+
+        # joining happens on the index, and for which the same channel has been
+        # used: the unique HAWC2 channel naming scheme
+        df_tors_e = pd.concat([df_tors_e, sel], axis=1)
+        df_tors_e.radius = df_tors_e.radius.astype(np.float64)
+        # sorting on radius, combine with ch_df
+        df_tors_e.sort_values(['radius'], inplace=True)
+
+        # FIXME: what if number of torsion outputs is less than aero
+        # calculation points?
+#        df_tmp = pd.DataFrame()
+        self.h2_aero['torsion'] = df_tors_e['mean'].values.copy()
+        self.h2_aero['torsion_std'] = df_tors_e['std'].values.copy()
+        self.h2_aero['torsion_radius_s'] = df_tors_e['radius'].values.copy()
+#        df_tmp = pd.DataFrame()
+#        df_tmp['torsion'] = df_tors_e['mean'].copy()
+#        df_tmp['torsion_std'] = df_tors_e['std'].copy()
+#        df_tmp['torsion_radius_s'] = df_tors_e['radius'].copy()
+#        df_tmp.set_index('')
+
+    def body_structure_modes(self, fname_h2, fname_hs):
+        self._body_structure_modes_h2(fname_h2)
+        self._body_structure_modes_hs(fname_hs)
+
+    def _body_structure_modes_h2(self, fname):
+        self.body_freq_h2 = sim.windIO.ReadEigenBody(fname)
+
+        blade_h2 = self.body_freq_h2[self.body_freq_h2['body']=='blade1'].copy()
+        # because HAWCStab2 is sorted by frequency
+        blade_h2.sort_values('Fd_hz', inplace=True)
+        # HAWC2 usually has a lot of duplicate entries
+        blade_h2.drop_duplicates('Fd_hz', keep='first', inplace=True)
+        # also drop the ones with very high damping, and 0 frequency
+        query = '(log_decr_pct < 500 and log_decr_pct > -500) and Fd_hz > 0.0'
+        self.blade_body_freq_h2 = blade_h2.query(query)
+
+    def _body_structure_modes_hs(self, fname):
+        self.body_freq_hs = hs2.results().load_cmb_df(fname)
+
+
+class Plots(object):
+    """
+    Comparison plots between HAWC2 and HAWCStab2. These are based on the
+    HAWC2 output_at_time output and the HAWCStab2 *.ind output.
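+
+    A minimal usage sketch (the file names are just examples):
+
+    >>> plot = Plots()
+    >>> fig, axes = plot.blade_distribution('case_aero_at_tstop.dat',
+    ...                                     'case_u8000.ind', 'some title')
+    >>> plot.save_fig(fig, axes, 'figs/blade_distribution.png')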
+    """
+
+    def __init__(self):
+
+        self.h2c = 'b'
+        self.h2ms = '+'
+        self.h2ls = '-'
+        self.hsc = 'r'
+        self.hsms = 'x'
+        self.hsls = '--'
+        self.errls = '-'
+        self.errc = 'k'
+        self.errlab = 'diff [\\%]'
+        self.interactive = False
+
+        self.dist_size = (16, 11)
+        self.dist_channels = ['pos_x', 'pos_y', 'AoA', 'inflow_angle',
+                              'Cl', 'Cd', 'vrel', 'ax_ind_vel',
+                              'F_x', 'F_y', 'M']
+
+    def load_h2(self, fname_h2, h2_df_stats=None, fname_h2_tors=None):
+
+        res = MappingsH2HS2()
+        res.h2_res = sim.windIO.ReadOutputAtTime(fname_h2)
+        res._distribution_h2()
+        if h2_df_stats is not None:
+            res.h2_df_stats = h2_df_stats
+            if fname_h2_tors is not None:
+                res.distribution_torsion_h2(fname_h2_tors)
+
+        return res
+
+    def load_hs(self, fname_hs):
+
+        res = MappingsH2HS2()
+        res.hs2_res.load_ind(fname_hs)
+        res._distribution_hs2()
+
+        return res
+
+    def new_fig(self, title=None, nrows=2, ncols=1, dpi=150, size=(12.0, 5.0)):
+
+        if self.interactive:
+            subplots = plt.subplots
+        else:
+            subplots = mplutils.subplots
+
+        fig, axes = subplots(nrows=nrows, ncols=ncols, dpi=dpi, figsize=size)
+        axes = axes.ravel()
+        if title is not None:
+            fig.suptitle(title)
+        return fig, axes
+
+    def set_axes_label_grid(self, axes, setlegend=False):
+        for ax in axes.ravel():
+            if setlegend:
+                leg = ax.legend(loc='best')
+                if leg is not None:
+                    leg.get_frame().set_alpha(0.5)
+            ax.grid(True)
+        return axes
+
+    def save_fig(self, fig, axes, fname):
+        fig.tight_layout()
+        fig.subplots_adjust(top=0.89)
+        fig.savefig(fname, dpi=150)
+        fig.clear()
+        print('saved:', fname)
+
+    def distribution(self, results, labels, title, channels, x_ax='pos_z',
+                     xlabel='Z-coordinate [m]', nrows=2, ncols=4, size=(16, 5)):
+        """
+        Compare blade distribution results
+        """
+        res1 = results[0]
+        res2 = results[1]
+        lab1 = labels[0]
+        lab2 = labels[1]
+
+        radius1 = res1[x_ax].values
+        radius2 = res2[x_ax].values
+
+        fig, axes = self.new_fig(title=title, nrows=nrows, ncols=ncols, size=size)
+        axesflat = axes.flatten()
+        for i, chan in enumerate(channels):
+            ax = axesflat[i]
+            ax.plot(radius1, res1[chan].values, color=self.h2c,
+                    label=lab1, alpha=0.9, marker=self.h2ms, ls=self.h2ls)
+            ax.plot(radius2, res2[chan].values, color=self.hsc,
+                    label=lab2, alpha=0.7, marker=self.hsms, ls=self.hsls)
+            ax.set_ylabel(chan.replace('_', '\\_'))
+
+#            if len(radius1) > len(radius2):
+#                radius = res1.hs_aero['pos_z'].values[n0:]
+#                x = res2.hs_aero['pos_z'].values[n0:]
+#                y = res2.hs_aero[chan].values[n0:]
+#                qq1 = res1.hs_aero[chan].values[n0:]
+#                qq2 = interpolate.griddata(x, y, radius)
+#            elif len(radius1) < len(radius2):
+#                radius = res2.hs_aero['pos_z'].values[n0:]
+#                x = res1.hs_aero['pos_z'].values[n0:]
+#                y = res1.hs_aero[chan].values[n0:]
+#                qq1 = interpolate.griddata(x, y, radius)
+#                qq2 = res2.hs_aero[chan].values[n0:]
+#            else:
+#                if np.allclose(radius1, radius2):
+#                    radius = res1.hs_aero['pos_z'].values[n0:]
+#                    qq1 = res1.hs_aero[chan].values[n0:]
+#                    qq2 = res2.hs_aero[chan].values[n0:]
+#                else:
+#                    radius = res1.hs_aero['pos_z'].values[n0:]
+#                    x = res2.hs_aero['pos_z'].values[n0:]
+#                    y = res2.hs_aero[chan].values[n0:]
+#                    qq1 = res1.hs_aero[chan].values[n0:]
+#                    qq2 = interpolate.griddata(x, y, radius)
+
+            # calculate moment arm
+            if chan == 'M':
+                arm = res1.M / res1.F_y
+                axr = ax.twinx()
+                labr = lab1 + ' moment arm'
+                axr.plot(radius1, arm, color=self.errc, label=labr, alpha=0.6,
+                         ls=self.errls, marker=self.h2ms)
+            else:
+                # relative errors on the right axes
+                err = np.abs(1.0 - (res1[chan].values / res2[chan].values))*100.0
+                axr = ax.twinx()
+                axr.plot(radius1, err, color=self.errc, ls=self.errls,
+                         alpha=0.6, label=self.errlab)
+                if err.max() > 50:
+                    axr.set_ylim([0, 35])
+
+            # use axr for the legend
+            lines = ax.lines + axr.lines
+            labels = [l.get_label() for l in lines]
+            leg = axr.legend(lines, labels, loc='best')
+            leg.get_frame().set_alpha(0.5)
+        # x-label only on the last row
+        for k in range(ncols):
+            axesflat[-k-1].set_xlabel(xlabel)
+
+        axes = self.set_axes_label_grid(axes)
+        return fig, axes
+
+    def all_h2_channels(self, results, labels, fpath, channels=None):
+        """Results is a list of res (=HAWC2 results object)"""
+
+        for chan, details in results[0].ch_dict.iteritems():
+            if channels is None or chan not in channels:
+                continue
+            resp = []
+            for res in results:
+                resp.append([res.sig[:,0], res.sig[:,details['chi']]])
+
+            fig, axes = self.new_fig(title=chan.replace('_', '\\_'))
+            try:
+                mplutils.time_psd(resp, labels, axes, alphas=[1.0, 0.7], NFFT=None,
+                                   colors=['k-', 'r-'], res_param=250, f0=0, f1=5,
+                                   nr_peaks=10, min_h=15, mark_peaks=False)
+            except Exception as e:
+                print('****** FAILED')
+                print(e)
+                continue
+            axes[0].set_xlim([0,5])
+            axes[1].set_xlim(res.sig[[0,-1],0])
+            fname = os.path.join(fpath, chan + '.png')
+            self.save_fig(fig, axes, fname)
+
+    def h2_blade_distribution(self, fname_1, fname_2, title, labels, n0=0,
+                              df_stats1=None, df_stats2=None):
+        """
+        Compare blade distribution aerodynamics of two HAWC2 cases.
+        """
+        tors1 = fname_1.split('_aero_at_tstop')[0]
+        res1 = self.load_h2(fname_1, h2_df_stats=df_stats1, fname_h2_tors=tors1)
+        tors2 = fname_2.split('_aero_at_tstop')[0]
+        res2 = self.load_h2(fname_2, h2_df_stats=df_stats2, fname_h2_tors=tors2)
+
+        results = [res1.h2_aero[n0+1:], res2.h2_aero[n0+1:]]
+
+        fig, axes = self.distribution(results, labels, title, self.dist_channels,
+                                      x_ax='pos_z', xlabel='Z-coordinate [m]',
+                                      nrows=3, ncols=4, size=self.dist_size)
+
+        return fig, axes
+
+    def hs_blade_distribution(self, fname_1, fname_2, title, labels, n0=0):
+
+        res1 = self.load_hs(fname_1)
+        res2 = self.load_hs(fname_2)
+
+        results = [res1.hs_aero[n0:], res2.hs_aero[n0:]]
+#        channels = ['pos_x', 'pos_y', 'AoA', 'inflow_angle', 'Cl', 'Cd',
+#                    'vrel', 'ax_ind_vel']
+
+        fig, axes = self.distribution(results, labels, title, self.dist_channels,
+                                      x_ax='pos_z', xlabel='Z-coordinate [m]',
+                                      nrows=3, ncols=4, size=self.dist_size)
+
+        return fig, axes
+
+    def blade_distribution(self, fname_h2, fname_hs2, title, n0=0,
+                           h2_df_stats=None, fname_h2_tors=None):
+        """Compare aerodynamics, blade deflections between HAWC2 and HAWCStab2.
+        This is based on HAWCSTab2 *.ind files, and an HAWC2 output_at_time
+        output file.
+        """
+
+        results = MappingsH2HS2()
+        results.blade_distribution(fname_h2, fname_hs2, h2_df_stats=h2_df_stats,
+                                   fname_h2_tors=fname_h2_tors)
+        res = [results.h2_aero[n0+1:-1], results.hs_aero[n0:]]
+
+#        channels = ['pos_x', 'pos_y', 'AoA', 'inflow_angle', 'Cl', 'Cd',
+#                    'vrel', 'ax_ind_vel']
+        labels = ['HAWC2', 'HAWCStab2']
+
+        fig, axes = self.distribution(res, labels, title, self.dist_channels,
+                                      x_ax='pos_z', xlabel='Z-coordinate [m]',
+                                      nrows=3, ncols=4, size=self.dist_size)
+
+        return fig, axes
+
+    def blade_distribution2(self, fname_h2, fname_hs2, title, n0=0):
+        """Compare aerodynamics, blade deflections between HAWC2 and HAWCStab2.
+        This is based on HAWCSTab2 *.ind files, and an HAWC2 output_at_time
+        output file.
+        """
+
+        results = MappingsH2HS2()
+        results.blade_distribution(fname_h2, fname_hs2)
+        res = [results.h2_aero[n0+1:-1], results.hs_aero[n0:]]
+
+        # 12 channels to fill the 3x4 grid of axes
+        channels = ['pos_x', 'pos_y', 'torsion', 'inflow_angle',
+                    'Cl', 'Cd', 'vrel', 'AoA',
+                    'F_x', 'F_y', 'M', 'ax_ind_vel']
+        labels = ['HAWC2', 'HAWCStab2']
+
+        fig, axes = self.distribution(res, labels, title, channels,
+                                      x_ax='pos_z', xlabel='Z-coordinate [m]',
+                                      nrows=3, ncols=4, size=(16, 12))
+
+        return fig, axes
+
+    def powercurve(self, h2_df_stats, fname_hs, title, size=(8.6, 4)):
+
+        results = MappingsH2HS2()
+        results.powercurve(h2_df_stats, fname_hs)
+
+        fig, axes = self.new_fig(title=title, nrows=1, ncols=2, size=size)
+
+        wind_h2 = results.pwr_h2_mean['windspeed'].values
+        wind_hs = results.pwr_hs['windspeed'].values
+
+        # POWER
+        ax = axes[0]
+        key = 'P_aero'
+        # HAWC2
+        yerr = results.pwr_h2_std[key]
+        ax.errorbar(wind_h2, results.pwr_h2_mean[key], color=self.h2c, yerr=yerr,
+                    marker=self.h2ms, ls=self.h2ls, label='HAWC2', alpha=0.9)
+        # HAWCSTAB2
+        ax.plot(wind_hs, results.pwr_hs[key], label='HAWCStab2',
+                alpha=0.7, color=self.hsc, ls=self.hsls, marker=self.hsms)
+        ax.set_title('Power [kW]')
+        # relative errors on the right axes
+        axr = ax.twinx()
+        assert np.allclose(wind_h2, wind_hs)
+        qq1 = results.pwr_h2_mean[key].values
+        qq2 = results.pwr_hs[key].values
+        err = np.abs(1.0 - qq1 / qq2)*100.0
+        axr.plot(wind_hs, err, color=self.errc, ls=self.errls, alpha=0.6,
+                 label=self.errlab)
+#        axr.set_ylabel('absolute error []')
+#        axr.set_ylim([])
+
+        # THRUST
+        ax = axes[1]
+        keys = ['T_aero', 'T_shafttip']
+        lss = [self.h2ls, '--', ':']
+        # HAWC2
+        for key, ls in zip(keys, lss):
+            label = 'HAWC2 %s' % (key.replace('_', '$_{') + '}$')
+            yerr = results.pwr_h2_std[key]
+            c = self.h2c
+            ax.errorbar(wind_h2, results.pwr_h2_mean[key], color=c, ls=ls,
+                        label=label, alpha=0.9, yerr=yerr, marker=self.h2ms)
+        # HAWCStab2
+        ax.plot(wind_hs, results.pwr_hs['T_aero'], color=self.hsc, alpha=0.7,
+                label='HAWCStab2 T$_{aero}$', marker=self.hsms, ls=self.hsls)
+        # relative errors on the right axes
+        axr = ax.twinx()
+        qq1 = results.pwr_h2_mean['T_aero'].values
+        qq2 = results.pwr_hs['T_aero'].values
+        err = np.abs(1.0 - (qq1 / qq2))*100.0
+        axr.plot(wind_hs, err, color=self.errc, ls=self.errls, alpha=0.6,
+                 label=self.errlab)
+        ax.set_title('Thrust [kN]')
+
+        axes = self.set_axes_label_grid(axes, setlegend=True)
+#        # use axr for the legend
+#        lines = [ax.lines[2]] + [ax.lines[5]] + [ax.lines[6]] + axr.lines
+#        labels = keys + ['HAWCStab2 T$_{aero}$', self.errlab]
+#        leg = axr.legend(lines, labels, loc='best')
+#        leg.get_frame().set_alpha(0.5)
+
+        return fig, axes
+
+    def h2_powercurve(self, h2_df_stats1, h2_df_stats2, title, labels,
+                      size=(8.6,4)):
+        res1 = MappingsH2HS2()
+        res1._powercurve_h2(h2_df_stats1)
+        wind1 = res1.pwr_h2_mean['windspeed'].values
+
+        res2 = MappingsH2HS2()
+        res2._powercurve_h2(h2_df_stats2)
+        wind2 = res2.pwr_h2_mean['windspeed'].values
+
+        fig, axes = self.new_fig(title=title, nrows=1, ncols=2, size=size)
+
+        # POWER
+        ax = axes[0]
+        key = 'P_aero'
+        # HAWC2
+        yerr1 = res1.pwr_h2_std[key]
+        ax.errorbar(wind1, res1.pwr_h2_mean[key], color=self.h2c, yerr=yerr1,
+                    marker=self.h2ms, ls=self.h2ls, label=labels[0], alpha=0.9)
+        yerr2 = res2.pwr_h2_std[key]
+        ax.errorbar(wind2, res2.pwr_h2_mean[key], color=self.hsc, yerr=yerr2,
+                    marker=self.hsms, ls=self.hsls, label=labels[1], alpha=0.7)
+        ax.set_title('Power [kW]')
+        # relative errors on the right axes
+        axr = ax.twinx()
+        assert np.allclose(wind1, wind2)
+        qq1 = res1.pwr_h2_mean[key].values
+        qq2 = res2.pwr_h2_mean[key].values
+        err = np.abs(1.0 - qq1 / qq2)*100.0
+        axr.plot(wind1, err, color=self.errc, ls=self.errls, alpha=0.6,
+                 label=self.errlab)
+
+        # THRUST
+        ax = axes[1]
+        keys = ['T_aero', 'T_shafttip']
+        lss = [self.h2ls, '--', ':']
+        for key, ls in zip(keys, lss):
+            label = '%s %s' % (labels[0], key.replace('_', '$_{') + '}$')
+            yerr = res1.pwr_h2_std[key]
+            c = self.h2c
+            ax.errorbar(wind1, res1.pwr_h2_mean[key], color=c, ls=ls,
+                        label=label, alpha=0.9, yerr=yerr, marker=self.h2ms)
+        for key, ls in zip(keys, lss):
+            label = '%s %s' % (labels[1], key.replace('_', '$_{') + '}$')
+            yerr = res2.pwr_h2_std[key]
+            c = self.hsc
+            ax.errorbar(wind2, res2.pwr_h2_mean[key], color=c, ls=ls,
+                        label=label, alpha=0.9, yerr=yerr, marker=self.hsms)
+        # relative errors on the right axes
+        axr = ax.twinx()
+        qq1 = res1.pwr_h2_mean['T_aero'].values
+        qq2 = res2.pwr_h2_mean['T_aero'].values
+        err = np.abs(1.0 - (qq1 / qq2))*100.0
+        axr.plot(wind1, err, color=self.errc, ls=self.errls, alpha=0.6,
+                 label=self.errlab)
+        ax.set_title('Thrust [kN]')
+
+        axes = self.set_axes_label_grid(axes, setlegend=True)
+#        # use axr for the legend
+#        lines = ax.lines + axr.lines
+#        labels = [l.get_label() for l in lines]
+#        leg = axr.legend(lines, labels, loc='best')
+#        leg.get_frame().set_alpha(0.5)
+
+        return fig, axes
+
+    def hs_powercurve(self, fname1, fname2, title, labels, size=(8.6, 4)):
+
+        res1 = MappingsH2HS2()
+        res1._powercurve_hs2(fname1)
+        wind1 = res1.pwr_hs['windspeed'].values
+
+        res2 = MappingsH2HS2()
+        res2._powercurve_hs2(fname2)
+        wind2 = res2.pwr_hs['windspeed'].values
+
+        fig, axes = self.new_fig(title=title, nrows=1, ncols=2, size=size)
+
+        # POWER
+        ax = axes[0]
+        key = 'P_aero'
+        ax.plot(wind1, res1.pwr_hs['P_aero'], label=labels[0],
+                alpha=0.9, color=self.h2c, ls=self.h2ls, marker=self.h2ms)
+        ax.plot(wind2, res2.pwr_hs['P_aero'], label=labels[1],
+                alpha=0.7, color=self.hsc, ls=self.hsls, marker=self.hsms)
+        ax.set_title('Power [kW]')
+        # relative errors on the right axes
+        axr = ax.twinx()
+        assert np.allclose(wind1, wind2)
+        qq1 = res1.pwr_hs[key].values
+        qq2 = res2.pwr_hs[key].values
+        err = np.abs(1.0 - qq1 / qq2)*100.0
+        axr.plot(wind1, err, color=self.errc, ls=self.errls, alpha=0.6,
+                 label=self.errlab)
+#        axr.set_ylim([])
+
+        # THRUST
+        ax = axes[1]
+        ax.plot(wind1, res1.pwr_hs['T_aero'], color=self.h2c, alpha=0.9,
+                label=labels[0], marker=self.h2ms, ls=self.h2ls)
+        ax.plot(wind2, res2.pwr_hs['T_aero'], color=self.hsc, alpha=0.7,
+                label=labels[1], marker=self.hsms, ls=self.hsls)
+        # relative errors on the right axes
+        axr = ax.twinx()
+        qq1 = res1.pwr_hs['T_aero'].values
+        qq2 = res2.pwr_hs['T_aero'].values
+        err = np.abs(1.0 - (qq1 / qq2))*100.0
+        axr.plot(wind1, err, color=self.errc, ls=self.errls, alpha=0.6,
+                 label=self.errlab)
+        ax.set_title('Thrust [kN]')
+
+        axes = self.set_axes_label_grid(axes, setlegend=True)
+#        # use axr for the legend
+#        lines = ax.lines + axr.lines
+#        labels = [l.get_label() for l in lines]
+#        leg = axr.legend(lines, labels, loc='best')
+#        leg.get_frame().set_alpha(0.5)
+
+        return fig, axes
+
+
+if __name__ == '__main__':
+
+    dummy = None
diff --git a/wetb/prepost/hawcstab2.py b/wetb/prepost/hawcstab2.py
new file mode 100644
index 0000000000000000000000000000000000000000..60240bd047c7b779467ec4d1e0c12488804f12d6
--- /dev/null
+++ b/wetb/prepost/hawcstab2.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jan 14 14:12:58 2014
+
+@author: dave
+"""
+
+from __future__ import print_function
+from __future__ import division
+import unittest
+import os
+import re
+
+import numpy as np
+import pandas as pd
+
+import mplutils
+
+
+class dummy:
+    def __init__(self):
+        pass
+
+
+def ReadFileHAWCStab2Header(fname, widths):
+    """
+    Read a file with a weird HAWCStab2 header that starts with a #, and
+    includes the column number and units between square brackets.
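+
+    A minimal usage sketch (the file name is just an example). Note that the
+    column widths are currently chosen from the header length itself, so the
+    widths argument is effectively ignored:
+
+    >>> df, units = ReadFileHAWCStab2Header('some_case.pwr', [20]*15)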
+    """
+
+    regex = re.compile('(\\[.*?\\])')
+
+    def _newformat(fname):
+        df = pd.read_fwf(fname, header=0, widths=[20]*15)
+        # find all units
+        units = regex.findall(''.join(df.columns))
+        df.columns = [k[:-2].replace('#', '').strip() for k in df.columns]
+        return df, units
+
+    def _oldformat(fname):
+        df = pd.read_fwf(fname, header=0, widths=[14]*13)
+        # find all units
+        units = regex.findall(''.join(df.columns))
+        df.columns = [k.replace('#', '').strip() for k in df.columns]
+        return df, units
+
+    with open(fname) as f:
+        line = f.readline()
+
+    if len(line) > 200:
+        return _newformat(fname)
+    else:
+        return _oldformat(fname)
+
+
+class InductionResults(object):
+    def __init__(self):
+        pass
+    def read(self, fname):
+        self.data = np.loadtxt(fname)
+        self.wsp = int(fname.split('_u')[-1][:-4]) / 1000.0
+        self.df_data = pd.read_fwf(fname, header=0, widths=[14]*34)
+        # sanitize the headers
+        cols = self.df_data.columns
+        self.df_data.columns = [k[:-2].replace('#', '').strip() for k in cols]
+
+
+class results(object):
+    """
+    Loading HAWCStab2 result files
+    """
+
+    def __init__(self):
+        pass
+
+    def load_pwr(self, fname):
+        pwr = np.loadtxt(fname)
+
+        res = dummy()
+
+        res.wind = pwr[:,0]
+        res.power = pwr[:,1]
+        res.thrust = pwr[:,2]
+        res.cp = pwr[:,3]
+        res.ct = pwr[:,4]
+        res.pitch_deg = pwr[:,8]
+        res.rpm = pwr[:,9]
+
+        return res
+
+    def load_pwr_df(self, fname):
+        return ReadFileHAWCStab2Header(fname, [20]*15)
+
+    def load_cmb(self, fname):
+        cmb = np.loadtxt(fname)
+        # when there is only data for one operating condition we only have one
+        # row and consequently only a 1D array
+        if len(cmb.shape) == 1:
+            cmb = cmb.reshape( (1, cmb.shape[0]) )
+        wind = cmb[:,0]
+        ii = int((cmb.shape[1]-1)/2)
+        freq = cmb[:,1:ii+1]
+        damp = cmb[:,ii+1:]
+
+        return wind, freq, damp
+
+    def load_cmb_df(self, fname):
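+        """Return the Campbell diagram data in long format: one row per
+        combination of operating point and mode.
+
+        A minimal usage sketch (the file name is taken from the unit tests of
+        this module; the operating point column is named after the file
+        header, e.g. wind_ms):
+
+        >>> df = results().load_cmb_df('data/campbell_wind.cmb')
+        >>> df[df['mode'] == 1]  # frequency/damping of mode 1 at each speed
+        """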
+        # index name can be rotor speed or wind speed
+        with open(fname) as f:
+            header = f.readline()
+        oper_name = header.split('1')[0].strip().replace('#', '').lower()
+        oper_name = oper_name.replace(' ', '').replace('[', '_')[:-1]
+        oper_name = oper_name.replace('/', '')
+
+        speed, freq, damp = self.load_cmb(fname)
+        mods = freq.shape[1]
+        ops = freq.shape[0]
+
+        df = pd.DataFrame(columns=[oper_name, 'Fd_hz', 'damp_ratio', 'mode'])
+        df['Fd_hz'] = freq.flatten()
+        df['damp_ratio'] = damp.flatten()
+        # now each mode number is a row so that means that each operating
+        # point is now repeated as many times as there are modes
+        df[oper_name] = speed.repeat(mods)
+        modes = np.arange(1, mods+1, 1)
+        df['mode'] = modes.reshape((1,mods)).repeat(ops, axis=0).flatten()
+
+        return df
+
+    def load_frf(self, fname, nr_inputs=3):
+        frf = np.loadtxt(fname)
+
+        self.nr_outputs = ((frf.shape[1] - 1) / 2) / nr_inputs
+        self.nr_inputs = nr_inputs
+
+        return frf
+
+    def load_ind(self, fname):
+        self.ind = InductionResults()
+        self.ind.read(fname)
+
+    def load_operation(self, fname):
+
+        operation = np.loadtxt(fname, skiprows=1)
+        # when the array is empty, set operation to an empty DataFrame
+        if len(operation) == 0:
+            cols = ['windspeed', 'pitch_deg', 'rotorspeed_rpm']
+            self.operation = pd.DataFrame(columns=cols)
+            return
+        # when there is only one data point the array is 1D, but we always
+        # need a 2D array, otherwise the columns become rows in the DataFrame
+        elif len(operation.shape) == 1:
+            operation = operation.reshape((1, operation.shape[0]))
+        try:
+            cols = ['windspeed', 'pitch_deg', 'rotorspeed_rpm']
+            self.operation = pd.DataFrame(operation, columns=cols)
+        except ValueError:
+            cols = ['windspeed', 'pitch_deg', 'rotorspeed_rpm', 'P_aero',
+                    'T_aero']
+            self.operation = pd.DataFrame(operation, columns=cols)
+
+    def write_ae_sections_h2(self):
+        """
+        Get the aerosection positions from the HS2 ind result file and
+        write them as outputs for HAWC2
+        """
+        # FIXME: not implemented yet; the aero section positions are
+        # available in self.ind after calling load_ind()
+        self.ind
+
+    def plot_pwr(self, figname, fnames, labels=[], figsize=(11,7.15), dpi=120):
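+        """Plot power, pitch, thrust and the power/thrust coefficients of up
+        to four HAWCStab2 pwr result files in one figure.
+
+        A minimal usage sketch (the file names are just examples):
+
+        >>> results().plot_pwr('figs/compare_pwr.png',
+        ...                    ['case_a.pwr', 'case_b.pwr'],
+        ...                    labels=['case A', 'case B'])
+        """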
+
+        results = []
+        if isinstance(fnames, list):
+            if len(fnames) > 4:
+                raise ValueError('compare up to maximum 4 HawcStab2 cases')
+            for fname in fnames:
+                results.append(self.load_pwr(fname))
+                # if the labels are not defined, take the file name
+                if len(labels) < len(fnames):
+                    labels.append(os.path.basename(fname))
+        else:
+            results.append(self.load_pwr(fnames))
+            if len(labels) < 1:
+                labels.append(os.path.basename(fnames))
+
+        colors = list('krbg')
+        symbols = list('o<+x')
+        alphas = [1.0, 0.9, 0.8, 0.75]
+
+        fig, axes = mplutils.subplots(nrows=2, ncols=2, figsize=figsize,
+                                       dpi=dpi, num=0)
+        for i, res in enumerate(results):
+            ax = axes[0,0]
+            ax.plot(res.wind, res.power, color=colors[i], label=labels[i],
+                    marker=symbols[i], ls='-', alpha=alphas[i])
+            ax.set_title('Aerodynamic Power [kW]')
+
+            ax = axes[0,1]
+            ax.plot(res.wind, res.pitch_deg, color=colors[i], label=labels[i],
+                    marker=symbols[i], ls='-', alpha=alphas[i])
+            ax.set_title('Pitch [deg]')
+
+            ax = axes[1,0]
+            ax.plot(res.wind, res.thrust, color=colors[i], label=labels[i],
+                    marker=symbols[i], ls='-', alpha=alphas[i])
+            ax.set_title('Thrust [kN]')
+
+            ax = axes[1,1]
+            ax.plot(res.wind, res.cp, label='$C_p$ %s ' % labels[i], ls='-',
+                    color=colors[i], marker=symbols[i], alpha=alphas[i])
+            ax.plot(res.wind, res.ct, label='$C_t$ %s ' % labels[i], ls='--',
+                    color=colors[i], marker=symbols[i], alpha=alphas[i])
+            ax.set_title('Power and Thrust coefficients [-]')
+
+        for ax in axes.ravel():
+            ax.legend(loc='best')
+            ax.grid(True)
+            ax.set_xlim([res.wind[0], res.wind[-1]])
+        fig.tight_layout()
+
+        print('saving figure: %s ... ' % figname, end='')
+        figpath = os.path.dirname(figname)
+        if not os.path.exists(figpath):
+            os.makedirs(figpath)
+        fig.savefig(figname)
+        fig.clear()
+        print('done!')
+
+
+class hs2_control_tuning(object):
+
+    def __init__(self):
+        """
+        """
+        pass
+
+    def parse_line(self, line, controller):
+
+        split1 = line.split('=')
+        var1 = split1[0].strip()
+        try:
+            val1 = float(split1[1].split('[')[0])
+            attr = getattr(self, controller)
+            setattr(attr, var1, val1)
+
+            if len(split1) > 2:
+                var2 = split1[1].split(',')[1].strip()
+                val2 = float(split1[2].split('[')[0])
+                setattr(attr, var2, val2)
+        except IndexError:
+            pass
+
+    def read_parameters(self, fpath):
+        """
+        Read the controller tuning file
+        ===============================
+
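+        A minimal usage sketch (the file name is just an example; the same
+        data files are used in the unit tests below):
+
+        >>> tuning = hs2_control_tuning()
+        >>> tuning.read_parameters('data/controller_input_linear.txt')
+        >>> tuning.pi_gen_reg2.Kp
+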
+        """
+
+        with open(fpath, "r") as f:
+            for i, line in enumerate(f):
+                if i == 0:
+                    controller = 'pi_gen_reg1'
+                    setattr(self, controller, dummy())
+                elif i == 2:
+                    controller = 'pi_gen_reg2'
+                    setattr(self, controller, dummy())
+                elif i == 6:
+                    controller = 'pi_pitch_reg3'
+                    setattr(self, controller, dummy())
+                elif i == 10:
+                    controller = 'aero_damp'
+                    setattr(self, controller, dummy())
+                else:
+                    self.parse_line(line, controller)
+
+        # set some parameters to zero for the linear case
+        if not hasattr(self.pi_pitch_reg3, 'K2'):
+            setattr(self.pi_pitch_reg3, 'K2', 0.0)
+        if not hasattr(self.aero_damp, 'Ko2'):
+            setattr(self.aero_damp, 'Ko2', 0.0)
+
+
+class tests(unittest.TestCase):
+    """
+    """
+
+    def setUp(self):
+        self.fpath_linear = 'data/controller_input_linear.txt'
+        self.fpath_quadratic = 'data/controller_input_quadratic.txt'
+
+    def test_cmb_df(self):
+        fname1 = 'data/campbell_wind.cmb'
+        speed, freq, damp = results().load_cmb(fname1)
+
+        df = results().load_cmb_df(fname1)
+        #mods = freq.shape[1]
+        ops = freq.shape[0]
+
+        self.assertEqual(len(speed), ops)
+
+        for k in range(ops):
+            df_oper = df[df['wind_ms']==speed[k]]
+            np.testing.assert_allclose(freq[k,:], df_oper['Fd_hz'].values)
+            np.testing.assert_allclose(damp[k,:], df_oper['damp_ratio'].values)
+            np.testing.assert_allclose(np.arange(1,len(df_oper)+1), df_oper['mode'])
+            self.assertEqual(len(df_oper['wind_ms'].unique()), 1)
+            self.assertEqual(df_oper['wind_ms'].unique()[0], speed[k])
+
+    def test_linear_file(self):
+
+        hs2 = hs2_control_tuning()
+        hs2.read_parameters(self.fpath_linear)
+
+        self.assertEqual(hs2.pi_gen_reg1.K, 0.108313E+07)
+
+        self.assertEqual(hs2.pi_gen_reg2.I, 0.307683E+08)
+        self.assertEqual(hs2.pi_gen_reg2.Kp, 0.135326E+08)
+        self.assertEqual(hs2.pi_gen_reg2.Ki, 0.303671E+07)
+
+        self.assertEqual(hs2.pi_pitch_reg3.Kp, 0.276246E+01)
+        self.assertEqual(hs2.pi_pitch_reg3.Ki, 0.132935E+01)
+        self.assertEqual(hs2.pi_pitch_reg3.K1, 5.79377)
+        self.assertEqual(hs2.pi_pitch_reg3.K2, 0.0)
+
+        self.assertEqual(hs2.aero_damp.Kp2, 0.269403E+00)
+        self.assertEqual(hs2.aero_damp.Ko1, -4.21472)
+        self.assertEqual(hs2.aero_damp.Ko2, 0.0)
+
+    def test_quadratic_file(self):
+
+        hs2 = hs2_control_tuning()
+        hs2.read_parameters(self.fpath_quadratic)
+
+        self.assertEqual(hs2.pi_gen_reg1.K, 0.108313E+07)
+
+        self.assertEqual(hs2.pi_gen_reg2.I, 0.307683E+08)
+        self.assertEqual(hs2.pi_gen_reg2.Kp, 0.135326E+08)
+        self.assertEqual(hs2.pi_gen_reg2.Ki, 0.303671E+07)
+
+        self.assertEqual(hs2.pi_pitch_reg3.Kp, 0.249619E+01)
+        self.assertEqual(hs2.pi_pitch_reg3.Ki, 0.120122E+01)
+        self.assertEqual(hs2.pi_pitch_reg3.K1, 7.30949)
+        self.assertEqual(hs2.pi_pitch_reg3.K2, 1422.81187)
+
+        self.assertEqual(hs2.aero_damp.Kp2, 0.240394E-01)
+        self.assertEqual(hs2.aero_damp.Ko1, -1.69769)
+        self.assertEqual(hs2.aero_damp.Ko2, -15.02688)
+
+
+if __name__ == '__main__':
+
+    unittest.main()
diff --git a/wetb/prepost/misc.py b/wetb/prepost/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d51dd8bffc0c30740eec10dba1f1633e640732b
--- /dev/null
+++ b/wetb/prepost/misc.py
@@ -0,0 +1,1141 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Sep 27 11:09:04 2012
+
+Library for general stuff
+
+@author: dave
+"""
+
+from __future__ import print_function
+#print(*objects, sep=' ', end='\n', file=sys.stdout)
+import os
+import sys
+import shutil
+import unittest
+import pickle
+
+#from xlrd import open_workbook
+import numpy as np
+import scipy as sp
+from scipy import optimize as opt
+from scipy import stats
+#import scipy.interpolate
+#import scipy.ndimage
+from matplotlib import pyplot as plt
+import pandas as pd
+
+
+class Logger:
+    """The Logger class can be used to redirect standard output to a log file.
+    Usage: Create a Logger object and redirect standard output to the Logger
+    object.  For example:
+    output = Logger(file_handle, True)
+    import sys
+    sys.stdout = output
+    """
+
+    def __init__(self, logFile, echo):
+        """Arguments:
+        logFile     a file object that is available for writing
+        echo        Boolean.  If True, output is sent to standard output in
+                    addition to the log file.
+        """
+        import sys
+        self.out = sys.stdout
+        self.logFile = logFile
+        self.echo = echo
+
+    def write(self, s):
+        """Required method that replaces stdout. You don't have to call this
+        directly--all print statements will be redirected here."""
+        self.logFile.write(s)
+        if self.echo:
+            self.out.write(s)
+        self.logFile.flush()
+
+
+def print_both(f, text, end='\n'):
+    """
+    Print both to a file and the console
+    """
+    print(text)
+    if isinstance(f, file):
+        f.write(text + end)
+
+def unique(s):
+    """
+    SOURCE: http://code.activestate.com/recipes/52560/
+    AUTHOR: Tim Peters
+
+    Return a list of the elements in s, but without duplicates.
+
+    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
+    unique("abcabc") some permutation of ["a", "b", "c"], and
+    unique(([1, 2], [2, 3], [1, 2])) some permutation of
+    [[2, 3], [1, 2]].
+
+    For best speed, all sequence elements should be hashable.  Then
+    unique() will usually work in linear time.
+
+    If not possible, the sequence elements should enjoy a total
+    ordering, and if list(s).sort() doesn't raise TypeError it's
+    assumed that they do enjoy a total ordering.  Then unique() will
+    usually work in O(N*log2(N)) time.
+
+    If that's not possible either, the sequence elements must support
+    equality-testing.  Then unique() will usually work in quadratic
+    time.
+    """
+
+    n = len(s)
+    if n == 0:
+        return []
+
+    # Try using a dict first, as that's the fastest and will usually
+    # work.  If it doesn't work, it will usually fail quickly, so it
+    # usually doesn't cost much to *try* it.  It requires that all the
+    # sequence elements be hashable, and support equality comparison.
+    u = {}
+    try:
+        for x in s:
+            u[x] = 1
+    except TypeError:
+        del u  # move on to the next method
+    else:
+        return u.keys()
+
+    # We can't hash all the elements.  Second fastest is to sort,
+    # which brings the equal elements together; then duplicates are
+    # easy to weed out in a single pass.
+    # NOTE:  Python's list.sort() was designed to be efficient in the
+    # presence of many duplicate elements.  This isn't true of all
+    # sort functions in all languages or libraries, so this approach
+    # is more effective in Python than it may be elsewhere.
+    try:
+        t = list(s)
+        t.sort()
+    except TypeError:
+        del t  # move on to the next method
+    else:
+        assert n > 0
+        last = t[0]
+        lasti = i = 1
+        while i < n:
+            if t[i] != last:
+                t[lasti] = last = t[i]
+                lasti += 1
+            i += 1
+        return t[:lasti]
+
+    # Brute force is all that's left.
+    u = []
+    for x in s:
+        if x not in u:
+            u.append(x)
+    return u
+
+def CoeffDeter(obs, model):
+    """
+    Coefficient of determination
+    ============================
+
+    https://en.wikipedia.org/wiki/Coefficient_of_determination
+
+    Parameters
+    ----------
+
+    obs : ndarray(n) or list
+        The observed dataset
+
+    model : ndarray(n), list or scalar
+        The fitted dataset
+
+    Returns
+    -------
+
+    R2 : float
+        The coefficient of determination: 1 for a perfect fit, lower (and
+        possibly negative) values for increasingly poor fits
+
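+    Examples
+    --------
+
+    An illustrative call (the arrays are made up for the example):
+
+    >>> obs = np.array([1.0, 2.1, 2.9, 4.2])
+    >>> model = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> CoeffDeter(obs, model)  # close to 1 for a good fit
+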
+    """
+
+    if type(obs).__name__ == 'list':
+        obs = np.array(obs)
+
+    SS_tot = np.sum(np.power( (obs - obs.mean()), 2 ))
+    SS_err = np.sum(np.power( (obs - model), 2 ))
+    R2 = 1 - (SS_err/SS_tot)
+
+    return R2
+
+
+def calc_sample_rate(time, rel_error=1e-4):
+    """
+    The sample rate should be constant throughout the measurement series.
+    rel_error defines the maximum allowable relative error on the local
+    sample rate, by default 1e-4 (= 0.01%).
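+
+    An illustrative call on a made-up 100 Hz time vector:
+
+    >>> calc_sample_rate(np.arange(0, 10, 0.01))  # ~100.0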
+    """
+    deltas = np.diff(time)
+    # the sample rate should be constant throughout the measurement series
+    # define the maximum allowable relative error on the local sample rate
+    if not (deltas.max() - deltas.min())/deltas.max() <  rel_error:
+        print('Sample rate not constant, max, min values:', end='')
+        print('%1.6f, %1.6f' % (1/deltas.max(), 1/deltas.min()))
+#        raise AssertionError
+    return 1/deltas.mean()
+
+def findIntersection(fun1, fun2, x0):
+    """
+    Find Intersection points of two functions
+    =========================================
+
+    Find the intersection between two random callable functions.
+    The other alternative is that they are not callable, but are just numpy
+    arrays describing the functions.
+
+    Parameters
+    ----------
+
+    fun1 : callable
+        Function 1, should return a scalar and have one argument
+
+    fun2 : callable
+        Function 2, should return a scalar and have one argument
+
+    x0 : float
+        Initial guess for sp.optimize.fsolve
+
+    Returns
+    -------
+
+    x : ndarray
+        Solution of fun1(x) = fun2(x) as found by sp.optimize.fsolve when
+        starting from the initial guess x0.
+
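+    Examples
+    --------
+
+    An illustrative sketch with two simple functions (chosen only for the
+    example):
+
+    >>> f1 = lambda x: x**2
+    >>> f2 = lambda x: x + 2.0
+    >>> findIntersection(f1, f2, x0=1.0)  # should converge to x=2
+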
+    """
+    return sp.optimize.fsolve(lambda x : fun1(x) - fun2(x), x0)
+
+# TODO: replace this with some of the pyrain functions
+def find0(array, xi=0, yi=1, verbose=False, zerovalue=0.0):
+    """
+    Find single zero crossing
+    =========================
+
+    Find the point where a x-y dataset crosses zero. This method can only
+    handle one zero crossing point.
+
+    Parameters
+    ----------
+    array : ndarray
+        should be 2D, with at least 2 columns and 2 rows
+
+    xi : int, default=0
+        index of the x values on array[:,xi]
+
+    yi : int, default=1
+        index of the y values on array[:,yi]
+
+    zerovalue : float, default=0
+        Set to a non-zero value to find the corresponding crossing.
+
+    verbose : boolean, default=False
+        if True intermediate results are printed. Useful for debugging
+
+    Returns
+    -------
+    y0 : float
+        if no x0=0 exists, the result will be an interpolation between
+        the two points around 0.
+
+    y0i : int
+        index leading to y0 in the input array. In case y0 was the
+        result of an interpolation, the result is the one closest to x0=0
+
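+    Examples
+    --------
+
+    An illustrative call on a made-up dataset that crosses zero between the
+    second and third point:
+
+    >>> data = np.array([[-2.0, 1.0], [-1.0, 2.0], [1.0, 4.0], [2.0, 5.0]])
+    >>> find0(data)  # interpolates to y0=3.0 at x=0, returns index 1
+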
+    """
+
+    # Determine the two points where aoa=0 lies in between
+    # take all the negative values, the maximum is the one closest to 0
+    try:
+        neg0i = np.abs(array[array[:,xi].__le__(zerovalue),xi]).argmax()
+    # This method will fail if there is no zero crossing (not enough data)
+    # in other words: does the given data range span from negative, to zero to
+    # positive?
+    except ValueError:
+        print('Given data range does not include zero crossing.')
+        return 0,0
+
+    # find the points closest to zero, sort on absolute values
+    isort = np.argsort(np.abs(array[:,xi]-zerovalue))
+    if verbose:
+        print(array[isort,:])
+    # find the points closest to zero on both ends of the axis
+    neg0i = isort[0]
+    sign = int(np.sign(array[neg0i,xi]))
+    # only search the first 20 points closest to zero
+    for i in xrange(1, 20):
+        # the first time the sign switches we have found the other side
+        if int(np.sign(array[isort[i], xi])) != sign:
+            pos0i = isort[i]
+            break
+
+    try:
+        pos0i
+    except NameError:
+        print('Given data range does not include zero crossing.')
+        return 0,0
+
+    # find the value closest to zero on the positive side
+#    pos0i = neg0i +1
+
+    if verbose:
+        print('0_negi, 0_posi', neg0i, pos0i)
+        print('x[neg0i], x[pos0i]', array[neg0i,xi], array[pos0i,xi])
+
+    # check if x=0 is an actual point of the series
+    if np.allclose(array[neg0i,xi], 0):
+        y0 = array[neg0i,yi]
+        if verbose:
+            prec = ' 01.08f'
+            print('y0:', format(y0, prec))
+            print('x0:', format(array[neg0i,xi], prec))
+    # check if x=0 is an actual point of the series
+    elif np.allclose(array[pos0i,xi], 0):
+        y0 = array[pos0i,yi]
+        if verbose:
+            prec = ' 01.08f'
+            print('y0:', format(y0, prec))
+            print('x0:', format(array[pos0i,xi], prec))
+    # if not very close to zero, interpolate to find the zero point
+    else:
+        y1 = array[neg0i,yi]
+        y2 = array[pos0i,yi]
+        x1 = array[neg0i,xi]
+        x2 = array[pos0i,xi]
+        y0 = (-x1*(y2-y1)/(x2-x1)) + y1
+
+        if verbose:
+            prec = ' 01.08f'
+            print('y0:', format(y0, prec))
+            print('y1, y2', format(y1, prec), format(y2, prec))
+            print('x1, x2', format(x1, prec), format(x2, prec))
+
+    # return the index closest to the value of AoA zero
+    if abs(array[neg0i,0]) > abs(array[pos0i,0]):
+        y0i = pos0i
+    else:
+        y0i = neg0i
+
+    return y0, y0i
+
+def remove_items(list, value):
+    """Remove items from list
+    The given list will be returned without the items equal to value.
+    Empty ('') is allowed, so this is an extension of list.remove().
+    """
+    # remove list entries who are equal to value
+    ind_del = []
+    for i in xrange(len(list)):
+        if list[i] == value:
+            # add item at the beginning of the list
+            ind_del.insert(0, i)
+
+    # remove only when there is something to remove
+    if len(ind_del) > 0:
+        for k in ind_del:
+            del list[k]
+
+    return list
+
+class DictDB(object):
+    """
+    A dictionary based database class
+    =================================
+
+    Each tag corresponds to a row and each value holds another tag holding
+    the tables values, or for the current row the column values.
+
+    Each tag should hold a dictionary for which the subtags are the same for
+    each row entry. Otherwise you have columns appearing and disappearing,
+    which is not how a database is expected to behave.
+    """
+
+    def __init__(self, dict_db):
+        """
+        """
+        # TODO: data checks to see if the dict can qualify as a database
+        # in this context
+
+        self.dict_db = dict_db
+
+    def search(self, dict_search):
+        """
+        Search a dictionary based database
+        ==================================
+
+        Searching based on keys having a certain value.
+
+        Parameters
+        ----------
+
+        dict_search : dictionary
+            Keys are the column names. If the values match the ones in the
+            database, the respective row gets selected. Each tag is hence
+            a unique row identifier. When the value is a list (or a set),
+            a match with any of its entries is sufficient.
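+
+        Example
+        -------
+
+        An illustrative search (database and tag names are made up for the
+        example):
+
+        >>> db = DictDB({'case1': {'[windspeed]': 8, '[dlc]': 'dlc12'},
+        ...              'case2': {'[windspeed]': 10, '[dlc]': 'dlc12'}})
+        >>> db.search({'[windspeed]': 8})
+
+        which leaves only 'case1' in db.dict_sel.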
+        """
+        self.dict_sel = dict()
+
+        # browse through all the rows
+        for row in self.dict_db:
+            # and for each search value, check if the row holds the requested
+            # column value
+            init = True
+            alltrue = True
+            for col_search, val_search in dict_search.items():
+                # for backwards compatibility, convert val_search to list
+                if not type(val_search).__name__ in ['set', 'list']:
+                    # conversion to set is more costly than what you gain
+                    # by target in set([]) compared to target in []
+                    # conclusion: keep it as a list
+                    val_search = [val_search]
+
+                # all items should be true
+                # if the key doesn't exists, it is not to be considered
+                try:
+                    if self.dict_db[row][col_search] in val_search:
+                        if init or alltrue:
+                            alltrue = True
+                    else:
+                        alltrue = False
+                except KeyError:
+                    alltrue = False
+                init = False
+            # all search criteria match, save the row
+            if alltrue:
+                self.dict_sel[row] = self.dict_db[row]
+
+    # TODO: merge with search into a more general search/select method?
+    # shouldn't I be moving to a proper database with queries?
+    def search_key(self, dict_search):
+        """
+        Search for a string in dictionary keys
+        ======================================
+
+        Searching based on the key of the dictionaries, not the values
+
+        Parameters
+        ----------
+
+        dict_search : dict
+            As key the search string, as value the operator: True for inclusive
+            and False for exclusive. Operator is AND.
+
+        """
+
+        self.dict_sel = dict()
+
+        # browse through all the rows
+        for row in self.dict_db:
+            # and see for each row if its name contains the search strings
+            init = True
+            alltrue = True
+            for col_search, inc_exc in dict_search.iteritems():
+                # is it inclusive the search string or exclusive?
+                if (row.find(col_search) > -1) == inc_exc:
+                    if init:
+                        alltrue = True
+                else:
+                    alltrue = False
+                    break
+                init = False
+            # all search criteria matched, save the row
+            if alltrue:
+                self.dict_sel[row] = self.dict_db[row]
+
+class DictDiff(object):
+    """
+    Calculate the difference between two dictionaries as:
+    (1) items added
+    (2) items removed
+    (3) keys same in both but changed values
+    (4) keys same in both and unchanged values
+
+    Source
+    ------
+
+    The basic idea of the magic is based on the following StackOverflow question:
+    http://stackoverflow.com/questions/1165352/
+    fast-comparison-between-two-python-dictionary
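+
+    Example
+    -------
+
+    A small illustrative comparison (the dictionaries are made up for the
+    example):
+
+    >>> diff = DictDiff({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
+    >>> diff.added(), diff.removed(), diff.changed()
+
+    which reports 'a' as added, 'c' as removed and 'b' as changed.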
+    """
+    def __init__(self, current_dict, past_dict):
+        self.current_d = current_dict
+        self.past_d    = past_dict
+        self.set_current  = set(current_dict.keys())
+        self.set_past     = set(past_dict.keys())
+        self.intersect    = self.set_current.intersection(self.set_past)
+    def added(self):
+        return self.set_current - self.intersect
+    def removed(self):
+        return self.set_past - self.intersect
+    def changed(self):
+        #set(o for o in self.intersect if self.past_d[o] != self.current_d[o])
+        # which is similar (except for the ndarray handling) to the loop below
+        olist = []
+        for o in self.intersect:
+            # if we have a numpy array
+            if type(self.past_d[o]).__name__ == 'ndarray':
+                if not np.allclose(self.past_d[o], self.current_d[o]):
+                    olist.append(o)
+            elif self.past_d[o] != self.current_d[o]:
+                olist.append(o)
+        return set(olist)
+
+    def unchanged(self):
+        t=set(o for o in self.intersect if self.past_d[o] == self.current_d[o])
+        return t
+
+def fit_exp(time, data, checkplot=True, method='linear', func=None, C0=0.0):
+    """
+    Note that all values in data have to be positive for the linear fitting
+    method to work!
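+
+    A minimal usage sketch (signal values are made up for the example):
+
+    >>> t = np.linspace(0, 1, 50)
+    >>> y = 2.0*np.exp(-3.0*t) + 0.5
+    >>> fit, A, K, C = fit_exp(t, y, checkplot=False, method='nonlinear')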
+    """
+
+    def fit_exp_linear(t, y, C=0):
+        y = y - C
+        y = np.log(y)
+        K, A_log = np.polyfit(t, y, 1)
+        A = np.exp(A_log)
+        return A, K
+
+    def fit_exp_nonlinear(t, y):
+        # The model function, f(x, ...). It must take the independent variable
+        # as the first argument and the parameters to fit as separate remaining
+        # arguments.
+        opt_parms, parm_cov = sp.optimize.curve_fit(model_func,t,y)
+        A, K, C = opt_parms
+        return A, K, C
+
+    def model_func(t, A, K, C):
+        return A * np.exp(K * t) + C
+
+    # Linear fit
+    if method == 'linear':
+#        if data.min() < 0.0:
+#            msg = 'Linear exponential fitting only works for positive values'
+#            raise ValueError, msg
+        A, K = fit_exp_linear(time, data, C=C0)
+        fit = model_func(time, A, K, C0)
+        C = C0
+
+    # Non-linear Fit
+    elif method == 'nonlinear':
+        A, K, C = fit_exp_nonlinear(time, data)
+        fit = model_func(time, A, K, C)
+
+    if checkplot:
+        plt.figure()
+        plt.plot(time, data, 'ro', label='data')
+        plt.plot(time, fit, 'b', label=method)
+        plt.legend(bbox_to_anchor=(0.9, 1.1), ncol=2)
+        plt.grid()
+
+    return fit, A, K, C
+
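+# Minimal usage sketch of fit_exp (not called anywhere in this module). The
+# signal below is made up; checkplot=False avoids the matplotlib dependency.
+# All data values are positive, as required by the linear method.
+def _example_fit_exp():
+    t = np.linspace(0.0, 2.0, 100)
+    y = 2.5 * np.exp(-1.5 * t) + 0.3
+    # C0 is the (assumed known) offset that is subtracted for the linear fit
+    fit, A, K, C = fit_exp(t, y, checkplot=False, method='linear', C0=0.3)
+    return fit, A, K, C
+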
+def curve_fit_exp(time, data, checkplot=True, weights=None):
+    """
+    This code is based on a StackOverflow question/answer:
+    http://stackoverflow.com/questions/3938042/
+    fitting-exponential-decay-with-no-initial-guessing
+
+    A*e**(K*t) + C
+    """
+
+    def fit_exp_linear(t, y, C=0):
+        y = y - C
+        y = np.log(y)
+        K, A_log = np.polyfit(t, y, 1)
+        A = np.exp(A_log)
+        return A, K
+
+    def fit_exp_nonlinear(t, y):
+        # The model function, f(x, ...). It must take the independent variable
+        # as the first argument and the parameters to fit as separate remaining
+        # arguments.
+        opt_parms, parm_cov = sp.optimize.curve_fit(model_func,t,y)
+        A, K, C = opt_parms
+        return A, K, C
+
+    def model_func(t, A, K, C):
+        return A * np.exp(K * t) + C
+
+    C0 = 0
+
+    ## Actual parameters
+    #A0, K0, C0 = 2.5, -4.0, 0.0
+    ## Generate some data based on these
+    #tmin, tmax = 0, 0.5
+    #num = 20
+    #t = np.linspace(tmin, tmax, num)
+    #y = model_func(t, A0, K0, C0)
+    ## Add noise
+    #noisy_y = y + 0.5 * (np.random.random(num) - 0.5)
+
+    # Linear fit
+    A_lin, K_lin = fit_exp_linear(time, data, C=C0)
+    fit_lin = model_func(time, A_lin, K_lin, C0)
+
+    # Non-linear Fit
+    A_nonlin, K_nonlin, C = fit_exp_nonlinear(time, data)
+    fit_nonlin = model_func(time, A_nonlin, K_nonlin, C)
+
+    # and plot
+    if checkplot:
+        plt.figure()
+        plt.plot(time, data, 'ro', label='data')
+        plt.plot(time, fit_lin, 'b', label='linear')
+        plt.plot(time[::-1], fit_nonlin, 'g', label='nonlinear')
+        plt.legend(bbox_to_anchor=(0.9, 1.0), ncol=3)
+        plt.grid()
+
+    return
+
+def convert_to_utf8(filename):
+    # gather the encodings you think that the file may be
+    # encoded inside a tuple
+    encodings = ('windows-1253', 'iso-8859-7', 'macgreek')
+
+    # try to open the file and exit if some IOError occurs
+    try:
+        f = open(filename, 'r').read()
+    except Exception:
+        sys.exit(1)
+
+    # now start iterating in our encodings tuple and try to
+    # decode the file
+    for enc in encodings:
+        try:
+            # try to decode the file with the first encoding
+            # from the tuple.
+            # if it succeeds then it will reach break, so we
+            # will be out of the loop (something we want on
+            # success).
+            # the data variable will hold our decoded text
+            data = f.decode(enc)
+            break
+        except Exception:
+            # if the first encoding fail, then with the continue
+            # keyword will start again with the second encoding
+            # from the tuple an so on.... until it succeeds.
+            # if for some reason it reaches the last encoding of
+            # our tuple without success, then exit the program.
+            if enc == encodings[-1]:
+                sys.exit(1)
+            continue
+
+    # now get the absolute path of our filename and append .bak
+    # to the end of it (for our backup file)
+    fpath = os.path.abspath(filename)
+    newfilename = fpath + '.bak'
+    # and make our backup file with shutil
+    shutil.copy(filename, newfilename)
+
+    # and at last convert it to utf-8
+    f = open(filename, 'w')
+    try:
+        f.write(data.encode('utf-8'))
+    except Exception as e:
+        print(e)
+    finally:
+        f.close()
+
+def to_lower_case(proot):
+    """
+    Rename all the files in the subfolders of proot to lower case, and
+    also the subfolder name when the folder name starts with DLC
+    """
+    # find all dlc definitions in the subfolders
+    for root, dirs, files in os.walk(proot):
+        for fname in files:
+            orig = os.path.join(root, fname)
+            rename = os.path.join(root, fname.lower())
+            os.rename(orig, rename)
+        base = root.split(os.path.sep)[-1]
+        if base[:3] == 'DLC':
+            new = root.replace(base, base.lower())
+            os.rename(root, new)
+
+def read_excel_files(proot, fext='xlsx', pignore=None, sheet=0,
+                     pinclude=None):
+    """
+    Recursively read all MS Excel files with extension "fext". By default
+    only the first sheet (index 0) of each Excel file is considered.
+
+    Parameters
+    ----------
+
+    proot : string
+        Path that will be recursively explored for the presence of files
+        that have file extension "fext"
+
+    fext : string, default='xlsx'
+        File extension of the Excel files that should be loaded
+
+    pignore : string, default=None
+        Exclude a file when its full path contains this string.
+
+    pinclude : string, default=None
+        Only include a file when its full path contains this string.
+
+    sheet : string or int, default=0
+        Name or index of the Excel sheet to be considered. By default, the
+        first sheet (index=0) is taken.
+
+    Returns
+    -------
+
+    df_list : dict
+        A dictionary of pandas DataFrames, keyed on the file path (without
+        the extension). Each DataFrame holds the contents of a single Excel
+        file that was found in proot or one of its sub-directories.
+
+    """
+
+    df_list = {}
+    # find all dlc definitions in the subfolders
+    for root, dirs, files in os.walk(proot):
+        for file_name in files:
+            if not file_name.split('.')[-1] == fext:
+                continue
+            f_target = os.path.join(root, file_name)
+            # if it does not contain pinclude, ignore the dlc
+            if pinclude is not None and f_target.find(pinclude) < 0:
+                continue
+            # if it does contain pignore, ignore the dlc
+            if pignore is not None and f_target.find(pignore) > -1:
+                continue
+            print(f_target, end='')
+            try:
+                xl = pd.ExcelFile(f_target)
+                df = xl.parse(sheet)
+                df_list[f_target.replace('.'+fext, '')] = df
+                print(': successfully included %i case(s)' % len(df))
+            except:
+                print('     XXXXX ERROR COULD NOT READ')
+
+    return df_list
+
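+# Minimal usage sketch of read_excel_files (not called anywhere in this
+# module). The path and the 'backup' filter below are placeholders.
+def _example_read_excel_files():
+    # recursively load all xlsx files, but skip any path containing 'backup'
+    df_dict = read_excel_files('path/to/dlc/definitions', fext='xlsx',
+                               pignore='backup', sheet=0)
+    # keys: file paths without extension, values: pandas DataFrames
+    return df_dict
+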
+def convert_xlsx2csv(fpath, sheet='Sheet1', fext='xlsx'):
+    """
+    Convert xlsx load case definitions to csv so we can track them with git
+    """
+
+    for root, dirs, files in os.walk(fpath):
+        for file_name in files:
+            if not file_name.split('.')[-1] == fext:
+                continue
+            fxlsx = os.path.join(root, file_name)
+            print(fxlsx)
+            xl = pd.ExcelFile(fxlsx)
+            df = xl.parse(sheet)
+            fcsv = fxlsx.replace(fext, 'csv')
+            df.to_csv(fcsv, sep=';')
+
+def check_df_dict(df_dict):
+    """
+    Verify whether a dictionary that will be converted to a pandas DataFrame
+    makes sense: print and return the number of rows for each column.
+    """
+    collens = {}
+    for col, values in df_dict.iteritems():
+        print('%6i : %s' % (len(values), col))
+        collens[col] = len(values)
+    return collens
+
+
+def find_tags(fname):
+    """
+    Find all unique tags in a text file. Not implemented yet.
+    """
+    pass
+
+
+def read_mathematica_3darray(fname, shape=None, data=None, dtype=None):
+    """
+    I am not sure with which Mathematica command you generate this data,
+    but this is the format in which I got it.
+
+    Parameters
+    ----------
+
+    fname : str
+
+    shape : tuple, default=None
+        Tuple with 3 elements, defining the ndarray elements for each of the
+        axes. Only used when data is set to None.
+
+    dtype : dtype, default=None
+        Is used to set the data dtype when data=None.
+
+    data : ndarray, default=None
+        When None, the data array is created according to shape and dtype.
+
+    Returns
+    -------
+
+    data : ndarray
+
+    """
+
+    if data is None:
+        data = np.ndarray(shape, dtype=dtype)
+    else:
+        dtype = data.dtype
+    with open(fname, 'r') as f:
+        for i, line in enumerate(f.readlines()):
+            els = line.split('}","{')
+            for j, row in enumerate(els):
+                row_ = row.replace('{', '').replace('}', '').replace('"', '')
+                data[i,j,:] = np.genfromtxt(row_.split(', '), dtype=dtype)
+
+    return data
+
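+# Illustrative usage sketch of read_mathematica_3darray (not called anywhere
+# in this module). Based on the parsing above, each line of the input file is
+# assumed to look like (placeholder values):
+#   "{1.0, 2.0, 3.0}","{4.0, 5.0, 6.0}"
+# which would end up in data[i,0,:] and data[i,1,:] for line number i.
+def _example_read_mathematica_3darray():
+    fname = 'path/to/mathematica_export.txt'  # placeholder file name
+    return read_mathematica_3darray(fname, shape=(10, 2, 3), dtype=np.float64)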
+
+def CDF(series, sort=True):
+    """
+    Cumulative distribution function
+    ================================
+
+    Cumulative distribution function of the form:
+
+    .. math::
+        CDF(i) = \\frac{i-0.3}{N - 0.9}
+
+    where
+        i : the index of the sorted item in the series
+        N : total number of elements in the series
+    The series will be sorted first when sort=True (default).
+
+    Parameters
+    ----------
+    series : ndarray(N)
+
+    sort : bool, default=True
+        Whether or not to sort the series first.
+
+    Returns
+    -------
+    cdf : ndarray (N,2)
+        Array with the sorted input series on the first column
+        and the cumulative distribution function on the second.
+
+    """
+
+    N = len(series)
+    # column array
+    i_range = np.arange(N)
+    # convert to row array
+    x, i_range = np.meshgrid([1], i_range)
+    # to sort or not to sort the input series
+    if sort:
+        series.sort(axis=0)
+    # convert to row array. Do after sort, otherwise again 1D column array
+    x, series = np.meshgrid([1], series)
+    # cdf array
+    cdf = sp.zeros((N,2))
+    # calculate the actual cdf values
+    cdf[:,1] = (i_range[:,0]-0.3)/(float(N)-0.9)
+    # make sure it is sorted from small to large
+    if abs(series[0,0]) > abs(series[series.shape[0]-1,0]) and series[0,0] < 0:
+        # save in a new variable, otherwise things go wrong!!
+        # if we do series[:,0] = series[::-1,0], we get some kind of mirrored
+        # array
+        series2 = series[::-1,0]
+    else:
+        series2 = series[:,0]
+    # x-channel should be on zero for plotting
+    cdf[:,0] = series2[:]
+
+    return cdf
+
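+# Minimal usage sketch of CDF (not called anywhere in this module). The input
+# series is made up; it assumes the module level numpy import as np.
+def _example_cdf():
+    series = np.random.randn(1000)
+    cdf = CDF(series, sort=True)
+    # cdf[:,0] holds the sorted series, cdf[:,1] the cumulative probabilities
+    return cdf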
+
+def rebin(hist, bins, nrbins):
+    """
+    Assume within a bin, the values are equally distributed. Only works for
+    equally spaced bins.
+    """
+
+    binrange = float(bins[-1] - bins[0])
+    width = np.diff(bins).mean()
+    width_ = binrange / float(nrbins)
+    hist_ = sp.zeros((nrbins))
+    bins_ = np.linspace(bins[0], bins[-1], num=nrbins+1)
+
+    if width_ < width:
+        raise ValueError('you can only rebin to larger bins')
+
+    if not len(hist)+1 == len(bins):
+        raise ValueError('bins should contain the bin edges')
+
+    window, j = width, 0
+#    print('width:', width)
+#    print('j=0')
+    for i, k in enumerate(hist):
+        if window < width_:
+            hist_[j] += hist[i]#*width
+#            print('window=%1.04f' % window, end=' ')
+#            print('(%02i):%1.04f' % (i, hist[i]))
+            window += width
+            if i+1 == len(hist):
+                print()
+        else:
+            w_right = (window - width_) / width
+            w_left = (width - (window - width_)) / width
+            hist_[j] += hist[i]*w_left
+#            print('window=%1.04f' % window, end=' ')
+#            print('(%02i):%1.04f*(%1.02f)' % (i, hist[i], w_left), end=' ')
+#            print('T: %1.04f' % hist_[j])
+            if j+1 >= nrbins:
+                hist_[j] += hist[i]*w_right
+                print('')
+                return hist_, bins_
+            j += 1
+#            print('j=%i' % j)
+#            print('window=%1.04f' % window, end=' ')
+            hist_[j] += hist[i]*w_right
+            window = w_right*width + width
+#            print('(%02i):%1.04f*(%1.02f)' % (i, hist[i], w_right))
+#    print('')
+    return hist_, bins_
+
+
+def histfit(hist, bin_edges, xnew):
+    """
+    This should be similar to the Matlab function histfit:
+    http://se.mathworks.com/help/stats/histfit.html
+
+    Based on:
+    http://nbviewer.ipython.org/url/xweb.geos.ed.ac.uk/~jsteven5/blog/
+    fitting_distributions_from_percentiles.ipynb
+
+    Parameters
+    ----------
+
+    hist : ndarray(n)
+
+    bin_edges : ndarray(n+1)
+
+    xnew : ndarray(k)
+
+    Returns
+    -------
+
+    shape_out
+
+    scale_out
+
+    pdf_fit : ndarray(k)
+
+    """
+
+    # Take the upper edges of the bins. I tried to use the center of the bin
+    # and the left bin edges, but it works best with the right edges.
+    # It only works well when the x data is positive, so force positive x-data
+    x_hist = (bin_edges - bin_edges[0])[1:]
+    y_hist = hist.cumsum()/hist.cumsum().max()  # Normalise the cumulative sum
+
+    # FIT THE DISTRIBUTION
+    (shape_out, scale_out), pcov = opt.curve_fit(
+                lambda xdata, shape, scale: stats.lognorm.cdf(xdata, shape,
+                loc=0, scale=scale), x_hist, y_hist)
+
+    pdf_fit =  stats.lognorm.pdf(xnew, shape_out, loc=0, scale=scale_out)
+    # normalize
+    width = np.diff(x_hist).mean()
+    pdf_fit = pdf_fit / (pdf_fit * width).sum()
+    return shape_out, scale_out, pdf_fit
+
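+# Minimal usage sketch of histfit (not called anywhere in this module). The
+# sample data is made up and assumed to be log-normally distributed.
+def _example_histfit():
+    samples = np.random.lognormal(mean=0.0, sigma=0.5, size=10000)
+    hist, bin_edges = np.histogram(samples, bins=50, density=True)
+    xnew = np.linspace(bin_edges[0], bin_edges[-1], 200)
+    shape_out, scale_out, pdf_fit = histfit(hist, bin_edges, xnew)
+    return shape_out, scale_out, pdf_fit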
+
+def df_dict_check_datatypes(df_dict):
+    """
+    there might be a mix of strings and numbers now, see if we can have
+    the same data type throughout a column
+    nasty hack: because of the unicode -> string conversion we might not
+    overwrite the same key in the dict.
+    """
+    # FIXME: this approach will result in twice the memory usage though...
+    # we can not pop/delete items from a dict while iterating over it
+    df_dict2 = {}
+    for colkey, col in df_dict.iteritems():
+        # if we have a list, convert to string
+        if type(col[0]).__name__ == 'list':
+            for ii, item in enumerate(col):
+                col[ii] = '**'.join(item)
+        # if we already have an array (statistics) or a list of numbers
+        # do not try to cast into another data type, because downcasting
+        # in that case will not raise any exception
+        elif type(col[0]).__name__[:3] in ['flo', 'int', 'nda']:
+            df_dict2[str(colkey)] = np.array(col)
+            continue
+        # in case we have unicodes instead of strings, we need to convert
+        # to strings otherwise the saved .h5 file will have pickled elements
+        try:
+            df_dict2[str(colkey)] = np.array(col, dtype=np.int32)
+        except OverflowError:
+            try:
+                df_dict2[str(colkey)] = np.array(col, dtype=np.int64)
+            except OverflowError:
+                df_dict2[str(colkey)] = np.array(col, dtype=np.float64)
+        except ValueError:
+            try:
+                df_dict2[str(colkey)] = np.array(col, dtype=np.float64)
+            except ValueError:
+                df_dict2[str(colkey)] = np.array(col, dtype=np.str)
+        except TypeError:
+            # in all other cases, make sure we have converted them to
+            # strings and NOT unicode
+            df_dict2[str(colkey)] = np.array(col, dtype=np.str)
+        except Exception as e:
+            print('failed to convert column %s to single data type' % colkey)
+            raise(e)
+    return df_dict2
+
+
+def dict2df(df_dict, fname, save=True, update=False, csv=False, colsort=None,
+            check_datatypes=False, rowsort=None, csv_index=False):
+        """
+        Convert the df_dict to df and save/update if required. If converting
+        to df fails, pickle the object. Optionally save as csv too.
+
+        Parameters
+        ----------
+
+        df_dict : dict
+            Dictionary that will be converted to a DataFrame
+
+        fname : str
+            File name excluding the extension. .pkl, .h5 and/or .csv will be
+            added.
+        """
+        if check_datatypes:
+            df_dict = df_dict_check_datatypes(df_dict)
+
+        # in case converting to dataframe fails, fall back
+        try:
+            if colsort is not None:
+                dfs = pd.DataFrame(df_dict)[colsort]
+#                try:
+#                    dfs = dfs[colsort]
+#                except KeyError as e:
+#                    print('Re-ordering the columns failed. colsort cols are:')
+#                    print(sorted(colsort))
+#                    print('Actual columns:')
+#                    print(sorted(dfs.keys()))
+#                    print('&', set(colsort) & set(dfs.keys()))
+#                    print('-', set(colsort) - set(dfs.keys()))
+#                    raise e
+            else:
+                dfs = pd.DataFrame(df_dict)
+        except Exception as e:
+            print('failed to convert to data frame', end='')
+            if fname is not None:
+                with open(fname + '.pkl', 'wb') as f:
+                    pickle.dump(df_dict, f, protocol=2)
+                print(', saved as dict')
+            else:
+                print('')
+            print('df_dict has following columns and corresponding nr of rows')
+            check_df_dict(df_dict)
+            raise(e)
+
+        if rowsort is not None:
+            dfs.sort(columns=rowsort, inplace=True)
+
+#        # apply categoricals to objects: reduce in memory footprint. In theory
+#        # when using compression on a saved hdf5 object, this shouldn't make
+#        # any difference.
+#        for column_name, column_dtype in dfs.dtypes.iteritems():
+#            # applying categoricals mostly makes sense for objects
+#            # we ignore all others
+#            if column_dtype.name == 'object':
+#                dfs[column_name] = dfs[column_name].astype('category')
+
+        # and save/update the statistics database
+        if save and fname is not None:
+            if update:
+                print('updating: %s ...' % (fname), end='')
+                try:
+                    dfs.to_hdf('%s.h5' % fname, 'table', mode='r+', append=True,
+                               format='table', complevel=9, complib='blosc')
+                except IOError:
+                    print('Can not update, file does not exist. Saving instead'
+                          '...', end='')
+                    dfs.to_hdf('%s.h5' % fname, 'table', mode='w',
+                               format='table', complevel=9, complib='blosc')
+            else:
+                print('saving: %s ...' % (fname), end='')
+                if csv:
+                    dfs.to_csv('%s.csv' % fname, index=csv_index)
+                dfs.to_hdf('%s.h5' % fname, 'table', mode='w',
+                           format='table', complevel=9, complib='blosc')
+
+            print('DONE!!\n')
+
+        return dfs
+
+
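+# Minimal usage sketch of dict2df (not called anywhere in this module). The
+# column names are made up; with fname=None and save=False nothing is written
+# to disk.
+def _example_dict2df():
+    df_dict = {'case': ['a', 'b', 'c'], 'windspeed': [4.0, 6.0, 8.0]}
+    dfs = dict2df(df_dict, None, save=False, check_datatypes=True,
+                  colsort=['case', 'windspeed'])
+    return dfs
+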
+class Tests(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_rebin1(self):
+        hist = np.array([2,5,5,9,2,6])
+        bins = np.arange(7)
+        nrbins = 3
+        hist_, bins_ = rebin(hist, bins, nrbins)
+        answer = np.array([7, 14, 8])
+        self.assertTrue(np.allclose(answer, hist_))
+        binanswer = np.array([0.0, 2.0, 4.0, 6.0])
+        self.assertTrue(np.allclose(binanswer, bins_))
+
+    def test_rebin2(self):
+        hist = np.array([2,5,5,9,2,6])
+        bins = np.arange(7)
+        nrbins = 1
+        hist_, bins_ = rebin(hist, bins, nrbins)
+        answer = np.array([hist.sum()])
+        self.assertTrue(np.allclose(answer, hist_))
+        binanswer = np.array([bins[0],bins[-1]])
+        self.assertTrue(np.allclose(binanswer, bins_))
+
+    def test_rebin3(self):
+        hist = np.array([1,1,1])
+        bins = np.arange(4)
+        nrbins = 2
+        hist_, bins_ = rebin(hist, bins, nrbins)
+        answer = np.array([1.5, 1.5])
+        self.assertTrue(np.allclose(answer, hist_))
+        binanswer = np.array([0, 1.5, 3.0])
+        self.assertTrue(np.allclose(binanswer, bins_))
+
+    def test_rebin4(self):
+        hist = np.array([1,1,1])
+        bins = np.arange(2, 14, 3)
+        nrbins = 2
+        hist_, bins_ = rebin(hist, bins, nrbins)
+        answer = np.array([1.5, 1.5])
+        self.assertTrue(np.allclose(answer, hist_))
+        binanswer = np.array([2, 6.5, 11.0])
+        self.assertTrue(np.allclose(binanswer, bins_))
+
+    def test_rebin5(self):
+        hist = np.array([1,4,2,5,6,11,9,10,8,0.5])
+        bins = np.linspace(-2, 10, 11)
+        nrbins = 8
+        hist_, bins_ = rebin(hist, bins, nrbins)
+        answer = np.array([2, 4, 4.75, 7.25, 13.25, 11.75, 11, 2.5])
+        self.assertTrue(np.allclose(answer, hist_))
+        binanswer = np.array([-2., -0.5, 1., 2.5, 4., 5.5, 7., 8.5, 10.0])
+        self.assertTrue(np.allclose(binanswer, bins_))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/wetb/prepost/mplutils.py b/wetb/prepost/mplutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..26bc47d531f9228482d12d10dfe8601278ed2690
--- /dev/null
+++ b/wetb/prepost/mplutils.py
@@ -0,0 +1,337 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Nov 23 11:22:50 2011
+
+@author: dave
+"""
+
+from __future__ import division
+from __future__ import print_function
+
+# external libraries
+import numpy as np
+
+import matplotlib as mpl
+# use a headless backend
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigCanvas
+import wafo
+
+
+def make_fig(nrows=1, ncols=1, figsize=(12,8), dpi=120):
+    """
+
+    Equivalent of pyplot.subplots(). The difference is that this one
+    is not interactive and is used with backend plotting only.
+
+    Parameters
+    ----------
+    nrows=1, ncols=1, figsize=(12,8), dpi=120
+
+    Returns
+    -------
+    fig, axes
+
+
+    """
+    return subplots(nrows=nrows, ncols=ncols, figsize=figsize, dpi=dpi)
+
+
+def subplots(nrows=1, ncols=1, figsize=(12,8), dpi=120, num=0):
+    """
+
+    Equivalent of pyplot.subplots(). The difference is that this one
+    is not interactive and is used with backend plotting only.
+
+    Parameters
+    ----------
+    nrows=1, ncols=1, figsize=(12,8), dpi=120
+
+    num : dummy variable for compatibility
+
+    Returns
+    -------
+    fig, axes
+
+
+    """
+
+    fig = mpl.figure.Figure(figsize=figsize, dpi=dpi)
+    canvas = FigCanvas(fig)
+    fig.set_canvas(canvas)
+    axes = np.ndarray((nrows, ncols), dtype=np.object)
+    plt_nr = 1
+    for row in range(nrows):
+        for col in range(ncols):
+            axes[row,col] = fig.add_subplot(nrows, ncols, plt_nr)
+            plt_nr += 1
+    return fig, axes
+
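+# Minimal usage sketch of subplots (not called anywhere in this module). The
+# output file name is a placeholder. Because the figure uses the
+# non-interactive Agg canvas it can only be saved to file, not shown.
+def _example_subplots():
+    fig, axes = subplots(nrows=2, ncols=1, figsize=(8, 6), dpi=100)
+    axes[0, 0].plot([0, 1], [0, 1], 'k-')
+    axes[1, 0].plot([0, 1], [1, 0], 'r--')
+    fig.savefig('example_subplots.png')  # placeholder file name
+    return fig, axes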
+
+def match_axis_ticks(ax1, ax2, ax1_format=None, ax2_format=None):
+    """
+    Match ticks of ax2 to ax1
+    =========================
+
+    ax1_format: '%1.1f'
+
+    Parameters
+    ----------
+
+    ax1, ax2
+
+    Returns
+    -------
+
+    ax1, ax2
+
+    """
+    # match the ticks of ax2 to ax1
+    yticks1 = len(ax1.get_yticks())
+    ylim2 = ax2.get_ylim()
+    yticks2 = np.linspace(ylim2[0], ylim2[1], num=yticks1).tolist()
+    ax2.yaxis.set_ticks(yticks2)
+
+    # give the tick labels a given precision
+    if ax1_format:
+        majorFormatter = mpl.ticker.FormatStrFormatter(ax1_format)
+        ax1.yaxis.set_major_formatter(majorFormatter)
+
+    if ax2_format:
+        majorFormatter = mpl.ticker.FormatStrFormatter(ax2_format)
+        ax2.yaxis.set_major_formatter(majorFormatter)
+
+    return ax1, ax2
+
+
+def one_legend(*args, **kwargs):
+    # or more general: not only simple line plots (bars, hist, ...)
+    objs = []
+    for ax in args:
+        objs += ax.get_legend_handles_labels()[0]
+#    objs = [ax.get_legend_handles_labels()[0] for ax in args]
+    labels = [obj.get_label() for obj in objs]
+    # place the legend on the last axes
+    leg = ax.legend(objs, labels, **kwargs)
+    return leg
+
+
+def p4psd(ax, rpm_mean, p_max=17, y_pos_rel=0.25, color='g', ls='--',
+          col_text='w'):
+    """
+    Add the P's on a PSD
+
+    The highest frequency considered is taken from ax.get_xlim(), so this
+    only works when setting the xlim of the plot before calling p4psd.
+
+    Parameters
+    ----------
+
+    ax
+
+    rpm_mean
+
+    p_max : int, default=17
+
+    y_pos_rel : float, int or list, default=0.25
+    """
+    if isinstance(y_pos_rel, float) or isinstance(y_pos_rel, int):
+        y_pos_rel = [y_pos_rel]*p_max
+
+    f_min = ax.get_xlim()[0]
+    f_max = ax.get_xlim()[1]
+
+    # add the P's
+    bbox = dict(boxstyle="round", edgecolor=color, facecolor=color)
+    for i, p in enumerate(range(1, p_max)):
+        p_freq = p * rpm_mean / 60.0
+        if p_freq > f_max:
+            break
+        if p%3 == 0:
+            alpha=0.5
+            ax.axvline(x=p_freq, linewidth=1, color=color, alpha=0.7, ls=ls)
+        else:
+            alpha = 0.2
+            ax.axvline(x=p_freq, linewidth=1, color=color, alpha=0.7, ls=ls)
+
+        x = (p_freq - f_min) / (f_max - f_min)
+        y = y_pos_rel[i]
+
+        p_str = '%iP' % p
+        bbox['alpha'] = alpha
+        ax.text(x, y, p_str, fontsize=8, verticalalignment='bottom',
+                horizontalalignment='center', bbox=bbox, color=col_text,
+                transform=ax.transAxes)
+
+    return ax
+
+
+def peaks(ax, freqs, Pxx, fn_max, min_h, nr_peaks=15, col_line='k',
+          ypos_mean=0.14, col_text='w', ypos_delta=0.06, bbox_alpha=0.5):
+    """
+    indicate the peaks
+    """
+    i_fn_max = np.abs(freqs - fn_max).argmin()
+    # ignore everything above fn_max
+    freqs = freqs[:i_fn_max]
+    Pxx = Pxx[:i_fn_max]
+    Pxx_log = 10.*np.log10(Pxx)
+    try:
+        pi = wafo.misc.findpeaks(Pxx_log, n=len(Pxx), min_h=min_h)
+        print('len Pxx', len(Pxx_log), 'nr of peaks:', len(pi))
+    except Exception as e:
+        print('len Pxx', len(Pxx_log))
+        print('*** wafo.misc.findpeaks FAILED ***')
+        print(e)
+        return ax
+
+    # only take the nr_peaks most significant heights
+    pi = pi[:nr_peaks]
+    # and sort them according to frequency (or index)
+    pi.sort()
+
+    # mark the peaks with a circle
+#    ax.plot(freqs[pi], Pxx[:xlim][pi], 'o')
+    # and mark all peaks
+    switch = True
+    yrange_plot = Pxx_log.max() - Pxx_log.min()
+    for peak_nr, ii in enumerate(pi):
+        freq_peak = freqs[ii]
+#        Pxx_peak = Pxx_log[ii]
+        # take the average frequency and plot vertical line
+        ax.axvline(x=freq_peak, linewidth=1, color=col_line, alpha=0.6)
+        # and the value in a text box
+        textbox = '%2.2f' % freq_peak
+        if switch:
+            # locate at the min value (down the plot), but a little
+            # lower so it does not interfere with the plot itself
+            # if ax.set_yscale('log') True, set log values as coordinates!
+            text_ypos = Pxx_log.min() + yrange_plot*0.1
+            text_ypos = ypos_mean + ypos_delta
+            switch = False
+        else:
+            # put it a little lower than the max value so it does
+            # not mess with the title (up the plot)
+            text_ypos = Pxx_log.min() - yrange_plot*0.4
+            text_ypos = ypos_mean - ypos_delta
+            switch = True
+#        print('%2.2e %2.2e %2.2e' % (yrange_plot, Pxx[:xlim].max(), Pxx[:xlim].min())
+#        print peak, text_ypos
+#        print textbox
+#        print yrange_plot
+        xrel = freq_peak/fn_max
+        ax.text(xrel, text_ypos, textbox, fontsize=10, transform=ax.transAxes,
+                 va='bottom', color=col_text, bbox=dict(boxstyle="round",
+                 ec=col_line, fc=col_line, alpha=bbox_alpha,))
+
+    return ax
+
+
+def match_yticks(ax1, ax2, nr_ticks_forced=None, extend=False):
+    """
+    Set the same number of y-ticks on ax2 as on ax1, or force a given number
+    of ticks on both axes. When extend is True, extend both y-limits by 10%
+    on each side.
+    """
+
+    ylim1 = ax1.get_ylim()
+    if nr_ticks_forced is None:
+        nr_yticks1 = len(ax1.get_yticks())
+    else:
+        nr_yticks1 = nr_ticks_forced
+        yticks1 = np.linspace(ylim1[0], ylim1[1], num=nr_yticks1).tolist()
+        ax1.yaxis.set_ticks(yticks1)
+
+    ylim2 = ax2.get_ylim()
+    yticks2 = np.linspace(ylim2[0], ylim2[1], num=nr_yticks1).tolist()
+    ax2.yaxis.set_ticks(yticks2)
+
+    if extend:
+        offset1 = (ylim1[1] - ylim1[0])*0.1
+        ax1.set_ylim(ylim1[0]-offset1, ylim1[1]+offset1)
+        offset2 = (ylim2[1] - ylim2[0])*0.1
+        ax2.set_ylim(ylim2[0]-offset2, ylim2[1]+offset2)
+
+    return ax1, ax2
+
+
+def time_psd(results, labels, axes, alphas=[1.0, 0.7], colors=['k-', 'r-'],
+             NFFT=None, res_param=250, f0=0, f1=None, nr_peaks=10, min_h=15,
+             mark_peaks=True):
+    """
+    Plot time series and the corresponding PSD of the channel.
+
+    The frequency range depends on the sample rate: fn_max = sps/2.
+    The number of points is: NFFT/2
+    Consequently, the frequency resolution is: NFFT/SPS (points/Hz)
+
+    len(data) should be larger than NFFT, otherwise the results do not make sense
+
+    When NFFT is not given it follows from: NFFT = res_param * sps / (f1 - f0)
+    A good range for res_param is 200-300.
+
+    The plotted frequency range is based on f0 and f1.
+
+    For the PSD, NFFT values that are a power of 2 are faster, but in practice
+    the performance difference is not significant.
+
+    Requires two axes!
+
+    With the PSD peaks this only works nicely for 2 results
+
+    Parameters
+    ----------
+
+    results : list
+        list of time,data pairs (ndarrays)
+    """
+
+    axes = axes.ravel()
+    ypos = [0.04, 0.90]
+
+    for i, res in enumerate(results):
+        time, data = res
+        label = labels[i]
+        col = colors[i]
+        alpha = alphas[i]
+        sps = int(round(1.0/np.diff(time).mean(), 0))
+        if f1 is None:
+            f1 = sps/2.0
+
+        if NFFT is None:
+            nfft = int(round(res_param * sps / (f1-f0), 0))
+        elif isinstance(NFFT, list):
+            nfft = NFFT[i]
+        else:
+            nfft = NFFT
+        if nfft > len(data):
+            nfft = len(data)
+
+        # calculate the PSD
+        Pxx, freqs = mpl.mlab.psd(data, NFFT=nfft, Fs=sps)
+
+        i0 = np.abs(freqs - f0).argmin()
+        i1 = np.abs(freqs - f1).argmin()
+
+        # plotting psd, marking peaks
+        axes[0].plot(freqs[i0:i1], Pxx[i0:i1], col, label=label, alpha=alpha)
+        if mark_peaks:
+            axes[0] = peaks(axes[0], freqs[i0:i1], Pxx[i0:i1], fn_max=f1,
+                            nr_peaks=nr_peaks, col_line=col[:1],
+                            ypos_delta=0.04, bbox_alpha=0.5, col_text='w',
+                            ypos_mean=ypos[i], min_h=min_h)
+        # plotting time series
+        axes[1].plot(time, data, col, label=label, alpha=alpha)
+
+    axes[0].set_yscale('log')
+    axes[0].set_xlabel('frequency [Hz]')
+    axes[1].set_xlabel('time [s]')
+    for ax in axes:
+        leg = ax.legend(loc='best', borderaxespad=0)
+        # leg is None when no labels have been defined
+        if leg is not None:
+            leg.get_frame().set_alpha(0.7)
+        ax.grid(True)
+
+    return axes
+
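+# Minimal usage sketch of time_psd (not called anywhere in this module). The
+# signal is made up; mark_peaks=False avoids the wafo dependency of peaks().
+def _example_time_psd():
+    t = np.linspace(0, 100, 10000)
+    data = np.sin(2.0*np.pi*1.5*t) + 0.1*np.random.randn(len(t))
+    fig, axes = subplots(nrows=2, ncols=1)
+    axes = time_psd([(t, data)], ['example'], axes, alphas=[1.0],
+                    colors=['k-'], f0=0, f1=5.0, mark_peaks=False)
+    fig.savefig('example_time_psd.png')  # placeholder file name
+    return fig, axes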
+
+if __name__ == '__main__':
+
+    pass
diff --git a/wetb/prepost/prepost.py b/wetb/prepost/prepost.py
new file mode 100644
index 0000000000000000000000000000000000000000..e06cc6c13b11773ae27c9b2e843ced774a553a34
--- /dev/null
+++ b/wetb/prepost/prepost.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 10 18:47:32 2015
+
+@author: dave
+"""
+
+from __future__ import division
+from __future__ import print_function
+
+import os
+import copy
+
+class PBSScript(object):
+    """
+    Generate a PBS script that includes commands such as copying model files
+    to the node and copying back the results
+    """
+
+    template = """
+### Standard Output
+#PBS -N [jobname]
+#PBS -o [path_pbs_o]
+### Standard Error
+#PBS -e [path_pbs_e]
+#PBS -W umask=[umask]
+### Maximum wallclock time format HOURS:MINUTES:SECONDS
+#PBS -l walltime=[walltime]
+#PBS -lnodes=[lnodes]:ppn=[ppn]
+### Queue name
+#PBS -q [queue]
+
+### Browse to current working dir
+echo ""
+cd $PBS_O_WORKDIR
+echo "current working dir:"
+pwd
+echo ""
+
+### ===========================================================================
+echo "------------------------------------------------------------------------"
+echo "PRELUDE"
+echo "------------------------------------------------------------------------"
+
+[prelude]
+
+echo ""
+echo "------------------------------------------------------------------------"
+echo "EXECUTION"
+echo "------------------------------------------------------------------------"
+
+[execution]
+### wait for jobs to finish
+wait
+
+echo ""
+echo "------------------------------------------------------------------------"
+echo "CODA"
+echo "------------------------------------------------------------------------"
+
+[coda]
+
+echo ""
+### ===========================================================================
+exit
+"""
+
+    def __init__(self):
+
+        # PBS configuration
+        self.jobname = None
+        # relative paths with respect to PBS working directory
+        self.path_pbs_o = 'pbs_out/dummy.out'
+        self.path_pbs_e = 'pbs_out/dummy.err'
+        self.path_pbs_i = 'pbs_in/dummy.pbs'
+        # absolute path of the PBS working directory
+        self.pbsworkdir = None
+        self.umask = '003'
+        self.walltime = '00:59:59'
+        self.queue = 'workq'
+        self.lnodes = '1'
+        self.ppn = '1'
+
+        # regarding the job
+        # source2node = [ [/abs/src/base/a/b/c, a/b/c.mod] ]
+        self.source2node = [] # copy from the source to the node
+        # node2source = [ [a/b/d, /abs/src/base/a/b/d.mod] ]
+        self.node2source = [] # what to copy back from the node
+        self.ziparchives = []
+        self.prelude = ''
+        self.execution = ''
+        self.coda = ''
+
+    def check_dirs(self):
+        """Create the directories of std out, std error and pbs file if they
+        do not exist"""
+        dnames = set([os.path.dirname(self.path_pbs_o),
+                      os.path.dirname(self.path_pbs_e),
+                      os.path.dirname(self.path_pbs_i)])
+        for dname in dnames:
+            if not os.path.exists(os.path.join(self.pbsworkdir, dname)):
+                os.makedirs(os.path.join(self.pbsworkdir, dname))
+
+    def create(self, **kwargs):
+        """
+        path_pbs_e, path_pbs_o, and path_pbs are relative with respect to
+        the working dir
+
+        Parameters
+        ----------
+
+        template : str, default=PBSScript.template
+
+        """
+
+        pbs = kwargs.get('template', copy.copy(self.template))
+        jobname = kwargs.get('jobname', self.jobname)
+        path_pbs_o = kwargs.get('path_pbs_o', self.path_pbs_o)
+        path_pbs_e = kwargs.get('path_pbs_e', self.path_pbs_e)
+        path_pbs_i = kwargs.get('path_pbs_i', self.path_pbs_i)
+        pbsworkdir = kwargs.get('pbsworkdir', self.pbsworkdir)
+        umask = kwargs.get('umask', self.umask)
+        walltime = kwargs.get('walltime', self.walltime)
+        queue = kwargs.get('queue', self.queue)
+        lnodes = kwargs.get('lnodes', self.lnodes)
+        ppn = kwargs.get('ppn', self.ppn)
+#        source2node = kwargs.get('source2node', self.source2node)
+#        node2source = kwargs.get('node2source', self.node2source)
+#        ziparchives = kwargs.get('ziparchives', self.ziparchives)
+        prelude = kwargs.get('prelude', self.prelude)
+        execution = kwargs.get('execution', self.execution)
+        coda = kwargs.get('coda', self.coda)
+        check_dirs = kwargs.get('check_dirs', False)
+
+        if not os.path.isabs(path_pbs_o):
+            path_pbs_o = './' + path_pbs_o
+        if not os.path.isabs(path_pbs_e):
+            path_pbs_e = './' + path_pbs_e
+
+        pbs = pbs.replace('[jobname]', jobname)
+        pbs = pbs.replace('[path_pbs_o]', path_pbs_o)
+        pbs = pbs.replace('[path_pbs_e]', path_pbs_e)
+        pbs = pbs.replace('[umask]', umask)
+        pbs = pbs.replace('[walltime]', walltime)
+        pbs = pbs.replace('[queue]', queue)
+        pbs = pbs.replace('[lnodes]', lnodes)
+        pbs = pbs.replace('[ppn]', ppn)
+
+        pbs = pbs.replace('[prelude]', prelude)
+        pbs = pbs.replace('[execution]', execution)
+        pbs = pbs.replace('[coda]', coda)
+
+        if check_dirs:
+            self.check_dirs()
+
+        # write the pbs_script
+        with open(os.path.join(pbsworkdir, path_pbs_i), 'w') as f:
+            f.write(pbs)
+
+
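+# Minimal usage sketch of PBSScript (not called anywhere in this module). The
+# job name, working directory and commands below are placeholders.
+def _example_pbsscript():
+    pbs = PBSScript()
+    pbs.jobname = 'example_job'
+    pbs.pbsworkdir = '/abs/path/to/workdir'  # placeholder
+    pbs.path_pbs_i = 'pbs_in/example_job.pbs'
+    pbs.path_pbs_o = 'pbs_out/example_job.out'
+    pbs.path_pbs_e = 'pbs_out/example_job.err'
+    pbs.execution = 'echo "the actual simulation command goes here"'
+    # check_dirs=True creates the pbs_in/pbs_out directories when missing
+    pbs.create(check_dirs=True)
+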
+if __name__ == '__main__':
+    pass
diff --git a/wetb/prepost/windIO.py b/wetb/prepost/windIO.py
new file mode 100755
index 0000000000000000000000000000000000000000..09f3c356ef2113d258d9436494bc392cb5b7017d
--- /dev/null
+++ b/wetb/prepost/windIO.py
@@ -0,0 +1,1989 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Apr  3 19:53:59 2014
+
+@author: dave
+"""
+
+from __future__ import division # always divide as floats
+from __future__ import print_function
+#print(*objects, sep=' ', end='\n', file=sys.stdout)
+
+__author__ = 'David Verelst'
+__license__ = 'GPL'
+__version__ = '0.5'
+
+import os
+import copy
+import unittest
+import struct
+import math
+from time import time
+import codecs
+
+import scipy
+import scipy.io as sio
+import scipy.integrate as integrate
+import array
+import numpy as np
+import pandas as pd
+
+#import sympy
+
+# misc is part of prepost, which is available on the dtu wind gitlab server:
+# https://gitlab.windenergy.dtu.dk/dave/prepost
+import misc
+# wind energy python toolbox, available on the dtu wind redmine server:
+# http://vind-redmine.win.dtu.dk/projects/pythontoolbox/repository/show/fatigue_tools
+import fatigue
+
+
+class LoadResults:
+    """Read a HAWC2 result data file
+
+    Usage:
+    obj = LoadResults(file_path, file_name)
+
+    This class is called like a function:
+    LoadResults() will read the specified file upon object initialization.
+
+    Available output:
+    obj.sig[timeStep,channel]   : complete result file in a numpy array
+    obj.ch_details[channel,(0=ID; 1=units; 2=description)] : np.array
+    obj.error_msg: is 'none' if everything went OK, otherwise it holds the
+    error
+
+    The ch_dict key/values pairs are structured differently for different
+        type of channels. Currently supported channels are:
+
+        For forcevec, momentvec, state commands:
+            key:
+                coord-bodyname-pos-sensortype-component
+                global-tower-node-002-forcevec-z
+                local-blade1-node-005-momentvec-z
+                hub1-blade1-elem-011-zrel-1.00-state pos-z
+            value:
+                ch_dict[tag]['coord']
+                ch_dict[tag]['bodyname']
+                ch_dict[tag]['pos'] = pos
+                ch_dict[tag]['sensortype']
+                ch_dict[tag]['component']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['sensortag']
+                ch_dict[tag]['units']
+
+        For the DLL's this is:
+            key:
+                DLL-dll_name-io-io_nr
+                DLL-yaw_control-outvec-3
+                DLL-yaw_control-inpvec-1
+            value:
+                ch_dict[tag]['dll_name']
+                ch_dict[tag]['io']
+                ch_dict[tag]['io_nr']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['sensortag']
+                ch_dict[tag]['units']
+
+        For the bearings this is:
+            key:
+                bearing-bearing_name-output_type-units
+                bearing-shaft_nacelle-angle_speed-rpm
+            value:
+                ch_dict[tag]['bearing_name']
+                ch_dict[tag]['output_type']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['units']
+
+    """
+
+    # start with reading the .sel file, containing the info regarding
+    # how to read the binary file and the channel information
+    def __init__(self, file_path, file_name, debug=False, usecols=None,
+                 readdata=True):
+
+        self.debug = debug
+
+        # timer in debug mode
+        if self.debug:
+            start = time()
+
+        self.file_path = file_path
+        # remove .log, .dat, .sel extensions that might be accidentally left
+        if file_name[-4:] in ['.htc','.sel','.dat','.log']:
+            file_name = file_name[:-4]
+        # FIXME: since HAWC2 will always have lower case output files, convert
+        # any wrongly used upper case letters to lower case here
+        self.file_name = file_name.lower()
+        self.read_sel()
+        # only continue if the file has been successfully read
+        if self.error_msg == 'none':
+            # load the channel id's and scale factors
+            self.scale_factors = self.data_sel()
+            # with the sel file loaded, we have all the channel names to
+            # squeeze into a more consistent naming scheme
+            self._unified_channel_names()
+            # only read when asked for
+            if readdata:
+                # if the sel file is empty or otherwise broken, FileType
+                # will not exist
+                try:
+                    # read the binary file
+                    if self.FileType == 'BINARY':
+                        self.read_bin(self.scale_factors, usecols=usecols)
+                    # read the ASCII file
+                    elif self.FileType == 'ASCII':
+                        self.read_ascii(usecols=usecols)
+                    else:
+                        print('='*79)
+                        print('unknown file type: ' + self.FileType)
+                        print('='*79)
+                        self.error_msg = 'error: unknown file type'
+                        self.sig = []
+                except:
+                    print('='*79)
+                    print('couldn\'t determine FileType')
+                    print('='*79)
+                    self.error_msg = 'error: no file type'
+                    self.sig = []
+
+        if self.debug:
+            stop = time() - start
+            print('time to load HAWC2 file:', stop, 's')
+
+    def read_sel(self):
+        # anticipate error on file reading
+        try:
+            # open file, read and close
+            go_sel = os.path.join(self.file_path, self.file_name + '.sel')
+            FILE = open(go_sel, "r")
+            self.lines = FILE.readlines()
+            FILE.close()
+            self.error_msg = 'none'
+
+        # error message if the file does not exists
+        except:
+            # print(26*' ' + 'ERROR'
+            print(50*'=')
+            print(self.file_path)
+            print(self.file_name + '.sel could not be found')
+            print(50*'=')
+            self.error_msg = 'error: file not found'
+
+    def data_sel(self):
+
+        # scan through all the lines in the file
+        line_nr = 1
+        # channel counter for ch_details
+        ch = 0
+        for line in self.lines:
+            # on line 9 we can read the following parameters:
+            if line_nr == 9:
+                # remove the end of line character
+                line = line.replace('\n','').replace('\r', '')
+
+                settings = line.split(' ')
+                # delete all empty string values
+                for k in range(settings.count('')):
+                    settings.remove('')
+
+                # and assign proper values with correct data type
+                self.N = int(settings[0])
+                self.Nch = int(settings[1])
+                self.Time = float(settings[2])
+                self.FileType = settings[3]
+                self.Freq = self.N/self.Time
+
+                # prepare list variables
+                self.ch_details = np.ndarray(shape=(self.Nch,3),dtype='<U100')
+                # it seems that float64 reads the data correctly from the file
+                scale_factors = scipy.zeros(self.Nch, dtype='Float64')
+                #self.scale_factors_dec = scipy.zeros(self.Nch, dtype='f8')
+                i = 0
+
+            # starting from line 13, we have the channels info
+            if line_nr > 12:
+                # read the signal details
+                if line_nr < 13 + self.Nch:
+                    # remove leading and trailing whitespaces from line parts
+                    self.ch_details[ch,0] = str(line[12:43]).strip() # chID
+                    self.ch_details[ch,1] = str(line[43:54]).strip() # chUnits
+                    self.ch_details[ch,2] = str(line[54:-1]).strip() # chDescr
+                    ch += 1
+                # read the signal scale parameters for binary format
+                elif line_nr > 14 + self.Nch:
+                    scale_factors[i] = line
+                    # print(scale_factors[i]
+                    #self.scale_factors_dec[i] = D.Decimal(line)
+                    i = i + 1
+                # stop going through the lines if at the end of the file
+                if line_nr == 2*self.Nch + 14:
+                    self.scale_factors = scale_factors
+
+                    if self.debug:
+                        print('N       ', self.N)
+                        print('Nch     ', self.Nch)
+                        print('Time    ', self.Time)
+                        print('FileType', self.FileType)
+                        print('Freq    ', self.Freq)
+                        print('scale_factors', scale_factors.shape)
+
+                    return scale_factors
+
+            # counting the line numbers
+            line_nr = line_nr + 1
+
+    def read(self, usecols=False):
+        """
+        This whole LoadResults class needs to be refactored.
+        Keep the old methods for backwards compatibility.
+        """
+
+        if self.FileType == 'ASCII':
+            self.read_ascii(usecols=usecols)
+        elif self.FileType == 'BINARY':
+            self.read_bin(self.scale_factors, usecols=usecols)
+
+    def read_bin(self, scale_factors, usecols=False):
+        if not usecols:
+            usecols = range(0, self.Nch)
+        fid = open(os.path.join(self.file_path, self.file_name) + '.dat', 'rb')
+        self.sig = np.zeros( (self.N, len(usecols)) )
+        for j, i in enumerate(usecols):
+            fid.seek(i*self.N*2,0)
+            self.sig[:,j] = np.fromfile(fid, 'int16', self.N)*scale_factors[i]
+
+    def read_bin_old(self, scale_factors):
+        # if there is an error reading the binary file (for instance if empty)
+        try:
+            # read the binary file
+            go_binary = os.path.join(self.file_path, self.file_name) + '.dat'
+            FILE = open(go_binary, mode='rb')
+
+            # create array, put all the binary elements as one long chain in it
+            binvalues = array.array('h')
+            binvalues.fromfile(FILE, self.N * self.Nch)
+            FILE.close()
+            # convert now to a structured numpy array
+            # sig = np.array(binvalues, np.float)
+#            sig = np.array(binvalues)
+            # this is faster! the saved bin values are only of type int16
+            sig = np.array(binvalues, dtype='int16')
+
+            if self.debug: print(self.N, self.Nch, sig.shape)
+
+#            sig = np.reshape(sig, (self.Nch, self.N))
+#            # apparently Nch and N had to be reversed to read it correctly
+#            # is this because we are reading a Fortran array with Python C
+#            # code? so now transpose again so we have sig(time, channel)
+#            sig = np.transpose(sig)
+
+            # reshape the array to 2D and transpose (Fortran to C array)
+            sig = sig.reshape((self.Nch, self.N)).T
+
+            # create diagonal vector of size (Nch,Nch)
+            dig = np.diag(scale_factors)
+            # now all rows of column 1 are multiplied with dig(1,1)
+            sig = np.dot(sig,dig)
+            self.sig = sig
+            # 'file name;' + 'lnr;msg;'*(len(MsgList)) + '\n'
+        except:
+            self.sig = []
+            self.error_msg = 'error: reading binary file failed'
+            print('========================================================')
+            print(self.error_msg)
+            print(self.file_path)
+            print(self.file_name)
+            print('========================================================')
+
+    def read_ascii(self, usecols=None):
+
+        try:
+            go_ascii = os.path.join(self.file_path, self.file_name) + '.dat'
+#            self.sig = np.genfromtxt(go_ascii)
+            self.sig = np.loadtxt(go_ascii, usecols=usecols)
+#            self.sig = np.fromfile(go_ascii, dtype=np.float32, sep='  ')
+#            self.sig = self.sig.reshape((self.N, self.Nch))
+        except:
+            self.sig = []
+            self.error_msg = 'error: reading ascii file failed'
+            print('========================================================')
+            print(self.error_msg)
+            print(self.file_path)
+            print(self.file_name)
+            print('========================================================')
+
+#        print '========================================================'
+#        print 'ASCII reading not implemented yet'
+#        print '========================================================'
+#        self.sig = []
+#        self.error_msg = 'error: ASCII reading not implemented yet'
+
+    def reformat_sig_details(self):
+        """Change HAWC2 output description of the channels short descriptive
+        strings, usable in plots
+
+        obj.ch_details[channel,(0=ID; 1=units; 2=description)] : np.array
+        """
+
+        # CONFIGURATION: mappings between HAWC2 and short good output:
+        change_list = []
+        change_list.append( ['original','new improved'] )
+
+#        change_list.append( ['Mx coo: hub1','blade1 root bending: flap'] )
+#        change_list.append( ['My coo: hub1','blade1 root bending: edge'] )
+#        change_list.append( ['Mz coo: hub1','blade1 root bending: torsion'] )
+#
+#        change_list.append( ['Mx coo: hub2','blade2 root bending: flap'] )
+#        change_list.append( ['My coo: hub2','blade2 root bending: edge'] )
+#        change_list.append( ['Mz coo: hub2','blade2 root bending: torsion'] )
+#
+#        change_list.append( ['Mx coo: hub3','blade3 root bending: flap'] )
+#        change_list.append( ['My coo: hub3','blade3 root bending: edge'] )
+#        change_list.append( ['Mz coo: hub3','blade3 root bending: torsion'] )
+
+        change_list.append( ['Mx coo: blade1','blade1 flap'] )
+        change_list.append( ['My coo: blade1','blade1 edge'] )
+        change_list.append( ['Mz coo: blade1','blade1 torsion'] )
+
+        change_list.append( ['Mx coo: blade2','blade2 flap'] )
+        change_list.append( ['My coo: blade2','blade2 edge'] )
+        change_list.append( ['Mz coo: blade2','blade2 torsion'] )
+
+        change_list.append( ['Mx coo: blade3','blade3 flap'] )
+        change_list.append( ['My coo: blade3','blade3 edge'] )
+        change_list.append( ['Mz coo: blade3','blade3 torsion'] )
+
+        change_list.append( ['Mx coo: hub1','blade1 out-of-plane'] )
+        change_list.append( ['My coo: hub1','blade1 in-plane'] )
+        change_list.append( ['Mz coo: hub1','blade1 torsion'] )
+
+        change_list.append( ['Mx coo: hub2','blade2 out-of-plane'] )
+        change_list.append( ['My coo: hub2','blade2 in-plane'] )
+        change_list.append( ['Mz coo: hub2','blade2 torsion'] )
+
+        change_list.append( ['Mx coo: hub3','blade3 out-of-plane'] )
+        change_list.append( ['My coo: hub3','blade3 in-plane'] )
+        change_list.append( ['Mz coo: hub3','blade3 torsion'] )
+        # this one will create a false positive for tower node nr1
+        change_list.append( ['Mx coo: tower','tower top moment FA'] )
+        change_list.append( ['My coo: tower','tower top moment SS'] )
+        change_list.append( ['Mz coo: tower','yaw-moment'] )
+
+        change_list.append( ['Mx coo: chasis','chasis moment FA'] )
+        change_list.append( ['My coo: chasis','yaw-moment chasis'] )
+        change_list.append( ['Mz coo: chasis','chasis moment SS'] )
+
+        change_list.append( ['DLL inp  2:  2','tower clearance'] )
+
+        self.ch_details_new = np.ndarray(shape=(self.Nch,3),dtype='<U100')
+
+        # approach: look for a specific description and change it.
+        # This approach is slow, but will not fail if the channel numbers change
+        # over different simulations
+        for ch in range(self.Nch):
+            # the change_list will always be slower, so this loop will be
+            # inside the bigger loop of all channels
+            self.ch_details_new[ch,:] = self.ch_details[ch,:]
+            for k in range(len(change_list)):
+                if change_list[k][0] == self.ch_details[ch,0]:
+                    self.ch_details_new[ch,0] =  change_list[k][1]
+                    # channel description should be unique, so delete current
+                    # entry and stop looking in the change list
+                    del change_list[k]
+                    break
+
+#        self.ch_details_new = ch_details_new
+
+    def _unified_channel_names(self):
+        """
+        Make certain channels independent from their index.
+
+        The unified channel dictionary ch_dict holds consistently named
+        channels as the keys, and all the information is stored in the value
+        as another dictionary.
+
+        The ch_dict key/values pairs are structured differently for different
+        type of channels. Currently supported channels are:
+
+        For forcevec, momentvec, state commands:
+            node numbers start with 0 at the root
+            element numbers start with 1 at the root
+            key:
+                coord-bodyname-pos-sensortype-component
+                global-tower-node-002-forcevec-z
+                local-blade1-node-005-momentvec-z
+                hub1-blade1-elem-011-zrel-1.00-state pos-z
+            value:
+                ch_dict[tag]['coord']
+                ch_dict[tag]['bodyname']
+                ch_dict[tag]['pos']
+                ch_dict[tag]['sensortype']
+                ch_dict[tag]['component']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['sensortag']
+                ch_dict[tag]['units']
+
+        For the DLL's this is:
+            key:
+                DLL-dll_name-io-io_nr
+                DLL-yaw_control-outvec-3
+                DLL-yaw_control-inpvec-1
+            value:
+                ch_dict[tag]['dll_name']
+                ch_dict[tag]['io']
+                ch_dict[tag]['io_nr']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['sensortag']
+                ch_dict[tag]['units']
+
+        For the bearings this is:
+            key:
+                bearing-bearing_name-output_type-units
+                bearing-shaft_nacelle-angle_speed-rpm
+            value:
+                ch_dict[tag]['bearing_name']
+                ch_dict[tag]['output_type']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['units']
+
+        For many of the aero sensors:
+            'Cl', 'Cd', 'Alfa', 'Vrel'
+            key:
+                sensortype-blade_nr-pos
+                Cl-1-0.01
+            value:
+                ch_dict[tag]['sensortype']
+                ch_dict[tag]['blade_nr']
+                ch_dict[tag]['pos']
+                ch_dict[tag]['chi']
+                ch_dict[tag]['units']
+        """
+        # save them in a dictionary, use the new coherent naming structure
+        # as the key, and as value again a dict that holds all the different
+        # classifications: (chi, channel nr), (coord, coord), ...
+        self.ch_dict = dict()
+
+        # some channel ID's are unique, use them
+        ch_unique = set(['Omega', 'Ae rot. torque', 'Ae rot. power',
+                     'Ae rot. thrust', 'Time', 'Azi  1'])
+        ch_aero = set(['Cl', 'Cd', 'Alfa', 'Vrel', 'Tors_e', 'Alfa'])
+        ch_aerogrid = set(['a_grid', 'am_grid'])
+
+        # also save as df
+#        cols = set(['bearing_name', 'sensortag', 'bodyname', 'chi',
+#                    'component', 'pos', 'coord', 'sensortype', 'radius',
+#                    'blade_nr', 'units', 'output_type', 'io_nr', 'io', 'dll',
+#                    'azimuth', 'flap_nr'])
+        df_dict = {col:[] for col in self.cols}
+        df_dict['ch_name'] = []
+
+        # scan through all channels and see which can be converted
+        # to sensible unified name
+        for ch in range(self.Nch):
+            items = self.ch_details[ch,2].split(' ')
+            # remove empty values in the list
+            items = misc.remove_items(items, '')
+
+            dll = False
+
+            # be careful, identify only on the starting characters, because
+            # the signal tag can hold random text that in some cases might
+            # trigger a false positive
+
+            # -----------------------------------------------------------------
+            # check for all the unique channel descriptions
+            if self.ch_details[ch,0].strip() in ch_unique:
+                tag = self.ch_details[ch,0].strip()
+                channelinfo = {}
+                channelinfo['units'] = self.ch_details[ch,1]
+                channelinfo['sensortag'] = self.ch_details[ch,2]
+                channelinfo['chi'] = ch
+
+            # -----------------------------------------------------------------
+            # or in the long description:
+            #    0          1        2      3  4    5     6 and up
+            # MomentMz Mbdy:blade nodenr:   5 coo: blade  TAG TEXT
+            elif self.ch_details[ch,2].startswith('MomentM'):
+                coord = items[5]
+                bodyname = items[1].replace('Mbdy:', '')
+                # set nodenr to sortable way, include leading zeros
+                # node numbers start with 0 at the root
+                nodenr = '%03i' % int(items[3])
+                # skip the attached component
+                #sensortype = items[0][:-2]
+                # or give the sensor type the same name as in HAWC2
+                sensortype = 'momentvec'
+                component = items[0][-1:len(items[0])]
+                # the tag only exists if defined
+                if len(items) > 6:
+                    sensortag = ' '.join(items[6:])
+                else:
+                    sensortag = ''
+
+                # and tag it
+                pos = 'node-%s' % nodenr
+                tagitems = (coord,bodyname,pos,sensortype,component)
+                tag = '%s-%s-%s-%s-%s' % tagitems
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['coord'] = coord
+                channelinfo['bodyname'] = bodyname
+                channelinfo['pos'] = pos
+                channelinfo['sensortype'] = sensortype
+                channelinfo['component'] = component
+                channelinfo['chi'] = ch
+                channelinfo['sensortag'] = sensortag
+                channelinfo['units'] = self.ch_details[ch,1]
+
+            # -----------------------------------------------------------------
+            #   0    1      2        3       4  5     6     7 and up
+            # Force  Fx Mbdy:blade nodenr:   2 coo: blade  TAG TEXT
+            elif self.ch_details[ch,2].startswith('Force'):
+                coord = items[6]
+                bodyname = items[2].replace('Mbdy:', '')
+                nodenr = '%03i' % int(items[4])
+                # skip the attached component
+                #sensortype = items[0]
+                # or give the sensor type the same name as in HAWC2
+                sensortype = 'forcevec'
+                component = items[1][1]
+                if len(items) > 7:
+                    sensortag = ' '.join(items[7:])
+                else:
+                    sensortag = ''
+
+                # and tag it
+                pos = 'node-%s' % nodenr
+                tagitems = (coord,bodyname,pos,sensortype,component)
+                tag = '%s-%s-%s-%s-%s' % tagitems
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['coord'] = coord
+                channelinfo['bodyname'] = bodyname
+                channelinfo['pos'] = pos
+                channelinfo['sensortype'] = sensortype
+                channelinfo['component'] = component
+                channelinfo['chi'] = ch
+                channelinfo['sensortag'] = sensortag
+                channelinfo['units'] = self.ch_details[ch,1]
+
+            # -----------------------------------------------------------------
+            #   0    1  2      3       4      5   6         7    8
+            # State pos x  Mbdy:blade E-nr:   1 Z-rel:0.00 coo: blade
+            #   0           1     2    3        4    5   6         7     8     9+
+            # State_rot proj_ang tx Mbdy:bname E-nr: 1 Z-rel:0.00 coo: cname  label
+            # State_rot omegadot tz Mbdy:bname E-nr: 1 Z-rel:1.00 coo: cname  label
+            elif self.ch_details[ch,2].startswith('State'):
+#                 or self.ch_details[ch,0].startswith('euler') \
+#                 or self.ch_details[ch,0].startswith('ax') \
+#                 or self.ch_details[ch,0].startswith('omega') \
+#                 or self.ch_details[ch,0].startswith('proj'):
+                coord = items[8]
+                bodyname = items[3].replace('Mbdy:', '')
+                # element numbers start with 1 at the root
+                elementnr = '%03i' % int(items[5])
+                zrel = '%04.2f' % float(items[6].replace('Z-rel:', ''))
+                # skip the attached component
+                #sensortype = ''.join(items[0:2])
+                # or give the sensor type the same name as in HAWC2
+                tmp = self.ch_details[ch,0].split(' ')
+                sensortype = tmp[0]
+                if sensortype.startswith('State'):
+                    sensortype += ' ' + tmp[1]
+                component = items[2]
+                if len(items) > 8:
+                    sensortag = ' '.join(items[9:])
+                else:
+                    sensortag = ''
+
+                # and tag it
+                pos = 'elem-%s-zrel-%s' % (elementnr, zrel)
+                tagitems = (coord,bodyname,pos,sensortype,component)
+                tag = '%s-%s-%s-%s-%s' % tagitems
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['coord'] = coord
+                channelinfo['bodyname'] = bodyname
+                channelinfo['pos'] = pos
+                channelinfo['sensortype'] = sensortype
+                channelinfo['component'] = component
+                channelinfo['chi'] = ch
+                channelinfo['sensortag'] = sensortag
+                channelinfo['units'] = self.ch_details[ch,1]
+
+            # -----------------------------------------------------------------
+            # DLL CONTROL I/O
+            # there are three scenarios for how the channel description is formed
+            # the channel id is always the same though
+            # id for all three cases:
+            #          DLL out  1:  3
+            #          DLL inp  2:  3
+            # description case 1 ("dll type2_dll b2h2 inpvec 30" in htc output)
+            #               0         1    2   3     4+
+            #          yaw_control outvec  3  yaw_c input reference angle
+            # description case 2 ("dll inpvec 2 1" in htc output):
+            #           0  1 2     3  4  5  6+
+            #          DLL : 2 inpvec :  4  mgen hss
+            # description case 3
+            #           0         1     2       4
+            #          hawc_dll :echo outvec :  1
+            elif self.ch_details[ch,0].startswith('DLL'):
+                # case 3: the description starts with the hawc_dll keyword
+                if items[0] == 'hawc_dll':
+                    # hawc_dll named case (case 3) is polluted with colons
+                    items = self.ch_details[ch,2].replace(':','')
+                    items = items.split(' ')
+                    items = misc.remove_items(items, '')
+                    dll = items[1]
+                    io = items[2]
+                    io_nr = items[3]
+                    tag = 'DLL-%s-%s-%s' % (dll,io,io_nr)
+                    sensortag = ''
+                # case 2: no reference to dll name
+                elif self.ch_details[ch,2].startswith('DLL'):
+                    dll = items[2]
+                    io = items[3]
+                    io_nr = items[5]
+                    sensortag = ' '.join(items[6:])
+                    # and tag it
+                    tag = 'DLL-%s-%s-%s' % (dll,io,io_nr)
+                # case 1: type2 dll name is given
+                else:
+                    dll = items[0]
+                    io = items[1]
+                    io_nr = items[2]
+                    sensortag = ' '.join(items[3:])
+                    tag = 'DLL-%s-%s-%s' % (dll,io,io_nr)
+
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['dll'] = dll
+                channelinfo['io'] = io
+                channelinfo['io_nr'] = io_nr
+                channelinfo['chi'] = ch
+                channelinfo['sensortag'] = sensortag
+                channelinfo['units'] = self.ch_details[ch,1]
+
+            # -----------------------------------------------------------------
+            # BEARING OUTPUTS
+            # bea1 angle_speed       rpm      shaft_nacelle angle speed
+            elif self.ch_details[ch,0].startswith('bea'):
+                output_type = self.ch_details[ch,0].split(' ')[1]
+                bearing_name = items[0]
+                units = self.ch_details[ch,1]
+                # there is no label option for the bearing output
+
+                # and tag it
+                tag = 'bearing-%s-%s-%s' % (bearing_name,output_type,units)
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['bearing_name'] = bearing_name
+                channelinfo['output_type'] = output_type
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # -----------------------------------------------------------------
+            # AERO CL, CD, CM, VREL, ALFA, LIFT, DRAG, etc
+            # Cl, R=  0.5     deg      Cl of blade  1 at radius   0.49
+            # Azi  1          deg      Azimuth of blade  1
+            elif self.ch_details[ch,0].split(',')[0] in ch_aero:
+                dscr_list = self.ch_details[ch,2].split(' ')
+                dscr_list = misc.remove_items(dscr_list, '')
+
+                sensortype = self.ch_details[ch,0].split(',')[0]
+                radius = dscr_list[-1]
+                # is this always valid?
+                blade_nr = self.ch_details[ch,2].split('blade  ')[1][0]
+                # sometimes the units for aero sensors are wrong!
+                units = self.ch_details[ch,1]
+                # there is no label option
+
+                # and tag it
+                tag = '%s-%s-%s' % (sensortype,blade_nr,radius)
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['sensortype'] = sensortype
+                channelinfo['radius'] = float(radius)
+                channelinfo['blade_nr'] = int(blade_nr)
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # -----------------------------------------------------------------
+            # for the induction grid over the rotor
+            # a_grid, azi    0.00 r   1.74
+            elif self.ch_details[ch,0].split(',')[0] in ch_aerogrid:
+                items = self.ch_details[ch,0].split(',')
+                sensortype = items[0]
+                items2 = items[1].split(' ')
+                items2 = misc.remove_items(items2, '')
+                azi = items2[1]
+                radius = items2[3]
+                units = self.ch_details[ch,1]
+                # and tag it
+                tag = '%s-azi-%s-r-%s' % (sensortype,azi,radius)
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['sensortype'] = sensortype
+                channelinfo['radius'] = float(radius)
+                channelinfo['azimuth'] = float(azi)
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # -----------------------------------------------------------------
+            # INDUCTION AT THE BLADE
+            # 0: Induc. Vz, rpco, R=  1.4
+            # 1: m/s
+            # 2: Induced wsp Vz of blade  1 at radius   1.37, RP. coo.
+# Induc. Vx, locco, R=  1.4 // Induced wsp Vx of blade  1 at radius   1.37, local ae coo.
+# Induc. Vy, blco, R=  1.4 // Induced wsp Vy of blade  1 at radius   1.37, local bl coo.
+# Induc. Vz, glco, R=  1.4 // Induced wsp Vz of blade  1 at radius   1.37, global coo.
+# Induc. Vx, rpco, R=  8.4 // Induced wsp Vx of blade  1 at radius   8.43, RP. coo.
+            elif self.ch_details[ch,0].strip()[:5] == 'Induc':
+                items = self.ch_details[ch,2].split(' ')
+                items = misc.remove_items(items, '')
+                blade_nr = int(items[5])
+                radius = float(items[8].replace(',', ''))
+                items = self.ch_details[ch,0].split(',')
+                coord = items[1].strip()
+                component = items[0][-2:]
+                units = self.ch_details[ch,1]
+                # and tag it
+                rpl = (coord, blade_nr, component, radius)
+                tag = 'induc-%s-blade-%1i-%s-r-%03.02f' % rpl
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['blade_nr'] = blade_nr
+                channelinfo['sensortype'] = 'induction'
+                channelinfo['radius'] = radius
+                channelinfo['coord'] = coord
+                channelinfo['component'] = component
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # TODO: wind speed
+            # some spaces have been trimmed here
+            # WSP gl. coo.,Vy          m/s
+            # // Free wind speed Vy, gl. coo, of gl. pos   0.00,  0.00,  -2.31
+            # WSP gl. coo.,Vdir_hor          deg
+            # Free wind speed Vdir_hor, gl. coo, of gl. pos  0.00,  0.00, -2.31
+
+            # -----------------------------------------------------------------
+            # WATER SURFACE gl. coo, at gl. coo, x,y=   0.00,   0.00
+            elif self.ch_details[ch,2].startswith('Water'):
+                units = self.ch_details[ch,1]
+
+                # but remove the comma
+                x = items[-2][:-1]
+                y = items[-1]
+
+                # and tag it
+                tag = 'watersurface-global-%s-%s' % (x, y)
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['coord'] = 'global'
+                channelinfo['pos'] = (float(x), float(y))
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # -----------------------------------------------------------------
+            # WIND SPEED
+            # WSP gl. coo.,Vx
+            elif self.ch_details[ch,0].startswith('WSP gl.'):
+                units = self.ch_details[ch,1]
+                direction = self.ch_details[ch,0].split(',')[1]
+                tmp = self.ch_details[ch,2].split('pos')[1]
+                x, y, z = tmp.split(',')
+                x, y, z = x.strip(), y.strip(), z.strip()
+
+                # and tag it
+                tag = 'windspeed-global-%s-%s-%s-%s' % (direction, x, y, z)
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['coord'] = 'global'
+                channelinfo['pos'] = (x, y, z)
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # WIND SPEED AT BLADE
+            # 0: WSP Vx, glco, R= 61.5
+            # 2: Wind speed Vx of blade  1 at radius  61.52, global coo.
+            elif self.ch_details[ch,0].startswith('WSP V'):
+                units = self.ch_details[ch,1].strip()
+                direction = self.ch_details[ch,0].split(' ')[1].strip()
+                blade_nr = self.ch_details[ch,2].split('blade')[1].strip()[:2]
+                radius = self.ch_details[ch,2].split('radius')[1].split(',')[0]
+                coord = self.ch_details[ch,2].split(',')[1].strip()
+
+                radius = radius.strip()
+                blade_nr = blade_nr.strip()
+
+                # and tag it
+                rpl = (direction, blade_nr, radius, coord)
+                tag = 'wsp-blade-%s-%s-%s-%s' % rpl
+                # save all info in the dict
+                channelinfo = {}
+                channelinfo['coord'] = coord
+                channelinfo['direction'] = direction
+                channelinfo['blade_nr'] = int(blade_nr)
+                channelinfo['radius'] = float(radius)
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # FLAP ANGLE
+            # 2: Flap angle for blade  3 flap number  1
+            elif self.ch_details[ch,0][:7] == 'setbeta':
+                units = self.ch_details[ch,1].strip()
+                blade_nr = self.ch_details[ch,2].split('blade')[1].strip()
+                blade_nr = blade_nr.split(' ')[0].strip()
+                flap_nr = self.ch_details[ch,2].split(' ')[-1].strip()
+
+                # no radius is defined for the flap angle channel
+                blade_nr = blade_nr.strip()
+
+                # and tag it
+                tag = 'setbeta-bladenr-%s-flapnr-%s' % (blade_nr, flap_nr)
+                # save all info in the dict
+                channelinfo = {}
+                # no coordinate system applies to the flap angle channel
+                channelinfo['flap_nr'] = int(flap_nr)
+                channelinfo['blade_nr'] = int(blade_nr)
+                channelinfo['units'] = units
+                channelinfo['chi'] = ch
+
+            # -----------------------------------------------------------------
+            # ignore all the other cases we don't know how to deal with
+            else:
+                # if we get here, we don't have support yet for that sensor
+                # and hence we can't save it. Continue with next channel
+                continue
+
+            # -----------------------------------------------------------------
+            # ignore if we have a non unique tag
+            if tag in self.ch_dict:
+                jj = 1
+                while True:
+                    tag_new = tag + '_v%i' % jj
+                    if tag_new in self.ch_dict:
+                        jj += 1
+                    else:
+                        tag = tag_new
+                        break
+#                msg = 'non unique tag for HAWC2 results, ignoring: %s' % tag
+#                logging.warn(msg)
+#            else:
+            self.ch_dict[tag] = copy.copy(channelinfo)
+
+            # -----------------------------------------------------------------
+            # save in for DataFrame format
+            cols_ch = set(channelinfo.keys())
+            for col in cols_ch:
+                df_dict[col].append(channelinfo[col])
+            # the remainder columns we have not had yet. Fill in blank
+            for col in (self.cols - cols_ch):
+                df_dict[col].append('')
+            df_dict['ch_name'].append(tag)
+
+        self.ch_df = pd.DataFrame(df_dict)
+        self.ch_df.set_index('chi', inplace=True)
+
+
+    def _ch_dict2df(self):
+        """
+        Create a DataFrame version of the ch_dict, with the chi column set
+        as the index.
+        """
+        # identify all the different columns
+        cols = set()
+        for ch_name, channelinfo in self.ch_dict.iteritems():
+            cols.update(set(channelinfo.keys()))
+
+        df_dict = {col:[] for col in cols}
+        df_dict['ch_name'] = []
+        for ch_name, channelinfo in self.ch_dict.iteritems():
+            cols_ch = set(channelinfo.keys())
+            for col in cols_ch:
+                df_dict[col].append(channelinfo[col])
+            # the remainder columns we have not had yet. Fill in blank
+            for col in (cols - cols_ch):
+                df_dict[col].append('')
+            df_dict['ch_name'].append(ch_name)
+
+        self.ch_df = pd.DataFrame(df_dict)
+        self.ch_df.set_index('chi', inplace=True)
+
+
+    def _data_window(self, nr_rev=None, time=None):
+        """
+        Based on a time interval, create a proper slice object
+        ======================================================
+
+        The returned slice selects the samples that fall within the given
+        time interval.
+
+        Parameters
+        ----------
+
+        nr_rev : int, default=None
+            NOT IMPLEMENTED YET
+
+        time : list, default=None
+            time = [time start, time stop]
+
+        Returns
+        -------
+
+        slice_
+
+        window
+
+        zoomtype
+
+        time_range
+            time_range = time[1] - time[0]
+
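+        Examples
+        --------
+
+        A minimal sketch, assuming ``res`` holds loaded results and that
+        ``res.Freq`` is set to the sample frequency:
+
+        >>> sel = res._data_window(time=np.array([100.0, 200.0]))
+        >>> slice_, window, zoomtype, time_range = sel
+        >>> sig_window = res.sig[slice_, :]
+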
+        """
+
+        # -------------------------------------------------
+        # determine zoom range if necessary
+        # -------------------------------------------------
+        time_range = None
+        if nr_rev:
+            raise NotImplementedError
+            # input is a number of revolutions, get RPM and sample rate to
+            # calculate the required range
+            # TODO: automatic detection of RPM channel!
+            time_range = nr_rev/(self.rpm_mean/60.)
+            # convert to indices instead of seconds
+            i_range = int(self.Freq*time_range)
+            window = [0, time_range]
+            # in case the first datapoint is not at 0 seconds
+            i_zero = int(self.sig[0,0]*self.Freq)
+            slice_ = np.r_[i_zero:i_range+i_zero]
+
+            zoomtype = '_nrrev_' + format(nr_rev, '1.0f') + 'rev'
+
+        elif time is not None:
+            time_range = time[1] - time[0]
+
+            i_start = int(time[0]*self.Freq)
+            i_end = int(time[1]*self.Freq)
+            slice_ = np.r_[i_start:i_end]
+            window = [time[0], time[1]]
+
+            zoomtype = '_zoom_%1.1f-%1.1fsec' %  (time[0], time[1])
+
+        return slice_, window, zoomtype, time_range
+
+    # TODO: general signal method, this is not HAWC2 specific, move out
+    def calc_stats(self, sig, i0=0, i1=-1):
+
+        stats = {}
+        # calculate the statistics values:
+        stats['max'] = sig[i0:i1,:].max(axis=0)
+        stats['min'] = sig[i0:i1,:].min(axis=0)
+        stats['mean'] = sig[i0:i1,:].mean(axis=0)
+        stats['std'] = sig[i0:i1,:].std(axis=0)
+        stats['range'] = stats['max'] - stats['min']
+        stats['absmax'] = np.absolute(sig[i0:i1,:]).max(axis=0)
+        stats['rms'] = np.sqrt(np.mean(sig[i0:i1,:]*sig[i0:i1,:], axis=0))
+        # trapezoidal integration over the time axis (first column of sig)
+        stats['int'] = np.trapz(sig[i0:i1,:], x=sig[i0:i1,0], axis=0)
+        return stats
+
+    # TODO: general signal method, this is not HAWC2 specific, move out
+    def calc_fatigue(self, signal, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=1):
+        """
+        Compute the equivalent loads of a 1D signal for each Woehler
+        exponent in m, using rainflow counting.
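+
+        A hedged usage sketch, assuming ``res`` is a loaded result object
+        and ``chi`` a valid channel index:
+
+        >>> eq = res.calc_fatigue(res.sig[:, chi], no_bins=46,
+        ...                       m=[3, 4, 6, 8, 10, 12], neq=1)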
+        """
+
+        try:
+            sig_rf = fatigue.rainflow_astm(signal)
+        except:
+            return []
+
+        if len(sig_rf) < 1:
+            return []
+
+        hist_data, x, bin_avg =  fatigue.rfc_hist(sig_rf, no_bins)
+
+        m = np.atleast_1d(m)
+
+        eq = []
+        for i in range(len(m)):
+            eq.append(np.power(np.sum(0.5 * hist_data *\
+                                    np.power(bin_avg, m[i])) / neq, 1. / m[i]))
+        return eq
+
+    # TODO: general signal method, this is not HAWC2 specific, move out
+    def cycle_matrix(self, signal, no_bins=46, m=[3, 4, 6, 8, 10, 12]):
+
+#        import fatigue_tools.fatigue as ft
+#        cycles, ampl_bin_mean, ampl_bin_edges, mean_bin_mean, mean_edges \
+#            = ft.cycle_matrix(signal, ampl_bins=no_bins, mean_bins=1,
+#                              rainflow_func=ft.rainflow_windap)
+#        # in this case eq = sum( n_i*S_i^m )
+#        return [np.sum(cycles * ampl_bin_mean ** _m) for _m in m]
+
+        try:
+            sig_rf = fatigue.rainflow_astm(signal)
+        except:
+            return []
+
+        if len(sig_rf) < 1:
+            return []
+
+        hist_data, x, bin_avg =  fatigue.rfc_hist(sig_rf, no_bins)
+        m = np.atleast_1d(m)
+        return [np.sum(0.5 * hist_data * bin_avg ** _m) for _m in m]
+
+    def blade_deflection(self):
+        """
+        """
+
+        # select all the y deflection channels
+        db = misc.DictDB(self.ch_dict)
+
+        db.search({'sensortype' : 'state pos', 'component' : 'z'})
+        # sort the keys and save the mean values to an array/list
+        chiz, zvals = [], []
+        for key in sorted(db.dict_sel.keys()):
+            zvals.append(-self.sig[:,db.dict_sel[key]['chi']].mean())
+            chiz.append(db.dict_sel[key]['chi'])
+
+        db.search({'sensortype' : 'state pos', 'component' : 'y'})
+        # sort the keys and save the mean values to an array/list
+        chiy, yvals = [], []
+        for key in sorted(db.dict_sel.keys()):
+            yvals.append(self.sig[:,db.dict_sel[key]['chi']].mean())
+            chiy.append(db.dict_sel[key]['chi'])
+
+        return np.array(zvals), np.array(yvals)
+
+    def save_csv(self, fname, fmt='%.18e', delimiter=','):
+        """
+        Save to csv and use the unified channel names as columns
+        """
+        map_sorting = {}
+        # first, sort on channel index
+        for ch_key, ch in self.ch_dict.iteritems():
+            map_sorting[ch['chi']] = ch_key
+
+        header = []
+        # not all channels might be present...iterate again over map_sorting,
+        # and keep the channel indices sorted so the header matches the
+        # column order written below
+        chis = sorted(map_sorting.keys())
+        for chi in chis:
+            try:
+                sensortag = self.ch_dict[map_sorting[chi]]['sensortag']
+                header.append(map_sorting[chi] + ' // ' + sensortag)
+            except (KeyError, TypeError):
+                header.append(map_sorting[chi])
+
+        # and save
+        print('saving...', end='')
+        np.savetxt(fname, self.sig[:,chis], fmt=fmt,
+                   delimiter=delimiter, header=delimiter.join(header))
+        print(fname)
+
+    def save_df(self, fname):
+        """
+        Save the HAWC2 data and sel file in a DataFrame that contains all the
+        data, and all the channel information (the one from the sel file
+        and the parsed from this function)
+        """
+
+        # FIXME: this method is still a stub: self.sig, self.ch_details and
+        # self.ch_dict hold everything needed, but nothing is saved yet
+        self.sig
+        self.ch_details
+        self.ch_dict
+
+
+def ReadOutputAtTime(fname):
+    """Distributed blade loading as generated by the HAWC2 output_at_time
+    command.
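+
+    Usage sketch; the file name below is only an example:
+
+    >>> df = ReadOutputAtTime('output_at_time_blade_loads.dat')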
+    """
+    # because the formatting is really weird, we need to sanitize it a bit
+    with open(fname, 'r') as f:
+        # read the header from line 3
+        f.readline()
+        f.readline()
+        header = f.readline().replace('\r', '').replace('\n', '')
+        cols = [k.strip().replace(' ', '_') for k in header.split('#')[1:]]
+
+#    data = pd.read_fwf(fname, skiprows=3, header=None)
+#    pd.read_table(fname, sep='  ', skiprows=3)
+#    data.index.names = cols
+
+    data = np.loadtxt(fname, skiprows=3)
+    return pd.DataFrame(data, columns=cols)
+
+
+def ReadEigenBody(fname, debug=False):
+    """
+    Read HAWC2 body eigenanalysis result file
+    =========================================
+
+    Parameters
+    ----------
+
+    fname : str
+        path to the HAWC2 body eigenanalysis output file
+
+    debug : boolean, default=False
+
+
+    Returns
+    -------
+
+    results : DataFrame
+        Columns: body, Fd_hz, Fn_hz, log_decr_pct
+
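+    Examples
+    --------
+
+    Illustrative only; the file name is an assumption:
+
+    >>> df = ReadEigenBody('structure_body_eigen.dat')
+    >>> nacelle_modes = df[df['body'] == 'nacelle']
+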
+    """
+
+    #Body data for body number : 3 with the name :nacelle
+    #Results:         fd [Hz]       fn [Hz]       log.decr [%]
+    #Mode nr:  1:   1.45388E-21    1.74896E-03    6.28319E+02
+    FILE = open(fname)
+    lines = FILE.readlines()
+    FILE.close()
+
+    df_dict = {'Fd_hz':[], 'Fn_hz':[], 'log_decr_pct':[], 'body':[]}
+    for i, line in enumerate(lines):
+        if debug: print('line nr: %5i' % i)
+        # identify for which body we will read the data
+        if line[:25] == 'Body data for body number':
+            body = line.split(':')[2].rstrip().lstrip()
+            # remove any annoying characters
+            body = body.replace('\n','').replace('\r','')
+            if debug: print('modes for body: %s' % body)
+        # identify mode number and read the eigenfrequencies
+        elif line[:8] == 'Mode nr:':
+            linelist = line.replace('\n','').replace('\r','').split(':')
+            #modenr = linelist[1].rstrip().lstrip()
+            # text after Mode nr can be empty
+            try:
+                eigenmodes = linelist[2].rstrip().lstrip().split('   ')
+            except IndexError:
+                eigenmodes = ['0', '0', '0']
+
+            if debug: print(eigenmodes)
+            # in case we have more than 3, remove all the empty ones
+            # this can happen when there are NaN values
+            if not len(eigenmodes) == 3:
+                eigenmodes = linelist[2].rstrip().lstrip().split(' ')
+                eigmod = []
+                for k in eigenmodes:
+                    if len(k) > 1:
+                        eigmod.append(k)
+                #eigenmodes = eigmod
+            else:
+                eigmod = eigenmodes
+            # remove any trailing spaces for each element
+            for k in range(len(eigmod)):
+                eigmod[k] = float(eigmod[k])#.lstrip().rstrip()
+
+            df_dict['body'].append(body)
+            df_dict['Fd_hz'].append(eigmod[0])
+            df_dict['Fn_hz'].append(eigmod[1])
+            df_dict['log_decr_pct'].append(eigmod[2])
+
+    return pd.DataFrame(df_dict)
+
+
+def ReadEigenStructure(file_path, file_name, debug=False, max_modes=500):
+    """
+    Read HAWC2 structure eigenanalysis result file
+    ==============================================
+
+    The file looks as follows:
+    #0 Version ID : HAWC2MB 11.3
+    #1 ___________________________________________________________________
+    #2 Structure eigenanalysis output
+    #3 ___________________________________________________________________
+    #4 Time : 13:46:59
+    #5 Date : 28:11.2012
+    #6 ___________________________________________________________________
+    #7 Results:         fd [Hz]       fn [Hz]       log.decr [%]
+    #8 Mode nr:  1:   3.58673E+00    3.58688E+00    5.81231E+00
+    #...
+    #302  Mode nr:294:   0.00000E+00    6.72419E+09    6.28319E+02
+
+    Parameters
+    ----------
+
+    file_path : str
+
+    file_name : str
+
+    debug : boolean, default=False
+
+    max_modes : int
+        Stop evaluating the result after max_modes number of modes have been
+        identified
+
+    Returns
+    -------
+
+    modes_arr : ndarray(3,n)
+        An ndarray(3,n) holding Fd, Fn [Hz] and the logarithmic damping
+        decrement [%] for n different structural eigenmodes
+
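+    Examples
+    --------
+
+    A minimal sketch; path and file name are assumptions:
+
+    >>> modes_arr = ReadEigenStructure('some/path', 'structure_eigen.dat')
+    >>> fd, fn, log_decr = modes_arr[0, :], modes_arr[1, :], modes_arr[2, :]
+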
+    """
+
+    #0 Version ID : HAWC2MB 11.3
+    #1 ___________________________________________________________________
+    #2 Structure eigenanalysis output
+    #3 ___________________________________________________________________
+    #4 Time : 13:46:59
+    #5 Date : 28:11.2012
+    #6 ___________________________________________________________________
+    #7 Results:         fd [Hz]       fn [Hz]       log.decr [%]
+    #8 Mode nr:  1:   3.58673E+00    3.58688E+00    5.81231E+00
+    #  Mode nr:294:   0.00000E+00    6.72419E+09    6.28319E+02
+
+    FILE = open(os.path.join(file_path, file_name))
+    lines = FILE.readlines()
+    FILE.close()
+
+    header_lines = 8
+
+    # we know the number of modes from the number of lines
+    nrofmodes = len(lines) - header_lines
+
+    modes_arr = np.ndarray((3,nrofmodes))
+
+    for i, line in enumerate(lines):
+        if i > max_modes:
+            # cut off the unused rest
+            modes_arr = modes_arr[:,:i]
+            break
+
+        # ignore the header
+        if i < header_lines:
+            continue
+
+        # split up mode nr from the rest
+        parts = line.split(':')
+        #modenr = int(parts[1])
+        # get fd, fn and damping, but remove all empty items on the list
+        modes_arr[:,i-header_lines]=misc.remove_items(parts[2].split(' '),'')
+
+    return modes_arr
+
+
+class UserWind:
+    """
+    """
+
+    def __init__(self):
+        pass
+
+    def __call__(self, z_h, r_blade_tip, a_phi=None, shear_exp=None, nr_hor=3,
+                 nr_vert=20, h_ME=500.0, fname=None, wdir=None):
+        """
+
+        Parameters
+        ----------
+
+        z_h : float
+            Hub height
+
+        r_blade_tip : float
+            Blade tip radius
+
+        a_phi : float, default=None
+            :math:`a_{\\varphi} \\approx 0.5` parameter for the modified
+            Ekman veer distribution. Values vary between -1.2 and 0.5.
+
+        shear_exp : float, default=None
+
+        nr_hor : int, default=3
+            Number of horizontal grid points.
+
+        nr_vert : int, default=20
+            Number of vertical grid points.
+
+        h_ME : float, default=500
+            Modified Ekman parameter. Take roughly 500 for offshore sites,
+            1000 for onshore sites.
+
+        fname : str, default=None
+            When specified, the HAWC2 user defined veer input file will be
+            written.
+
+        wdir : float, default=None
+            A constant veer angle, or yaw angle. Equivalent to setting the
+            wind direction. Angle in degrees.
+
+        Returns
+        -------
+
+        None
+
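+        Examples
+        --------
+
+        A hedged sketch with made-up turbine dimensions; the user defined
+        shear file is only written because ``fname`` is given:
+
+        >>> uw = UserWind()
+        >>> uw(z_h=90.0, r_blade_tip=63.0, a_phi=0.5, shear_exp=0.2,
+        ...    fname='user_shear_veer.dat')
+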
+        """
+
+        x, z = self.create_coords(z_h, r_blade_tip, nr_vert=nr_vert,
+                                  nr_hor=nr_hor)
+        if a_phi is not None:
+            phi_rad = self.veer_ekman_mod(z, z_h, h_ME=h_ME, a_phi=a_phi)
+            assert len(phi_rad) == nr_vert
+        else:
+            nr_vert = len(z)
+            phi_rad = np.zeros((nr_vert,))
+        # add any yaw error on top of
+        if wdir is not None:
+            # because wdir cw positive, and phi veer ccw positive
+            phi_rad -= (wdir*np.pi/180.0)
+        u, v, w, xx, zz = self.decompose_veer(phi_rad, x, z)
+        # scale the shear on top of that
+        if shear_exp is not None:
+            shear = self.shear_powerlaw(zz, z_h, shear_exp)
+            uu = u*shear[:,np.newaxis]
+            vv = v*shear[:,np.newaxis]
+            ww = w*shear[:,np.newaxis]
+        else:
+            # no shear scaling: keep the normalized veer components
+            uu, vv, ww = u, v, w
+        # and write to a file
+        if fname is not None:
+            self.write_user_defined_shear(fname, uu, vv, ww, xx, zz)
+
+    def create_coords(self, z_h, r_blade_tip, nr_vert=3, nr_hor=20):
+        """
+        Utility to create the coordinates of the wind field based on hub height
+        and blade length.
+        """
+        # take 15% extra space after the blade tip
+        z = np.linspace(0, z_h + r_blade_tip*1.15, nr_vert)
+        # along the horizontal, coordinates with 0 at the rotor center
+        x = np.linspace(-r_blade_tip*1.15, r_blade_tip*1.15, nr_hor)
+
+        return x, z
+
+    def shear_powerlaw(self, z, z_ref, a):
+        profile = np.power(z/z_ref, a)
+        # when a is negative, make sure we return zero and not inf
+        profile[np.isinf(profile)] = 0.0
+        return profile
+
+    def veer_ekman_mod(self, z, z_h, h_ME=500.0, a_phi=0.5):
+        """
+        Modified Ekman veer profile, as defined by Mark C. Kelly in email on
+        10 October 2014 15:10 (RE: veer profile)
+
+        .. math::
+            \\varphi(z) - \\varphi(z_H) \\approx a_{\\varphi}
+            e^{-\sqrt{z_H/h_{ME}}}
+            \\frac{z-z_H}{\sqrt{z_H*h_{ME}}}
+            \\left( 1 - \\frac{z-z_H}{2 \sqrt{z_H h_{ME}}}
+            - \\frac{z-z_H}{4z_H} \\right)
+
+        where:
+        :math:`h_{ME} \\equiv \\frac{\\kappa u_*}{f}`
+        and :math:`f = 2 \Omega \sin \\varphi` is the coriolis parameter,
+        and :math:`\\kappa = 0.41` as the von Karman constant,
+        and :math:`u_\\star = \\sqrt{\\frac{\\tau_w}{\\rho}}` friction velocity.
+
+        For on shore, :math:`h_{ME} \\approx 1000`, for off-shore,
+        :math:`h_{ME} \\approx 500`
+
+        :math:`a_{\\varphi} \\approx 0.5`
+
+        Parameters
+        ----------
+
+        a_phi : float, default=0.5
+            :math:`a_{\\varphi} \\approx 0.5` parameter for the modified
+            Ekman veer distribution. Values vary between -1.2 and 0.5.
+
+        Returns
+        -------
+
+        phi_rad : ndarray
+            veer angle in radians
+
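+        Examples
+        --------
+
+        Illustrative values only:
+
+        >>> z = np.linspace(0.0, 160.0, 20)
+        >>> phi_rad = UserWind().veer_ekman_mod(z, 90.0, h_ME=500.0, a_phi=0.5)
+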
+        """
+
+        t1 = np.exp(-math.sqrt(z_h / h_ME))
+        t2 = (z - z_h) / math.sqrt(z_h * h_ME)
+        t3 = ( 1.0 - (z-z_h)/(2.0*math.sqrt(z_h*h_ME)) - (z-z_h)/(4.0*z_h) )
+
+        return a_phi * t1 * t2 * t3
+
+    def decompose_veer(self, phi_rad, x, z):
+        """
+        Convert a veer angle into u, v, and w components, ready for the
+        HAWC2 user defined veer input file.
+
+        Parameters
+        ----------
+
+        phi_rad : ndarray
+            veer angle in radians, one value per vertical grid point
+
+        x : ndarray
+            horizontal grid coordinates
+
+        z : ndarray
+            vertical grid coordinates
+
+        Returns
+        -------
+
+        u, v, w, v_coord, w_coord
+
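+        Examples
+        --------
+
+        A sketch only, reusing the coordinate helper of this class with
+        made-up dimensions:
+
+        >>> uw = UserWind()
+        >>> x, z = uw.create_coords(90.0, 63.0, nr_vert=20, nr_hor=3)
+        >>> phi_rad = uw.veer_ekman_mod(z, 90.0)
+        >>> u, v, w, xx, zz = uw.decompose_veer(phi_rad, x, z)
+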
+        """
+
+        nr_hor = len(x)
+        nr_vert = len(z)
+        assert len(phi_rad) == nr_vert
+
+        tan_phi = np.tan(phi_rad)
+
+        # convert veer angles to veer components in v, u. Make sure the
+        # normalized wind speed remains 1!
+#        u = sympy.Symbol('u')
+#        v = sympy.Symbol('v')
+#        tan_phi = sympy.Symbol('tan_phi')
+#        eq1 = u**2.0 + v**2.0 - 1.0
+#        eq2 = (tan_phi*u/v) - 1.0
+#        sol = sympy.solvers.solve([eq1, eq2], [u,v], dict=True)
+#        # proposed solution is:
+#        u2 = np.sqrt(tan_phi**2/(tan_phi**2 + 1.0))/tan_phi
+#        v2 = np.sqrt(tan_phi**2/(tan_phi**2 + 1.0))
+#        # but that gives the sign switch wrong, simplify/rewrite to:
+        u = np.sqrt(1.0/(tan_phi**2 + 1.0))
+        v = np.sqrt(1.0/(tan_phi**2 + 1.0))*tan_phi
+        # verify they are actually the same but the sign:
+#        assert np.allclose(np.abs(u), np.abs(u2))
+#        assert np.allclose(np.abs(v), np.abs(v2))
+
+        # broadcast the vertical veer profile over all horizontal grid points
+        u_full = u[:,np.newaxis] + np.zeros((nr_hor,))[np.newaxis,:]
+        v_full = v[:,np.newaxis] + np.zeros((nr_hor,))[np.newaxis,:]
+        w_full = np.zeros((nr_vert,nr_hor))
+
+        return u_full, v_full, w_full, x, z
+
+    def load_user_defined_veer(self, fname):
+        """
+        Load a user defined veer and shear file as used for HAWC2
+
+        Returns
+        -------
+
+        u_comp, v_comp, w_comp, v_coord, w_coord, phi_deg
+        """
+        blok = 0
+        bloks = {}
+        with open(fname) as f:
+            for i, line in enumerate(f.readlines()):
+                if line.strip()[0] == '#' and blok > 0:
+                    bloks[blok] = i
+                    blok += 1
+                elif line.strip()[0] == '#':
+                    continue
+                elif blok == 0:
+                    items = line.split(' ')
+                    items = misc.remove_items(items, '')
+                    nr_hor, nr_vert = int(items[0]), int(items[1])
+                    blok += 1
+#        nr_lines = i
+
+        k = nr_hor + 4*nr_vert + 7
+        v_comp = np.genfromtxt(fname, skiprows=3, skip_footer=i-3-3-nr_vert)
+        u_comp = np.genfromtxt(fname, skiprows=3+1+nr_vert,
+                               skip_footer=i-3-3-nr_vert*2)
+        w_comp = np.genfromtxt(fname, skiprows=3+2+nr_vert*2,
+                               skip_footer=i-3-3-nr_vert*3)
+        v_coord = np.genfromtxt(fname, skiprows=3+3+nr_vert*3,
+                               skip_footer=i-3-3-nr_vert*3-3)
+        w_coord = np.genfromtxt(fname, skiprows=3+3+nr_vert*3+4,
+                               skip_footer=i-k)
+        phi_deg = np.arctan(v_comp[:,0]/u_comp[:,0])*180.0/np.pi
+
+        return u_comp, v_comp, w_comp, v_coord, w_coord, phi_deg
+
+    def write_user_defined_shear(self, fname, u, v, w, v_coord, w_coord):
+        """
+        """
+        nr_hor = len(v_coord)
+        nr_vert = len(w_coord)
+
+        try:
+            assert u.shape == v.shape
+            assert u.shape == w.shape
+            assert u.shape[0] == nr_vert
+            assert u.shape[1] == nr_hor
+        except AssertionError:
+            raise ValueError('u, v, w shapes should be consistent with '
+                             'nr_hor and nr_vert: u.shape: %s, nr_hor: %i, '
+                             'nr_vert: %i' % (str(u.shape), nr_hor, nr_vert))
+
+        # and create the input file
+        with open(fname, 'w') as f:
+            f.write('# User defined shear file\n')
+            f.write('%i %i # nr_hor (v), nr_vert (w)\n' % (nr_hor, nr_vert))
+            h1 = 'normalized with U_mean, nr_hor (v) rows, nr_vert (w) columns'
+            f.write('# v component, %s\n' % h1)
+            np.savetxt(f, v, fmt='% 08.05f', delimiter='  ')
+            f.write('# u component, %s\n' % h1)
+            np.savetxt(f, u, fmt='% 08.05f', delimiter='  ')
+            f.write('# w component, %s\n' % h1)
+            np.savetxt(f, w, fmt='% 08.05f', delimiter='  ')
+            h2 = '# v coordinates (along the horizontal, nr_hor, 0 rotor center)'
+            f.write('%s\n' % h2)
+            np.savetxt(f, v_coord.reshape((v_coord.size,1)), fmt='% 8.02f')
+            h3 = '# w coordinates (zero is at ground level, height, nr_vert)'
+            f.write('%s\n' % h3)
+            np.savetxt(f, w_coord.reshape((w_coord.size,1)), fmt='% 8.02f')
+
+
+class WindProfiles:
+
+    def __init__(self):
+        pass
+
+    def powerlaw(self, z, z_ref, a):
+        profile = np.power(z/z_ref, a)
+        # when a negative, make sure we return zero and not inf
+        profile[np.isinf(profile)] = 0.0
+        return profile
+
+    def veer_ekman_mod(self, z, z_h, h_ME=500.0, a_phi=0.5):
+        """
+        Modified Ekman veer profile, as defined by Mark C. Kelly in email on
+        10 October 2014 15:10 (RE: veer profile)
+
+        .. math::
+            \\varphi(z) - \\varphi(z_H) \\approx a_{\\varphi}
+            e^{-\sqrt{z_H/h_{ME}}}
+            \\frac{z-z_H}{\sqrt{z_H*h_{ME}}}
+            \\left( 1 - \\frac{z-z_H}{2 \sqrt{z_H h_{ME}}}
+            - \\frac{z-z_H}{4z_H} \\right)
+
+        where:
+        :math:`h_{ME} \\equiv \\frac{\\kappa u_*}{f}`
+        and :math:`f = 2 \Omega \sin \\varphi` is the coriolis parameter,
+        and :math:`\\kappa = 0.41` as the von Karman constant,
+        and :math:`u_\\star = \\sqrt{\\frac{\\tau_w}{\\rho}}` friction velocity.
+
+        For on shore, :math:`h_{ME} \\approx 1000`, for off-shore,
+        :math:`h_{ME} \\approx 500`
+
+        :math:`a_{\\varphi} \\approx 0.5`
+
+        Parameters
+        ----------
+
+        a_phi : float, default=0.5
+            :math:`a_{\\varphi} \\approx 0.5` parameter for the modified
+            Ekman veer distribution. Values vary between -1.2 and 0.5.
+
+        Returns
+        -------
+
+        phi_rad : ndarray
+            veer angle in radians as function of height
+
+        """
+
+        t1 = np.exp(-math.sqrt(z_h / h_ME))
+        t2 = (z - z_h) / math.sqrt(z_h * h_ME)
+        t3 = ( 1.0 - (z-z_h)/(2.0*math.sqrt(z_h*h_ME)) - (z-z_h)/(4.0*z_h) )
+
+        return a_phi * t1 * t2 * t3
+
+
+class Turbulence:
+
+    def __init__(self):
+
+        pass
+
+    def read_hawc2(self, fpath, shape):
+        """
+        Read the HAWC2 turbulence format
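+
+        A usage sketch; the file name and box shape are assumptions:
+
+        >>> turb = Turbulence()
+        >>> u = turb.read_hawc2('turb_s100_3.00u.bin', (8192, 32, 32))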
+        """
+
+        fid = open(fpath, 'rb')
+        tmp = np.fromfile(fid, 'float32', shape[0]*shape[1]*shape[2])
+        turb = np.reshape(tmp, shape)
+
+        return turb
+
+    def read_bladed(self, fpath, basename):
+
+        fid = open(fpath + basename + '.wnd', 'rb')
+        R1 = struct.unpack('h', fid.read(2))[0]
+        R2 = struct.unpack('h', fid.read(2))[0]
+        turb = struct.unpack('i', fid.read(4))[0]
+        lat = struct.unpack('f', fid.read(4))[0]
+        rough = struct.unpack('f', fid.read(4))[0]
+        refh = struct.unpack('f', fid.read(4))[0]
+        longti = struct.unpack('f', fid.read(4))[0]
+        latti = struct.unpack('f', fid.read(4))[0]
+        vertti = struct.unpack('f', fid.read(4))[0]
+        dv = struct.unpack('f', fid.read(4))[0]
+        dw = struct.unpack('f', fid.read(4))[0]
+        du = struct.unpack('f', fid.read(4))[0]
+        halfalong = struct.unpack('i', fid.read(4))[0]
+        mean_ws = struct.unpack('f', fid.read(4))[0]
+        VertLongComp = struct.unpack('f', fid.read(4))[0]
+        LatLongComp = struct.unpack('f', fid.read(4))[0]
+        LongLongComp = struct.unpack('f', fid.read(4))[0]
+        Int = struct.unpack('i', fid.read(4))[0]
+        seed = struct.unpack('i', fid.read(4))[0]
+        VertGpNum = struct.unpack('i', fid.read(4))[0]
+        LatGpNum = struct.unpack('i', fid.read(4))[0]
+        VertLatComp = struct.unpack('f', fid.read(4))[0]
+        LatLatComp = struct.unpack('f', fid.read(4))[0]
+        LongLatComp = struct.unpack('f', fid.read(4))[0]
+        VertVertComp = struct.unpack('f', fid.read(4))[0]
+        LatVertComp = struct.unpack('f', fid.read(4))[0]
+        LongVertComp = struct.unpack('f', fid.read(4))[0]
+
+        points = np.fromfile(fid, 'int16', 2*halfalong*VertGpNum*LatGpNum*3)
+        fid.close()
+        return points
+
+    def convert2bladed(self, fpath, basename, shape=(4096,32,32)):
+        """
+        Convert turbulence box to BLADED format
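+
+        Usage as exercised in the unit tests below; path and basename are
+        examples:
+
+        >>> turb = Turbulence()
+        >>> iu, iv, iw = turb.convert2bladed('data/', 'turb_s100_3.00',
+        ...                                  shape=(8192, 32, 32))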
+        """
+
+        u = self.read_hawc2(fpath + basename + 'u.bin', shape)
+        v = self.read_hawc2(fpath + basename + 'v.bin', shape)
+        w = self.read_hawc2(fpath + basename + 'w.bin', shape)
+
+        # mean velocity components at the center of the box
+        v1, v2 = (shape[1]//2)-1, shape[1]//2
+        w1, w2 = (shape[2]//2)-1, shape[2]//2
+        ucent = (u[:,v1,w1] + u[:,v1,w2] + u[:,v2,w1] + u[:,v2,w2]) / 4.0
+        vcent = (v[:,v1,w1] + v[:,v1,w2] + v[:,v2,w1] + v[:,v2,w2]) / 4.0
+        wcent = (w[:,v1,w1] + w[:,v1,w2] + w[:,v2,w1] + w[:,v2,w2]) / 4.0
+
+        # FIXME: where is this range 351:7374 coming from?? The original script
+        # considered a box of length 8192
+        umean = np.mean(ucent[351:7374])
+        vmean = np.mean(vcent[351:7374])
+        wmean = np.mean(wcent[351:7374])
+
+        ustd = np.std(ucent[351:7374])
+        vstd = np.std(vcent[351:7374])
+        wstd = np.std(wcent[351:7374])
+
+        # gives a slightly different outcome, but is that significant?
+#        umean = np.mean(u[351:7374,15:17,15:17])
+#        vmean = np.mean(v[351:7374,15:17,15:17])
+#        wmean = np.mean(w[351:7374,15:17,15:17])
+
+        # this is wrong since we want the std on the center point
+#        ustd = np.std(u[351:7374,15:17,15:17])
+#        vstd = np.std(v[351:7374,15:17,15:17])
+#        wstd = np.std(w[351:7374,15:17,15:17])
+
+        iu = np.zeros(shape)
+        iv = np.zeros(shape)
+        iw = np.zeros(shape)
+
+        iu[:,:,:] = (u - umean)/ustd*1000.0
+        iv[:,:,:] = (v - vmean)/vstd*1000.0
+        iw[:,:,:] = (w - wmean)/wstd*1000.0
+
+        # because MATLAB and Octave do a round when casting from float to int,
+        # and Python does a floor, we have to round first
+        np.around(iu, decimals=0, out=iu)
+        np.around(iv, decimals=0, out=iv)
+        np.around(iw, decimals=0, out=iw)
+
+        return iu.astype(np.int16), iv.astype(np.int16), iw.astype(np.int16)
+
+    def write_bladed(self, fpath, basename, shape):
+        """
+        Write turbulence BLADED file
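+
+        A sketch following the unit tests below; path and basename are
+        examples, and the HAWC2 turbulence boxes are expected to be present:
+
+        >>> turb = Turbulence()
+        >>> turb.write_bladed('data/', 'turb_s100_3.00', shape=(8192, 32, 32))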
+        """
+        # TODO: get these parameters from a HAWC2 input file
+        seed = 6
+        mean_ws = 11.4
+        turb = 3
+        R1 = -99
+        R2 = 4
+
+        du = 0.974121094
+        dv = 4.6875
+        dw = 4.6875
+
+        longti = 14
+        latti = 9.8
+        vertti = 7
+
+        iu, iv, iw = self.convert2bladed(fpath, basename, shape=shape)
+
+        fid = open(fpath + basename + '.wnd', 'wb')
+        fid.write(struct.pack('h', R1)) # R1
+        fid.write(struct.pack('h', R2)) # R2
+        fid.write(struct.pack('i', turb)) # Turb
+        fid.write(struct.pack('f', 999)) # Lat
+        fid.write(struct.pack('f', 999)) # rough
+        fid.write(struct.pack('f', 999)) # refh
+        fid.write(struct.pack('f', longti)) # LongTi
+        fid.write(struct.pack('f', latti)) # LatTi
+        fid.write(struct.pack('f', vertti)) # VertTi
+        fid.write(struct.pack('f', dv)) # VertGpSpace
+        fid.write(struct.pack('f', dw)) # LatGpSpace
+        fid.write(struct.pack('f', du)) # LongGpSpace
+        fid.write(struct.pack('i', shape[0]//2)) # HalfAlong
+        fid.write(struct.pack('f', mean_ws)) # meanWS
+        fid.write(struct.pack('f', 999.)) # VertLongComp
+        fid.write(struct.pack('f', 999.)) # LatLongComp
+        fid.write(struct.pack('f', 999.)) # LongLongComp
+        fid.write(struct.pack('i', 999)) # Int
+        fid.write(struct.pack('i', seed)) # Seed
+        fid.write(struct.pack('i', shape[1])) # VertGpNum
+        fid.write(struct.pack('i', shape[2])) # LatGpNum
+        fid.write(struct.pack('f', 999)) # VertLatComp
+        fid.write(struct.pack('f', 999)) # LatLatComp
+        fid.write(struct.pack('f', 999)) # LongLatComp
+        fid.write(struct.pack('f', 999)) # VertVertComp
+        fid.write(struct.pack('f', 999)) # LatVertComp
+        fid.write(struct.pack('f', 999)) # LongVertComp
+#        fid.flush()
+
+#        bladed2 = np.ndarray((shape[0], shape[2], shape[1], 3), dtype=np.int16)
+#        for i in xrange(shape[0]):
+#            for k in xrange(shape[1]):
+#                for j in xrange(shape[2]):
+#                    fid.write(struct.pack('i', iu[i, shape[1]-j-1, k]))
+#                    fid.write(struct.pack('i', iv[i, shape[1]-j-1, k]))
+#                    fid.write(struct.pack('i', iw[i, shape[1]-j-1, k]))
+#                    bladed2[i,k,j,0] = iu[i, shape[1]-j-1, k]
+#                    bladed2[i,k,j,1] = iv[i, shape[1]-j-1, k]
+#                    bladed2[i,k,j,2] = iw[i, shape[1]-j-1, k]
+
+        # re-arrange array for bladed format
+        bladed = np.ndarray((shape[0], shape[2], shape[1], 3), dtype=np.int16)
+        bladed[:,:,:,0] = iu[:,::-1,:]
+        bladed[:,:,:,1] = iv[:,::-1,:]
+        bladed[:,:,:,2] = iw[:,::-1,:]
+        bladed_swap_view = bladed.swapaxes(1,2)
+        bladed_swap_view.tofile(fid, format='%int16')
+
+        fid.flush()
+        fid.close()
+
+
+class Bladed(object):
+
+    def __init__(self):
+        """
+        Some BLADED results I have seen are just weird text files. Convert
+        them to a more convenient format.
+
+        path/to/file
+        channel 1 description
+        col a name/unit col b name/unit
+        a0 b0
+        a1 b1
+        ...
+        path/to/file
+        channel 2 description
+        col a name/unit col b name/unit
+        ...
+        """
+        pass
+
+    def infer_format(self, lines):
+        """
+        Figure out how many channels and time steps are included
+        """
+        count = 1
+        for line in lines[1:]:
+            if line == lines[0]:
+                break
+            count += 1
+        iters = count - 3
+        chans = len(lines) / (iters + 3)
+        return int(chans), int(iters)
+
+    def read(self, fname, chans=None, iters=None, enc='cp1252'):
+        """
+        Parameters
+        ----------
+
+        fname : str
+
+        chans : int, default=None
+
+        iters : int, default=None
+
+        enc : str, default='cp1252'
+            character encoding of the source file. Usually BLADED is used on
+            windows so Western-European windows encoding is a safe bet.
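+
+        Examples
+        --------
+
+        A hedged sketch; the file name is an assumption:
+
+        >>> df = Bladed().read('bladed_results.txt')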
+        """
+
+        with codecs.open(fname, 'r', enc) as f:
+            lines = f.readlines()
+        nrl = len(lines)
+        if chans is None and iters is None:
+            chans, iters = self.infer_format(lines)
+        if iters is not None:
+            chans = int(nrl / (iters + 3))
+        if chans is not None:
+            iters = int((nrl / chans) - 3)
+#        file_head = [ [k[:-2],0] for k in lines[0:nrl:iters+3] ]
+#        chan_head = [ [k[:-2],0] for k in lines[1:nrl:iters+3] ]
+#        cols_head = [ k.split('\t')[:2] for k in lines[2:nrl:iters+3] ]
+
+        data = {}
+        for k in range(chans):
+            # take the column header from the 3 comment line, but
+            head = lines[2 + (3 + iters)*k][:-2].split('\t')[1].encode('utf-8')
+            i0 = 3 + (3 + iters)*k
+            i1 = i0 + iters
+            data[head] = np.array([k[:-2].split('\t')[1] for k in lines[i0:i1:1]])
+            data[head] = data[head].astype(np.float64)
+        time = np.array([k[:-2].split('\t')[0] for k in lines[i0:i1:1]])
+        df = pd.DataFrame(data, index=time.astype(np.float64))
+        df.index.name = lines[0][:-2]
+        return df
+
+
+class Tests(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def print_test_info(self):
+        pass
+
+    def test_reshaped(self):
+        """
+        Make sure we correctly reshape the array instead of the manual
+        index reassignments
+        """
+        fpath = 'data/turb_s100_3.00w.bin'
+        fid = open(fpath, 'rb')
+        turb = np.fromfile(fid, 'float32', 32*32*8192)
+        turb.shape
+        fid.close()
+        u = np.zeros((8192,32,32))
+
+        for i in xrange(8192):
+            for j in xrange(32):
+                for k in xrange(32):
+                    u[i,j,k] = turb[ i*1024 + j*32 + k]
+
+        u2 = np.reshape(turb, (8192, 32, 32))
+
+        self.assertTrue(np.alltrue(np.equal(u, u2)))
+
+    def test_headers(self):
+
+        fpath = 'data/'
+
+        basename = 'turb_s100_3.00_refoctave_header'
+        fid = open(fpath + basename + '.wnd', 'rb')
+        R1 = struct.unpack("h",fid.read(2))[0]
+        R2 = struct.unpack("h",fid.read(2))[0]
+        turb = struct.unpack("i",fid.read(4))[0]
+        lat = struct.unpack("f",fid.read(4))[0]
+        # last line
+        fid.seek(100)
+        LongVertComp = struct.unpack("f",fid.read(4))[0]
+        fid.close()
+
+        basename = 'turb_s100_3.00_python_header'
+        fid = open(fpath + basename + '.wnd', 'rb')
+        R1_p = struct.unpack("h",fid.read(2))[0]
+        R2_p = struct.unpack("h",fid.read(2))[0]
+        turb_p = struct.unpack("i",fid.read(4))[0]
+        lat_p = struct.unpack("f",fid.read(4))[0]
+        # last line
+        fid.seek(100)
+        LongVertComp_p = struct.unpack("f",fid.read(4))[0]
+        fid.close()
+
+        self.assertEqual(R1, R1_p)
+        self.assertEqual(R2, R2_p)
+        self.assertEqual(turb, turb_p)
+        self.assertEqual(lat, lat_p)
+        self.assertEqual(LongVertComp, LongVertComp_p)
+
+    def test_write_bladed(self):
+
+        fpath = 'data/'
+        turb = Turbulence()
+        # write with Python
+        basename = 'turb_s100_3.00'
+        turb.write_bladed(fpath, basename, shape=(8192,32,32))
+        python = turb.read_bladed(fpath, basename)
+
+        # load octave
+        basename = 'turb_s100_3.00_refoctave'
+        octave = turb.read_bladed(fpath, basename)
+
+        # float versions of octave
+        basename = 'turb_s100_3.00_refoctave_float'
+        fid = open(fpath + basename + '.wnd', 'rb')
+        octave32 = np.fromfile(fid, 'float32', 8192*32*32*3)
+
+        # find the differences
+        nr_diff = (python-octave).__ne__(0).sum()
+        print(nr_diff)
+        print(nr_diff/len(python))
+
+        self.assertTrue(np.alltrue(python == octave))
+
+    def test_turbdata(self):
+
+        shape = (8192,32,32)
+
+        fpath = 'data/'
+        basename = 'turb_s100_3.00_refoctave'
+        fid = open(fpath + basename + '.wnd', 'rb')
+
+        # check the last element of the header
+        fid.seek(100)
+        print(struct.unpack("f",fid.read(4))[0])
+        # save in a list using struct
+        items = (os.path.getsize(fpath + basename + '.wnd')-104)//2
+        data_list = [struct.unpack("h",fid.read(2))[0] for k in xrange(items)]
+
+
+        fid.seek(104)
+        data_16 = np.fromfile(fid, 'int16', shape[0]*shape[1]*shape[2]*3)
+
+        fid.seek(104)
+        data_8 = np.fromfile(fid, 'int8', shape[0]*shape[1]*shape[2]*3)
+
+        self.assertTrue(np.alltrue( data_16 == data_list ))
+        self.assertFalse(np.alltrue( data_8 == data_list ))
+
+    def test_compare_octave(self):
+        """
+        Compare the results from the original script run via octave
+        """
+
+        turb = Turbulence()
+        iu, iv, iw = turb.convert2bladed('data/', 'turb_s100_3.00',
+                                         shape=(8192,32,32))
+        res = sio.loadmat('data/workspace.mat')
+        # increase tolerances, values have a range up to 5000-10000
+        # and these values will be written to an int16 format for BLADED!
+        self.assertTrue(np.allclose(res['iu'], iu, rtol=1e-03, atol=1e-2))
+        self.assertTrue(np.allclose(res['iv'], iv, rtol=1e-03, atol=1e-2))
+        self.assertTrue(np.allclose(res['iw'], iw, rtol=1e-03, atol=1e-2))
+
+    def test_allindices(self):
+        """
+        Verify that all indices are called
+        """
+        fpath = 'data/turb_s100_3.00w.bin'
+        fid = open(fpath, 'rb')
+        turb = np.fromfile(fid, 'float32', 32*32*8192)
+        turb.shape
+        fid.close()
+
+        check = []
+        for i in xrange(8192):
+            for j in xrange(32):
+                for k in xrange(32):
+                    check.append(i*1024 + j*32 + k)
+
+        qq = np.array(check)
+        qdiff = np.diff(qq)
+
+        self.assertTrue(np.alltrue(np.equal(qdiff, np.ones(qdiff.shape))))
+
+
+if __name__ == '__main__':
+
+    unittest.main()