diff --git a/docs/developer-guide.md b/docs/developer-guide.md
index 8d238eed59a5ee9a5aa711102b04338f28c28203..5cd2afbde62dfaed8a27868ed4a1b8cf4388155a 100644
--- a/docs/developer-guide.md
+++ b/docs/developer-guide.md
@@ -97,7 +97,7 @@ Where `-n py39` refers any user defined name that describes what is the environm
 used for. These environments can then be activated as follows:
 
 ```
->> conda activate py35
+>> conda activate py39
 ```
 
 The Python distribution in use will now be located in e.g. \<path_to_anaconda\>/env/py39/
@@ -110,13 +110,11 @@ use ```conda deactivate``` to deactivate the environment.
 - Compiler (```wetb``` contains cython extensions that require a compiler):
     - Linux: gcc (should be installed by default)
     - Windows:
-        - Python 2.7: [Microsoft Visual C++ Compiler for Python 2.7](http://aka.ms/vcpython27),
-        or the [direct link](https://www.microsoft.com/en-gb/download/details.aspx?id=44266).
-        - Python 3.4: MS Visual Studio 2010
-        - Python 3.5: MS Visual Studio 2015 or [Visual C++ Build Tools](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2017)
-        - Python 3.5+: MS Visual Studio 2017 or [Visual C++ 2017 Redistributable](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2017)
+        - Python 3.5+: MS Visual Studio 2022, the
+        [Visual C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/),
+        or the [Visual C++ 2022 Redistributable](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2022)
+
         - Only one MS Visual Studio version can be installed, but you can for
-        example install MS Visual Studio 2010 alongside the Visual C++ Build Tools.
+        example install MS Visual Studio 2022 alongside the Visual C++ Build Tools.
 - [numpy](http://www.numpy.org/)
 - [cython](http://cython.org/)
 - [scipy](http://scipy.org/scipylib/)
@@ -128,9 +126,8 @@ use ```conda deactivate``` to deactivate the environment.
 - [pytables](http://www.pytables.org/)
 - [pytest](https://pypi.python.org/pypi/pytest)
 - [pytest-cov](https://pypi.python.org/pypi/pytest-cov/)
-- six
-- nose, sphinx, blosc, pbr, psutil, coverage, setuptools_scm
-- [parimeko](http://www.paramiko.org/)
+- nose, sphinx, blosc, pbr, psutil, coverage, six
+- [paramiko](http://www.paramiko.org/)
 - [sshtunnel](https://github.com/pahaz/sshtunnel)
 - [pandoc](http://pandoc.org/) , [pypandoc](https://pypi.python.org/pypi/pypandoc):
 - [click](https://click.palletsprojects.com/en/7.x/)
@@ -139,15 +136,13 @@ convert markdown formatted readme file to rst for PyPi compatibility. See also
 issue #22. ```pandoc``` is available in Anaconda. When installing
 ```pypandoc``` via pip, you have to install ```pandoc``` via your package
 manager (Linux/Mac).
-- [twine](https://pypi.python.org/pypi/twine): upload package to
-[PyPi](https://pypi.python.org/pypi)
 
 Install the necessary Python dependencies using the conda package manager:
 
 ```
->> conda install setuptools_scm mock h5py pytables pytest pytest-cov nose sphinx blosc pbr paramiko
->> conda install scipy pandas matplotlib cython xlrd coverage xlwt openpyxl psutil pandoc twine pypandoc click jinja2
->> conda install -c conda-forge sshtunnel --no-deps
+conda install mock h5py pytables pytest pytest-cov nose sphinx blosc pbr paramiko
+conda install scipy pandas matplotlib cython coverage openpyxl psutil click jinja2
+conda install -c conda-forge sshtunnel --no-deps
 ```
 
 Note that ```--no-deps``` avoids that newer packages from the channel
@@ -156,11 +151,6 @@ channel. Depending on which packages get overwritten, this might brake your
 Anaconda root environment. As such, using ```--no-deps``` should be
 used for safety (especially when operating from the root environment).
 
-Note that:
-
-- With Python 2.7, blosc fails to install.
-- With Python 3.6, twine, pypandoc fails to install.
-
 
 ## Get wetb
 
diff --git a/docs/hawc2.rst b/docs/hawc2.rst
index 8bfa15418219d1fe03b1798a9625189269ea3462..0a49eb9d0f0c8ea7e7f4e173c22061193ae4ccd6 100644
--- a/docs/hawc2.rst
+++ b/docs/hawc2.rst
@@ -11,4 +11,4 @@ notebook format. You can download the source notebooks from the
 
     notebooks/hawc2/InputFileWriting
     notebooks/hawc2/RunningSimulations
-    notebooks/hawc2/RunningSimulationsJess
\ No newline at end of file
+    notebooks/hawc2/RunningSimulationsJess
diff --git a/wetb/bladed/prj2hawc.py b/wetb/bladed/prj2hawc.py
index f6ab12ada87db9cdbb3aee2b07f4362f461a8b2f..90eafabd5d98c51f4af99140b7c412f3a7d44600 100644
--- a/wetb/bladed/prj2hawc.py
+++ b/wetb/bladed/prj2hawc.py
@@ -15,16 +15,18 @@ import pandas as pd
 # from wetb.prepost import misc
 from wetb.hawc2 import (HTCFile, AEFile, PCFile, StFile)
 
-from readprj import ReadBladedProject
+from wetb.bladed.readprj import ReadBladedProject
 
 
 # TODO: move to misc?
 def get_circular_inertia(d1, t, m):
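+    """Area and inertia properties of an annular (tube) cross section.
+
+    d1: outer diameter, t: wall thickness, m: mass per unit length.
+    """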
     d2 = d1 - 2*t
     A = np.pi/4*(d1**2-d2**2)
+    # area moment of inertia x=y
     I_g = np.pi/64*(d1**4-d2**4)
+    # mass moment of inertia
     I_m = I_g/A*m
-    return A,I_g,I_m
+    return A, I_g, I_m
 
 
 # TODO: move to HTCFile?
@@ -421,7 +423,7 @@ class Convert2Hawc(ReadBladedProject):
         t[0] = t_thick[0,1]
         mat[0,:] = t_mat[0,:].copy()
 
-        A,I_g,I_m = get_circular_inertia(d,t,mat[:,0])
+        A, I_g, I_m = get_circular_inertia(d, t, mat[:,0])
         # FIXME: MAYBE ANOTHER FIX FOR DIFFERENT MATERIALS
         # return c2_arr
         starr = np.zeros((c2_arr.shape[0],19))
@@ -430,8 +432,8 @@ class Convert2Hawc(ReadBladedProject):
         starr[:,2] = 0.0 # no cg offset
         starr[:,3] = 0.0 # no cg offset
         # radius of gyration
-        starr[:,4] = np.sqrt(I_m/m) # ri_x = (I_m/m)**0.5 = (I_g/A)**0.5
-        starr[:,5] = np.sqrt(I_m/m) # ri_y = (I_m/m)**0.5 = (I_g/A)**0.5
+        starr[:,4] = np.sqrt(I_m/mat[:,0]) # ri_x = (I_m/m)**0.5 = (I_g/A)**0.5
+        starr[:,5] = np.sqrt(I_m/mat[:,0]) # ri_y = (I_m/m)**0.5 = (I_g/A)**0.5
         # shear center
         starr[:,6] = 0.0 # no shear center offset
         starr[:,7] = 0.0 # no shear center offset
diff --git a/wetb/prepost/GenerateHydro.py b/wetb/prepost/GenerateHydro.py
index fd798d77df8dcb6f01224b57ef37ea0008d68c36..d55ed4bded731fd411834d2ae518fcd4b6360387 100755
--- a/wetb/prepost/GenerateHydro.py
+++ b/wetb/prepost/GenerateHydro.py
@@ -10,13 +10,35 @@ Description: This script is used for writing the hydro input files for HAWC2
 
 import os
 
+template = """
+begin wkin_input ;
+    wavetype {wavetype} ;
+    wdepth  {wdepth} ;
+    begin reg_airy ;
+        stretching {stretching} ; 0=none, 1=wheeler
+        wave {Hs} {Tp} {wave_phase_shift} ; (Hs, Tp, phase shift)
+;        ignore_water_surface ;
+    end reg_airy ;
+    begin ireg_airy ;
+        stretching {stretching} ; 0=none, 1=wheeler
+        spectrum   {spectrum_id} ; (1=jonswap, 2=pm)
+        mccamyfuchs {mccamyfuchs} {mccamyfuchs_radius};
+        jonswap {Hs} {Tp} {gamma} ; (Hs, Tp, gamma)
+        pm {Hs} {Tp} ; (Hs, Tp)
+;        wn {wn_variance} {wn_f0} {wn_f1};
+        coef {coef_nrs} {coef_seed} {coef_phase_shift} ; (number of coefficients, seed, phase shift)
+        spreading {spreading} {spreading_param};
+    end;
+end;
+"""
+
 class hydro_input(object):
 
     """ Basic class aiming for write the hydrodynamics input file"""
 
     def __init__(self, wavetype, wdepth, spectrum, Hs, Tp, seed, gamma=3.3,
                  stretching=1, wn=None, coef=200, spreading=None,
-                 embed_sf=None, embed_sf_t0=None):
+                 embed_sf=None, embed_sf_t0=None, mccamyfuchs=1):
 
         self.wdepth = wdepth
         if self.wdepth < 0:
diff --git a/wetb/prepost/Simulations.py b/wetb/prepost/Simulations.py
index 8fb188a582413c694e1eb92e1a37a53d53720f25..9d67daac5a4ec7a5a30f589c5f23be3416464207 100755
--- a/wetb/prepost/Simulations.py
+++ b/wetb/prepost/Simulations.py
@@ -1519,7 +1519,11 @@ class HtcMaster(object):
             # case sensitive tag names
 #            htc = htc.replace(str(k), str(value))
             # tag names are case insensitive
-            htc = re.sub(re.escape(str(k)), str(value), htc, flags=re.IGNORECASE)
+            try:
+                htc = re.sub(re.escape(str(k)), str(value), htc, flags=re.IGNORECASE)
+            except re.error:
+                print('ignoring tag replacement for:', k, value)
 
         # and save the the case htc file:
         cname = self.tags['[case_id]'] + '.htc'
diff --git a/wetb/prepost/aep_del.py b/wetb/prepost/aep_del.py
new file mode 100755
index 0000000000000000000000000000000000000000..7c3b62d4f3abee059386415fe58ae36685a2a6dd
--- /dev/null
+++ b/wetb/prepost/aep_del.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Oct 30 13:40:44 2020
+
+@author: dave
+"""
+
+import os
+from argparse import ArgumentParser
+import math
+
+import numpy as np
+import pandas as pd
+
+from wetb.dlc.high_level import Weibull_IEC
+
+
+def aep_del(df_stats, resdir):
+    """Works for regular wetb statistics df.
+    """
+
+    # note about the unique channel names: they are saved under
+    # prepost-data/{sim_id}_unique-channel-names.csv
+    # after running parpost.py
+
+    # TODO: make option to have user define list of channels
+    # channels = df_stats['channel'].unique()
+
+    # DLC12: assumes that case_id (the file name) has the following format:
+    # dlc12_wsp04_wdir000_s1001.htc
+
+    # -----------------------------------------------------
+    # USER INPUT
+    neq_life = 1e7
+    vref = 50 # used for the IEC Weibull distribution
+    fact_1year = 365 * 24 * 0.975
+    years_del = 20
+    # specify which channel to use for the wind speed
+    chwind = 'windspeed-global-Vy-0.00-0.00--119.00'
+    # specify which channel to use for the electrical power output
+    chpowe = 'DLL-generator_servo-inpvec-2'
+    yawdist = {'wdir000':0.5, 'wdir010':0.25, 'wdir350':0.25}
+    dlc10_dir = 'dlc10_powercurve'
+    dlc12_dir = 'dlc12_iec61400-1ed3'
+    # -----------------------------------------------------
+
+    df_stats['hour_weight'] = 0
+
+    # -----------------
+    # DLC10
+    # -----------------
+    df_pc_dlc10 = pd.DataFrame()
+    resdir_dlc10 = os.path.join(resdir, dlc10_dir)
+    df_dlc10 = df_stats[df_stats['res_dir']==resdir_dlc10]
+    if len(df_dlc10) > 0:
+        df_pivot = df_dlc10.pivot(index='case_id', columns='channel', values='mean')
+        df_pivot.sort_values(chwind, inplace=True)
+        # wind speed and power
+        wsp = df_pivot[chwind].values
+        powe = df_pivot[chpowe].values
+        # wind speed probability distribution for 1 year
+        wsp_dist = Weibull_IEC(vref, wsp) * fact_1year
+        # save data into the DataFrame table
+        df_pc_dlc10['wsp'] = wsp
+        df_pc_dlc10['powe'] = powe
+        df_pc_dlc10['hours'] = wsp_dist
+        df_pc_dlc10['aep'] = (powe * wsp_dist).sum()
+
+    # -----------------
+    # DLC12
+    # -----------------
+    resdir_dlc12 = os.path.join(resdir, dlc12_dir)
+    df_dlc12 = df_stats[df_stats['res_dir']==resdir_dlc12].copy()
+    df_pivot = df_dlc12.pivot(index='case_id', columns='channel', values='mean')
+    # now add the wsp, yaw, seed columns again
+    tmp = pd.DataFrame(df_pivot.index.values)
+    df_pivot['dlc'] = ''
+    df_pivot['wsp'] = ''
+    df_pivot['yaw'] = ''
+    df_pivot['seed'] = ''
+    # assumes that case_id (the file name) has the following format:
+    # dlc12_wsp04_wdir000_s1001.htc
+    df_pivot[['dlc', 'wsp', 'yaw', 'seed']] = tmp[0].str.split('_', expand=True).values
+    df_pivot['wsp'] = (df_pivot['wsp'].str[3:]).astype(int)
+
+    # sorted on wsp, yaw, seed
+    df_pivot.sort_index(inplace=True)
+
+    # figure out how many seeds there are per yaw and wsp
+    seeds = {'wsp_yaw':[], 'nr_seeds':[]}
+    for ii, grii in df_pivot.groupby('wsp'):
+        for jj, grjj in grii.groupby('yaw'):
+            seeds['wsp_yaw'].append('%02i_%s' % (ii, jj))
+            seeds['nr_seeds'].append(len(grjj))
+
+    nr_seeds = np.array(seeds['nr_seeds']).max()
+    seeds_min = np.array(seeds['nr_seeds']).min()
+    assert nr_seeds == seeds_min
+
+    # yaw error probabilities
+    for yaw, dist in yawdist.items():
+        df_pivot.loc[df_pivot['yaw']==yaw, 'hour_weight'] = dist/nr_seeds
+    df_pivot['annual_hours_wsp'] = 0
+
+    # Weibull hour distribution as function of wind speeds
+    wsps = df_pivot['wsp'].unique()
+    wsps.sort()
+    wsp_dist = Weibull_IEC(vref, wsps) * fact_1year
+    for i, wsp in enumerate(wsps):
+        sel = df_pivot['wsp']==wsp
+        df_pivot.loc[sel, 'annual_hours_wsp'] = wsp_dist[i]
+
+    df_pivot['annual_hour_dist'] = df_pivot['annual_hours_wsp'] * df_pivot['hour_weight']
+
+    # check we still have the same amount of hours in a year
+    hours1 = df_pivot['annual_hour_dist'].sum()
+    hours2 = wsp_dist.sum()
+    assert np.allclose(hours1, hours2)
+
+    aep = (df_pivot['annual_hour_dist'] * df_pivot[chpowe]).sum()
+
+    df_pc_dlc12 = df_pivot[[chwind, chpowe, 'dlc', 'wsp', 'yaw', 'seed',
+                            'hour_weight', 'annual_hours_wsp', 'annual_hour_dist']].copy()
+    df_pc_dlc12['aep'] = aep
+
+    # -----------------
+    # DLC12 DEL
+    ms = [k for k in df_dlc12.columns if k.find('m=') > -1]
+    dict_Leq = {m:[] for m in ms}
+    dict_Leq['channel'] = []
+
+    # TODO: make option to have user define list of channels
+    # for channel in channels:
+        #statsel = df_dlc12[df_dlc12['channel']==channel].copy()
+
+    for channel, grch in df_dlc12.groupby('channel'):
+        # sort the case_id values in the same way as the pivot table is to
+        # align the hours with the corresponding case
+        statsel = grch.copy()
+        statsel.set_index('case_id', inplace=True)
+        statsel = statsel.loc[df_pivot.index,:]
+
+        dict_Leq['channel'].append(channel)
+
+        for m in ms:
+            m_ = float(m.split('=')[1])
+            eq1hz_mod = np.power(statsel[m].values, m_)
+            # R_eq_mod will have to be scaled from its simulation length
+            # to 1 hour (hour distribution is in hours...). Since the
+            # simulation time has not been multiplied out of R_eq_mod yet,
+            # we can just multiply with 3600 (instead of doing 3600/neq)
+            tmp = (eq1hz_mod * df_pivot['annual_hour_dist'] * years_del * 3600).sum()
+            # the effective Leq for each of the material constants
+            leq = math.pow(tmp/neq_life, 1.0/m_)
+            dict_Leq[m].append(leq)
+
+    df_leq = pd.DataFrame(dict_Leq)
+    df_leq.set_index('channel', inplace=True)
+
+    return df_pc_dlc10, df_pc_dlc12, df_leq
+
+
+if __name__ == '__main__':
+
+    parser = ArgumentParser(description = "Calculate AEP and LEQ")
+    parser.add_argument('--res', type=str, default='res', action='store',
+                        dest='resdir', help='Directory containing result files')
+    opt = parser.parse_args()
+
+    sim_id = os.path.basename(os.getcwd())
+    fname = f'prepost-data/{sim_id}_statistics.h5'
+    df_stats = pd.read_hdf(fname, key='table')
+
+    df_pc_dlc10, df_pc_dlc12, df_leq = aep_del(df_stats, opt.resdir)
+
+    # save to Excel files
+    df_leq.to_excel(f'prepost-data/{sim_id}_del.xlsx')
+    df_pc_dlc10.to_excel(f'prepost-data/{sim_id}_aep_10.xlsx')
+    df_pc_dlc12.to_excel(f'prepost-data/{sim_id}_aep_12.xlsx')
diff --git a/wetb/prepost/dlcdefs.py b/wetb/prepost/dlcdefs.py
index 36ddfc447017b30123e1726b89f36580d2eac527..c3da75b90edb0518238da9fc83ca15d5093bbe33 100644
--- a/wetb/prepost/dlcdefs.py
+++ b/wetb/prepost/dlcdefs.py
@@ -124,34 +124,6 @@ def variable_tag_func(master, case_id_short=False):
     return master
 
 
-def vartag_dlcs(master):
-
-    mt = master.tags
-
-    dlc_case = mt['[Case folder]']
-    mt['[data_dir]'] = 'data/'
-    mt['[res_dir]'] = 'res/%s/' % dlc_case
-    mt['[log_dir]'] = 'logfiles/%s/' % dlc_case
-    mt['[htc_dir]'] = 'htc/%s/' % dlc_case
-    mt['[case_id]'] = mt['[Case id.]']
-    mt['[time_stop]'] = mt['[time stop]']
-    mt['[turb_base_name]'] = mt['[Turb base name]']
-    mt['[DLC]'] = mt['[Case id.]'].split('_')[0][3:]
-    mt['[pbs_out_dir]'] = 'pbs_out/%s/' % dlc_case
-    mt['[pbs_in_dir]'] = 'pbs_in/%s/' % dlc_case
-    mt['[iter_dir]'] = 'iter/%s/' % dlc_case
-    if mt['[eigen_analysis]']:
-        rpl = (dlc_case, mt['[Case id.]'])
-        mt['[eigenfreq_dir]'] = 'res_eigen/%s/%s/' % rpl
-    mt['[duration]'] = str(float(mt['[time_stop]']) - float(mt['[t0]']))
-    # replace nan with empty
-    for ii, jj in mt.items():
-        if jj == 'nan':
-            mt[ii] = ''
-
-    return master
-
-
 def vartag_excel_stabcon(master):
     """Variable tag function type that generates a hydro input file for the
     wave kinematics dll if [hydro input name] is defined properly.
@@ -419,7 +391,7 @@ def excel_stabcon(proot, fext='xlsx', pignore=None, pinclude=None, sheet=0,
         for count, row in df2.iterrows():
             tags_dict = {}
             # construct to dict, convert unicode keys/values to strings
-            for key, value in row.iteritems():
+            for key, value in row.items():
                 if isinstance(value, str):
                     tags_dict[str(key)] = str(value)
                 else:
diff --git a/wetb/prepost/dlcplots.py b/wetb/prepost/dlcplots.py
index c55ee8705e0f18fddbdb46eb8390acb2d4051c48..e0c9f3a8ea6cedbc73719debf9c86f0d3e41c915 100644
--- a/wetb/prepost/dlcplots.py
+++ b/wetb/prepost/dlcplots.py
@@ -52,6 +52,7 @@ def merge_sim_ids(sim_ids, post_dirs, post_dir_save=False, columns=None):
 
     # map the run_dir to the same order as the post_dirs, labels
     run_dirs = []
+
     # avoid saving merged cases if there is only one!
     if type(sim_ids).__name__ == 'list' and len(sim_ids) == 1:
         sim_ids = sim_ids[0]
@@ -142,7 +143,11 @@ def merge_sim_ids(sim_ids, post_dirs, post_dir_save=False, columns=None):
         df_stats, _, _ = cc.load_stats(columns=columns, leq=False)
         if columns is not None:
             df_stats = df_stats[columns]
-        run_dirs = [df_stats['[run_dir]'].unique()[0]]
+
+        try:
+            run_dirs = [df_stats['[run_dir]'].unique()[0]]
+        except KeyError:
+            run_dirs = []
 
         # stats has only a few columns identifying the different cases
         # add some more for selecting them
@@ -157,7 +162,7 @@ def merge_sim_ids(sim_ids, post_dirs, post_dir_save=False, columns=None):
         add_cols = list(cols_cc - set(df_stats.columns))
         add_cols.append('[case_id]')
         dfc = dfc[add_cols]
-        df_stats = pd.merge(df_stats, dfc, on='[case_id]')
+        df_stats = pd.merge(df_stats, dfc, right_on='[case_id]', left_on='case_id')
         if '[Windspeed]' in df_stats.columns and '[wsp]' in df_stats.columns:
             df_stats.drop('[wsp]', axis=1, inplace=True)
         if wsp != '[Windspeed]':
@@ -355,11 +360,11 @@ def plot_dlc_stats(df_stats, plot_chans, fig_dir_base, labels=None,
 
             fig, axes = mplutils.make_fig(nrows=1, ncols=1,
                                           figsize=figsize, dpi=120)
-            ax = axes[0,0]
+            ax = axes
             # seperate figure for the mean of the 1Hz equivalent loads
             fig2, axes2 = mplutils.make_fig(nrows=1, ncols=1,
                                             figsize=figsize, dpi=120)
-            ax2 = axes2[0,0]
+            ax2 = axes2
 
             if fig_dir_base is None and len(sim_ids) < 2:
                 res_dir = df_chan['[res_dir]'][:1].values[0]
diff --git a/wetb/prepost/dlctemplate.py b/wetb/prepost/dlctemplate.py
index 2e658ef978633563b535dba1a558e96bf3017140..b503b19a684edcb063db0fa7525a00b4573d4bf6 100644
--- a/wetb/prepost/dlctemplate.py
+++ b/wetb/prepost/dlctemplate.py
@@ -276,19 +276,6 @@ def launch_dlcs_excel(sim_id, silent=False, verbose=False, pbs_turb=False,
         wine_prefix = None
         prelude = 'module load mpi/openmpi_1.6.5_intelv14.0.0\n'
 
-    # if linux:
-    #     pyenv = 'py36-wetb'
-    #     pyenv_cmd = 'source /home/python/miniconda3/bin/activate'
-    #     exesingle = "{hawc2_exe:} {fname_htc:}"
-    #     exechunks = "({winenumactl:} {hawc2_exe:} {fname_htc:}) "
-    #     exechunks += "2>&1 | tee {fname_pbs_out:}"
-    # else:
-    #     pyenv = ''
-    #     pyenv_cmd = 'source /home/ozgo/bin/activate_hawc2cfd.sh'
-    #     exesingle = "time {hawc2_exe:} {fname_htc:}"
-    #     exechunks = "(time numactl --physcpubind=$CPU_NR {hawc2_exe:} {fname_htc:}) "
-    #     exechunks += "2>&1 | tee {fname_pbs_out:}"
-
     # see if a htc/DLCs dir exists
     # Load all DLC definitions and make some assumptions on tags that are not
     # defined
diff --git a/wetb/prepost/h2_vs_hs2.py b/wetb/prepost/h2_vs_hs2.py
index 1aca192eb0f489f639d1c40af97324eebc03be71..b86dc2466734eb54e1d115db48a2e1403410222f 100644
--- a/wetb/prepost/h2_vs_hs2.py
+++ b/wetb/prepost/h2_vs_hs2.py
@@ -415,7 +415,7 @@ class Sims(object):
                            copyback_turb=False, msg='', update_cases=False,
                            ignore_non_unique=False, run_only_new=False,
                            pbs_fname_appendix=False, short_job_names=False,
-                           windows_nr_cpus=2)
+                           windows_nr_cpus=2, update_model_data=True)
 
     def get_control_tuning(self, fpath):
         """
@@ -530,9 +530,9 @@ class MappingsH2HS2(object):
 
     def _powercurve_hs2(self, fname):
 
-        mappings = {'P [kW]'  :'P_aero',
-                    'T [kN]'  :'T_aero',
-                    'V [m/s]' :'windspeed'}
+        mappings = {'P' :'P_aero',
+                    'T' :'T_aero',
+                    'V' :'windspeed'}
 
         df_pwr, units = self.hs2_res.load_pwr_df(fname)
 
@@ -550,14 +550,21 @@ class MappingsH2HS2(object):
         if h2_df_stats is not None:
             self.h2_df_stats = h2_df_stats
             if fname_h2_tors is not None:
-                self.distribution_stats_h2(fname_h2_tors, 'Tors_e', 'torsion')
+                # self.distribution_stats_h2(fname_h2_tors, 'Tors_e', 'torsion',
+                #                            xaxis='radius')
+                self.distribution_stats_h2(fname_h2_tors, 'statevec_new', 'torsion',
+                                           component='Rz', xaxis='s')
+                self.distribution_stats_h2(fname_h2_tors, 'statevec_new', 'def_x_svn',
+                                            component='Dx', xaxis='s')
+                self.distribution_stats_h2(fname_h2_tors, 'statevec_new', 'def_y_svn',
+                                            component='Dy', xaxis='s')
 
     def _distribution_hs2(self):
         """Read a HAWCStab2 *.ind file (blade distribution loading)
 
         rot_angle and rot_vec_123 in HS2 should be in rotor polar coordinates
         """
-
+        # mapping_hs2[hawcstab2_key] = general_key
         mapping_hs2 =  {'s [m]'       :'curved_s',
                         'CL0 [-]'     :'Cl',
                         'CD0 [-]'     :'Cd',
@@ -613,6 +620,7 @@ class MappingsH2HS2(object):
         self.hs_aero['twist'] *= (180.0/np.pi)
 
     def _distribution_h2(self):
+        # mapping_h2[hawc2_key] = general_key
         mapping_h2 =  { 'Radius_s'  :'curved_s',
                         'Cl'        :'Cl',
                         'Cd'        :'Cd',
@@ -623,9 +631,9 @@ class MappingsH2HS2(object):
                         'Vrel'      :'vrel',
                         'Inflow_ang':'inflow_angle',
                         'alfa'      :'AoA',
-                        'pos_RP_x'  :'pos_x',
-                        'pos_RP_y'  :'pos_y',
-                        'pos_RP_z'  :'pos_z',
+                        'Pos_RP_x'  :'pos_x',
+                        'Pos_RP_y'  :'pos_y',
+                        'Pos_RP_z'  :'pos_z',
                         'Chord'     :'chord',
                         'Secfrc_RPx':'F_x',
                         'Secfrc_RPy':'F_y',
@@ -637,9 +645,13 @@ class MappingsH2HS2(object):
         h2_aero = self.h2_res[h2_cols].copy()
         # change column names to the standard form that is shared with HS
         h2_aero.columns = std_cols
+
+        # using the distributed aero outputs
         h2_aero['def_x'] = self.h2_res['Pos_B_x'] - self.h2_res['Inipos_x_x']
         h2_aero['def_y'] = self.h2_res['Pos_B_y'] - self.h2_res['Inipos_y_y']
         h2_aero['def_z'] = self.h2_res['Pos_B_z'] - self.h2_res['Inipos_z_z']
+        # note that distribution_stats_h2() can also add def_x_svn/def_y_svn
+        # from statevec_new
+
         h2_aero['ax_ind_vel'] *= (-1.0)
 #        h2_aero['pos_x'] += (self.h2_res['Chord'] / 2.0)
         h2_aero['F_x'] *= (1e3)
@@ -651,7 +663,8 @@ class MappingsH2HS2(object):
 #        h2_aero = h2_aero[1:-1]
         self.h2_aero = h2_aero
 
-    def distribution_stats_h2(self, fname_h2, sensortype, newname):
+    def distribution_stats_h2(self, fname_h2, sensortype, newname, xaxis='s',
+                              component=None):
         """Determine blade distribution sensor from the HAWC2 statistics.
         This requires that for each aerodynamic calculation point there should
         be an output sensor defined manually in the output section.
@@ -665,6 +678,14 @@ class MappingsH2HS2(object):
 
         newname
 
+        xaxis : string
+            column name in LoadResults.ch_df identifying the blade radial
+            coordinate. Valid values are 's', 'radius', 'radius_actual',
+            'srel'
+
+        component : string
+            For statevec_new we also need to specify which component, Dx, Rx, etc.
+
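+        Example (mirrors the statevec_new calls used elsewhere in this class;
+        the file name is hypothetical)::
+
+            self.distribution_stats_h2('dlc12_case.htc', 'statevec_new',
+                                       'torsion', component='Rz', xaxis='s')
+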
         """
         if not hasattr(self, 'h2_aero'):
             raise UserWarning('first run blade_distribution')
@@ -674,34 +695,39 @@ class MappingsH2HS2(object):
         fname = os.path.basename(fname_h2)
         res = sim.windIO.LoadResults(fpath, fname, readdata=True)
         sel = res.ch_df[res.ch_df.sensortype == sensortype].copy()
+        if component is not None:
+            sel = sel[sel['component']==component]
         if len(sel) == 0:
             msg = 'HAWC2 sensor type "%s" is missing, are they defined?'
             raise ValueError(msg % sensortype)
-        sel.sort_values(['radius'], inplace=True)
-        tors_e_channels = sel.unique_ch_name.tolist()
+        # for backward compatibility with tors_e
+        sel.sort_values([xaxis], inplace=True)
+        chan_sel = sel.unique_ch_name.tolist()
 
         # find the current case in the statistics DataFrame
         case = fname.replace('.htc', '')
         df_case = self.h2_df_stats[self.h2_df_stats['[case_id]']==case].copy()
         # and select all the torsion channels
-        df_tors_e = df_case[df_case.channel.isin(tors_e_channels)].copy()
+        df_chan_sel = df_case[df_case.channel.isin(chan_sel)].copy()
         # join the stats with the channel descriptions DataFrames, have the
         # same name on the joining column
-        df_tors_e.set_index('channel', inplace=True)
+        df_chan_sel.set_index('channel', inplace=True)
         sel.set_index('unique_ch_name', inplace=True)
 
         # joining happens on the index, and for which the same channel has been
         # used: the unique HAWC2 channel naming scheme
-        df_tors_e = pd.concat([df_tors_e, sel], axis=1)
-        df_tors_e.radius = df_tors_e.radius.astype(np.float64)
-        # sorting on radius, combine with ch_df
-        df_tors_e.sort_values(['radius'], inplace=True)
+        df_chan_sel = pd.concat([df_chan_sel, sel], axis=1)
+        df_chan_sel[xaxis] = df_chan_sel[xaxis].astype(np.float64)
+        # sorting on radius/s/etc, combine with ch_df
+        df_chan_sel.sort_values([xaxis], inplace=True)
+
+        # TODO: check if the outputs are at the same positions as the aero outputs
 
         # FIXME: what if number of torsion outputs is less than aero
         # calculation points?
-        self.h2_aero['%s' % newname] = df_tors_e['mean'].values.copy()
-        self.h2_aero['%s_std' % newname] = df_tors_e['std'].values.copy()
-        self.h2_aero['%s_radius_s' % newname] = df_tors_e['radius'].values.copy()
+        self.h2_aero['%s' % newname] = df_chan_sel['mean'].values.copy()
+        self.h2_aero['%s_std' % newname] = df_chan_sel['std'].values.copy()
+        self.h2_aero['%s_%s_s' % (newname, xaxis)] = df_chan_sel[xaxis].values.copy()
 
     def body_structure_modes(self, fname_h2, fname_hs):
         self._body_structure_modes_h2(fname_h2)
@@ -791,7 +817,14 @@ class Plots(object):
         if h2_df_stats is not None:
             res.h2_df_stats = h2_df_stats
             if fname_h2_tors is not None:
-                res.distribution_stats_h2(fname_h2_tors, 'Tors_e', 'torsion')
+                # res.distribution_stats_h2(fname_h2_tors, 'Tors_e', 'torsion',
+                #                           xaxis='radius')
+                res.distribution_stats_h2(fname_h2_tors, 'statevec_new', 'torsion',
+                                          component='Rz', xaxis='s')
+                res.distribution_stats_h2(fname_h2_tors, 'statevec_new', 'def_x_svn',
+                                          component='Dx', xaxis='s')
+                res.distribution_stats_h2(fname_h2_tors, 'statevec_new', 'def_y_svn',
+                                          component='Dy', xaxis='s')
 
         return res
 
diff --git a/wetb/prepost/hawcstab2.py b/wetb/prepost/hawcstab2.py
index dbf920b37ea730c36b3a3027c7d4a81f529ee5c7..3053874c809adfea76703d6b9ec215b598a0afb5 100644
--- a/wetb/prepost/hawcstab2.py
+++ b/wetb/prepost/hawcstab2.py
@@ -564,7 +564,7 @@ def read_cmb_all(f_cmb, f_pwr=None, f_modid=None, f_save=None, ps=[1,3,6]):
         # add p in frequencies
         if f_pwr is not None:
             for p in ps:
-                tmp[f'{p}P'] = p*df_perf['rpm'].values / 60
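+                # assumption: the pwr file 'Speed' column is the rotor speed
+                # in rpm, so p*Speed/60 is the p-per-revolution frequency in Hz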
+                tmp[f'{p}P'] = p*df_perf['Speed'].values / 60
         # sort columns on mean frequeny over wind speeds
         icolsort = tmp.values.mean(axis=0).argsort()
         tmp = tmp[tmp.columns[icolsort]]
@@ -634,7 +634,7 @@ def plot_pwr(figname, fnames, labels=[], figsize=(11,7.15), dpi=120):
 
     print('saving figure: %s ... ' % figname, end='')
     figpath = os.path.dirname(figname)
-    if not os.path.exists(figpath):
+    if len(figpath)>0 and not os.path.exists(figpath):
         os.makedirs(figpath)
     fig.savefig(figname)
     fig.clear()
@@ -669,30 +669,71 @@ class PlotCampbell(object):
         if xpos == 'random':
             pos = np.random.randint(1, nr_xpos-5, nr_series)
         elif xpos == 'centre':
-            pos = np.zeros((nr_series,))
+            pos = np.zeros((nr_series,), dtype=int)
             pos[0:len(pos):2] = np.ceil(nr_xpos/4.0)
             pos[1:len(pos):2] = np.ceil(2.0*nr_xpos/4.0)
         elif xpos == 'borders':
-            pos = np.zeros((nr_series,))
+            pos = np.zeros((nr_series,), dtype=int)
             pos[0:len(pos):2] = 2
             pos[2:len(pos):4] += 1
-            pos[1:len(pos):2] = np.floor(3.0*nr_xpos/4.0)
+            pos[1:len(pos):2] = np.floor(3.3*nr_xpos/4.0)
             # and +1 alternating on the right
             pos[1:len(pos):4] += 1
             pos[3:len(pos):4] -= 1
         elif xpos == 'right':
-            pos = np.zeros((nr_series,))
+            pos = np.zeros((nr_series,), dtype=int)
             pos[0:len(pos):2] = 2
             pos[1:len(pos):2] = np.ceil(1.0*nr_xpos/4.0)
         elif xpos == 'left':
-            pos = np.zeros((nr_series,))
+            pos = np.zeros((nr_series,), dtype=int)
             pos[0:len(pos):2] = np.ceil(2.0*nr_xpos/4.0)
             pos[1:len(pos):2] = np.ceil(3.0*nr_xpos/4.0)
+        elif xpos == 'spread':
+            # TODO: we should determine the spacing based on wind speeds since
+            # it might not be equally spaced
+            pos = np.zeros((nr_series,), dtype=int)
+            pos[0:len(pos):4] = 0
+            pos[1:len(pos):4] = np.ceil(1.0*nr_xpos/4.0)
+            pos[2:len(pos):4] = np.ceil(2.0*nr_xpos/4.0)
+            pos[3:len(pos):4] = nr_xpos - 2
 
         return pos
 
     def plot_freq(self, ax, xpos='random', col='k', mark='^', ls='-',
                   modes='all'):
+        """
+
+
+        Parameters
+        ----------
+        ax : matplotlib.axes.Axes
+            An Axes matplotlib instance that will be used for plotting.
+        xpos : str or list, optional
+            Strategy for placing the labels identifying the different modes.
+            Valid string values are: 'random', 'centre', 'borders', 'right',
+            'left', 'spread'. The default is 'random'. If a list is passed, it
+            should contain at least nr_modes elements. Items in the list refer
+            to the index of the operating point (wind speed).
+        col : string, optional
+            Color used for the line plot. The default is 'k'. Can also be a list
+            of colors in case modes should have different colors.
+        mark : string, optional
+            Marker to use for the line plot. The default is '^'. Can also be a list
+            of mark symbols in case modes should have different symbols.
+        ls : string, optional
+            Line style to use. The default is '-'. Can also be a list
+            of line styles in case modes should have different line styles.
+        modes : string, int, or list, optional
+            Identify which modes to plot. The default is 'all'. Optionally
+            specify an integer to select the first x modes, or a list of indices
+            to cherry-pick modes of interest.
+
+        Returns
+        -------
+        ax : matplotlib.axes
+            An updated Axes matplotlib instance of the plot.
+
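+        Examples
+        --------
+        A minimal sketch; ``cmb`` is a hypothetical, populated PlotCampbell
+        instance and ``ax`` a matplotlib Axes::
+
+            ax = cmb.plot_freq(ax, xpos='spread', col=['k', 'r'], modes='all')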
+        """
 
         if isinstance(modes, str) and modes == 'all':
             df_freq = self.df_freq
@@ -703,7 +744,12 @@ class PlotCampbell(object):
 
         nr_winds = df_freq.shape[0]
         nr_modes = df_freq.shape[1]
-        pos = self._inplot_label_pos(nr_winds, nr_modes, xpos)
+
+        pos = xpos
+        if isinstance(xpos, str):
+            pos = self._inplot_label_pos(nr_winds, nr_modes, xpos)
+        elif isinstance(xpos, list) and len(xpos) < nr_modes:
+            raise ValueError(f'xpos has only {len(xpos)} entries, but needs at least {nr_modes}')
 
         if isinstance(col, str):
             col = [col]
@@ -716,7 +762,7 @@ class PlotCampbell(object):
         mark = mark*nr_modes
         ls = ls*nr_modes
 
-        for i, (name, row) in enumerate(df_freq.iteritems()):
+        for i, (name, row) in enumerate(df_freq.items()):
             colmark = '%s%s%s' % (col[i], ls[i], mark[i])
             ax.plot(self.wind, row.values, colmark)#, mfc='w')
             x, y = self.wind[pos[i]], row.values[pos[i]]
@@ -733,9 +779,14 @@ class PlotCampbell(object):
 
     def plot_damp(self, ax, xpos='random', col='r', mark='o' ,ls='--',
                   modes=14):
+        """see plot_freq
+
+        """
 
         # reduce the number of modes we are going to plot
-        if isinstance(modes, int):
+        if isinstance(modes, str) and modes == 'all':
+            df_damp = self.df_damp
+        elif isinstance(modes, int):
             nr_modes = modes
             # sort the columns according to damping: lowest damped modes first
             # sort according to damping at lowest wind speed
@@ -744,18 +795,34 @@ class PlotCampbell(object):
             df_damp = self.df_damp[modes_sort_reduced]
         else:
             df_damp = self.df_damp[modes]
-            nr_modes = len(modes)
 
-        # put the labels in sensible places
         nr_winds = df_damp.shape[0]
-        nr_damps = df_damp.shape[1]
-        pos = self._inplot_label_pos(nr_winds, nr_damps, xpos)
-        bbox = dict(boxstyle="round", alpha=self.alpha_box, edgecolor=col,
-                    facecolor=col,)
-        for imode, (name, row) in enumerate(df_damp.iteritems()):
-            colmark = '%s%s%s' % (col, ls, mark)
+        nr_modes = df_damp.shape[1]
+
+        if isinstance(col, str):
+            col = [col]
+        if isinstance(mark, str):
+            mark = [mark]
+        if isinstance(ls, str):
+            ls = [ls]
+        # just to make sure we always have enough colors/marks,lss
+        col = col*nr_modes
+        mark = mark*nr_modes
+        ls = ls*nr_modes
+
+        # put the labels in sensible places
+        pos = xpos
+        if isinstance(xpos, str):
+            pos = self._inplot_label_pos(nr_winds, nr_modes, xpos)
+        elif isinstance(xpos, list) and len(xpos) < nr_modes:
+            raise ValueError(f'xpos has only {len(xpos)} entries, but needs at least {nr_modes}')
+
+        for i, (name, row) in enumerate(df_damp.items()):
+            colmark = '%s%s%s' % (col[i], ls[i], mark[i])
             ax.plot(self.wind, row.values, colmark, alpha=0.8)#, mfc='w')
-            x, y = self.wind[pos[imode]], row.values[pos[imode]]
+            x, y = self.wind[pos[i]], row.values[pos[i]]
+            bbox = dict(boxstyle="round", alpha=self.alpha_box,
+                        edgecolor=col[i], facecolor=col[i])
             ax.annotate(name, xy=(x, y), xycoords='data',
                         xytext=(-6, 20), textcoords='offset points',
                         fontsize=12, bbox=bbox, arrowprops=dict(arrowstyle="->",
diff --git a/wetb/prepost/parpost.py b/wetb/prepost/parpost.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e928c1580e94f42302d794ed3531f6c1c080c50
--- /dev/null
+++ b/wetb/prepost/parpost.py
@@ -0,0 +1,424 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu May 24 13:41:56 2018
+
+@author: dave
+"""
+
+import time
+import os
+from multiprocessing import Pool
+from glob import glob
+from argparse import ArgumentParser
+import json
+
+import numpy as np
+import pandas as pd
+# import scipy.io as sio
+import tables as tbl
+
+from tqdm import tqdm
+
+from wetb.utils.envelope import compute_envelope
+from wetb.prepost import windIO
+from wetb.prepost.Simulations import EnvelopeClass
+from wetb.prepost.simchunks import AppendDataFrames
+
+
+def logcheck(fname):
+    """Check the log file of a single HAWC2 simulation and save results to
+    textfile.
+    """
+    fsave = None
+    mode = 'w'
+
+    logf = windIO.LogFile()
+    logf.readlog(fname)
+    contents = logf._msglistlog2csv('')
+    if fsave is None:
+        fsave = fname.replace('.log', '.csv')
+    with open(fsave, mode) as f:
+        f.write(contents)
+
+
+def add_channels(res):
+    """Add channels in memory so they can be included in the statistics calculations
+    """
+
+    # EXAMPLE: tower bottom resultant moment
+    # chitx = res.ch_dict['tower-tower-node-001-momentvec-x']['chi']
+    # chity = res.ch_dict['tower-tower-node-001-momentvec-y']['chi']
+    # mx = res.sig[:,chitx]
+    # my = res.sig[:,chity]
+    # data = np.sqrt(mx*mx + my*my).reshape(len(res.sig),1)
+    # # add the channel
+    # res.add_channel(data, 'TB_res', 'kNm', 'Tower bottom resultant bending moment')
+
+    # blade resultant loads
+    chitx = res.ch_dict['blade1-blade1-node-004-forcevec-x']['chi']
+    chity = res.ch_dict['blade1-blade1-node-004-forcevec-y']['chi']
+    x = res.sig[:,chitx]
+    y = res.sig[:,chity]
+    data = np.sqrt(x*x + y*y).reshape(len(res.sig),1)
+    res.add_channel(data, 'BR_F_res', 'kN', 'Blade root resultant shear force')
+
+    chitx = res.ch_dict['blade1-blade1-node-004-momentvec-x']['chi']
+    chity = res.ch_dict['blade1-blade1-node-004-momentvec-y']['chi']
+    x = res.sig[:,chitx]
+    y = res.sig[:,chity]
+    data = np.sqrt(x*x + y*y).reshape(len(res.sig),1)
+    res.add_channel(data, 'BR_M_res', 'kNm', 'Blade root resultant bending moment')
+
+    return res
+
+
+def loads_at_extreme(dfres, chans_selmax, chans_atmax):
+    """For a range of channels defined in chans_list, list their value respective
+    values at the time when a max/min occurs.
+
+    It makes most sense to use chans_selmax == chans_atmax (BLADED style reports)
+
+    Other use is to track loads at for example the maximum yaw angle.
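+
+    Example sketch (channel names are hypothetical)::
+
+        dfextr = loads_at_extreme(dfres, ['TB_Mx', 'TB_My'],
+                                  ['TB_Mx', 'TB_My', 'yaw_angle'])
+        # rows: MultiIndex (channel, 'max'/'min'); columns hold the value of
+        # each chans_atmax channel at the moment the extreme occurs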
+    """
+    statparams = ['max', 'min']
+
+    # create multi-index: first is the selected channel, second is the stat param
+    chans_selmax_ = []
+    for k in chans_selmax: chans_selmax_.extend([k, k])
+    index = pd.MultiIndex.from_arrays([chans_selmax_, statparams*len(chans_selmax)])
+    # initialise the table
+    dfextr = pd.DataFrame(np.zeros((len(chans_selmax_), len(chans_atmax))),
+                          index=index, columns=chans_atmax)
+
+    # vectorised slightly faster for 8x8 channels
+    # # 9.08 ms ± 72.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+    # select the index at which the max/minimum occurs for the channels in chans_selmax
+    idxmax = dfres[chans_selmax].idxmax(axis=0).values
+    idxmin = dfres[chans_selmax].idxmin(axis=0).values
+    dfextr.loc[(chans_selmax, 'max'), chans_atmax] = dfres.loc[idxmax, chans_atmax].values
+    dfextr.loc[(chans_selmax, 'min'), chans_atmax] = dfres.loc[idxmin, chans_atmax].values
+
+    # # looping is slower for 8x8 channels
+    # # 11.1 ms ± 21.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+    # for chan in chans_selmax:
+    #     idxmax = dfres[chan].idxmax(axis=0)
+    #     idxmin = dfres[chan].idxmin(axis=0)
+    #     for atchan in chans_atmax:
+    #         dfextr.loc[(chan, 'max'),atchan] = dfres.loc[idxmax, atchan]
+    #         dfextr.loc[(chan, 'min'),atchan] = dfres.loc[idxmin, atchan]
+
+    # dfextr.loc[pd.IndexSlice[:,'max'],chan] = dfres.iloc[imax, ichan]
+    # dfextr.loc[pd.IndexSlice[:,'min'],chan] = dfres.iloc[imin, ichan]
+    # dfextr.loc[pd.IndexSlice[:,:],:].values
+    # assert np.allclose(dfextr.loc[:,:].values, dfextr2.loc[:,:].values)
+
+    return dfextr
+
+
+def envelope(res, int_env=False, Nx=300):
+
+    # define which sensors to use by using the unique_ch_name feature
+    ch_list = []
+
+    # find all available outputs for tower, blades of type momentvec Mx
+    sel = ((  (res.ch_df['bodyname']=='tower')
+            | (res.ch_df['bodyname']=='blade1')
+            | (res.ch_df['bodyname']=='blade2')
+            | (res.ch_df['bodyname']=='blade3') )
+           & (res.ch_df['sensortype']=='momentvec') & (res.ch_df['component']=='x')
+          )
+    nodeselx = res.ch_df[sel]['unique_ch_name'].values.tolist()
+
+    # all momentvec My
+    sel = ((  (res.ch_df['bodyname']=='tower')
+            | (res.ch_df['bodyname']=='blade1')
+            | (res.ch_df['bodyname']=='blade2')
+            | (res.ch_df['bodyname']=='blade3') )
+           & (res.ch_df['sensortype']=='momentvec') & (res.ch_df['component']=='y')
+          )
+    nodesely = set(res.ch_df[sel]['unique_ch_name'].values.tolist())
+
+    for nodex in nodeselx:
+        nodey = nodex.replace('-x', '-y')
+        # make sure both Mx and My are defined
+        if nodey not in nodesely:
+            continue
+        ch_list.append([nodex, nodey])
+
+    fname = res.FileName + '_envelopes.h5'
+    filt = tbl.Filters(complevel=9)
+    with tbl.open_file(fname, mode="w", title=str(SIM_ID), filters=filt) as h5f:
+        groupname = str(os.path.basename(res.FileName))
+        groupname = groupname.replace('-', '_').replace('.', '_')
+        ctab = h5f.create_group("/", groupname)
+
+        # envelope = {}
+        for ch_names in ch_list:
+
+            ichans = []
+            for ch_name in ch_names:
+                ichans.append(res.ch_dict[ch_name]['chi'])
+            cloud = res.sig[:, ichans]
+            # Compute a Convex Hull, the vertices number varies according to
+            # the shape of the polygon
+            vertices = compute_envelope(cloud, int_env=int_env, Nx=Nx)
+            # envelope[ch_names[0]] = vertices
+
+            # # save as simple text file
+            # title = ch_names[0]
+            # fname = res.FileName.replace('res/', 'prepost-data/env_txt/') + f'_{title}.txt'
+            # os.makedirs(os.path.dirname(fname), exist_ok=True)
+            # np.savetxt(fname, vertices)
+
+            # save as HDF5 table
+            title = str(ch_names[0].replace('-', '_'))
+            csv_table = h5f.create_table(ctab, title,
+                                          EnvelopeClass.section,
+                                          title=title)
+            tablerow = csv_table.row
+            for row in vertices: #envelope[ch_names[0]]:
+                tablerow['Mx'] = float(row[0])
+                tablerow['My'] = float(row[1])
+                if len(row)>2:
+                    tablerow['Mz'] = float(row[2])
+                    if len(row)>3:
+                        tablerow['Fx'] = float(row[3])
+                        tablerow['Fy'] = float(row[4])
+                        tablerow['Fz'] = float(row[5])
+                    else:
+                        tablerow['Fx'] = 0.0
+                        tablerow['Fy'] = 0.0
+                        tablerow['Fz'] = 0.0
+                else:
+                    tablerow['Mz'] = 0.0
+                    tablerow['Fx'] = 0.0
+                    tablerow['Fy'] = 0.0
+                    tablerow['Fz'] = 0.0
+                tablerow.append()
+            csv_table.flush()
+    # h5f.close()
+
+
+def calc(fpath, do_envelope, no_bins, m, atmax):
+
+    t0 = time.time()
+    i0 = 0
+    i1 = None
+    ftype = '.csv'
+
+    # remove the extension
+    ext = fpath.split('.')[-1]
+    if ext in ('sel', 'dat', 'hdf5'):
+        fpath = '.'.join(fpath.split('.')[:-1])
+    else:
+        print('invalid file extension, ignored:', fpath)
+        return
+
+    fdir = os.path.dirname(fpath)
+    fname = os.path.basename(fpath)
+    try:
+        res = windIO.LoadResults(fdir, fname, usecols=None, readdata=True)
+    except Exception as e:
+        print(f'loading {fpath} failed:')
+        print(e)
+        return
+    neq = res.sig[-1,0] - res.sig[0,0]
+
+    # add channels in memory so they can be included in the statistics
+    # they are not saved back to disk
+    res = add_channels(res)
+
+    # convert to DataFrame
+    dfres = res.sig2df()
+
+    # save the envelope
+    if do_envelope:
+        envelope(res, int_env=False, Nx=300)
+
+    # extremes and corresponding loads at the same time
+    if atmax is not None:
+        for node, val in atmax.items():
+            dfextr = loads_at_extreme(dfres, val['chans_selmax'], val['chans_selmax'])
+            dfextr.to_excel(fpath + f'_{node}_loads_at_extreme.xlsx')
+
+    # statistics
+    df_stats = res.statsdel_df(i0=i0, i1=i1, statchans='all', neq=neq,
+                               no_bins=no_bins, m=m, delchans='all')
+
+    # add path and fname as columns
+    df_stats['case_id'] = fname
+    df_stats['res_dir'] = fdir
+
+    if ftype == '.csv':
+        df_stats.to_csv(fpath+ftype)
+    elif ftype == '.h5':
+        df_stats.to_hdf(fpath+ftype, 'table', complib='zlib', complevel=9)
+    print('% 7.03f  ' % (time.time() - t0), fname)
+
+
+def par(logdir, resdir, cpus=30, chunksize=30, nostats=False, nolog=False,
+        do_envelope=True, no_bins=46, m=[3,4,6,8,9,10,12], atmax=None):
+    """Sophia has 32 CPUs per node.
+    """
+
+    # log file analysis
+    if not nolog:
+        files = glob(os.path.join(logdir, '**', '*.log'), recursive=True)
+        print(f'start processing {len(files)} logfiles from dir: {logdir}')
+        with Pool(processes=cpus) as pool:
+            # chunksize = 10 #this may take some guessing ...
+            for ind, res in enumerate(pool.imap(logcheck, files, chunksize)):
+                pass
+    print()
+
+    # consider both hdf5 and sel outputs
+    if not nostats:
+        files = glob(os.path.join(resdir, '**', '*.hdf5'), recursive=True)
+        files.extend(glob(os.path.join(resdir, '**', '*.sel'), recursive=True))
+        print(f'start processing {len(files)} resfiles from dir: {resdir}')
+        # prepare the other arguments
+        combis = [(k, do_envelope, no_bins, m, atmax) for k in files]
+
+        # sequential loop
+        # for combi in combis:
+        #     calc(*combi)
+
+        # start the parallal for-loop using X number of cpus
+        with Pool(processes=cpus) as pool:
+            # This method chops the iterable into a number of chunks which it submits
+            # to the process pool as separate tasks. The (approximate) size of these
+            # chunks can be specified by setting chunksize to a positive integer.
+            for ind, res in enumerate(pool.starmap(calc, combis, chunksize)):
+                pass
+    print()
+
+
+def merge_stats(post_dir='prepost-data'):
+    """Merge stats for each case into one stats table
+    """
+
+    print('start merging statistics into single table...')
+
+    os.makedirs(post_dir, exist_ok=True)
+    # -------------------------------------------------------------------------
+    # MERGE POSTPRO ON NODE APPROACH INTO ONE DataFrame
+    # -------------------------------------------------------------------------
+    path_pattern = os.path.join('res', '**', '*.csv')
+    csv_fname = '%s_statistics.csv' % SIM_ID
+    # if zipchunks:
+    #     path_pattern = os.path.join(post_dir, 'statsdel_chnk*.tar.xz')
+    fcsv = os.path.join(post_dir, csv_fname)
+    mdf = AppendDataFrames(tqdm=tqdm)
+    cols = mdf.txt2txt(fcsv, path_pattern, tarmode='r:xz', header=0, sep=',',
+                       header_fjoined=None, recursive=True)#, fname_col='[case_id]')
+    # and convert to df: takes 2 minutes
+    fdf = fcsv.replace('.csv', '.h5')
+    store = pd.HDFStore(fdf, mode='w', complevel=9, complib='zlib')
+    colnames = cols.split(',')
+    # the first column is the channel name but the header is emtpy
+    assert colnames[0] == ''
+    colnames[0] = 'channel'
+    dtypes = {col:np.float64 for col in colnames}
+    dtypes['channel'] = str
+    dtypes['case_id'] = str
+    dtypes['res_dir'] = str
+    # when using min_itemsize the column names should be valid variable names
+    # mitemsize = {'channel':60, '[case_id]':60}
+    mdf.csv2df_chunks(store, fcsv, chunksize=1000000, min_itemsize={}, sep=',',
+                      colnames=colnames, dtypes=dtypes, header=0)
+    store.close()
+    print(f'\nsaved at: {fcsv}')
+
+
+def merge_logs(post_dir='prepost-data'):
+
+    print('start merging log analysis into single table...')
+
+    os.makedirs(post_dir, exist_ok=True)
+    # -------------------------------------------------------------------------
+    # MERGE POSTPRO ON NODE APPROACH INTO ONE DataFrame
+    # -------------------------------------------------------------------------
+    lf = windIO.LogFile()
+    path_pattern = os.path.join('logfiles', '**', '*.csv')
+    # if zipchunks:
+    #     path_pattern = os.path.join(POST_DIR, 'loganalysis_chnk*.tar.xz')
+    csv_fname = '%s_ErrorLogs.csv' % SIM_ID
+    fcsv = os.path.join(post_dir, csv_fname)
+    mdf = AppendDataFrames(tqdm=tqdm)
+    # individual log file analysis does not have header, make sure to include
+    # a line for the header
+    cols = mdf.txt2txt(fcsv, path_pattern, tarmode='r:xz', header=None,
+                       header_fjoined=lf._header(), recursive=True)
+
+    # FIXME: this is due to a bug in the log file analysis. What is going on here??
+    # fix cases that do not have enough columns
+    with open(fcsv.replace('.csv', '2.csv'), 'w') as f1:
+        with open(fcsv) as f2:
+            for line in f2.readlines():
+                if len(line.split(';'))==96:
+                    line = line.replace(';0.00000000000;nan;-0.0000;',
+                                        '0.00000000000;nan;-0.0000;')
+                f1.write(line)
+
+    # convert from CSV to DataFrame and save as Excel
+    df = lf.csv2df(fcsv.replace('.csv', '2.csv'))
+    # since this is mainly text it doesn't make any sense to convert to hdf5
+    # for one DLB example, csv is 350 kB, hdf 2 MB.
+    # df.to_hdf(fcsv.replace('.csv', '.h5'), 'table')
+    df.to_excel(fcsv.replace('.csv', '.xlsx'))
+    print(f"\nsaved at: {fcsv.replace('.csv', '2.csv')}")
+
+
+def save_unique_chan_names(post_dir='prepost-data'):
+
+    fdf = os.path.join(post_dir, '%s_statistics.h5' % SIM_ID)
+    df_stats = pd.read_hdf(fdf, 'table')
+    chans = df_stats['channel'].unique()
+    # TODO: print table with HTC channel names on one side, and the unique
+    # channel names on the other. Problem: one HTC line can have multiple channels
+    # do not sort, leave in same order as in the sel file
+    # chans.sort()
+    fname = os.path.join(post_dir, '%s_unique-channel-names.csv' % SIM_ID)
+    pd.DataFrame(chans).to_csv(fname)
+
+
+if __name__ == '__main__':
+
+    parser = ArgumentParser(description = "Parallel post-processing with the "
+                            "Python build-in multiprocessing module.")
+    parser.add_argument('-n', type=int, default=2, action='store',
+                        dest='cpus', help='Number of CPUs to use on the node')
+    parser.add_argument('-c', type=int, default=80, action='store',
+                        dest='chunksize', help='Chunksize Pool.')
+
+    parser.add_argument('--res', type=str, default='res', action='store',
+                        dest='resdir', help='Directory containing result files')
+    parser.add_argument('--log', type=str, default='log', action='store',
+                        dest='logdir', help='Directory containing HAWC2 log files')
+
+    parser.add_argument('--nologs', default=False, action='store_true',
+                        dest='nologs', help='Do not perform the logfile analysis.')
+    parser.add_argument('--nostats', default=False, action='store_true',
+                        dest='nostats', help='Do not calculate statistics.')
+    parser.add_argument('--envelope', default=False, action='store_true',
+                        dest='do_envelope', help='Calculate envelopes.')
+    parser.add_argument('--no_bins', default=46, action='store', type=int,
+                        dest='no_bins', help='Number of bins for the rainflow counting.')
+    parser.add_argument('--atmax', default=False, action='store', type=str,
+                        dest='atmax', help='JSON file defining per node the '
+                        'channels for which the extremes and the corresponding '
+                        'values of the other channels are tabulated.')
+    opt = parser.parse_args()
+
+    if opt.atmax:
+        with open(opt.atmax) as fio:
+            atmax_js = json.load(fio)
+    else:
+        atmax_js = None
+
+    SIM_ID = os.path.basename(os.getcwd())
+    par(opt.logdir, opt.resdir, cpus=opt.cpus, chunksize=opt.chunksize,
+        nolog=opt.nologs, nostats=opt.nostats, do_envelope=opt.do_envelope,
+        no_bins=opt.no_bins, m=[3,4,6,8,9,10,12], atmax=atmax_js)
+    if not opt.nostats:
+        merge_stats(post_dir='prepost-data')
+        save_unique_chan_names(post_dir='prepost-data')
+    if not opt.nologs:
+        merge_logs(post_dir='prepost-data')
diff --git a/wetb/prepost/simchunks.py b/wetb/prepost/simchunks.py
index 1bda571d86fad2ed357dd9a70d985b2619fabb8d..1a89a31b517dcafa7b13ed44e52b5720f89af29d 100644
--- a/wetb/prepost/simchunks.py
+++ b/wetb/prepost/simchunks.py
@@ -876,12 +876,16 @@ class AppendDataFrames(object):
                     if write_header:
+                        header_f0 = None
                         if header_fjoined is None:
                             header_fjoined = lines[header]
+                            header_f0 = lines[header]
                         # add extra column with the file name if applicable
                         if fname_col:
                             rpl = sep + fname_col + '\n'
                             header_fjoined = header_fjoined.replace('\n', rpl)
                         ft.write(header_fjoined)
                         write_header = False
+                    # check if the header is the same for each file
+                    if header is not None and header_f0 is not None:
+                        assert lines[header] == header_f0
                     # but cut out the header on all other occurances
                     case_id = '.'.join(case_id.split('.')[:-1])
                     for line in lines[icut:]:
diff --git a/wetb/prepost/windIO.py b/wetb/prepost/windIO.py
index 414200be0593dde859aea401775e093ed9dd69da..914baf1c759d685fd0bdadc71146e3c95855bc27 100755
--- a/wetb/prepost/windIO.py
+++ b/wetb/prepost/windIO.py
@@ -19,6 +19,7 @@ import re as re
 
 import numpy as np
 import scipy as sp
+import scipy.io as sio
 import scipy.integrate as integrate
 import pandas as pd
 
@@ -1249,20 +1250,29 @@ class LoadResults(ReadHawc2):
             # Free wind speed Vdir_hor, gl. coo, of gl. pos  0.00,  0.00, -2.31
 
             # -----------------------------------------------------------------
-            # WATER SURFACE gl. coo, at gl. coo, x,y=   0.00,   0.00
+            # Water surface gl. coo, at gl. coo, x,y=   0.00,   0.00
+            # Water vel., x-dir, at gl. coo, x,y,z=   0.00,   0.00,   1.00
             elif self.ch_details[ch, 2].startswith('Water'):
                 units = self.ch_details[ch, 1]
+                channelinfo = {}
 
-                # but remove the comma
-                x = items_ch2[-2][:-1]
-                y = items_ch2[-1]
+                # surface, vel or acc?
+                if items_ch2[1]=='surface':
+                    # but remove the comma
+                    x = items_ch2[-2][:-1]
+                    y = items_ch2[-1]
+                    # and tag it
+                    tag = 'watersurface-global-%s-%s' % (x, y)
+                    channelinfo['pos'] = (float(x), float(y))
+                else:
+                    # but remove the comma
+                    x = items_ch2[-3][:-1]
+                    y = items_ch2[-2][:-1]
+                    z = items_ch2[-1]
+                    tag = f'{items_ch0[0]}-{x}-{y}-{z}'
 
-                # and tag it
-                tag = 'watersurface-global-%s-%s' % (x, y)
                 # save all info in the dict
-                channelinfo = {}
                 channelinfo['coord'] = 'global'
-                channelinfo['pos'] = (float(x), float(y))
                 channelinfo['units'] = units
                 channelinfo['chi'] = ch
 
@@ -1725,6 +1735,49 @@ class LoadResults(ReadHawc2):
 
         return np.array(zvals), np.array(yvals)
 
+    def add_channel(self, data, name, units, description='', options=None):
+        """Add a channel to self.sig and self.ch_df such that self.statsdel_df
+        also calculates the statistics for this channel.
+
+        Parameters
+        ----------
+
+        data : np.ndarray(n, 1)
+            Array containing the new channel. Should be of shape (n, 1). If not
+            it will be reshaped to (len(data),1).
+
+        name : str
+            Unique name of the new channel
+
+        units : str
+            Channel units
+
+        description : str, default=''
+            channel description
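+
+        options : dict, optional
+            Extra ch_df metadata for the new channel; keys must be columns of
+            self.ch_df (e.g. 'bodyname', 'sensortype'). Empty by default.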
+        """
+        # valid keys for self.res.ch_df
+        # add = {'radius':np.nan, 'bearing_name':'', 'azimuth':np.nan, 'coord':'',
+        #       'sensortype':'', 'io_nr':np.nan, 'wake_source_nr':np.nan,
+        #       'dll':'', 'direction':'', 'blade_nr':np.nan, 'bodyname':'',
+        #       'pos':'', 'flap_nr':'', 'sensortag':'', 'component':'', 'units':'',
+        #       'io':'', 'unique_ch_name':'new_channel'}
+
+        add = {k:'' for k in self.ch_df.columns}
+        if options is not None:
+            add.update(options)
+        add['unique_ch_name'] = name
+        row = [add[k] for k in self.ch_df.columns]
+
+        # add the meta-data to ch_df and ch_details
+        self.ch_df.loc[len(self.ch_df)] = row
+        cols = [[name, units, description]]
+        self.ch_details = np.append(self.ch_details, cols, axis=0)
+
+        # and add to the results array
+        if data.shape != (len(data),1):
+            data = data.reshape(len(data),1)
+        self.sig = np.append(self.sig, data, axis=1)
+
     def save_chan_names(self, fname):
         """Save unique channel names to text file.
         """
@@ -1790,6 +1843,16 @@ class LoadResults(ReadHawc2):
         self.ch_details
         self.ch_dict
 
+    def save_matlab(self, fname):
+        """Save output in Matlab format.
+        """
+        # all channels
+        details = np.zeros((self.sig.shape[1],4), dtype=object)
+        for i in range(self.sig.shape[1]):
+            details[i,0:3] = self.ch_details[i,:]
+            details[i,3] = self.ch_df.loc[i,'unique_ch_name']
+        sio.savemat(fname, {'sig':self.sig, 'description':details})
+
 
 def ReadOutputAtTime(fname):
     """Distributed blade loading as generated by the HAWC2 output_at_time
@@ -1977,6 +2040,31 @@ def ReadEigenStructure(fname, debug=False):
     return df
 
 
+def ReadStructInertia(fname):
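+    """Read the body inertia table from a HAWC2 structure output file.
+
+    A sketch under the assumptions coded below: the column headers sit on
+    line 8 and the body rows run from line 10 until the fifth '______'
+    separator. Returns a DataFrame with one row per body.
+    """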
+
+    with open(fname) as f:
+        lines = f.readlines()
+
+    marks = []
+    for i, line in enumerate(lines):
+        if line.startswith('_________'):
+            marks.append(i)
+
+    header = ['body_name'] + lines[7].split()[2:]
+    data = lines[9:marks[4]]
+    bodies = {i:[] for i in header}
+    for row in data:
+        row_els = row[:-1].split()
+        for colname, col in zip(header, row_els):
+            bodies[colname].append(col)
+
+    bodies = pd.DataFrame(bodies)
+    for k in header[1:]:
+        bodies[k] = bodies[k].astype(float)
+
+    return bodies
+
+
 class UserWind(object):
     """
     """