diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..d37346bd3e6985da46022d9c87d0ce66083686e7
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+Contributions
+-------------
+
+If you make a change in the toolbox that others can benefit from, please make a merge request.
+
+If you can, please submit a merge request with the fix or improvements including tests.
+
+The workflow to make a merge request is as follows:
+
+
+- Create a feature branch, branching away from master
+- Write tests and code
+- Push the commit(s) to your fork
+- Submit a merge request (MR) to the master branch of the upstream repository
+- Link any relevant issues in the merge request description and leave a comment on them with a link back to the MR
+- Your tests should run as fast as possible, and if they use test files, these files should be as small as possible.
+- Please keep the change in a single MR as small as possible. Split the functionality if you can
\ No newline at end of file
diff --git a/docs/howto-make-dlcs.md b/docs/howto-make-dlcs.md
index bad92e69297f0651994dc6b9c131ad4a2f8ac062..1d95f83894927012584bd3fb1dd59662f7ad33de 100644
--- a/docs/howto-make-dlcs.md
+++ b/docs/howto-make-dlcs.md
@@ -415,6 +415,21 @@ Optional
 * ```[mooring_dir] = False```, all files and sub-folders copied to node
 * ```[hydro_dir] = False```, all files and sub-folders copied to node
 
+The mooring line dll expects an init file with a fixed name in the root of
+the HAWC2 folder. When you have to use various init files (e.g. when the water
+depth varies between load cases) it is convenient to be able
+to control which init file is used for which case (e.g. water depth).
+
+When running a load case for which the mooring lines will run in init mode:
+* ```[copyback_f1]``` = 'ESYSMooring_init.dat'
+* ```[copyback_f1_rename]``` = 'mooringinits/ESYSMooring_init_vXYZ.dat'
+
+When using an a priori calculated init file for the mooring lines:
+* ```[copyto_generic_f1]``` = 'mooringinits/ESYSMooring_init_vXYZ.dat'
+* ```[copyto_f1]``` = 'ESYSMooring_init.dat'
+
+Replace ```vXYZ``` with an appropriate identifier for your case.
+
 A zip file will be created which contains all files in the model root directory,
 and all the contents (files and folders) of the following directories:
 ```[control_dir], [mooring_dir], [hydro_dir], 'externalforce/', [data_dir]```.
@@ -424,7 +439,7 @@ during simulation time in the ```[log_dir]```, ```[res_dir]```,
 ```[animation_dir]```, and ```[eigenfreq_dir]``` will be copied back.
 
 
-### Advanced configuration options
+### Advanced configuration options by modifying dlctemplate.py
 
 > Note that not all features are documented yet...
 
@@ -432,8 +447,8 @@ Special tags: copy special result files from the compute node back to the HAWC2
 working directory on the network drive, and optionally rename the file in case
 it would otherwise be overwritten by other cases in your DLB:
 * ```[copyback_files] = ['ESYSMooring_init.dat']```
-* ```[copyback_frename] = ['path/to/ESYSMooring_init_vXYZ.dat']```, optionally specify
-a different file path/name
+* ```[copyback_frename] = ['path/to/ESYSMooring_init_vXYZ.dat']```, optionally
+specify a different file path/name
 
 Copy files from the HAWC2 working directory with a special name to the compute
 node for which the a fixed file name is assumed
@@ -470,10 +485,9 @@ tags:
 * ```[hydro input name]```
 * ```[wave_type]``` : see HAWC2 manual for options
 * ```[wave_spectrum]``` : see HAWC2 manual for options
-* ```[hydro_dir]```
 * ```[wdepth]```
-* ```[hs]``` : see HAWC2 manual for options
-* ```[tp]``` : see HAWC2 manual for options
+* ```[Hs]``` : see HAWC2 manual for options
+* ```[Tp]``` : see HAWC2 manual for options
 * ```[wave_seed]``` : see HAWC2 manual for options
 
 And the corresponding section the htc master file:
diff --git a/docs/tutorials/1-creating-master-excel.md b/docs/tutorials/1-creating-master-excel.md
index 75a6cdae40104c74d515e16cc22c648413224bad..4edf95f652a467ad148f7c9356b7ab14a95b9b57 100644
--- a/docs/tutorials/1-creating-master-excel.md
+++ b/docs/tutorials/1-creating-master-excel.md
@@ -86,13 +86,15 @@ Generate the master Excel file in a few easy steps:
 the Wind Energy Toolbox tutorials directory.
 3. From a terminal/command window, run the code to generate the Excel file 
 from a folder of text files:
-    * Windows:  
+    * Windows (from the wetb tutorials folder):  
     ```python ..\..\wetb\prepost\write_master.py --folder data\DLCs_onshore --filename DLCs_onshore.xlsx```
-    * Mac/Linux:  
+    * Mac/Linux (from the wetb tutorials folder):  
     ```python ../../wetb/prepost/write_master.py --folder data/DLCs_onshore  --filename DLCs_onshore.xlsx```
-    * Gorm:  
-    ```python  /home/MET/repositories/toolbox/WindEnergyToolbox/wetb/prepost/write_master.py --folder /home/MET/repositories/toolbox/WindEnergyToolbox/wetb/docs/tutoria
-ls/data/DLCs_onshore --filename DLCs_onshore.xlsx```
+    * Gorm (from any folder that contains a subfolder with your text files. Note
+you must activate the wetb environment (see Step 5 [here](https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox/blob/master/docs/getting-started-with-dlbs.md)
+) before this command will work. This command also assumes the folder with your
+text files is called "DLCs_onshore" and is located in the working directory.):  
+    ```python  /home/MET/repositories/toolbox/WindEnergyToolbox/wetb/prepost/write_master.py --folder ./DLCs_onshore --filename ./DLCs_onshore.xlsx```
  
 The master Excel file "DLCs_onshore.xlsx" should now be in the your current 
 directory.
diff --git a/requirements.txt b/requirements.txt
index 5ab191d8a44d73363db0c44d95075668eee1f2fb..889438e8f44a2119fc04764ae9a642812055c844 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,4 +17,5 @@ pbr
 PyScaffold
 pytest-cov
 sshtunnel
+xlsxwriter
 
diff --git a/wetb/fatigue_tools/tests/test_fatigue.py b/wetb/fatigue_tools/tests/test_fatigue.py
index 0ef2fef2e7c85b38b02b63123e48067c30832df5..a25745e157215dc1666b40e66dee3bfb4d5247dc 100644
--- a/wetb/fatigue_tools/tests/test_fatigue.py
+++ b/wetb/fatigue_tools/tests/test_fatigue.py
@@ -23,6 +23,36 @@ testfilepath = os.path.join(os.path.dirname(__file__), 'test_files/')  # test fi
 
 class TestFatigueTools(unittest.TestCase):
 
+    def test_leq_1hz(self):
+        """Simple test of wetb.fatigue_tools.fatigue.eq_load using a sine
+        signal.
+        """
+        amplitude = 1
+        m = 1
+        point_per_deg = 100
+
+        # sine signal with 10 periods (20 peaks)
+        nr_periods = 10
+        time = np.linspace(0, nr_periods*2*np.pi, point_per_deg*180)
+        neq = time[-1]
+        # mean value of the signal shouldn't matter
+        signal = amplitude * np.sin(time) + 5
+        r_eq_1hz = eq_load(signal, no_bins=1, m=m, neq=neq)[0]
+        r_eq_1hz_expected = ((2*nr_periods*amplitude**m)/neq)**(1/m)
+        np.testing.assert_allclose(r_eq_1hz, r_eq_1hz_expected)
+
+        # sine signal with 20 periods (40 peaks)
+        nr_periods = 20
+        time = np.linspace(0, nr_periods*2*np.pi, point_per_deg*180)
+        neq = time[-1]
+        # mean value of the signal shouldn't matter
+        signal = amplitude * np.sin(time) + 9
+        r_eq_1hz2 = eq_load(signal, no_bins=1, m=m, neq=neq)[0]
+        r_eq_1hz_expected2 = ((2*nr_periods*amplitude**m)/neq)**(1/m)
+        np.testing.assert_allclose(r_eq_1hz2, r_eq_1hz_expected2)
+
+        # 1hz equivalent load should be independent of the length of the signal
+        np.testing.assert_allclose(r_eq_1hz, r_eq_1hz2)
 
     def test_astm1(self):
 
@@ -59,7 +89,7 @@ class TestFatigueTools(unittest.TestCase):
 #     def test_windap3(self):
 #         data = Hawc2io.ReadHawc2(testfilepath + "test").ReadBinary([2]).flatten()
 #         from wetb.fatigue_tools.rainflowcounting import peak_trough
-#         self.assertTrue(peak_trough.__file__.lower()[-4:] == ".pyd" or peak_trough.__file__.lower()[-3:] == ".so", 
+#         self.assertTrue(peak_trough.__file__.lower()[-4:] == ".pyd" or peak_trough.__file__.lower()[-3:] == ".so",
 #                         "not compiled, %s, %s\n%s"%(sys.executable, peak_trough.__file__, os.listdir(os.path.dirname(peak_trough.__file__))))
 #         np.testing.assert_array_equal(cycle_matrix(data, 4, 4, rainflow_func=rainflow_windap)[0], np.array([[  14., 65., 39., 24.],
 #                                                                    [  0., 1., 4., 0.],
@@ -73,7 +103,7 @@ class TestFatigueTools(unittest.TestCase):
                                                                                                            [  0., 1., 4., 0.],
                                                                                                            [  0., 0., 0., 0.],
                                                                                                            [  0., 1., 2., 0.]]) / 2, 0.001)
-        
+
     def test_astm_weighted(self):
         data = Hawc2io.ReadHawc2(testfilepath + "test").ReadBinary([2]).flatten()
         np.testing.assert_allclose(cycle_matrix([(1, data),(1,data)], 4, 4, rainflow_func=rainflow_astm)[0], np.array([[ 24., 83., 53., 26.],
diff --git a/wetb/prepost/GenerateDLCs.py b/wetb/prepost/GenerateDLCs.py
index 5cb2fe9bc7f2150ea524441566780234ce6aa75c..fa997d9898ed08d31f32857b10c09347e3ddf26d 100644
--- a/wetb/prepost/GenerateDLCs.py
+++ b/wetb/prepost/GenerateDLCs.py
@@ -72,15 +72,39 @@ class GeneralDLC(object):
                     pass
             if tag == '[seed]':
                 cases_len.append(int(v[0]))
+            elif tag == '[wave_seed]':
+                cases_len.append(int(v[0]))
             else:
                 cases_len.append(len(v))
         cases_index = multi_for(list(map(range, cases_len)))
 
+#        for irow, row in enumerate(cases_index):
+#            counter = floor(irow/len(variables['[wsp]']))+1
+#            for icol, col in enumerate(row):
+#                if variables_order[icol] == '[seed]':
+#                    value = '%4.4i' % (1000*counter + row[variables_order.index('[wsp]')]+1)
+#                elif variables_order[icol] == '[wave_seed]':  #shfe: wave_seed
+#                    value = '%4.4i' % (1000*counter + row[variables_order.index('[wsp]')]+1)
+#                else:
+#                    value = variables[variables_order[icol]][col]
+#                    if not isinstance(value, float) and not isinstance(value, int):
+#                        value = str(value)
+#                dlc[variables_order[icol]].append(value)
         for irow, row in enumerate(cases_index):
             counter = floor(irow/len(variables['[wsp]']))+1
             for icol, col in enumerate(row):
                 if variables_order[icol] == '[seed]':
                     value = '%4.4i' % (1000*counter + row[variables_order.index('[wsp]')]+1)
+                elif variables_order[icol] == '[wave_seed]':
+                    value = '%4.4i' % ( 100*(row[variables_order.index('[wsp]')]+1) + \
+                                        row[variables_order.index('[wave_seed]')]+1)
+
+#                    value = '%4.4i' % (irow+1)
+#                    value = '%4.4i' % (10000*(row[variables_order.index('[wave_dir]')]+1) + \
+#                                        1000*(row[variables_order.index('[Hs]')]+1) + \
+#                                        10*(row[variables_order.index('[Tp]')]+1) +\
+#                                        row[variables_order.index('[seed]')]+1)
+
                 else:
                     value = variables[variables_order[icol]][col]
                     if not isinstance(value, float) and not isinstance(value, int):
@@ -125,6 +149,14 @@ class GeneralDLC(object):
 
         keys_list = self.sort_formulas(formulas)
 
+        # specify the precision of the tag as used in the formulas
+        # this does NOT affect the precision of the tag itself, only when used
+        # in a formula based tag.
+        formats = {'[wsp]':'%2.2i', '[gridgustdelay]':'%2.2i',
+                   '[wdir]':'%3.3i', '[G_phi0]':'%3.3i',
+                   '[sign]':'%s',
+                   '[Hs]':'%05.02f', '[Tp]':'%05.02f'}
+
         for fkey in keys_list:
             flist = []
             for i in range(len(dlc['[wsp]'])):
@@ -132,22 +164,22 @@ class GeneralDLC(object):
                 for key in dlc.keys():
                     if key in formula:
                         if formula[0] == '"':
-                            if key == '[wsp]' or key == '[gridgustdelay]':
-                                fmt = '%2.2i'
-                                formula = formula.replace(key, fmt%int(dlc[key][i]))
-                            elif key == '[wdir]' or key == '[G_phi0]':
-                                fmt = '%3.3i'
-                                formula = formula.replace(key, fmt%int(dlc[key][i]))
-                            elif key == '[sign]':
-                                fmt = '%s'
-                                formula = formula.replace(key, fmt%dlc[key][i])
-                            else:
+                            try:
+                                fmt = formats[key]
+                            except KeyError:
                                 fmt = '%4.4i'
-                                formula = formula.replace(key, fmt % int(dlc[key][i]))
+                            try:
+                                value = float(dlc[key][i])
+                            except ValueError:
+                                # this is for string tags
+                                value = dlc[key][i]
+                                fmt = '%s'
+                            formula = formula.replace(key, fmt % value)
                         elif key in formula:
                             formula = formula.replace(key, '%s' % dlc[key][i])
                 formula = formula.replace(',', '.')
                 formula = formula.replace(';', ',')
+                formula = formula.replace('\n', ' ')
                 flist.append(eval(formula))
 
             dlc[fkey] = flist
@@ -275,4 +307,3 @@ if __name__ == '__main__':
     opt = parser.parse_args()
     DLB = GenerateDLCCases()
     DLB.execute(filename=opt.filename, folder=opt.folder)
-
diff --git a/wetb/prepost/GenerateHydro.py b/wetb/prepost/GenerateHydro.py
index f2cfadfadd8794dc42f7a6fc3315eeba3ac2efa1..fd798d77df8dcb6f01224b57ef37ea0008d68c36 100755
--- a/wetb/prepost/GenerateHydro.py
+++ b/wetb/prepost/GenerateHydro.py
@@ -6,19 +6,17 @@ Created on Fri Apr 15 18:34:15 2016
 
 Description: This script is used for writing the hydro input files for HAWC2
 (the wave type det_airy is not included)
-
 """
 
 import os
 
-
 class hydro_input(object):
 
     """ Basic class aiming for write the hydrodynamics input file"""
 
-    def __init__(self, wavetype, wdepth, spectrum, Hs, Tp, seed,
-                 gamma = 3.3, stretching = 1, wn = None, coef = 200,
-                 spreading = None):
+    def __init__(self, wavetype, wdepth, spectrum, Hs, Tp, seed, gamma=3.3,
+                 stretching=1, wn=None, coef=200, spreading=None,
+                 embed_sf=None, embed_sf_t0=None):
 
         self.wdepth = wdepth
         if self.wdepth < 0:
@@ -27,7 +25,7 @@ class hydro_input(object):
         # Regular Airy Wave Input
         if wavetype == 'reg_airy':
             self.waveno = 0
-            self.argument = 'begin %s ;\n\t\tstretching %d;\n\t\twave %d %d;\n\tend;' \
+            self.argument = 'begin %s ;\n\t\tstretching %d;\n\t\twave %.2f %.2f;\n\tend;' \
                             %(wavetype,stretching,Hs,Tp)
 
         # Iregular Airy Wave Input
@@ -37,46 +35,51 @@ class hydro_input(object):
             if spectrum == 'jonswap':
                 spectrumno = 1
                 self.argument = 'begin %s ;\n\t\tstretching %d;\n\t\tspectrum %d;'\
-                                '\n\t\tjonswap %.1f %.1f %.1f;\n\t\tcoef %d %d;' \
+                                '\n\t\tjonswap %.2f %.2f %.1f;\n\t\tcoef %d %d;' \
                                 %(wavetype,stretching,spectrumno,Hs,Tp,gamma,coef,seed)
 
             # Pierson Moscowitz spectrum
             elif spectrum == 'pm':
                 spectrumno = 2
                 self.argument = 'begin %s ;\n\t\tstretching %d;\n\t\tspectrum %d;'\
-                                '\n\t\tpm %.1f %.1f ;\n\t\tcoef %d %d;' \
+                                '\n\t\tpm %.2f %.2f ;\n\t\tcoef %d %d;' \
                                 %(wavetype,stretching,spectrumno,Hs,
                                   Tp,coef,seed)
 
             # check the spreading function
             if spreading is not None:
                 self.argument += '\n\t\tspreading 1 %d;'%(spreading)
-            self.argument += '\n\tend;';
+            # check the embeded stream function
+            if embed_sf is not None:
+                self.argument += '\n\t\tembed_sf %.2f %d;'%(embed_sf, embed_sf_t0)
+            self.argument += '\n\tend;'
 
         # Stream Wave Input
         if wavetype == 'strf':
             self.waveno = 3
-            self.argument = 'begin %s ;\n\t\twave %d %d;\n\tend;' \
+            self.argument = 'begin %s ;\n\t\twave %.2f %.2f 0.0;\n\tend;' \
                             %(wavetype,Hs,Tp)
 
     def execute(self, filename, folder):
-        cwd = os.getcwd()
-        folder_path = os.path.join(cwd,folder)
-        file_path = os.path.join(folder_path,filename)
+        file_path = os.path.join(folder, filename)
         # check if the hydro input file exists
         if os.path.exists(file_path):
             pass
         else:
+            # create directory if non existing
+            if not os.path.exists(folder):
+                os.makedirs(folder)
             FILE = open(file_path,'w+')
             line1 = 'begin wkin_input ;'
             line2 = 'wavetype %d ;' %self.waveno
-            line3 = 'wdepth %d ;' %self.wdepth
+            line3 = 'wdepth %.1f ;' %self.wdepth
             line4 = 'end ;'
             file_contents = '%s\n\t%s\n\t%s\n\t%s\n%s\n;\nexit;' \
                             %(line1,line2,line3,self.argument,line4)
             FILE.write(file_contents)
             FILE.close()
 
+
 if __name__ == '__main__':
     hs = 3
     Tp = 11
diff --git a/wetb/prepost/Simulations.py b/wetb/prepost/Simulations.py
index d26c64e1d72dea8c91954171bcd1531167f644e0..3ca38cd379f62c09da615d415a49b6d0fae1ca9f 100755
--- a/wetb/prepost/Simulations.py
+++ b/wetb/prepost/Simulations.py
@@ -738,30 +738,12 @@ def prepare_launch(iter_dict, opt_tags, master, variable_tag_func,
             if verbose:
                 print('created cases for: %s.htc\n' % master.tags['[case_id]'])
 
-            # shfe: flag to generate hydro input file
-            if master.tags['[hydro_dir]'] is not False:
-                if '[hydro input name]' not in master.tags:
-                    continue
-                hydro_filename = master.tags['[hydro input name]']
-                print('creating hydro input file for: %s.inp\n' % hydro_filename)
-                wavetype = master.tags['[wave_type]']
-                wavespectrum = master.tags['[wave_spectrum]']
-                hydro_folder = master.tags['[hydro_dir]']
-                wdepth = float(master.tags['[wdepth]'])
-                hs = float(master.tags['[hs]'])
-                tp = float(master.tags['[tp]'])
-                wave_seed = int(float(master.tags['[wave_seed]']))
-                hydro_inputfile = hydro_input(wavetype=wavetype, Hs=hs, Tp=tp,
-                                              wdepth = wdepth, seed=wave_seed,
-                                              spectrum=wavespectrum,
-                                              spreading=None)
-                hydro_inputfile.execute(filename=hydro_filename + '.inp',
-                                        folder=hydro_folder)
 #    print(master.queue.get())
 
     # only copy data and create zip after all htc files have been created.
     # Note that createcase could also creat other input files
     # create the execution folder structure and copy all data to it
+    # FIXME: this approach only considers the tags as set in the last case!
     if update_model_data:
         master.copy_model_data()
         # create the zip file
@@ -1442,70 +1424,71 @@ class HtcMaster(object):
         data_local = os.path.join(self.tags['[model_dir_local]'],
                                   self.tags['[data_dir]'])
         data_run = os.path.join(self.tags['[run_dir]'], self.tags['[data_dir]'])
-        if not data_local == data_run:
-
-            # copy root files
-            model_root = self.tags['[model_dir_local]']
-            run_root = self.tags['[run_dir]']
-            for fname in self.tags['[zip_root_files]']:
-                shutil.copy2(model_root + fname, run_root + fname)
-
-            # copy special files with changing file names
-            if '[ESYSMooring_init_fname]' in self.tags:
-                if isinstance(self.tags['[ESYSMooring_init_fname]'], str):
-                    fname_source = self.tags['[ESYSMooring_init_fname]']
-                    fname_target = 'ESYSMooring_init.dat'
-                    shutil.copy2(model_root + fname_source,
-                                 run_root + fname_target)
-
-            # copy the master file into the htc/_master dir
-            src = os.path.join(self.tags['[master_htc_dir]'],
-                               self.tags['[master_htc_file]'])
-            # FIXME: htc_dir can contain the DLC folder name
-            dst = os.path.join(self.tags['[run_dir]'], 'htc', '_master')
-            if not os.path.exists(dst):
-                os.makedirs(dst)
-            shutil.copy2(src, dst)
+        if data_local == data_run:
+            return
 
-            # copy all content of the following dirs
-            dirs = [self.tags['[control_dir]'], self.tags['[hydro_dir]'],
-                    self.tags['[mooring_dir]'], self.tags['[externalforce]'],
-                    self.tags['[data_dir]'], 'htc/DLCs/']
-            plocal = self.tags['[model_dir_local]']
-            prun = self.tags['[run_dir]']
+        # copy root files
+        model_root = self.tags['[model_dir_local]']
+        run_root = self.tags['[run_dir]']
+        for fname in self.tags['[zip_root_files]']:
+            shutil.copy2(model_root + fname, run_root + fname)
 
-            # copy all files present in the specified folders
-            for path in dirs:
-                if not path:
-                    continue
-                elif not os.path.exists(os.path.join(plocal, path)):
-                    continue
-                for root, dirs, files in os.walk(os.path.join(plocal, path)):
-                    for file_name in files:
-                        src = os.path.join(root, file_name)
-                        dst = os.path.abspath(root).replace(os.path.abspath(plocal),
-                                           os.path.abspath(prun))
-                        if not os.path.exists(dst):
-                            os.makedirs(dst)
-                        dst = os.path.join(dst, file_name)
-                        shutil.copy2(src, dst)
-
-            # and last copies: the files with generic input names
-            if not isinstance(self.tags['[fname_source]'], list):
-                raise ValueError('[fname_source] needs to be a list')
-            if not isinstance(self.tags['[fname_default_target]'], list):
-                raise ValueError('[fname_default_target] needs to be a list')
-            len1 = len(self.tags['[fname_source]'])
-            len2 = len(self.tags['[fname_default_target]'])
-            if len1 != len2:
-                raise ValueError('[fname_source] and [fname_default_target] '
-                                 'need to have the same number of items')
-            for i in range(len1):
-                src = os.path.join(plocal, self.tags['[fname_source]'][i])
-                dst = os.path.join(prun, self.tags['[fname_default_target]'][i])
-                if not os.path.exists(os.path.dirname(dst)):
-                    os.makedirs(os.path.dirname(dst))
-                shutil.copy2(src, dst)
+        # copy special files with changing file names
+        if '[ESYSMooring_init_fname]' in self.tags:
+            if isinstance(self.tags['[ESYSMooring_init_fname]'], str):
+                fname_source = self.tags['[ESYSMooring_init_fname]']
+                fname_target = 'ESYSMooring_init.dat'
+                shutil.copy2(model_root + fname_source,
+                             run_root + fname_target)
+
+        # copy the master file into the htc/_master dir
+        src = os.path.join(self.tags['[master_htc_dir]'],
+                           self.tags['[master_htc_file]'])
+        # FIXME: htc_dir can contain the DLC folder name
+        dst = os.path.join(self.tags['[run_dir]'], 'htc', '_master')
+        if not os.path.exists(dst):
+            os.makedirs(dst)
+        shutil.copy2(src, dst)
+
+        # copy all content of the following dirs
+        dirs = [self.tags['[control_dir]'], self.tags['[hydro_dir]'],
+                self.tags['[mooring_dir]'], self.tags['[externalforce]'],
+                self.tags['[data_dir]'], 'htc/DLCs/']
+        plocal = self.tags['[model_dir_local]']
+        prun = self.tags['[run_dir]']
+
+        # copy all files present in the specified folders
+        for path in dirs:
+            if not path:
+                continue
+            elif not os.path.exists(os.path.join(plocal, path)):
+                continue
+            for root, dirs, files in os.walk(os.path.join(plocal, path)):
+                for file_name in files:
+                    src = os.path.join(root, file_name)
+                    dst = os.path.abspath(root).replace(os.path.abspath(plocal),
+                                       os.path.abspath(prun))
+                    if not os.path.exists(dst):
+                        os.makedirs(dst)
+                    dst = os.path.join(dst, file_name)
+                    shutil.copy2(src, dst)
+
+        # and last copies: the files with generic input names
+        if not isinstance(self.tags['[fname_source]'], list):
+            raise ValueError('[fname_source] needs to be a list')
+        if not isinstance(self.tags['[fname_default_target]'], list):
+            raise ValueError('[fname_default_target] needs to be a list')
+        len1 = len(self.tags['[fname_source]'])
+        len2 = len(self.tags['[fname_default_target]'])
+        if len1 != len2:
+            raise ValueError('[fname_source] and [fname_default_target] '
+                             'need to have the same number of items')
+        for i in range(len1):
+            src = os.path.join(plocal, self.tags['[fname_source]'][i])
+            dst = os.path.join(prun, self.tags['[fname_default_target]'][i])
+            if not os.path.exists(os.path.dirname(dst)):
+                os.makedirs(os.path.dirname(dst))
+            shutil.copy2(src, dst)
 
     # TODO: copy_model_data and create_model_zip should be the same.
     def create_model_zip(self):
@@ -2153,6 +2136,19 @@ class PBS(object):
             except KeyError:
                 pass
 
+            # one using just one file so it can be used together with the
+            # DLC spreadsheets
+            try:
+                self.copyback_files = [tag_dict['[copyback_f1]']]
+                self.copyback_frename = [tag_dict['[copyback_f1_rename]']]
+            except KeyError:
+                pass
+            try:
+                self.copyto_generic = [tag_dict['[copyto_generic_f1]']]
+                self.copyto_files = [tag_dict['[copyto_f1]']]
+            except KeyError:
+                pass
+
             # related to the dynamically setting the walltime
             duration = float(tag_dict['[time_stop]'])
             dt = float(tag_dict['[dt_sim]'])
@@ -3990,6 +3986,15 @@ class Cases(object):
         leq : bool, default=False
 
         columns : list, default=None
+
+        Returns
+        -------
+
+        stats_df : pandas.DataFrame
+
+        Leq_df : pandas.DataFrame
+
+        AEP_df : pandas.DataFrame
         """
         post_dir = kwargs.get('post_dir', self.post_dir)
         sim_id = kwargs.get('sim_id', self.sim_id)
@@ -4678,10 +4683,11 @@ class Cases(object):
             [(filename, hours),...] where, filename is the name of the file
             (can be a full path, but only the base path is considered), hours
             is the number of hours over the life time. When fh_lst is set,
-            res_dir, dlc_folder and dlc_name are not used.
+            years, res_dir, dlc_folder and dlc_name are not used.
 
         years : float, default=20
-            Total life time expressed in years.
+            Total life time expressed in years, only relevant when fh_lst is
+            None.
 
         Returns
         -------
@@ -4797,15 +4803,21 @@ class Cases(object):
                 # in case the original dfs holds multiple DLC cases.
                 dict_Leq[col].append(sel_sort[col].unique()[0])
 
-            # R_eq is usually expressed as the 1Hz equivalent load
-            neq_1hz = sel_sort['neq'].values
+            # R_eq is assumed to be expressed as the 1Hz equivalent load
+            # where neq is set to the simulation length
+#            neq_1hz = sel_sort['neq'].values
 
             for m in ms:
                 # sel_sort[m] holds the equivalent loads for each of the DLC
                 # cases: such all the different wind speeds for dlc1.2
                 m_ = float(m.split('=')[1])
-                R_eq_mod = np.power(sel_sort[m].values, m_) * neq_1hz
-                tmp = (R_eq_mod*np.array(hours)).sum()
+                # do not multiply out neq_1hz from R_eq
+                R_eq_mod = np.power(sel_sort[m].values, m_)
+                # R_eq_mod will have to be scaled from its simulation length
+                # to 1 hour (hour distribution is in hours...). Since the
+                # simulation time has not been multiplied out of R_eq_mod yet,
+                # we can just multiply with 3600 (instead of doing 3600/neq)
+                tmp = (R_eq_mod * np.array(hours) * 3600).sum()
                 # the effective Leq for each of the material constants
                 dict_Leq[m].append(math.pow(tmp/neq_life, 1.0/m_))
                 # the following is twice as slow:
diff --git a/wetb/prepost/dlcdefs.py b/wetb/prepost/dlcdefs.py
index e1a8b99b332ee0a419271d9ffa439f38fefff9f7..8cad56b23bae54b85eeaafc508a66aadb3a0aad2 100644
--- a/wetb/prepost/dlcdefs.py
+++ b/wetb/prepost/dlcdefs.py
@@ -20,6 +20,9 @@ from glob import glob
 import pandas as pd
 
 from wetb.prepost import misc
+from wetb.prepost.GenerateHydro import hydro_input
+from wetb.prepost import hawcstab2
+
 
 def casedict2xlsx():
     """
@@ -129,7 +132,7 @@ def variable_tag_func(master, case_id_short=False):
     return master
 
 
-def vartags_dlcs(master):
+def vartag_dlcs(master):
 
     mt = master.tags
 
@@ -157,6 +160,61 @@ def vartags_dlcs(master):
     return master
 
 
+def vartag_excel_stabcon(master):
+    """Variable tag function type that generates a hydro input file for the
+    wave kinematics dll if [hydro input name] is defined properly.
+    """
+
+    mt = master.tags
+    if '[hydro input name]' not in mt or not mt['[hydro input name]']:
+        return master
+
+    print('creating hydro input file for: %s.inp\n' % mt['[hydro input name]'])
+
+    mt['[wdepth]'] = float(mt['[wdepth]'])
+    mt['[Hs]'] = float(mt['[Hs]'])
+    mt['[Tp]'] = float(mt['[Tp]'])
+
+
+    if '[wave_gamma]' not in mt or not mt['[wave_gamma]']:
+        mt['[wave_gamma]'] = 3.3
+    else:
+        mt['[wave_gamma]'] = float(mt['[wave_gamma]'])
+
+    if '[wave_coef]' not in mt or not mt['[wave_coef]']:
+        mt['[wave_coef]'] = 200
+    else:
+        mt['[wave_coef]'] = int(mt['[wave_coef]'])
+
+    if '[stretching]' not in mt or not mt['[stretching]']:
+        mt['[stretching]'] = 1
+    else:
+        mt['[stretching]'] = int(mt['[stretching]'])
+
+    if '[wave_seed]' not in mt or not mt['[wave_seed]']:
+        mt['[wave_seed]'] = int(mt['[seed]'])
+    else:
+        mt['[wave_seed]'] = int(mt['[wave_seed]'])
+
+    try:
+        embed_sf = float(master.tags['[embed_sf]'])
+        embed_sf_t0 = int(master.tags['[t0]']) + 20
+    except KeyError:
+        embed_sf = None
+        embed_sf_t0 = None
+
+    hio = hydro_input(wavetype=mt['[wave_type]'], Hs=mt['[Hs]'], Tp=mt['[Tp]'],
+                      gamma=mt['[wave_gamma]'], wdepth=mt['[wdepth]'],
+                      spectrum=mt['[wave_spectrum]'], seed=mt['[wave_seed]'],
+                      stretching=mt['[stretching]'], coef=mt['[wave_coef]'],
+                      embed_sf=embed_sf, embed_sf_t0=embed_sf_t0, spreading=None)
+
+    hio.execute(filename=mt['[hydro input name]'] + '.inp',
+                folder=mt['[hydro_dir]'])
+
+    return master
+
+
 def tags_dlcs(master):
     """
     Initiate tags that are defined in the DLC spreadsheets
@@ -327,11 +385,12 @@ def excel_stabcon(proot, fext='xlsx', pignore=None, pinclude=None, sheet=0,
 
     if not silent:
         print('found %i Excel file(s), ' % len(dict_dfs), end='')
-    k = 0
-    for df in dict_dfs:
-        k += len(df)
+
     if not silent:
-        print('in which a total of %s cases are defined.' % k)
+        k = 0
+        for df in dict_dfs:
+            k += len(df)
+        print('in which a total of %i cases are defined.' % k)
 
     opt_tags = []
 
@@ -410,6 +469,19 @@ def excel_stabcon(proot, fext='xlsx', pignore=None, pinclude=None, sheet=0,
             t_stop = float(tags_dict['[time_stop]'])
             t0 = float(tags_dict['[t0]'])
             tags_dict['[duration]'] = str(t_stop - t0)
+            # in case there is a controller input file defined
+            if '[controller_tuning_file]' in tags_dict:
+                hs2 = hawcstab2.ReadControlTuning()
+                hs2.read_parameters(tags_dict['[controller_tuning_file]'])
+                tags_dict['[pi_gen_reg1.K]'] = hs2.pi_gen_reg1.K
+                tags_dict['[pi_gen_reg2.Kp]'] = hs2.pi_gen_reg2.Kp
+                tags_dict['[pi_gen_reg2.Ki]'] = hs2.pi_gen_reg2.Ki
+                tags_dict['[pi_gen_reg2.Kd]'] = 0.0
+                tags_dict['[pi_pitch_reg3.Kp]'] = hs2.pi_pitch_reg3.Kp
+                tags_dict['[pi_pitch_reg3.Ki]'] = hs2.pi_pitch_reg3.Ki
+                tags_dict['[pi_pitch_reg3.K1]'] = hs2.pi_pitch_reg3.K1
+                tags_dict['[pi_pitch_reg3.K2]'] = hs2.pi_pitch_reg3.K2
+            # save a copy of the current case as one opt_tags entry
             opt_tags.append(tags_dict.copy())
 
     return opt_tags
diff --git a/wetb/prepost/dlcplots.py b/wetb/prepost/dlcplots.py
index 97cf0bf8ad8273192bcba61368fd1e8f4c138e2f..2e8351c90f64feb5515e8b83d726e573b2957379 100644
--- a/wetb/prepost/dlcplots.py
+++ b/wetb/prepost/dlcplots.py
@@ -76,8 +76,12 @@ def merge_sim_ids(sim_ids, post_dirs, post_dir_save=False):
                 wsp = '[wsp]'
             else:
                 wsp = '[Windspeed]'
-            dfc = dfc[['[case_id]', '[run_dir]', wsp, '[res_dir]',
-                       '[wdir]', '[DLC]']]
+            # columns we want to add from cc.cases (cases dict) to stats
+            cols_cc = set(['[run_dir]', wsp, '[res_dir]', '[wdir]', '[DLC]'])
+            # do not add column twice, some might already be in df stats
+            add_cols = list(cols_cc - set(df_stats.columns))
+            add_cols.append('[case_id]')
+            dfc = dfc[add_cols]
             df_stats = pd.merge(df_stats, dfc, on='[case_id]')
             df_stats.rename(columns={wsp:'[Windspeed]'}, inplace=True)
 
@@ -118,6 +122,8 @@ def merge_sim_ids(sim_ids, post_dirs, post_dir_save=False):
         sim_id = sim_ids
         sim_ids = [sim_id]
         post_dir = post_dirs
+        if isinstance(post_dirs, list):
+            post_dir = post_dirs[0]
         cc = sim.Cases(post_dir, sim_id, rem_failed=True)
         df_stats, _, _ = cc.load_stats(leq=False)
         run_dirs = [df_stats['[run_dir]'].unique()[0]]
@@ -129,8 +135,12 @@ def merge_sim_ids(sim_ids, post_dirs, post_dir_save=False):
             wsp = '[wsp]'
         else:
             wsp = '[Windspeed]'
-        dfc = dfc[['[case_id]', '[run_dir]', wsp, '[res_dir]',
-                   '[wdir]', '[DLC]']]
+        # columns we want to add from cc.cases (cases dict) to stats
+        cols_cc = set(['[run_dir]', wsp, '[res_dir]', '[wdir]', '[DLC]'])
+        # do not add column twice, some might already be in df stats
+        add_cols = list(cols_cc - set(df_stats.columns))
+        add_cols.append('[case_id]')
+        dfc = dfc[add_cols]
         df_stats = pd.merge(df_stats, dfc, on='[case_id]')
         df_stats.rename(columns={wsp:'[Windspeed]'}, inplace=True)
 
@@ -348,7 +358,6 @@ def plot_stats(sim_ids, post_dirs, fig_dir_base=None):
             fig.clear()
             print('saved: %s' % fig_path)
 
-
             ax2.grid()
             ax2.set_xlim([3, 27])
             leg = ax2.legend(loc='best', ncol=2)
@@ -972,6 +981,7 @@ class PlotPerf(object):
         print('done')
         self.fig.clear()
 
+
 def plot_dlc01_powercurve(sim_ids, post_dirs, run_dirs, fig_dir_base):
     """
     Create power curve based on steady DLC01 results
@@ -1029,6 +1039,7 @@ def plot_dlc00(sim_ids, post_dirs, run_dirs, fig_dir_base=None, labels=None,
         fig_path = os.path.join(fig_dir_base, dlcf)
         fp.final(fig_path, _cname.replace('.htc', '.png'))
 
+
 def plot_staircase(sim_ids, post_dirs, run_dirs, fig_dir_base=None,
                    cname='dlc00_stair_wsp04_25_noturb.htc'):
     """
diff --git a/wetb/prepost/dlctemplate.py b/wetb/prepost/dlctemplate.py
index 000b1b1ed8d0409cf03254c71fe3c79b28be2aa5..a989bd8d8ed2ecdb5616a07832d63725a6d6703e 100644
--- a/wetb/prepost/dlctemplate.py
+++ b/wetb/prepost/dlctemplate.py
@@ -275,6 +275,8 @@ def launch_dlcs_excel(sim_id, silent=False, verbose=False, pbs_turb=False,
 
     # see if a htc/DLCs dir exists
     dlcs_dir = os.path.join(P_SOURCE, 'htc', 'DLCs')
+    # Load all DLC definitions and make some assumptions on tags that are not
+    # defined
     if os.path.exists(dlcs_dir):
         opt_tags = dlcdefs.excel_stabcon(dlcs_dir, silent=silent)
     else:
@@ -290,9 +292,8 @@ def launch_dlcs_excel(sim_id, silent=False, verbose=False, pbs_turb=False,
     for (dirpath, dirnames, fnames) in os.walk(P_SOURCE):
         # remove all zip files
         for i, fname in enumerate(fnames):
-            if fname.endswith('.zip'):
-                fnames.pop(i)
-        f_ziproot.extend(fnames)
+            if not fname.endswith('.zip'):
+                f_ziproot.append(fname)
         break
     # and add those files
     for opt in opt_tags:
@@ -312,13 +313,11 @@ def launch_dlcs_excel(sim_id, silent=False, verbose=False, pbs_turb=False,
 
     # all tags set in master_tags will be overwritten by the values set in
     # variable_tag_func(), iter_dict and opt_tags
-    # values set in iter_dict have precedence over opt_tags
-    # variable_tag_func() has precedense over iter_dict, which has precedence
-    # over opt_tags. So opt_tags comes last
-    # variable_tag func is not required because everything is already done
-    # in dlcdefs.excel_stabcon
-    no_variable_tag_func = None
-    cases = sim.prepare_launch(iter_dict, opt_tags, master, no_variable_tag_func,
+    # values set in iter_dict have precedence over opt_tags. vartag_func()
+    # has precedence over iter_dict, which has precedence over opt_tags.
+    # dlcdefs.vartag_excel_stabcon adds support for creating hydro files
+    vartag_func = dlcdefs.vartag_excel_stabcon
+    cases = sim.prepare_launch(iter_dict, opt_tags, master, vartag_func,
                                write_htc=write_htc, runmethod=runmethod,
                                copyback_turb=True, update_cases=False, msg='',
                                ignore_non_unique=False, run_only_new=False,
diff --git a/wetb/prepost/tests/test_Simulations.py b/wetb/prepost/tests/test_Simulations.py
index b6ea82859fdef30b0dde5edc01a83dd429e13e01..f4da4ea23c6525a0c67d265a92e495b2fd63cd54 100644
--- a/wetb/prepost/tests/test_Simulations.py
+++ b/wetb/prepost/tests/test_Simulations.py
@@ -14,16 +14,22 @@ import unittest
 import os
 import filecmp
 import shutil
-#import pickle
 
-from wetb.prepost import dlctemplate as tmpl
+import numpy as np
+import pandas as pd
 
+from wetb.prepost import dlctemplate as tmpl
+from wetb.prepost import Simulations as sim
+from wetb.fatigue_tools.fatigue import eq_load
 
-class TestGenerateInputs(unittest.TestCase):
 
+class Template(unittest.TestCase):
     def setUp(self):
         self.basepath = os.path.dirname(__file__)
 
+
+class TestGenerateInputs(Template):
+
     def test_launch_dlcs_excel(self):
         # manually configure paths, HAWC2 model root path is then constructed as
         # p_root_remote/PROJECT/sim_id, and p_root_local/PROJECT/sim_id
@@ -75,5 +81,75 @@ class TestGenerateInputs(unittest.TestCase):
 #        self.assertTrue(pkl_remote == pkl_ref)
 
 
+class TestFatigueLifetime(Template):
+
+    def test_leq_life(self):
+        """Verify if prepost.Simulation.Cases.fatigue_lifetime() returns
+        the expected life time equivalent load.
+        """
+        # ---------------------------------------------------------------------
+        # very simple case
+        cases = {'case1':{'[post_dir]':'no-path', '[sim_id]':'A0'},
+                 'case2':{'[post_dir]':'no-path', '[sim_id]':'A0'}}
+        cc = sim.Cases(cases)
+
+        fh_list = [('case1', 10/3600), ('case2', 20/3600)]
+        dfs = pd.DataFrame({'m=1.0' : [2, 3],
+                            'channel' : ['channel1', 'channel1'],
+                            '[case_id]' : ['case1', 'case2']})
+        neq_life = 1.0
+        df_Leq = cc.fatigue_lifetime(dfs, neq_life, fh_lst=fh_list,
+                                     save=False, update=False, csv=False,
+                                     xlsx=False, silent=False)
+        np.testing.assert_allclose(df_Leq['m=1.0'].values, 2*10 + 3*20)
+        self.assertTrue(df_Leq['channel'].values[0]=='channel1')
+
+        # ---------------------------------------------------------------------
+        # slightly more complicated
+        neq_life = 3.0
+        df_Leq = cc.fatigue_lifetime(dfs, neq_life, fh_lst=fh_list,
+                                     save=False, update=False, csv=False,
+                                     xlsx=False, silent=False)
+        np.testing.assert_allclose(df_Leq['m=1.0'].values,
+                                   (2*10 + 3*20)/neq_life)
+
+        # ---------------------------------------------------------------------
+        # a bit more complex and also test the sorting of fh_lst and dfs
+        cases = {'case1':{'[post_dir]':'no-path', '[sim_id]':'A0'},
+                 'case2':{'[post_dir]':'no-path', '[sim_id]':'A0'},
+                 'case3':{'[post_dir]':'no-path', '[sim_id]':'A0'},
+                 'case4':{'[post_dir]':'no-path', '[sim_id]':'A0'}}
+        cc = sim.Cases(cases)
+
+        fh_list = [('case3', 10/3600), ('case2', 20/3600),
+                   ('case1', 50/3600), ('case4', 40/3600)]
+        dfs = pd.DataFrame({'m=3.0' : [2, 3, 4, 5],
+                            'channel' : ['channel1']*4,
+                            '[case_id]' : ['case4', 'case2', 'case3', 'case1']})
+        neq_life = 5.0
+        df_Leq = cc.fatigue_lifetime(dfs, neq_life, fh_lst=fh_list,
+                                     save=False, update=False, csv=False,
+                                     xlsx=False, silent=False)
+        expected = ((2*2*2*40 + 3*3*3*20 + 4*4*4*10 + 5*5*5*50)/5)**(1/3)
+        np.testing.assert_allclose(df_Leq['m=3.0'].values, expected)
+
+        # ---------------------------------------------------------------------
+        # more cases and with sorting
+        base = {'[post_dir]':'no-path', '[sim_id]':'A0'}
+        cases = {'case%i' % k : base for k in range(50)}
+        cc = sim.Cases(cases)
+        # reverse the order of how they appear in dfs and fh_lst
+        fh_list = [('case%i' % k, k*10/3600) for k in range(49,-1,-1)]
+        dfs = pd.DataFrame({'m=5.2' : np.arange(1,51,1),
+                            'channel' : ['channel1']*50,
+                            '[case_id]' : ['case%i' % k for k in range(50)]})
+        df_Leq = cc.fatigue_lifetime(dfs, neq_life, fh_lst=fh_list,
+                                     save=False, update=False, csv=False,
+                                     xlsx=False, silent=False)
+        expected = np.sum(np.power(np.arange(1,51,1), 5.2)*np.arange(0,50,1)*10)
+        expected = np.power(expected/neq_life, 1/5.2)
+        np.testing.assert_allclose(df_Leq['m=5.2'].values, expected)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/wetb/prepost/windIO.py b/wetb/prepost/windIO.py
index 5bdcb5e0f90603e81f1a51e5981793883be9387e..b4cdd3e6298be0474533cb4e0f5cf8836e8b6127 100755
--- a/wetb/prepost/windIO.py
+++ b/wetb/prepost/windIO.py
@@ -1166,12 +1166,19 @@ class LoadResults(ReadHawc2):
             # -----------------------------------------------------------------
             # WIND SPEED
             # WSP gl. coo.,Vx
+            # Free wind speed Vx, gl. coo, of gl. pos    0.00,   0.00,  -6.00  LABEL
             elif self.ch_details[ch, 0].startswith('WSP gl.'):
                 units = self.ch_details[ch, 1]
                 direction = self.ch_details[ch, 0].split(',')[1]
                 tmp = self.ch_details[ch, 2].split('pos')[1]
                 x, y, z = tmp.split(',')
                 x, y, z = x.strip(), y.strip(), z.strip()
+                tmp = z.split('  ')
+                sensortag = ''
+                if len(tmp) == 2:
+                    z, sensortag = tmp
+                elif len(tmp) == 1:
+                    z = tmp[0]
 
                 # and tag it
                 tag = 'windspeed-global-%s-%s-%s-%s' % (direction, x, y, z)
@@ -1181,6 +1188,9 @@ class LoadResults(ReadHawc2):
                 channelinfo['pos'] = (x, y, z)
                 channelinfo['units'] = units
                 channelinfo['chi'] = ch
+                channelinfo['sensortag'] = sensortag
+                # FIXME: direction is the same as component, right?
+                channelinfo['direction'] = direction
 
             # WIND SPEED AT BLADE
             # 0: WSP Vx, glco, R= 61.5
@@ -1201,6 +1211,7 @@ class LoadResults(ReadHawc2):
                 # save all info in the dict
                 channelinfo = {}
                 channelinfo['coord'] = coord
+                # FIXME: direction is the same as component, right?
                 channelinfo['direction'] = direction
                 channelinfo['blade_nr'] = int(blade_nr)
                 channelinfo['radius'] = float(radius)
diff --git a/wetb/prepost/write_master.py b/wetb/prepost/write_master.py
index badf3bcd534ed8fa78726dff90fbf6af39bae31e..94a2a6ccccf0de480b49525b8aba79feb6dad5ad 100644
--- a/wetb/prepost/write_master.py
+++ b/wetb/prepost/write_master.py
@@ -29,48 +29,58 @@ import numpy as np
 import os
 import pandas as pd
 
+
 def write_master(path_to_texts,
                  excel_name='DLCs.xlsx', file_end='.txt',
                  delimiter='\t'):
     """ Write a master Excel sheet from a series of text files
-    
-    Args:
-        path_to_texts (str): path to directory with text files
-        excel_name (str): filename of generated master Excel file
-        file_end (str): file ending of text files
-        delimiter (str): column delimiter in text files
+
+    Parameters
+    ----------
+
+    path_to_texts : str
+        path to directory with text files
+
+    excel_name : str
+        filename of generated master Excel file
+
+    file_end : str
+        file ending of text files
+
+    delimiter : str
+        column delimiter in text files
     """
 
     # formatting for header cells
     header_dict = {'bold': True, 'font_color': '#1F497D',
                    'bottom': 2, 'bottom_color': '#95B3D7'}
-    
+
     # get list of text files
     text_files = [f for f in os.listdir(path_to_texts) \
                                   if f.endswith(file_end)]
-    
+
     # check if main text file in the specified directory
     if 'Main'+file_end not in text_files:
         raise ValueError('\"Main\" file not in CSV directory')
-    
-    # rearrange text files so main page is first and everything 
+
+    # rearrange text files so main page is first and everything
     #   else is alphabetical
     text_files.remove('Main'+file_end)
     text_files = ['Main'+file_end] + sorted(text_files)
-    
+
     # open excel file
     writer = pd.ExcelWriter(excel_name, engine='xlsxwriter')
-    
+
     # create workbook and add formast
     workbook  = writer.book
     header    = workbook.add_format(header_dict)
-    
+
     # loop through text files
     for text_name in text_files:
-    
+
         # define path to csv file
         text_path = os.path.join(path_to_texts,text_name)
-        
+
         # read data, write to Excel file, and define worksheet handle
         text_df = pd.read_table(text_path,
                                 delimiter=delimiter, dtype=str,
@@ -78,10 +88,10 @@ def write_master(path_to_texts,
         text_df.to_excel(writer, sheet_name=text_name.rstrip(file_end),
                          index=False, header=False)
         worksheet = writer.sheets[text_name.rstrip(file_end)]
-        
+
         # get column widths by calculating max string lenths
         col_widths = text_df.apply(lambda x: np.max([len(str(s)) for s in x]))
-        
+
         # add formatting
         for i_col, width in enumerate(col_widths):
             worksheet.set_column(i_col, i_col, width)
@@ -92,11 +102,10 @@ def write_master(path_to_texts,
         else:
             worksheet.set_row(1, cell_format=header)
         worksheet.set_zoom(zoom=85)
-        
+
     # save worksheet
     writer.save()
-    
-    return
+
 
 if __name__ == '__main__':