From 2a4f80147fcf3108f832004b96f61bf90059e01e Mon Sep 17 00:00:00 2001
From: dave <dave@dtu.dk>
Date: Thu, 10 Dec 2015 13:34:34 +0100
Subject: [PATCH] running 2to3, plus some other minor tweaks

---
 wetb/prepost/DataChecks.py  |   6 +-
 wetb/prepost/Simulations.py | 264 ++++++++++++++++++------------------
 wetb/prepost/dlcdefs.py     |  20 +--
 wetb/prepost/dlcplots.py    |  18 +--
 wetb/prepost/dlctemplate.py |  14 +-
 wetb/prepost/filters.py     |  16 +--
 wetb/prepost/h2_vs_hs2.py   | 106 +++++++--------
 wetb/prepost/hawcstab2.py   |   6 +-
 wetb/prepost/misc.py        |  22 +--
 wetb/prepost/mplutils.py    |   4 +-
 wetb/prepost/prepost.py     |   4 +-
 wetb/prepost/windIO.py      |  30 ++--
 12 files changed, 255 insertions(+), 255 deletions(-)

diff --git a/wetb/prepost/DataChecks.py b/wetb/prepost/DataChecks.py
index aeaa61de..e241210d 100644
--- a/wetb/prepost/DataChecks.py
+++ b/wetb/prepost/DataChecks.py
@@ -5,8 +5,8 @@ Created on Mon Mar  5 16:00:02 2012
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 
 # time and data should be 1 dimensional arrays
 def array_1d(array):
@@ -21,7 +21,7 @@ def array_1d(array):
             if (array.shape[0] == 1) or (array.shape[1] == 1):
                 return True
             else:
-                raise ValueError, 'only 1D arrays are accepted'
+                raise ValueError('only 1D arrays are accepted')
     else:
         return True
 
diff --git a/wetb/prepost/Simulations.py b/wetb/prepost/Simulations.py
index fc0e4fe3..69e020ee 100755
--- a/wetb/prepost/Simulations.py
+++ b/wetb/prepost/Simulations.py
@@ -7,8 +7,8 @@ __author__ = "David Verelst <dave@dtu.dk>"
 __license__ = "GPL-2+"
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 #print(*objects, sep=' ', end='\n', file=sys.stdout)
 
 # standard python library
@@ -39,9 +39,9 @@ import pandas as pd
 import tables as tbl
 
 # custom libraries
-import misc
-import windIO
-import prepost
+from . import misc
+from . import windIO
+from . import prepost
 try:
     import fatigue_tools.dlc_fatigue as dlc_ft
 except ImportError:
@@ -127,7 +127,7 @@ def create_multiloop_list(iter_dict, debug=False):
     iter_list = []
 
     # fix the order of the keys
-    key_order = iter_dict.keys()
+    key_order = list(iter_dict.keys())
     nr_keys = len(key_order)
     nr_values,indices = [],[]
     # determine how many items on each key
@@ -250,7 +250,7 @@ def local_windows_script(cases, sim_id, nr_cpus=2):
 
     stop = False
 
-    for i_case, (cname, case) in enumerate(cases.iteritems()):
+    for i_case, (cname, case) in enumerate(cases.items()):
 #    for i_case, case in enumerate(sorted(cases.keys())):
 
         shellscript += 'rem\nrem\n'
@@ -428,7 +428,7 @@ def run_local(cases, silent=False, check_log=True):
         print('')
         print('='*79)
         print('Be advised, launching %i HAWC2 simulation(s) sequentially' % nr)
-        print('run dir: %s' % cases[cases.keys()[0]]['[run_dir]'])
+        print('run dir: %s' % cases[list(cases.keys())[0]]['[run_dir]'])
         print('')
 
     if check_log:
@@ -711,17 +711,17 @@ def prepare_launch(iter_dict, opt_tags, master, variable_tag_func,
 
             # make sure the current cases is unique!
             if not ignore_non_unique:
-                if htc.keys()[0] in cases:
-                    msg = 'non unique case in cases: %s' % htc.keys()[0]
+                if list(htc.keys())[0] in cases:
+                    msg = 'non unique case in cases: %s' % list(htc.keys())[0]
                     raise KeyError(msg)
 
             # save in the big cases. Note that values() gives a copy!
-            cases[htc.keys()[0]] = htc.values()[0]
+            cases[list(htc.keys())[0]] = list(htc.values())[0]
             # if we have an update scenario, keep track of the cases we want
             # to run again. This prevents us from running all cases on every
             # update
             if run_only_new:
-                cases_to_run[htc.keys()[0]] = htc.values()[0]
+                cases_to_run[list(htc.keys())[0]] = list(htc.values())[0]
 
             if verbose:
                 print('created cases for: %s.htc\n' % master.tags['[case_id]'])
@@ -756,7 +756,7 @@ def prepare_launch(iter_dict, opt_tags, master, variable_tag_func,
     tagfile += '='*79 + '\n'
     tagfile += 'iter_dict\n'.rjust(30)
     tagfile += '='*79 + '\n'
-    iter_dict_list = sorted(iter_dict.iteritems(), key=itemgetter(0))
+    iter_dict_list = sorted(iter_dict.items(), key=itemgetter(0))
     for k in iter_dict_list:
         tagfile += str(k[0]).rjust(30) + ' : ' + str(k[1]).ljust(20) + '\n'
 
@@ -769,7 +769,7 @@ def prepare_launch(iter_dict, opt_tags, master, variable_tag_func,
         tagfile += '-'*79 + '\n'
         tagfile += 'opt_tags set\n'.rjust(30)
         tagfile += '-'*79 + '\n'
-        opt_dict = sorted(k.iteritems(), key=itemgetter(0), reverse=False)
+        opt_dict = sorted(k.items(), key=itemgetter(0), reverse=False)
         for kk in opt_dict:
             tagfile += str(kk[0]).rjust(30)+' : '+str(kk[1]).ljust(20) + '\n'
     if update_cases:
@@ -805,7 +805,7 @@ def prepare_relaunch(cases, runmethod='gorm', verbose=False, write_htc=True,
     master = HtcMaster()
     # for invariant tags, load random case. Necessary before we can load
     # the master file, otherwise we don't know which master to load
-    master.tags = cases[cases.keys()[0]]
+    master.tags = cases[list(cases.keys())[0]]
     master.loadmaster()
 
     # load the original cases dict
@@ -816,7 +816,7 @@ def prepare_relaunch(cases, runmethod='gorm', verbose=False, write_htc=True,
 
     sim_nr = 0
     sim_total = len(cases)
-    for case, casedict in cases.iteritems():
+    for case, casedict in cases.items():
         sim_nr += 1
 
         # set all the tags in the HtcMaster file
@@ -842,7 +842,7 @@ def prepare_relaunch(cases, runmethod='gorm', verbose=False, write_htc=True,
         # save in the big cases. Note that values() gives a copy!
         # remark, what about the copying done at the end of master.createcase?
         # is that redundant then?
-        cases[htc.keys()[0]] = htc.values()[0]
+        cases[list(htc.keys())[0]] = list(htc.values())[0]
 
         if verbose:
             print('created cases for: %s.htc\n' % master.tags['[case_id]'])
@@ -880,7 +880,7 @@ def prepare_launch_cases(cases, runmethod='gorm', verbose=False,write_htc=True,
     master = HtcMaster()
     # for invariant tags, load random case. Necessary before we can load
     # the master file, otherwise we don't know which master to load
-    master.tags = cases[cases.keys()[0]]
+    master.tags = cases[list(cases.keys())[0]]
     # load the master htc file as a string under the master.tags
     master.loadmaster()
     # create the execution folder structure and copy all data to it
@@ -910,7 +910,7 @@ def prepare_launch_cases(cases, runmethod='gorm', verbose=False,write_htc=True,
     cases_new = {}
 
     # cycle thourgh all the combinations
-    for case, casedict in cases.iteritems():
+    for case, casedict in cases.items():
         sim_nr += 1
 
         sim_id = casedict['[sim_id]']
@@ -950,13 +950,13 @@ def prepare_launch_cases(cases, runmethod='gorm', verbose=False,write_htc=True,
             print('===master.tags===\n', master.tags)
 
         # make sure the current cases is unique!
-        if htc.keys()[0] in cases_new:
-            msg = 'non unique case in cases: %s' % htc.keys()[0]
+        if list(htc.keys())[0] in cases_new:
+            msg = 'non unique case in cases: %s' % list(htc.keys())[0]
             raise KeyError(msg)
         # save in the big cases. Note that values() gives a copy!
         # remark, what about the copying done at the end of master.createcase?
         # is that redundant then?
-        cases_new[htc.keys()[0]] = htc.values()[0]
+        cases_new[list(htc.keys())[0]] = list(htc.values())[0]
 
         if verbose:
             print('created cases for: %s.htc\n' % master.tags['[case_id]'])
@@ -1008,7 +1008,7 @@ def launch(cases, runmethod='local', verbose=False, copyback_turb=True,
         'thyra' or 'gorm', PBS scripts are written to the respective server.
     """
 
-    random_case = cases.keys()[0]
+    random_case = list(cases.keys())[0]
     sim_id = cases[random_case]['[sim_id]']
     pbs_out_dir = cases[random_case]['[pbs_out_dir]']
 
@@ -1063,7 +1063,7 @@ def post_launch(cases, save_iter=False):
     # load one case dictionary from the cases to get data that is the same
     # over all simulations in the cases
     try:
-        master = cases.keys()[0]
+        master = list(cases.keys())[0]
     except IndexError:
         print('there are no cases, aborting...')
         return None
@@ -1089,7 +1089,7 @@ def post_launch(cases, save_iter=False):
     nr = 1
     nr_tot = len(cases)
 
-    tmp = cases.keys()[0]
+    tmp = list(cases.keys())[0]
     print('checking logs, path (from a random item in cases):')
     print(os.path.join(run_dir, log_dir))
 
@@ -1124,7 +1124,7 @@ def post_launch(cases, save_iter=False):
     # now see how many cases resulted in an error and add to the general LOG
     # determine how long the first case name is
     try:
-        spacing = len(errorlogs.MsgListLog2.keys()[0]) + 9
+        spacing = len(list(errorlogs.MsgListLog2.keys())[0]) + 9
     except Exception as e:
         print('nr of OK cases: %i' % (len(cases) - len(cases_fail)))
         raise(e)
@@ -1745,7 +1745,7 @@ class HtcMaster:
             print('checking if following case is in htc_dict_repo: ')
             print(self.tags['[case_id]'] + '.htc')
 
-        if htc_dict_repo.has_key(self.tags['[case_id]'] + '.htc'):
+        if self.tags['[case_id]'] + '.htc' in htc_dict_repo:
             # if the new case_id already exists in the htc_dict_repo
             # do not add it again!
             # print('case_id key is not unique in the given htc_dict_repo!'
@@ -1966,7 +1966,7 @@ class PBS:
         # first check if the pbs_out_dir exists, this dir is considered to be
         # the same for all cases present in cases
         # self.tags['[run_dir]']
-        case0 = self.cases.keys()[0]
+        case0 = list(self.cases.keys())[0]
         path = self.cases[case0]['[run_dir]'] + self.pbs_out_dir
         if not os.path.exists(path):
             os.makedirs(path)
@@ -2427,7 +2427,7 @@ class PBS:
         print('checking if all log and result files are present...', end='')
 
         # check for each case if we have results and a log file
-        for cname, case in cases.iteritems():
+        for cname, case in cases.items():
             run_dir = case['[run_dir]']
             res_dir = case['[res_dir]']
             log_dir = case['[log_dir]']
@@ -2509,77 +2509,77 @@ class ErrorLogs:
 
         # error messages that appear during initialisation
         self.err_init = {}
-        self.err_init[' *** ERROR *** Error in com'] = len(self.err_init.keys())
-        self.err_init[' *** ERROR ***  in command '] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Error in com'] = len(self.err_init)
+        self.err_init[' *** ERROR ***  in command '] = len(self.err_init)
         #  *** WARNING *** A comma "," is written within the command line
-        self.err_init[' *** WARNING *** A comma ",'] = len(self.err_init.keys())
+        self.err_init[' *** WARNING *** A comma ",'] = len(self.err_init)
         #  *** ERROR *** Not correct number of parameters
-        self.err_init[' *** ERROR *** Not correct '] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Not correct '] = len(self.err_init)
         #  *** INFO *** End of file reached
-        self.err_init[' *** INFO *** End of file r'] = len(self.err_init.keys())
+        self.err_init[' *** INFO *** End of file r'] = len(self.err_init)
         #  *** ERROR *** No line termination in command line
-        self.err_init[' *** ERROR *** No line term'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** No line term'] = len(self.err_init)
         #  *** ERROR *** MATRIX IS NOT DEFINITE
-        self.err_init[' *** ERROR *** MATRIX IS NO'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** MATRIX IS NO'] = len(self.err_init)
         #  *** ERROR *** There are unused relative
-        self.err_init[' *** ERROR *** There are un'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** There are un'] = len(self.err_init)
         #  *** ERROR *** Error finding body based
-        self.err_init[' *** ERROR *** Error findin'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Error findin'] = len(self.err_init)
         #  *** ERROR *** In body actions
-        self.err_init[' *** ERROR *** In body acti'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** In body acti'] = len(self.err_init)
         #  *** ERROR *** Command unknown
-        self.err_init[' *** ERROR *** Command unkn'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Command unkn'] = len(self.err_init)
         #  *** ERROR *** ERROR - More bodies than elements on main_body: tower
-        self.err_init[' *** ERROR *** ERROR - More'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** ERROR - More'] = len(self.err_init)
         #  *** ERROR *** The program will stop
-        self.err_init[' *** ERROR *** The program '] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** The program '] = len(self.err_init)
         #  *** ERROR *** Unknown begin command in topologi.
-        self.err_init[' *** ERROR *** Unknown begi'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Unknown begi'] = len(self.err_init)
         #  *** ERROR *** Not all needed topologi main body commands present
-        self.err_init[' *** ERROR *** Not all need'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Not all need'] = len(self.err_init)
         #  *** ERROR ***  opening timoschenko data file
-        self.err_init[' *** ERROR ***  opening tim'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR ***  opening tim'] = len(self.err_init)
         #  *** ERROR *** Error opening AE data file
-        self.err_init[' *** ERROR *** Error openin'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Error openin'] = len(self.err_init)
         #  *** ERROR *** Requested blade _ae set number not found in _ae file
-        self.err_init[' *** ERROR *** Requested bl'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** Requested bl'] = len(self.err_init)
         #  Error opening PC data file
-        self.err_init[' Error opening PC data file'] = len(self.err_init.keys())
+        self.err_init[' Error opening PC data file'] = len(self.err_init)
         #  *** ERROR *** error reading mann turbulence
-        self.err_init[' *** ERROR *** error readin'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** error readin'] = len(self.err_init)
         #  *** INFO *** The DLL subroutine
-        self.err_init[' *** INFO *** The DLL subro'] = len(self.err_init.keys())
+        self.err_init[' *** INFO *** The DLL subro'] = len(self.err_init)
         #  ** WARNING: FROM ESYS ELASTICBAR: No keyword
-        self.err_init[' ** WARNING: FROM ESYS ELAS'] = len(self.err_init.keys())
+        self.err_init[' ** WARNING: FROM ESYS ELAS'] = len(self.err_init)
         #  *** ERROR *** DLL ./control/killtrans.dll could not be loaded - error!
-        self.err_init[' *** ERROR *** DLL'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** DLL'] = len(self.err_init)
         # *** ERROR *** The DLL subroutine
-        self.err_init[' *** ERROR *** The DLL subr'] = len(self.err_init.keys())
+        self.err_init[' *** ERROR *** The DLL subr'] = len(self.err_init)
         # *** WARNING *** Shear center x location not in elastic center, set to zero
-        self.err_init[' *** WARNING *** Shear cent'] = len(self.err_init.keys())
-        self.err_init[' *** WARNING ***'] = len(self.err_init.keys())
-        self.err_init[' *** ERROR ***'] = len(self.err_init.keys())
-        self.err_init[' WARNING'] = len(self.err_init.keys())
-        self.err_init[' ERROR'] = len(self.err_init.keys())
+        self.err_init[' *** WARNING *** Shear cent'] = len(self.err_init)
+        self.err_init[' *** WARNING ***'] = len(self.err_init)
+        self.err_init[' *** ERROR ***'] = len(self.err_init)
+        self.err_init[' WARNING'] = len(self.err_init)
+        self.err_init[' ERROR'] = len(self.err_init)
 
         # error messages that appear during simulation
         self.err_sim = {}
         #  *** ERROR *** Wind speed requested inside
-        self.err_sim[' *** ERROR *** Wind speed r'] = len(self.err_sim.keys())
+        self.err_sim[' *** ERROR *** Wind speed r'] = len(self.err_sim)
         #  Maximum iterations exceeded at time step:
-        self.err_sim[' Maximum iterations exceede'] = len(self.err_sim.keys())
+        self.err_sim[' Maximum iterations exceede'] = len(self.err_sim)
         #  Solver seems not to converge:
-        self.err_sim[' Solver seems not to conver'] = len(self.err_sim.keys())
+        self.err_sim[' Solver seems not to conver'] = len(self.err_sim)
         #  *** ERROR *** Out of x bounds:
-        self.err_sim[' *** ERROR *** Out of x bou'] = len(self.err_sim.keys())
+        self.err_sim[' *** ERROR *** Out of x bou'] = len(self.err_sim)
         #  *** ERROR *** Out of limits in user defined shear field - limit value used
-        self.err_sim[' *** ERROR *** Out of limit'] = len(self.err_sim.keys())
+        self.err_sim[' *** ERROR *** Out of limit'] = len(self.err_sim)
 
         # TODO: error message from a non existing channel output/input
         # add more messages if required...
 
-        self.init_cols = len(self.err_init.keys())
-        self.sim_cols = len(self.err_sim.keys())
+        self.init_cols = len(self.err_init)
+        self.sim_cols = len(self.err_sim)
 
     # TODO: save this not a csv text string but a df_dict, and save as excel
     # and DataFrame!
@@ -2953,7 +2953,7 @@ class ModelData:
                 # it is possible that the NSET line is not defined
                 parts = line.split(' ')
                 try:
-                    for k in xrange(10):
+                    for k in range(10):
                         parts.remove(' ') # throws error when can't find
                 except ValueError:
                     pass
@@ -3005,7 +3005,7 @@ class ModelData:
                 #if subset_nr > 0 :
                 key = '%03i-%03i-a' % (set_nr, subset_nr+1)
                 # in case it is not the first comment line
-                if st_dict.has_key(key): st_dict[key] += line
+                if key in st_dict: st_dict[key] += line
                 else: st_dict[key]  = line
                 ## otherwise we have the set comments
                 #else:
@@ -3083,7 +3083,7 @@ class ModelData:
         content = ''
 
         # sort the key list
-        keysort = self.st_dict.keys()
+        keysort = list(self.st_dict.keys())
         keysort.sort()
 
         for key in keysort:
@@ -3134,7 +3134,7 @@ class ModelData:
                 'x_e [m]', 'y_e [m]', 'k_x [-]', 'k_y [-]', 'pitch [deg]']
 
         if len(selection) < 1:
-            for key in self.st_dict.keys():
+            for key in self.st_dict:
                 # but now only take the ones that hold data
                 if key[-1] == 'd':
                     selection.append([int(key[:3]), int(key[4:7])])
@@ -3153,43 +3153,43 @@ class ModelData:
             # build the latex table header
 #            textable = u"\\begin{table}[b!]\n"
 #            textable += u"\\begin{center}\n"
-            textable_p1 = u"\\centering\n"
-            textable_p1 += u"\\begin{tabular}"
+            textable_p1 = "\\centering\n"
+            textable_p1 += "\\begin{tabular}"
             # configure the column properties
-            tmp = [u'C{2.0 cm}' for k in cols_p1]
-            tmp = u"|".join(tmp)
-            textable_p1 += u'{|' + tmp + u'|}'
-            textable_p1 += u'\hline\n'
+            tmp = ['C{2.0 cm}' for k in cols_p1]
+            tmp = "|".join(tmp)
+            textable_p1 += '{|' + tmp + '|}'
+            textable_p1 += '\\hline\n'
             # add formula mode for the headers
             tmp = []
             for k in cols_p1:
                 k1, k2 = k.split(' ')
-                tmp.append(u'$%s$ $%s$' % (k1,k2) )
+                tmp.append('$%s$ $%s$' % (k1,k2) )
 #            tmp = [u'$%s$' % k for k in cols_p1]
-            textable_p1 += u' & '.join(tmp)
-            textable_p1 += u'\\\\ \n'
-            textable_p1 += u'\hline\n'
+            textable_p1 += ' & '.join(tmp)
+            textable_p1 += '\\\\ \n'
+            textable_p1 += '\\hline\n'
 
-            textable_p2 = u"\\centering\n"
-            textable_p2 += u"\\begin{tabular}"
+            textable_p2 = "\\centering\n"
+            textable_p2 += "\\begin{tabular}"
             # configure the column properties
-            tmp = [u'C{1.5 cm}' for k in cols_p2]
-            tmp = u"|".join(tmp)
-            textable_p2 += u'{|' + tmp + u'|}'
-            textable_p2 += u'\hline\n'
+            tmp = ['C{1.5 cm}' for k in cols_p2]
+            tmp = "|".join(tmp)
+            textable_p2 += '{|' + tmp + '|}'
+            textable_p2 += '\\hline\n'
             # add formula mode for the headers
             tmp = []
             for k in cols_p2:
                 k1, k2 = k.split(' ')
-                tmp.append(u'$%s$ $%s$' % (k1,k2) )
+                tmp.append('$%s$ $%s$' % (k1,k2) )
 #            tmp = [u'$%s$ $%s$' % (k1, k2) for k in cols_p2]
             # hack: spread the last element over two lines
 #            tmp[-1] = '$pitch$ $[deg]$'
-            textable_p2 += u' & '.join(tmp)
-            textable_p2 += u'\\\\ \n'
-            textable_p2 += u'\hline\n'
+            textable_p2 += ' & '.join(tmp)
+            textable_p2 += '\\\\ \n'
+            textable_p2 += '\\hline\n'
 
-            for row in xrange(st_arr.shape[0]):
+            for row in range(st_arr.shape[0]):
                 r    = st_arr[row, self.st_headers.r]
                 m    = st_arr[row,self.st_headers.m]
                 x_cg = st_arr[row,self.st_headers.x_cg]
@@ -3213,19 +3213,19 @@ class ModelData:
                 p1 = [r, m, m*ri_x*ri_x, m*ri_y*ri_y, E*Ixx, E*Iyy, E*A,I_p*G]
                 p2 = [r, x_cg, y_cg, x_sh, y_sh, x_e, y_e, k_x, k_y, pitch]
 
-                textable_p1 += u" & ".join([self._format_nr(k) for k in p1])
-                textable_p1 += u'\\\\ \n'
+                textable_p1 += " & ".join([self._format_nr(k) for k in p1])
+                textable_p1 += '\\\\ \n'
 
-                textable_p2 += u" & ".join([self._format_nr(k) for k in p2])
-                textable_p2 += u'\\\\ \n'
+                textable_p2 += " & ".join([self._format_nr(k) for k in p2])
+                textable_p2 += '\\\\ \n'
 
             # default caption
             if caption == '':
                 caption = 'HAWC2 cross sectional parameters for body: %s' % set_comment
 
-            textable_p1 += u"\hline\n"
-            textable_p1 += u"\end{tabular}\n"
-            textable_p1 += u"\caption{%s}\n" % caption
+            textable_p1 += "\hline\n"
+            textable_p1 += "\end{tabular}\n"
+            textable_p1 += "\caption{%s}\n" % caption
 #            textable += u"\end{center}\n"
 #            textable += u"\end{table}\n"
 
@@ -3234,9 +3234,9 @@ class ModelData:
             with open(fpath + fname, 'w') as f:
                 f.write(textable_p1)
 
-            textable_p2 += u"\hline\n"
-            textable_p2 += u"\end{tabular}\n"
-            textable_p2 += u"\caption{%s}\n" % caption
+            textable_p2 += "\hline\n"
+            textable_p2 += "\end{tabular}\n"
+            textable_p2 += "\caption{%s}\n" % caption
 #            textable += u"\end{center}\n"
 #            textable += u"\end{table}\n"
 
@@ -3540,7 +3540,7 @@ class Cases:
 
     def force_lower_case_id(self):
         tmp_cases = {}
-        for cname, case in self.cases.iteritems():
+        for cname, case in self.cases.items():
             tmp_cases[cname.lower()] = case.copy()
         self.cases = tmp_cases
 
@@ -3580,8 +3580,8 @@ class Cases:
 
         # maybe some cases have tags that others don't, create a set with
         # all the tags that occur
-        for cname, tags in self.cases.iteritems():
-            tag_set.extend(tags.keys())
+        for cname, tags in self.cases.items():
+            tag_set.extend(list(tags.keys()))
         # also add cname as a tag
         tag_set.append('cname')
         # only unique tags
@@ -3589,9 +3589,9 @@ class Cases:
         # and build the df_dict with all the tags
         df_dict = {tag:[] for tag in tag_set}
 
-        for cname, tags in self.cases.iteritems():
+        for cname, tags in self.cases.items():
             current_tags = set(tags.keys())
-            for tag, value in tags.iteritems():
+            for tag, value in tags.items():
                 df_dict[tag].append(value)
             # and the missing ones
             for tag in (tag_set - current_tags):
@@ -3665,7 +3665,7 @@ class Cases:
             return ValueError, 'Only one case allowed in refcase dict'
 
         # take an arbritrary case as baseline for comparison
-        refcase = refcase_dict[refcase_dict.keys()[0]]
+        refcase = refcase_dict[list(refcase_dict.keys())[0]]
         #reftags = sim_dict[refcase]
 
         diffdict = dict()
@@ -3862,7 +3862,7 @@ class Cases:
             """
             add a new channel to the df_dict format of ch_df
             """
-            for col, value in kwargs.iteritems():
+            for col, value in kwargs.items():
                 df_dict[col].append(value)
             for col in (self.res.cols - set(kwargs.keys())):
                 df_dict[col].append('')
@@ -3894,7 +3894,7 @@ class Cases:
 
         # get some basic parameters required to calculate statistics
         try:
-            case = self.cases.keys()[0]
+            case = list(self.cases.keys())[0]
         except IndexError:
             print('no cases to select so no statistics, aborting ...')
             return None
@@ -3914,7 +3914,7 @@ class Cases:
         df_dict = None
         add_stats = True
 
-        for ii, (cname, case) in enumerate(self.cases.iteritems()):
+        for ii, (cname, case) in enumerate(self.cases.items()):
 
             # build the basic df_dict if not defined
             if df_dict is None:
@@ -4080,7 +4080,7 @@ class Cases:
 
             if save_new_sigs and new_sigs.shape[1] > 0:
                 chis, keys = [], []
-                for key, value in ch_dict_new.iteritems():
+                for key, value in ch_dict_new.items():
                     chis.append(value['chi'])
                     keys.append(key)
                 # sort on channel number, so it agrees with the new_sigs array
@@ -4113,7 +4113,7 @@ class Cases:
             # when different cases have a different number of output channels
             # By default, just take all channels in the result file.
             if ch_sel_init is None:
-                ch_sel = ch_dict.keys()
+                ch_sel = list(ch_dict.keys())
 #                ch_sel = ch_df.ch_name.tolist()
 #                ch_sel = [str(k) for k in ch_sel]
                 print('    selecting all channels for statistics')
@@ -4159,7 +4159,7 @@ class Cases:
             # statistics, fatigue and htc tags will not change
             if add_stats:
                 # statistical parameters
-                for statparam in stats.keys():
+                for statparam in list(stats.keys()):
                     df_dict[statparam] = []
 #                # additional tags
 #                for tag in tags:
@@ -4205,7 +4205,7 @@ class Cases:
 
                 # for all the statistics keys, save the values for the
                 # current channel
-                for statparam in stats.keys():
+                for statparam in list(stats.keys()):
                     df_dict[statparam].append(stats[statparam][chi])
                 # and save the tags from the input htc file in order to
                 # label each different case properly
@@ -4213,7 +4213,7 @@ class Cases:
                     df_dict[tag].append(case[tag])
                 # append any fatigue channels if applicable, otherwise nan
                 if ch_id in fatigue:
-                    for m_fatigue, eq_ in fatigue[ch_id].iteritems():
+                    for m_fatigue, eq_ in fatigue[ch_id].items():
                         df_dict[m_fatigue].append(eq_)
                 else:
                     for tag in tags_fatigue:
@@ -4337,7 +4337,7 @@ class Cases:
         # FIXME: this approach will result in twice the memory useage though...
         # we can not pop/delete items from a dict while iterating over it
         df_dict2 = {}
-        for colkey, col in df_dict.iteritems():
+        for colkey, col in df_dict.items():
             # if we have a list, convert to string
             if type(col[0]).__name__ == 'list':
                 for ii, item in enumerate(col):
@@ -4438,7 +4438,7 @@ class Cases:
 
         # get some basic parameters required to calculate statistics
         try:
-            case = self.cases.keys()[0]
+            case = list(self.cases.keys())[0]
         except IndexError:
             print('no cases to select so no statistics, aborting ...')
             return None
@@ -4449,13 +4449,13 @@ class Cases:
         else:
             sim_id = new_sim_id
         # we assume the run_dir (root) is the same every where
-        run_dir = self.cases[self.cases.keys()[0]]['[run_dir]']
+        run_dir = self.cases[list(self.cases.keys())[0]]['[run_dir]']
         path = os.path.join(run_dir, res_dir)
 
         if fh_lst is None:
             wb = WeibullParameters()
-            if 'Weibull' in self.config.keys():
-                for key in self.config['Weibull'].keys():
+            if 'Weibull' in self.config:
+                for key in self.config['Weibull']:
                     setattr(wb, key, self.config['Weibull'][key])
 
             dlc_dict = dlc_ft.dlc_dict(Vin=wb.Vin, Vr=wb.Vr, Vout=wb.Vout,
@@ -4474,7 +4474,7 @@ class Cases:
         # ---------------------------------------------------------------------
         # available material constants
         ms, cols = [], []
-        for key in dfs.keys():
+        for key in dfs:
             if key[:2] == 'm=':
                 ms.append(key)
         # when multiple DLC cases are included, add extra cols to identify each
@@ -4581,7 +4581,7 @@ class Cases:
 
         # get some basic parameters required to calculate statistics
         try:
-            case = self.cases.keys()[0]
+            case = list(self.cases.keys())[0]
         except IndexError:
             print('no cases to select so no statistics, aborting ...')
             return None
@@ -4592,13 +4592,13 @@ class Cases:
         else:
             sim_id = new_sim_id
         # we assume the run_dir (root) is the same every where
-        run_dir = self.cases[self.cases.keys()[0]]['[run_dir]']
+        run_dir = self.cases[list(self.cases.keys())[0]]['[run_dir]']
         path = os.path.join(run_dir, res_dir)
 
         if fh_lst is None:
             wb = WeibullParameters()
-            if 'Weibull' in self.config.keys():
-                for key in self.config['Weibull'].keys():
+            if 'Weibull' in self.config:
+                for key in self.config['Weibull']:
                     setattr(wb, key, self.config['Weibull'][key])
             dlc_dict = dlc_ft.dlc_dict(Vin=wb.Vin, Vr=wb.Vr, Vout=wb.Vout,
                                        Vref=wb.Vref, Vstep=wb.Vstep,
@@ -4694,7 +4694,7 @@ class Cases:
 
         df_dict = {}
 
-        for cname, case in self.cases.iteritems():
+        for cname, case in self.cases.items():
 
             # make sure the selected tags exist
             if len(tags) != len(set(case) and tags):
@@ -4704,9 +4704,9 @@ class Cases:
             ch_dict = self.stats_dict[cname]['ch_dict']
 
             if ch_sel is None:
-                ch_sel = { (i, i) for i in ch_dict.keys() }
+                ch_sel = { (i, i) for i in ch_dict }
 
-            for ch_short, ch_name in ch_sel.iteritems():
+            for ch_short, ch_name in ch_sel.items():
 
                 chi = ch_dict[ch_name]['chi']
                 # sig_stat = [(0=value,1=index),statistic parameter, channel]
@@ -4736,7 +4736,7 @@ class Cases:
 
         # and create for each channel a dataframe
         dfs = {}
-        for ch_short, df_values in df_dict.iteritems():
+        for ch_short, df_values in df_dict.items():
             dfs[ch_short] = pd.DataFrame(df_values)
 
         return dfs
@@ -4896,7 +4896,7 @@ class Cases:
         """
         # get some basic parameters required to calculate statistics
         try:
-            case = self.cases.keys()[0]
+            case = list(self.cases.keys())[0]
         except IndexError:
             print('no cases to select so no statistics, aborting ...')
             return None
@@ -4914,7 +4914,7 @@ class Cases:
                            filters=tbl.Filters(complevel=9))
 
         # Create a new group under "/" (root)
-        for ii, (cname, case) in enumerate(self.cases.iteritems()):
+        for ii, (cname, case) in enumerate(self.cases.items()):
 
             groupname = str(cname[:-4])
             groupname = groupname.replace('-', '_')
@@ -5065,7 +5065,7 @@ class ManTurb64(object):
 
     def gen_pbs(cases):
 
-        case0 = cases[cases.keys()[0]]
+        case0 = cases[list(cases.keys())[0]]
         pbs = prepost.PBSScript()
         # make sure the path's end with a trailing separator
         pbs.pbsworkdir = os.path.join(case0['[run_dir]'], '')
@@ -5073,7 +5073,7 @@ class ManTurb64(object):
         pbs.path_pbs_o = os.path.join(case0['[pbs_out_dir]'], '')
         pbs.path_pbs_i = os.path.join(case0['[pbs_in_dir]'], '')
         pbs.check_dirs()
-        for cname, case in cases.iteritems():
+        for cname, case in cases.items():
             base = case['[case_id]']
             pbs.path_pbs_e = os.path.join(case['[pbs_out_dir]'], base + '.err')
             pbs.path_pbs_o = os.path.join(case['[pbs_out_dir]'], base + '.out')
diff --git a/wetb/prepost/dlcdefs.py b/wetb/prepost/dlcdefs.py
index ec98c703..8e4dc006 100644
--- a/wetb/prepost/dlcdefs.py
+++ b/wetb/prepost/dlcdefs.py
@@ -4,15 +4,15 @@ Created on Wed Nov  5 14:01:25 2014
 
 @author: dave
 """
-from __future__ import division
-from __future__ import print_function
+
+
 
 import os
 import unittest
 
 import pandas as pd
 
-import misc
+from . import misc
 
 def casedict2xlsx():
     """
@@ -129,7 +129,7 @@ def vartags_dlcs(master):
         mt['[eigenfreq_dir]'] = 'res_eigen/%s/%s/' % rpl
     mt['[duration]'] = str(float(mt['[time_stop]']) - float(mt['[t0]']))
     # replace nan with empty
-    for ii, jj in mt.iteritems():
+    for ii, jj in mt.items():
         if jj == 'nan':
             mt[ii] = ''
 
@@ -277,7 +277,7 @@ def excel_stabcon(proot, fext='xlsx', pignore=None, sheet=0,
 
     opt_tags = []
 
-    for dlc, df in df_list.iteritems():
+    for dlc, df in df_list.items():
         # replace ';' with False, and Nan(='') with True
         # this is more easy when testing for the presence of stuff compared
         # to checking if a value is either True/False or ''/';'
@@ -291,8 +291,8 @@ def excel_stabcon(proot, fext='xlsx', pignore=None, sheet=0,
         for count, row in df2.iterrows():
             tags_dict = {}
             # construct to dict, convert unicode keys/values to strings
-            for key, value in row.iteritems():
-                if isinstance(value, unicode):
+            for key, value in row.items():
+                if isinstance(value, str):
                     tags_dict[str(key)] = str(value)
                 else:
                     tags_dict[str(key)] = value
@@ -349,8 +349,8 @@ def read_tags_spreadsheet(fname):
     for count, row in df2.iterrows():
         tags_dict = {}
         # construct to dict, convert unicode keys/values to strings
-        for key, value in row.iteritems():
-            if isinstance(value, unicode):
+        for key, value in row.items():
+            if isinstance(value, str):
                 tags_dict[str(key)] = str(value)
             else:
                 tags_dict[str(key)] = value
@@ -378,7 +378,7 @@ class Tests(unittest.TestCase):
         df_list = misc.read_excel_files(self.fpath, fext='xlsx', pignore=None,
                                         sheet=0, pinclude=None)
 
-        df = df_list[df_list.keys()[0]]
+        df = df_list[list(df_list.keys())[0]]
 #        df.fillna('', inplace=True)
 #        df.replace(';', False, inplace=True)
 
diff --git a/wetb/prepost/dlcplots.py b/wetb/prepost/dlcplots.py
index 87d18cdd..695323eb 100644
--- a/wetb/prepost/dlcplots.py
+++ b/wetb/prepost/dlcplots.py
@@ -5,8 +5,8 @@ Created on Tue Sep 16 10:21:11 2014
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 #print(*objects, sep=' ', end='\n', file=sys.stdout)
 
 import os
@@ -31,9 +31,9 @@ import pandas as pd
 #import numpy as np
 
 #import windIO
-import mplutils
-import Simulations as sim
-import dlcdefs
+from . import mplutils
+from . import Simulations as sim
+from . import dlcdefs
 
 plt.rc('font', family='serif')
 plt.rc('xtick', labelsize=10)
@@ -390,7 +390,7 @@ def plot_stats2(sim_ids, post_dirs, fig_dir_base=None, labels=None,
         if dlc_name in dlc_ignore:
             continue
         # cycle through all the target plot channels
-        for ch_dscr, ch_names in plot_chans.iteritems():
+        for ch_dscr, ch_names in plot_chans.items():
             # second, group per channel. Note that when the channel names are not
             # identical, we need to manually pick them.
             # figure file name will be the first channel
@@ -744,7 +744,7 @@ class PlotStats(object):
         df.sort(columns=3, inplace=True)
         assert set(df[1].unique()) == set(['blade3', 'blade2', 'blade1'])
 
-        leqs = df.keys()[1:10]
+        leqs = list(df.keys())[1:10]
         df_ext = {leq:[] for leq in leqs}
         df_ext['node'] = []
         df_ext['comp'] = []
@@ -789,7 +789,7 @@ class PlotPerf(object):
         self.t0, self.t1 = time[0], time[-1]
 
         # find the wind speed
-        for channame, chan in res.ch_dict.iteritems():
+        for channame, chan in res.ch_dict.items():
             if channame.startswith('windspeed-global-Vy-0.00-0.00'):
                 break
         wind = res.sig[:,chan['chi']]
@@ -980,7 +980,7 @@ def plot_staircase(sim_ids, post_dirs, run_dirs, fig_dir_base=None,
         t0, t1 = time[0], time[-1]
 
         # find the wind speed
-        for channame, chan in res.ch_dict.iteritems():
+        for channame, chan in res.ch_dict.items():
             if channame.startswith('windspeed-global-Vy-0.00-0.00'):
                 break
         wind = res.sig[:,chan['chi']]
diff --git a/wetb/prepost/dlctemplate.py b/wetb/prepost/dlctemplate.py
index 00be5a34..b0e6d77c 100755
--- a/wetb/prepost/dlctemplate.py
+++ b/wetb/prepost/dlctemplate.py
@@ -4,8 +4,8 @@ Created on Thu Sep 18 13:00:25 2014
 
 @author: dave
 """
-from __future__ import division
-from __future__ import print_function
+
+
 
 import os
 import socket
@@ -16,11 +16,11 @@ from argparse import ArgumentParser
 from matplotlib import pyplot as plt
 #import matplotlib as mpl
 
-import Simulations as sim
+from . import Simulations as sim
 #import misc
 #import windIO
-import dlcdefs
-import dlcplots
+from . import dlcdefs
+from . import dlcplots
 
 plt.rc('font', family='serif')
 plt.rc('xtick', labelsize=10)
@@ -147,7 +147,7 @@ def variable_tag_func(master, case_id_short=False):
         mt['[eigenfreq_dir]'] = 'res_eigen/%s/' % rpl
     mt['[duration]'] = str(float(mt['[time_stop]']) - float(mt['[t0]']))
     # replace nan with empty
-    for ii, jj in mt.iteritems():
+    for ii, jj in mt.items():
         if jj == 'nan':
             mt[ii] = ''
 
@@ -326,7 +326,7 @@ def post_launch(sim_id, statistics=True, rem_failed=True, check_logs=True,
                           'hub3-blade3-node-%03i-momentvec-y' % nn_blr]]
         i0, i1 = 0, -1
 
-        tags = cc.cases[cc.cases.keys()[0]].keys()
+        tags = list(cc.cases[list(cc.cases.keys())[0]].keys())
         add = None
         # general statistics for all channels channel
         df_stats = cc.statistics(calc_mech_power=True, i0=i0, i1=i1,
diff --git a/wetb/prepost/filters.py b/wetb/prepost/filters.py
index a4f7ae9a..8e96fb2a 100644
--- a/wetb/prepost/filters.py
+++ b/wetb/prepost/filters.py
@@ -5,15 +5,15 @@ Created on Sun Jan 20 18:14:02 2013
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 
 import numpy as np
 import scipy as sp
 
-import DataChecks as chk
-from misc import calc_sample_rate
-import mplutils
+from . import DataChecks as chk
+from .misc import calc_sample_rate
+from . import mplutils
 
 
 class Filters:
@@ -61,11 +61,11 @@ class Filters:
         """
 
         if x.ndim != 1:
-            raise ValueError, "smooth only accepts 1 dimension arrays."
+            raise ValueError("smooth only accepts 1 dimension arrays.")
 
         if x.size < window_len:
             msg = "Input vector needs to be bigger than window size."
-            raise ValueError, msg
+            raise ValueError(msg)
 
         if window_len<3:
             return x
@@ -74,7 +74,7 @@ class Filters:
         if not window in windowlist:
             msg = "Window should be 'flat', 'hanning', 'hamming', 'bartlett',"
             msg += " or 'blackman'"
-            raise ValueError, msg
+            raise ValueError(msg)
 
         s = np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
         #print(len(s))
diff --git a/wetb/prepost/h2_vs_hs2.py b/wetb/prepost/h2_vs_hs2.py
index 132a325a..d2179269 100644
--- a/wetb/prepost/h2_vs_hs2.py
+++ b/wetb/prepost/h2_vs_hs2.py
@@ -5,8 +5,8 @@ Created on Mon Nov  2 15:23:15 2015
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 
 import os
 
@@ -15,10 +15,10 @@ import numpy as np
 import pandas as pd
 from matplotlib import pyplot as plt
 
-import Simulations as sim
-import dlcdefs
-import hawcstab2 as hs2
-import mplutils
+from . import Simulations as sim
+from . import dlcdefs
+from . import hawcstab2 as hs2
+from . import mplutils
 
 
 class Configurations:
@@ -453,7 +453,7 @@ class Sims(object):
             self.p_root = '/mnt/hawc2sim/h2_vs_hs2'
         else:
             msg='unsupported runmethod, options: none, local, gorm or opt'
-            raise ValueError, msg
+            raise ValueError(msg)
 
         if not runmethod == 'here':
             self.P_RUN = os.path.join(self.p_root, self.PROJECT, self.sim_id)
@@ -747,7 +747,7 @@ class MappingsH2HS2(object):
         df_mean = pd.DataFrame()
         df_std = pd.DataFrame()
 
-        for key, value in mappings.iteritems():
+        for key, value in mappings.items():
             tmp = df_stats[df_stats['channel']==key]
             df_mean[value] = tmp['mean'].values.copy()
             df_std[value] = tmp['std'].values.copy()
@@ -762,14 +762,14 @@ class MappingsH2HS2(object):
 
     def _powercurve_hs2(self, fname):
 
-        mappings = {u'P [kW]'  :'P_aero',
-                    u'T [kN]'  :'T_aero',
-                    u'V [m/s]' :'windspeed'}
+        mappings = {'P [kW]'  :'P_aero',
+                    'T [kN]'  :'T_aero',
+                    'V [m/s]' :'windspeed'}
 
         df_pwr, units = self.hs2_res.load_pwr_df(fname)
 
         self.pwr_hs = pd.DataFrame()
-        for key, value in mappings.iteritems():
+        for key, value in mappings.items():
             self.pwr_hs[value] = df_pwr[key].values.copy()
 
     def blade_distribution(self, fname_h2, fname_hs2, h2_df_stats=None,
@@ -788,28 +788,28 @@ class MappingsH2HS2(object):
         """Read a HAWCStab2 *.ind file (blade distribution loading)
         """
 
-        mapping_hs2 =  {u's [m]'       :'curved_s',
-                        u'CL0 [-]'     :'Cl',
-                        u'CD0 [-]'     :'Cd',
-                        u'CT [-]'      :'Ct',
-                        u'CP [-]'      :'Cp',
-                        u'A [-]'       :'ax_ind',
-                        u'AP [-]'      :'tan_ind',
-                        u'U0 [m/s]'    :'vrel',
-                        u'PHI0 [rad]'  :'inflow_angle',
-                        u'ALPHA0 [rad]':'AoA',
-                        u'X_AC0 [m]'   :'pos_x',
-                        u'Y_AC0 [m]'   :'pos_y',
-                        u'Z_AC0 [m]'   :'pos_z',
-                        u'UX0 [m]'     :'def_x',
-                        u'UY0 [m]'     :'def_y',
-                        u'Tors. [rad]' :'torsion',
-                        u'Twist[rad]'  :'twist',
-                        u'V_a [m/s]'   :'ax_ind_vel',
-                        u'V_t [m/s]'   :'tan_ind_vel',
-                        u'FX0 [N/m]'   :'F_x',
-                        u'FY0 [N/m]'   :'F_y',
-                        u'M0 [Nm/m]'   :'M'}
+        mapping_hs2 =  {'s [m]'       :'curved_s',
+                        'CL0 [-]'     :'Cl',
+                        'CD0 [-]'     :'Cd',
+                        'CT [-]'      :'Ct',
+                        'CP [-]'      :'Cp',
+                        'A [-]'       :'ax_ind',
+                        'AP [-]'      :'tan_ind',
+                        'U0 [m/s]'    :'vrel',
+                        'PHI0 [rad]'  :'inflow_angle',
+                        'ALPHA0 [rad]':'AoA',
+                        'X_AC0 [m]'   :'pos_x',
+                        'Y_AC0 [m]'   :'pos_y',
+                        'Z_AC0 [m]'   :'pos_z',
+                        'UX0 [m]'     :'def_x',
+                        'UY0 [m]'     :'def_y',
+                        'Tors. [rad]' :'torsion',
+                        'Twist[rad]'  :'twist',
+                        'V_a [m/s]'   :'ax_ind_vel',
+                        'V_t [m/s]'   :'tan_ind_vel',
+                        'FX0 [N/m]'   :'F_x',
+                        'FY0 [N/m]'   :'F_y',
+                        'M0 [Nm/m]'   :'M'}
 
         try:
             hs2_cols = [k for k in mapping_hs2]
@@ -819,8 +819,8 @@ class MappingsH2HS2(object):
         except KeyError:
             # some results have been created with older HAWCStab2 that did not
             # include CT and CP columns
-            mapping_hs2.pop(u'CT [-]')
-            mapping_hs2.pop(u'CP [-]')
+            mapping_hs2.pop('CT [-]')
+            mapping_hs2.pop('CP [-]')
             hs2_cols = [k for k in mapping_hs2]
             std_cols = [mapping_hs2[k] for k in hs2_cols]
             # select only the HS channels that will be used for the mapping
@@ -834,22 +834,22 @@ class MappingsH2HS2(object):
 #        self.hs_aero['pos_x'] = (-1.0) # self.chord_length / 4.0
 
     def _distribution_h2(self):
-        mapping_h2 =  { u'Radius_s'  :'curved_s',
-                        u'Cl'        :'Cl',
-                        u'Cd'        :'Cd',
-                        u'Ct_local'  :'Ct',
-                        u'Cq_local'  :'Cq',
-                        u'Induc_RPy' :'ax_ind_vel',
-                        u'Induc_RPx' :'tan_ind_vel',
-                        u'Vrel'      :'vrel',
-                        u'Inflow_ang':'inflow_angle',
-                        u'alfa'      :'AoA',
-                        u'pos_RP_x'  :'pos_x',
-                        u'pos_RP_y'  :'pos_y',
-                        u'pos_RP_z'  :'pos_z',
-                        u'Secfrc_RPx':'F_x',
-                        u'Secfrc_RPy':'F_y',
-                        u'Secmom_RPz':'M'}
+        mapping_h2 =  { 'Radius_s'  :'curved_s',
+                        'Cl'        :'Cl',
+                        'Cd'        :'Cd',
+                        'Ct_local'  :'Ct',
+                        'Cq_local'  :'Cq',
+                        'Induc_RPy' :'ax_ind_vel',
+                        'Induc_RPx' :'tan_ind_vel',
+                        'Vrel'      :'vrel',
+                        'Inflow_ang':'inflow_angle',
+                        'alfa'      :'AoA',
+                        'pos_RP_x'  :'pos_x',
+                        'pos_RP_y'  :'pos_y',
+                        'pos_RP_z'  :'pos_z',
+                        'Secfrc_RPx':'F_x',
+                        'Secfrc_RPy':'F_y',
+                        'Secmom_RPz':'M'}
         h2_cols = [k for k in mapping_h2]
         std_cols = [mapping_h2[k] for k in h2_cols]
 
@@ -1086,7 +1086,7 @@ class Plots(object):
     def all_h2_channels(self, results, labels, fpath, channels=None):
         """Results is a list of res (=HAWC2 results object)"""
 
-        for chan, details in results[0].ch_dict.iteritems():
+        for chan, details in results[0].ch_dict.items():
             if channels is None or chan not in channels:
                 continue
             resp = []
diff --git a/wetb/prepost/hawcstab2.py b/wetb/prepost/hawcstab2.py
index 60240bd0..5be756a2 100644
--- a/wetb/prepost/hawcstab2.py
+++ b/wetb/prepost/hawcstab2.py
@@ -5,8 +5,8 @@ Created on Tue Jan 14 14:12:58 2014
 @author: dave
 """
 
-from __future__ import print_function
-from __future__ import division
+
+
 import unittest
 import os
 import re
@@ -14,7 +14,7 @@ import re
 import numpy as np
 import pandas as pd
 
-import mplutils
+from . import mplutils
 
 
 class dummy:
diff --git a/wetb/prepost/misc.py b/wetb/prepost/misc.py
index fafdfb50..0c7bd3bb 100644
--- a/wetb/prepost/misc.py
+++ b/wetb/prepost/misc.py
@@ -7,8 +7,8 @@ Library for general stuff
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 #print(*objects, sep=' ', end='\n', file=sys.stdout)
 import os
 import sys
@@ -104,7 +104,7 @@ def unique(s):
     except TypeError:
         del u  # move on to the next method
     else:
-        return u.keys()
+        return list(u.keys())
 
     # We can't hash all the elements.  Second fastest is to sort,
     # which brings the equal elements together; then duplicates are
@@ -273,7 +273,7 @@ def find0(array, xi=0, yi=1, verbose=False, zerovalue=0.0):
     neg0i = isort[0]
     sign = int(np.sign(array[neg0i,xi]))
     # only search for ten points
-    for i in xrange(1,20):
+    for i in range(1,20):
         # first time we switch sign, we have it
         if int(np.sign(array[isort[i],xi])) is not sign:
             pos0i = isort[i]
@@ -335,7 +335,7 @@ def remove_items(list, value):
     """
     # remove list entries who are equal to value
     ind_del = []
-    for i in xrange(len(list)):
+    for i in range(len(list)):
         if list[i] == value:
             # add item at the beginning of the list
             ind_del.insert(0, i)
@@ -393,7 +393,7 @@ class DictDB(object):
             # column value
             init = True
             alltrue = True
-            for col_search, val_search in dict_search.items():
+            for col_search, val_search in list(dict_search.items()):
                 # for backwards compatibility, convert val_search to list
                 if not type(val_search).__name__ in ['set', 'list']:
                     # conversion to set is more costly than what you gain
@@ -441,7 +441,7 @@ class DictDB(object):
             # and see for each row if its name contains the search strings
             init = True
             alltrue = True
-            for col_search, inc_exc in dict_search.iteritems():
+            for col_search, inc_exc in dict_search.items():
                 # is it inclusive the search string or exclusive?
                 if (row.find(col_search) > -1) == inc_exc:
                     if init:
@@ -747,7 +747,7 @@ def check_df_dict(df_dict):
     makes sense
     """
     collens = {}
-    for col, values in df_dict.iteritems():
+    for col, values in df_dict.items():
         print('%6i : %s' % (len(values), col))
         collens[col] = len(values)
     return collens
@@ -870,10 +870,10 @@ def rebin(hist, bins, nrbins):
     bins_ = np.linspace(bins[0], bins[-1], num=nrbins+1)
 
     if width_ < width:
-        raise(ValueError, 'you can only rebin to larger bins')
+        raise ValueError('you can only rebin to larger bins')
 
     if not len(hist)+1 == len(bins):
-        raise(ValueError, 'bins should contain the bin edges')
+        raise ValueError('bins should contain the bin edges')
 
     window, j = width, 0
 #    print('width:', width)
@@ -964,7 +964,7 @@ def df_dict_check_datatypes(df_dict):
     # FIXME: this approach will result in twice the memory useage though...
     # we can not pop/delete items from a dict while iterating over it
     df_dict2 = {}
-    for colkey, col in df_dict.iteritems():
+    for colkey, col in df_dict.items():
         # if we have a list, convert to string
         if type(col[0]).__name__ == 'list':
             for ii, item in enumerate(col):
diff --git a/wetb/prepost/mplutils.py b/wetb/prepost/mplutils.py
index 26bc47d5..1090368c 100644
--- a/wetb/prepost/mplutils.py
+++ b/wetb/prepost/mplutils.py
@@ -5,8 +5,8 @@ Created on Wed Nov 23 11:22:50 2011
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 
 # external libraries
 import numpy as np
diff --git a/wetb/prepost/prepost.py b/wetb/prepost/prepost.py
index e06cc6c1..0aa94dd5 100644
--- a/wetb/prepost/prepost.py
+++ b/wetb/prepost/prepost.py
@@ -5,8 +5,8 @@ Created on Tue Mar 10 18:47:32 2015
 @author: dave
 """
 
-from __future__ import division
-from __future__ import print_function
+
+
 
 import os
 import copy
diff --git a/wetb/prepost/windIO.py b/wetb/prepost/windIO.py
index 2e229917..62108d1c 100755
--- a/wetb/prepost/windIO.py
+++ b/wetb/prepost/windIO.py
@@ -5,8 +5,8 @@ Created on Thu Apr  3 19:53:59 2014
 @author: dave
 """
 
-from __future__ import division # always devide as floats
-from __future__ import print_function
+ # always devide as floats
+
 #print(*objects, sep=' ', end='\n', file=sys.stdout)
 
 __author__ = 'David Verelst'
@@ -32,7 +32,7 @@ import pandas as pd
 
 # misc is part of prepost, which is available on the dtu wind gitlab server:
 # https://gitlab.windenergy.dtu.dk/dave/prepost
-import misc
+from . import misc
 # wind energy python toolbox, available on the dtu wind redmine server:
 # http://vind-redmine.win.dtu.dk/projects/pythontoolbox/repository/show/fatigue_tools
 import fatigue
@@ -252,7 +252,7 @@ class LoadResults:
 
     def read_bin(self, scale_factors, usecols=False):
         if not usecols:
-            usecols = range(0, self.Nch)
+            usecols = list(range(0, self.Nch))
         fid = open(os.path.join(self.file_path, self.file_name) + '.dat', 'rb')
         self.sig = np.zeros( (self.N, len(usecols)) )
         for j, i in enumerate(usecols):
@@ -901,12 +901,12 @@ class LoadResults:
         """
         # identify all the different columns
         cols = set()
-        for ch_name, channelinfo in self.ch_dict.iteritems():
+        for ch_name, channelinfo in self.ch_dict.items():
             cols.update(set(channelinfo.keys()))
 
         df_dict = {col:[] for col in cols}
         df_dict['ch_name'] = []
-        for ch_name, channelinfo in self.ch_dict.iteritems():
+        for ch_name, channelinfo in self.ch_dict.items():
             cols_ch = set(channelinfo.keys())
             for col in cols_ch:
                 df_dict[col].append(channelinfo[col])
@@ -1071,7 +1071,7 @@ class LoadResults:
         """
         map_sorting = {}
         # first, sort on channel index
-        for ch_key, ch in self.ch_dict.iteritems():
+        for ch_key, ch in self.ch_dict.items():
             map_sorting[ch['chi']] = ch_key
 
         header = []
@@ -1085,7 +1085,7 @@ class LoadResults:
 
         # and save
         print('saving...', end='')
-        np.savetxt(fname, self.sig[:,map_sorting.keys()], fmt=fmt,
+        np.savetxt(fname, self.sig[:,list(map_sorting.keys())], fmt=fmt,
                    delimiter=delimiter, header=delimiter.join(header))
         print(fname)
 
@@ -1860,9 +1860,9 @@ class Tests(unittest.TestCase):
         fid.close()
         u = np.zeros((8192,32,32))
 
-        for i in xrange(8192):
-            for j in xrange(32):
-                for k in xrange(32):
+        for i in range(8192):
+            for j in range(32):
+                for k in range(32):
                     u[i,j,k] = turb[ i*1024 + j*32 + k]
 
         u2 = np.reshape(turb, (8192, 32, 32))
@@ -1939,7 +1939,7 @@ class Tests(unittest.TestCase):
         print(struct.unpack("f",fid.read(4))[0])
         # save in a list using struct
         items = (os.path.getsize(fpath + basename + '.wnd')-104)/2
-        data_list = [struct.unpack("h",fid.read(2))[0] for k in xrange(items)]
+        data_list = [struct.unpack("h",fid.read(2))[0] for k in range(items)]
 
 
         fid.seek(104)
@@ -1977,9 +1977,9 @@ class Tests(unittest.TestCase):
         fid.close()
 
         check = []
-        for i in xrange(8192):
-            for j in xrange(32):
-                for k in xrange(32):
+        for i in range(8192):
+            for j in range(32):
+                for k in range(32):
                     check.append(i*1024 + j*32 + k)
 
         qq = np.array(check)
-- 
GitLab