diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 307784f8f8a750aabb68dd96c0239d5553084404..03a585734c5ee4377274a68ab3756ecda0efd92e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -4,4 +4,6 @@ before_script:
 test-3.4:
   image: mmpe/wetb
   script:
-  - python3 setup.py test
\ No newline at end of file
+  #- python3 setup.py test
+  - python3 -m coverage run ./tests/run_pytest.py
+  - python3 -m coverage report -m
\ No newline at end of file
diff --git a/README.md b/README.md
index b8f2fbbb678a74baacf870f4f16355e27f5c5cc1..f019bc546495292a060e55d267991173a96ff24f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,6 @@
+[![build status](https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox/badges/master/build.svg)](https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox/commits/master)
+[![coverage report](https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox/badges/master/coverage.svg)](https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox/commits/master)
 # Introduction
 
 The Wind Energy Toolbox (or ```wetb```, pronounce as wee-tee-bee) is a collection
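For reference, the new CI test step can be reproduced locally from the repository root; a minimal sketch, assuming the coverage package is installed:

    python3 -m coverage run ./tests/run_pytest.py
    python3 -m coverage report -m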
""" # TODO: integrate global post_launch in here self.cases_fail = post_launch(self.cases, save_iter=save_iter, suffix=suffix, path_errorlog=path_errorlog) - if copy_pbs_failed: - copy_pbs_in_failedcases(self.cases_fail, path='pbs_in_fail', + if pbs_failed_path is not False: + copy_pbs_in_failedcases(self.cases_fail, path=pbs_failed_path, silent=silent) if self.rem_failed: @@ -4609,6 +4643,12 @@ class Cases(object): # --------------------------------------------------------------------- # column definitions # --------------------------------------------------------------------- + # FIXME: for backward compatibility, the column name of the unique + # channel name has been changed in the past.... + if 'unique_ch_name' in dfs.columns: + chan_col_name = 'unique_ch_name' + else: + chan_col_name = 'channel' # available material constants ms, cols = [], [] for key in dfs: @@ -4617,7 +4657,7 @@ class Cases(object): # when multiple DLC cases are included, add extra cols to identify each # DLC group. Make a copy, because extra_cols does not get re-initiated # when defined as an optional keyword argument - extra_cols_ = copy.copy(extra_cols + ['channel']) + extra_cols_ = copy.copy(extra_cols + [chan_col_name]) cols = copy.copy(ms) cols.extend(extra_cols_) # --------------------------------------------------------------------- @@ -4628,7 +4668,7 @@ class Cases(object): dfs = dfs.set_index('[case_id]') # which rows to keep: a # select for each channel all the cases - for grname, gr in dfs.groupby(dfs.channel): + for grname, gr in dfs.groupby(dfs[chan_col_name]): # if one m has any nan's, assume none of them are good and throw # away # if np.isnan(gr[ms[0]].values).any(): diff --git a/wetb/prepost/dlctemplate.py b/wetb/prepost/dlctemplate.py index 22a1899a14fc17d8c0be6debd2e77c702b787b2e..07213815eefd7877dac5d103a8ec99fcf3ad66b2 100644 --- a/wetb/prepost/dlctemplate.py +++ b/wetb/prepost/dlctemplate.py @@ -266,7 +266,7 @@ def post_launch(sim_id, statistics=True, rem_failed=True, check_logs=True, m=[1, 3, 4, 5, 6, 8, 10, 12, 14], neq=None, no_bins=46, years=20.0, fatigue=True, A=None, AEP=False, save_new_sigs=False, envelopeturbine=False, envelopeblade=False, - save_iter=False): + save_iter=False, pbs_failed_path=False): # ========================================================================= # check logfiles, results files, pbs output files @@ -286,7 +286,7 @@ def post_launch(sim_id, statistics=True, rem_failed=True, check_logs=True, cc.cases[case]['[run_dir]'] = force_dir if check_logs: - cc.post_launch(save_iter=save_iter) + cc.post_launch(save_iter=save_iter, pbs_failed_path=pbs_failed_path) elif rem_failed: cc.remove_failed() @@ -360,6 +360,11 @@ if __name__ == '__main__': dest='prep', help='create htc, pbs, files') parser.add_argument('--check_logs', action='store_true', default=False, dest='check_logs', help='check the log files') + parser.add_argument('--pbs_failed_path', default='pbs_in_fail', type=str, + action='store', dest='pbs_failed_path', + help='Copy pbs launch files of the failed cases to a ' + 'new directory in order to prepare a re-run. 
diff --git a/wetb/prepost/dlctemplate.py b/wetb/prepost/dlctemplate.py
index 22a1899a14fc17d8c0be6debd2e77c702b787b2e..07213815eefd7877dac5d103a8ec99fcf3ad66b2 100644
--- a/wetb/prepost/dlctemplate.py
+++ b/wetb/prepost/dlctemplate.py
@@ -266,7 +266,7 @@ def post_launch(sim_id, statistics=True, rem_failed=True, check_logs=True,
                 m=[1, 3, 4, 5, 6, 8, 10, 12, 14], neq=None, no_bins=46,
                 years=20.0, fatigue=True, A=None, AEP=False, save_new_sigs=False,
                 envelopeturbine=False, envelopeblade=False,
-                save_iter=False):
+                save_iter=False, pbs_failed_path=False):
 
     # =========================================================================
     # check logfiles, results files, pbs output files
@@ -286,7 +286,7 @@ def post_launch(sim_id, statistics=True, rem_failed=True, check_logs=True,
             cc.cases[case]['[run_dir]'] = force_dir
 
     if check_logs:
-        cc.post_launch(save_iter=save_iter)
+        cc.post_launch(save_iter=save_iter, pbs_failed_path=pbs_failed_path)
     elif rem_failed:
         cc.remove_failed()
 
@@ -360,6 +360,11 @@ if __name__ == '__main__':
                         dest='prep', help='create htc, pbs, files')
     parser.add_argument('--check_logs', action='store_true', default=False,
                         dest='check_logs', help='check the log files')
+    parser.add_argument('--pbs_failed_path', default='pbs_in_fail', type=str,
+                        action='store', dest='pbs_failed_path',
+                        help='Copy pbs launch files of the failed cases to a '
+                        'new directory in order to prepare a re-run. Default '
+                        'value: pbs_in_fail.')
     parser.add_argument('--stats', action='store_true', default=False,
                         dest='stats', help='calculate statistics and 1Hz '
                         'equivalent loads')
@@ -443,7 +448,7 @@
                     force_dir=P_RUN, saveinterval=2000, csv=opt.csv,
                     statistics=opt.stats, years=opt.years, neq=opt.neq,
                     fatigue=opt.fatigue, A=opt.rotarea, AEP=opt.AEP,
-                    no_bins=opt.no_bins,
+                    no_bins=opt.no_bins, pbs_failed_path=opt.pbs_failed_path,
                     save_new_sigs=opt.save_new_sigs, save_iter=False,
                     envelopeturbine=opt.envelopeturbine,
                     envelopeblade=opt.envelopeblade)
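A minimal sketch of the corresponding call when dlctemplate is driven from Python instead of the command line; the sim_id 'demo' is an assumption for illustration:

    from wetb.prepost.dlctemplate import post_launch

    # check the log files and stage the pbs launch files of the failed
    # cases in pbs_in_fail/ for a re-run
    post_launch('demo', check_logs=True, pbs_failed_path='pbs_in_fail')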
diff --git a/wetb/prepost/misc.py b/wetb/prepost/misc.py
index f25b97897d3060b4591d7160c9c4590cc6aba107..d95b2b4fdc30686fada422ccf5930fa875e23b66 100644
--- a/wetb/prepost/misc.py
+++ b/wetb/prepost/misc.py
@@ -19,22 +19,17 @@ from future import standard_library
 standard_library.install_aliases()
 from builtins import object
 
-
-
-#print(*objects, sep=' ', end='\n', file=sys.stdout)
 import os
 import sys
 import shutil
 import unittest
 import pickle
-#from xlrd import open_workbook
 
 import numpy as np
 import scipy as sp
 from scipy import optimize as opt
 from scipy import stats
-#import scipy.interpolate
-#import scipy.ndimage
+from scipy.interpolate import griddata as interp
 from matplotlib import pyplot as plt
 import pandas as pd
@@ -950,6 +945,9 @@ def histfit(hist, bin_edges, xnew):
     http://nbviewer.ipython.org/url/xweb.geos.ed.ac.uk/~jsteven5/blog/
     fitting_distributions_from_percentiles.ipynb
 
+    Calculate the CDF of the given PDF and fit a lognorm distribution onto
+    the CDF. This obviously only works well if the PDF itself is lognormal.
+
     Parameters
     ----------
 
@@ -988,6 +986,87 @@ def histfit(hist, bin_edges, xnew):
     return shape_out, scale_out, pdf_fit
 
 
+def histfit_arbritrary(edges, pdf, edges_new, resolution=100):
+    """Re-bin a PDF based on its CDF. Assume a normal distribution within
+    each bin to transform the CDF to a higher resolution.
+
+    Parameters
+    ----------
+
+    edges : ndarray(n+1)
+        edges of the bins, including the left-most and right-most edges
+
+    pdf : ndarray(n)
+        probability of the bins
+
+    edges_new : ndarray(m+1)
+        edges of the new bins
+
+    resolution : int
+        resolution of the intermediate CDF used for the re-fitting
+
+    Returns
+    -------
+
+    centers_new : ndarray(m)
+
+    widths_new : ndarray(m)
+
+    pdf_new : ndarray(m)
+
+    """
+
+    x_hd = np.ndarray((0,))
+    cdf_hd = np.ndarray((0,))
+
+    for i in range(len(pdf)):
+        # FIXME: let the distribution in a bin be a user configurable input
+        # define a distribution within the bin: norm
+        shape = 2.5
+        scale = shape*2/10
+        # high-resolution grid and CDF within the bin
+        x_inc = np.linspace(0, scale*10, num=resolution)
+        cdf_inc = stats.norm.cdf(x_inc, shape, scale=scale)
+
+        # scale cdf_inc and the x-coordinates to the bin at hand
+        cdf_inc_scale = pdf[i] * cdf_inc / cdf_inc[-1]
+        binw = edges[i+1] - edges[i]
+        x_inc_scale = edges[i] + (binw * x_inc / x_inc[-1])
+
+        # append to the new high-resolution coordinates and cdf
+        x_hd = np.append(x_hd, x_inc_scale)
+        if i == 0:
+            cdf_i = 0
+        else:
+            cdf_i = cdf_hd[-1]
+        cdf_hd = np.append(cdf_hd, cdf_inc_scale + cdf_i)
+
+#        plt.plot(x_inc, cdf_inc)
+#        plt.plot(x_inc_scale, cdf_inc_scale)
+
+    cdf_new = interp(x_hd, cdf_hd, edges_new)
+    # the last point includes everything that comes after it
+    cdf_new[-1] = 1
+    pdf_new = np.diff(cdf_new)
+    widths_new = np.diff(edges_new)
+    centers_new = edges_new[:-1] + widths_new/2
+    # the first bin also includes everything that came before it
+    pdf_new[0] += cdf_new[0]
+    pdf_new /= pdf_new.sum()
+
+#    plt.plot(x_hd, cdf_hd)
+#    plt.plot(edges_new, cdf_new, 'rs')
+#
+#    plt.bar(edges_new[:-1], pdf_new, width=widths_new, color='b')
+#    plt.bar(edges[:-1], pdf, width=np.diff(edges), color='r', alpha=0.7)
+
+    return centers_new, widths_new, pdf_new
+
+
 def hist_centers2edges(centers):
     """Given the centers of bins, return its edges and bin widths.
     """
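A minimal usage sketch of the new re-binning helper; the toy bin edges and probabilities are assumptions for illustration:

    import numpy as np
    from wetb.prepost.misc import histfit_arbritrary

    edges = np.array([0.0, 1.0, 2.0, 3.0])   # 3 coarse bins
    pdf = np.array([0.2, 0.5, 0.3])          # bin probabilities, sum to 1
    edges_new = np.linspace(0.0, 3.0, 7)     # re-bin onto 6 finer bins
    centers, widths, pdf_new = histfit_arbritrary(edges, pdf, edges_new)
    # pdf_new is re-normalised so that it again sums to 1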