Skip to content
Snippets Groups Projects
Commit b2b098fd authored by mads's avatar mads
Browse files

high_level.py updated

parent 0422f3a2
No related branches found
No related tags found
No related merge requests found
......@@ -12,17 +12,26 @@ import functools
from wetb.hawc2.sel_file import SelFile
from wetb.functions.caching import cache_function
from collections import OrderedDict
HOURS_PR_20YEAR = 20 * 365 * 24
def Weibull(u, k, start, stop, step):
    """Discretised Weibull probability mass per wind-speed bin.

    Each wind speed in [start, stop] (inclusive, spaced by `step`) is the
    centre of a bin of width `step`; the returned dict maps wind speed ->
    probability of the wind speed falling inside that bin.

    Parameters
    ----------
    u : float
        Wind-speed parameter; the scale is 2*u/sqrt(pi), so for k=2
        (Rayleigh) u is the mean wind speed.
    k : float
        Weibull shape parameter.
    start, stop, step : float
        Bin centres, generated as np.arange(start, stop + step, step).
    """
    scale = 2 * u / np.sqrt(np.pi)

    def neg_sf(x):
        # Negative survival function; differencing two of these values
        # gives the probability of the interval between them.
        return -np.exp(-(x / scale) ** k)

    half = step / 2
    centres = np.arange(start, stop + step, step)
    return {wsp: neg_sf(wsp + half) - neg_sf(wsp - half) for wsp in centres}
def Weibull2(u, k, wsp_lst):
    """Weibull probability mass for bins centred on the given wind speeds.

    Interior bin edges are the midpoints between consecutive wind speeds;
    the outermost edges are extrapolated symmetrically from the first and
    last spacings. Returns a list of bin probabilities aligned with
    `wsp_lst` (which must be a numeric array of at least two elements).

    Parameters
    ----------
    u : float
        Wind-speed parameter; the scale is 2*u/sqrt(pi), so for k=2
        (Rayleigh) u is the mean wind speed.
    k : float
        Weibull shape parameter.
    wsp_lst : array-like
        Bin-centre wind speeds (must support numpy slicing/arithmetic).
    """
    scale = 2 * u / np.sqrt(np.pi)

    def neg_sf(x):
        # Negative survival function; differencing yields bin probabilities.
        return -np.exp(-(x / scale) ** k)

    inner = (wsp_lst[1:] + wsp_lst[:-1]) / 2
    first = wsp_lst[0] - (wsp_lst[1] - wsp_lst[0]) / 2
    last = wsp_lst[-1] + (wsp_lst[-1] - wsp_lst[-2]) / 2
    edges = np.r_[first, inner, last]
    return [neg_sf(hi) - neg_sf(lo) for lo, hi in zip(edges[:-1], edges[1:])]
class DLCHighLevel(object):
def __init__(self, filename):
def __init__(self, filename, fail_on_resfile_not_found=False):
self.filename = filename
self.fail_on_resfile_not_found = fail_on_resfile_not_found
wb = xlrd.open_workbook(self.filename)
# Variables
......@@ -38,9 +47,21 @@ class DLCHighLevel(object):
#DLC sheet
sheet = wb.sheet_by_name("DLC")
self.dlc_df = pd.DataFrame({sheet.cell(0, col_index).value.lower(): [sheet.cell(row_index, col_index).value for row_index in range(2, sheet.nrows) if sheet.cell(row_index, 0).value != ""] for col_index in range(sheet.ncols)})
for k in ['name', 'load', 'wsp', 'wdir', 'dlc_dist', 'wsp_dist', 'wdir_dist']:
for k in ['load', 'dlc_dist', 'wsp_dist']:
assert k.lower() in self.dlc_df.keys(), "DLC sheet must have a '%s' column" % k
self.dlc_df['name'] = [n.lower().replace("dlc", "") for n in self.dlc_df['name']]
self.dist_value_keys = [('dlc_dist', 'dlc'), ('wsp_dist', 'wsp')]
self.dist_value_keys.extend([(k, k.replace("_dist", "")) for k in self.dlc_df.keys() if k.endswith("_dist") and k not in ('dlc_dist', 'wsp_dist')])
for i, (dk, vk) in enumerate(self.dist_value_keys):
try:
assert vk in self.dlc_df.keys(), "DLC sheet must have a '%s'-column when having a '%s'-column" % (vk, dk)
except AssertionError as e:
if vk == "dlc" and 'name' in self.dlc_df.keys():
columns = list(self.dlc_df.columns)
columns[columns.index('name')] = 'dlc'
self.dlc_df.columns = columns
else:
raise e
self.dlc_df[vk] = [str(n).lower().replace(vk, "") for n in self.dlc_df[vk]]
if 'psf' not in self.dlc_df:
self.dlc_df['psf'] = 1
......@@ -85,36 +106,90 @@ class DLCHighLevel(object):
wdir = get_lst(dlc_row['wdir'])
return wsp, wdir
def distribution(self, value_key, dist_key, row):
    """Build the value->probability mapping for one DLC row and one
    (value, distribution) column pair.

    Parameters
    ----------
    value_key : str
        Name of the value column (e.g. 'wsp', 'wdir').
    dist_key : str
        Name of the matching distribution column (e.g. 'wsp_dist').
    row : int
        Row index into self.dlc_df.

    Returns
    -------
    OrderedDict
        Maps each (int/float/str-coerced) value to its probability:
        a fraction for percentage cells, the raw '#'-prefixed string for
        count cells, or a Weibull/Rayleigh bin probability.
    """
    values = self.dlc_df[value_key][row]
    # 'start:step:stop' syntax expands to an inclusive range; otherwise the
    # cell is a ','- or '/'-separated list of values.
    # NOTE(review): cell contents are passed through eval() with access to
    # self.__dict__ (so spreadsheet cells may reference e.g. vref) — this
    # executes spreadsheet-supplied code; only open trusted workbooks.
    if ":" in values:
        start, step, stop = [float(eval(v, globals(), self.__dict__)) for v in values.lower().split(":")]
        values = np.arange(start, stop + step, step)
    else:
        values = [(eval(v, globals(), self.__dict__)) for v in str(values).lower().replace("/", ",").split(",")]
    dist = self.dlc_df[dist_key][row]
    if str(dist).lower() == "weibull" or str(dist).lower() == "rayleigh":
        # Both keywords use shape k=2 (i.e. Rayleigh); scale derived from
        # self.vref * 0.2 — presumably the IEC mean wind speed; TODO confirm.
        dist = Weibull2(self.vref * .2, 2, values)
    else:
        def fmt(v):
            # '#'-prefixed cells are kept verbatim (interpreted later as
            # hour counts); empty cells mean probability 0; anything else
            # is a percentage converted to a fraction.
            if "#" in str(v):
                return v
            else:
                if v == "":
                    return 0
                else:
                    return float(v) / 100
        dist = [fmt(v) for v in str(self.dlc_df[dist_key][row]).replace("/", ",").split(",")]
    assert len(values) == len(dist), "Number of %s-values (%d)!= number of %s-values(%d)" % (value_key, len(values), dist_key, len(dist))
    return OrderedDict(zip(map(self.format_tag_value, values), dist))
def fatigue_distribution(self):
    """Return the probability distributions of every fatigue DLC.

    A row counts as a fatigue case when its 'load' cell contains 'F'
    (case-insensitive). For each such row, one OrderedDict
    (value -> probability) is produced per (dist, value) column pair in
    self.dist_value_keys, in that order (dlc first, then wsp, then any
    additional '*_dist' columns).

    Returns
    -------
    dict
        {dlc_id (str): [distribution per dist_value_keys entry]}
    """
    # NOTE: removed a leftover pre-refactor loop that read the dropped
    # 'name' column (renamed to 'dlc' in __init__) — it raised KeyError
    # and its results were overwritten by the loop below anyway.
    fatigue_dist = {}
    for row, load in enumerate(self.dlc_df['load']):
        # Only rows marked as fatigue ('F') contribute.
        if "F" not in str(load).upper():
            continue
        dlc = self.dlc_df[self.dist_value_keys[0][1]][row]
        fatigue_dist[str(dlc)] = [self.distribution(value_key, dist_key, row)
                                  for dist_key, value_key in self.dist_value_keys]
    return fatigue_dist
def files_dict(self):
    """Index all fatigue result (*.sel) files by their filename tags.

    Returns a nested dict keyed by the tag values parsed from each file's
    basename, one level per entry in self.dist_value_keys (dlc first,
    then wsp, ...), with the innermost level holding {'files': [paths]}.

    Assumes basenames look like 'dlc12_wsp10_wdir000...sel', i.e. '_'
    separated '<key><value>' tokens in dist_value_keys order — TODO
    confirm against the result-file naming convention.
    """
    # Without a res_folder, scan res_path and its direct subfolders;
    # otherwise glob each fatigue DLC's folder ('%' in res_folder is a
    # per-dlc format placeholder).
    if not hasattr(self, "res_folder") or self.res_folder == "":
        files = glob.glob(os.path.join(self.res_path, "*.sel")) + glob.glob(os.path.join(self.res_path, "*/*.sel"))
    else:
        files = []
        # Boolean-mask row selection: only fatigue ('F') rows' dlc ids.
        fatigue_dlcs = self.dlc_df[['F' in str(l).upper() for l in self.dlc_df['load']]]['dlc']
        for dlc_id in fatigue_dlcs:
            dlc_id = str(dlc_id)
            if "%" in self.res_folder:
                folder = self.res_folder % dlc_id
            else:
                folder = self.res_folder
            files.extend(glob.glob(os.path.join(self.res_path , folder, "*.sel")))
    # Tag keys, e.g. ('dlc', 'wsp', 'wdir', ...).
    keys = list(zip(*self.dist_value_keys))[1]
    fmt = self.format_tag_value
    # Strip each key prefix from its filename token and coerce to
    # int/float where possible, e.g. 'wsp10' -> 10.
    tags = [[fmt(tag.replace(key, "")) for tag, key in zip(os.path.basename(f).split("_"), keys)] for f in files]
    # NOTE(review): raises IndexError when no files were found — verify
    # that an empty result set is impossible for the intended callers.
    dlc_tags = list(zip(*tags))[0]
    files_dict = {dlc_tag:{} for dlc_tag in dlc_tags}
    # Walk/create one nesting level per tag and append the file at the leaf.
    for tag_row, f in zip(tags, files):
        d = files_dict[tag_row[0]]
        for tag in tag_row[1:]:
            if tag not in d:
                d[tag] = {}
            d = d[tag]
        if 'files' not in d:
            d['files'] = []
        d['files'].append(f)
    return files_dict
def format_tag_value(self, v):
    """Coerce a filename-tag value to int or float where possible.

    Whole numbers become int, other numerics float, and anything that
    cannot be parsed as a number is returned unchanged.
    """
    try:
        numeric = float(v)
        if int(numeric) == numeric:
            return int(numeric)
        return numeric
    except ValueError:
        # Non-numeric tag (e.g. a text label): keep as-is.
        return v
def probability(self, props, f, files):
    """Combine one probability per tag level into a single probability.

    `props` is traversed from the innermost tag outwards. Plain numeric
    entries are multiplied together. A '#'-prefixed entry denotes a count
    of occurrences; it is converted to a probability using the duration of
    result file `f` (count * duration / seconds-per-year) and terminates
    the scan — any entries outer to it are ignored.
    """
    accumulated = 1
    for prop in reversed(props):
        if str(prop).startswith("#"):
            # Occurrence count: weight by this result file's duration
            # relative to one year of seconds.
            duration = SelFile(f).duration
            count_prob = float(prop[1:]) * duration / (60 * 60 * 24 * 365)
            return count_prob * accumulated
        accumulated *= prop
    return accumulated
def file_hour_lst(self):
"""Create a list of (filename, hours_pr_year) that can be used as input for LifeTimeEqLoad
......@@ -127,39 +202,58 @@ class DLCHighLevel(object):
"""
fh_lst = []
dlc_dict = self.fatigue_distribution()
for dlc_id in sorted(dlc_dict.keys()):
dlc_dist, wsp_dict, wdir_dict = dlc_dict[dlc_id]
for wsp in sorted(wsp_dict.keys()):
wsp_dist = wsp_dict[wsp]
for wdir in sorted(wdir_dict.keys()):
wdir_dist = wdir_dict[wdir]
if not hasattr(self, "res_folder"):
folder = ""
elif "%" in self.res_folder:
folder = self.res_folder % dlc_id
dist_dict = self.fatigue_distribution()
files_dict = self.files_dict()
for dlc_id in sorted(dist_dict.keys()):
dlc_id = str(dlc_id)
fmt = self.format_tag_value
def tag_prop_lst(dist_lst):
if len(dist_lst) == 0:
return [[]]
return [[(fmt(tag), prop)] + tl for tl in tag_prop_lst(dist_lst[1:]) for tag, prop in dist_lst[0].items()]
def files_from_tags(self, f_dict, tags):
if len(tags) == 0:
return f_dict['files']
try:
return files_from_tags(self, f_dict[tags[0]], tags[1:])
except KeyError:
if self.dist_value_keys[-len(tags)][1] == "wdir":
try:
return files_from_tags(self, f_dict[tags[0] % 360], tags[1:])
except:
pass
raise
for tag_props in (tag_prop_lst(dist_dict[dlc_id])):
tags, props = zip(*tag_props)
try:
files = (files_from_tags(self, files_dict, tags))
except KeyError:
if self.fail_on_resfile_not_found:
raise FileNotFoundError("Result files for %s not found" % (", ".join(["%s='%s'" % (dv[1], t) for dv, t in zip(self.dist_value_keys, tags)])))
else:
folder = self.res_folder
files = glob.glob(os.path.join(self.res_path , folder, "dlc%s_wsp%02d_wdir%03d*.sel" % (dlc_id, wsp, wdir % 360)))
for f in sorted(files):
if "#" in str(dlc_dist):
duration = SelFile(f).duration
dlc_dist = float(dlc_dist[1:]) * duration / (60 * 60 * 24 * 365)
if "#" in str(wsp_dist):
total = sum([float(v[1:]) for v in wsp_dict.values()])
wsp_dist = float(wsp_dist[1:]) / total
f_prob = dlc_dist * wsp_dist * wdir_dist / len(files)
f_hours_pr_20year = HOURS_PR_20YEAR * f_prob
continue
if files:
f_prob = self.probability(props, files[0], files) / len(files)
f_hours_pr_20year = HOURS_PR_20YEAR * f_prob
for f in files:
fh_lst.append((f, f_hours_pr_20year))
return fh_lst
def dlc_lst(self, load='all'):
    """Return the ids of the DLCs whose load type matches `load`.

    Parameters
    ----------
    load : str
        'all' to include every DLC, otherwise a substring matched
        case-insensitively against the 'load' column (e.g. 'F' or 'U').

    Returns
    -------
    list of str
        DLC ids with any 'dlc' prefix stripped and lower-cased.
    """
    # NOTE: removed a dead pre-refactor line that read the dropped 'name'
    # column (renamed to 'dlc' in __init__) and raised KeyError.
    mask = np.array([load == 'all' or load.lower() in d.lower() for d in self.dlc_df['load']])
    dlc_lst = np.array(self.dlc_df['dlc'])[mask]
    return [v.lower().replace('dlc', '') for v in dlc_lst]
@cache_function
def psf(self):
    """Return the partial safety factor of every DLC.

    Returns
    -------
    dict
        {dlc_id: psf (float)}; an empty psf cell defaults to 1.
    """
    # NOTE: removed an unreachable pre-refactor return statement above this
    # one that read the dropped 'name' column (renamed to 'dlc' in
    # __init__) — it shadowed this return and raised KeyError.
    return {dlc: float((psf, 1)[psf == ""])
            for dlc, psf in zip(self.dlc_df['dlc'], self.dlc_df['psf'])
            if dlc != ""}
if __name__ == "__main__":
dlc_hl = DLCHighLevel(r'X:\NREL5MW\dlc.xlsx')
......
......@@ -27,7 +27,7 @@ class TestDLCHighLevel(unittest.TestCase):
def test_fatigue_distribution_pct(self):
dlc, wsp, wdir = self.dlc_hl.fatigue_distribution()['12']
self.assertEqual(dlc, 0.975)
self.assertEqual(dlc[12], 0.975)
self.assertEqual(min(wsp.keys()), 4)
self.assertEqual(max(wsp.keys()), 26)
self.assertEqual(wsp[4], 0.11002961306549919)
......@@ -66,6 +66,9 @@ class TestDLCHighLevel(unittest.TestCase):
for k in ['name', 'nr', 'description', 'unit', 'statistic', 'ultimate', 'fatigue', 'm', 'neql', 'bearingdamage', 'mindistance', 'maxdistance', 'extremeload']:
self.assertTrue(k in self.dlc_hl.sensor_info().keys(), k)
def test_fail_on_res_not_fount(self):
self.dlc_hl = DLCHighLevel('test_files/DLC_test.xlsx', fail_on_resfile_not_found=True)
self.assertRaisesRegex(FileNotFoundError, "Result files for dlc='12', wsp='6', wdir='-10' not found")
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment