
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 500 additions and 705 deletions
......@@ -3,16 +3,6 @@ Created on 01/10/2014
@author: MMPE
'''
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import int
from builtins import map
from builtins import str
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import pandas as pd
import numpy as np
import glob
......@@ -34,15 +24,20 @@ except NameError as e:
def Weibull(u, k, start, stop, step):
C = 2 * u / np.sqrt(np.pi)
cdf = lambda x :-np.exp(-(x / C) ** k)
return {wsp:-cdf(wsp - step / 2) + cdf(wsp + step / 2) for wsp in np.arange(start, stop + step, step)}
def cdf(x): return -np.exp(-(x / C) ** k)
return {wsp: -cdf(wsp - step / 2) + cdf(wsp + step / 2) for wsp in np.arange(start, stop + step, step)}
def Weibull2(u, k, wsp_lst):
C = 2 * u / np.sqrt(np.pi)
cdf = lambda x :-np.exp(-(x / C) ** k)
edges = np.r_[wsp_lst[0] - (wsp_lst[1] - wsp_lst[0]) / 2, (wsp_lst[1:] + wsp_lst[:-1]) / 2, wsp_lst[-1] + (wsp_lst[-1] - wsp_lst[-2]) / 2]
return [-cdf(e1) + cdf(e2) for wsp, e1, e2 in zip(wsp_lst, edges[:-1], edges[1:])]
def cdf(x): return -np.exp(-(x / C) ** k)
edges = [wsp_lst[0] - (wsp_lst[1] - wsp_lst[0])/2]
edges += [(wsp_lst[i] + wsp_lst[i + 1])/2 for i in range(len(wsp_lst) - 1)]
edges += [wsp_lst[-1] + (wsp_lst[-1] - wsp_lst[-2])/2]
return {wsp_lst[i]: cdf(edges[i + 1]) - cdf(edges[i]) for i in range(len(wsp_lst))}
def Weibull_IEC(Vref, Vhub_lst):
"""Weibull distribution according to IEC 61400-1:2005, page 24
......@@ -64,17 +59,18 @@ def Weibull_IEC(Vref, Vhub_lst):
[ 0.11002961 0.14116891 0.15124155]
"""
Vhub_lst = np.array(Vhub_lst)
#Average wind speed
Vave=.2*Vref
#Rayleigh distribution
Pr = lambda x : 1 - np.exp(-np.pi*(x/(2*Vave))**2)
#Wsp bin edges: [4,6,8] -> [3,5,7,9]
wsp_bin_edges = np.r_[Vhub_lst[0] - (Vhub_lst[1] - Vhub_lst[0]) / 2, (Vhub_lst[1:] + Vhub_lst[:-1]) / 2, Vhub_lst[-1] + (Vhub_lst[-1] - Vhub_lst[-2]) / 2]
#probabilities of 3-5, 5-7, 7-9
# Average wind speed
Vave = .2 * Vref
# Rayleigh distribution
def Pr(x): return 1 - np.exp(-np.pi * (x / (2 * Vave))**2)
# Wsp bin edges: [4,6,8] -> [3,5,7,9]
wsp_bin_edges = np.r_[Vhub_lst[0] - (Vhub_lst[1] - Vhub_lst[0]) / 2, (Vhub_lst[1:] +
Vhub_lst[:-1]) / 2, Vhub_lst[-1] + (Vhub_lst[-1] - Vhub_lst[-2]) / 2]
# probabilities of 3-5, 5-7, 7-9
return np.array([-Pr(e1) + Pr(e2) for e1, e2 in zip(wsp_bin_edges[:-1], wsp_bin_edges[1:])])
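# Usage sketch for Weibull_IEC above (import path taken from the tests further
# down, wetb.dlc.high_level); 2 m/s bins from 4 to 26 m/s for Vref=50:
import numpy as np
from wetb.dlc.high_level import Weibull_IEC
p = Weibull_IEC(Vref=50, Vhub_lst=np.arange(4, 27, 2))
print(p.sum())  # ~0.93; the Rayleigh mass outside the 3-27 m/s bin edges is excluded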
class DLCHighLevel(object):
def __init__(self, filename, fail_on_resfile_not_found=False, shape_k=2.0):
......@@ -85,8 +81,8 @@ class DLCHighLevel(object):
self.shape_k = shape_k
# Variables
df_vars = pd.read_excel(self.filename, sheetname='Variables',
index_col='Name')
df_vars = pd.read_excel(self.filename, 'Variables', index_col='Name')
df_vars.fillna('', inplace=True)
for name, value in zip(df_vars.index, df_vars.Value.values):
setattr(self, name.lower(), value)
......@@ -96,8 +92,8 @@ class DLCHighLevel(object):
"result folder" % self.filename)
self.res_path = os.path.join(os.path.dirname(self.filename), self.res_path)
#DLC sheet
self.dlc_df = pd.read_excel(self.filename, sheetname='DLC', skiprows=[1])
# DLC sheet
self.dlc_df = pd.read_excel(self.filename, 'DLC', skiprows=[1])
# empty strings are now nans, convert back to empty strings
self.dlc_df.fillna('', inplace=True)
# force headers to lower case
......@@ -129,7 +125,7 @@ class DLCHighLevel(object):
self.dlc_df['psf'] = 1
# Sensors sheet
self.sensor_df = pd.read_excel(self.filename, sheetname='Sensors')
self.sensor_df = pd.read_excel(self.filename, 'Sensors')
# empty strings are now nans, convert back to empty strings
self.sensor_df.fillna('', inplace=True)
# force headers to lower case
......@@ -137,9 +133,11 @@ class DLCHighLevel(object):
for k in ['Name', 'Nr']:
assert k.lower() in self.sensor_df.keys(), "Sensor sheet must have a '%s' column" % k
self.sensor_df = self.sensor_df[self.sensor_df.name!=""]
assert not any(self.sensor_df['name'].duplicated()), "Duplicate sensor names: %s" % ",".join(self.sensor_df['name'][self.sensor_df['name'].duplicated()].values)
for k in ['description', 'unit', 'statistic', 'ultimate', 'fatigue', 'm', 'neql', 'extremeload', 'bearingdamage', 'mindistance', 'maxdistance']:
self.sensor_df = self.sensor_df[self.sensor_df.name != ""]
assert not any(self.sensor_df['name'].duplicated()), "Duplicate sensor names: %s" % ",".join(
self.sensor_df['name'][self.sensor_df['name'].duplicated()].values)
for k in ['description', 'unit', 'statistic', 'ultimate', 'fatigue', 'm',
'neql', 'extremeload', 'bearingdamage', 'mindistance', 'maxdistance']:
if k not in self.sensor_df.keys():
self.sensor_df[k] = ""
for _, row in self.sensor_df[self.sensor_df['fatigue'] != ""].iterrows():
......@@ -158,12 +156,14 @@ class DLCHighLevel(object):
if sensors != []:
sensors = np.atleast_1d(sensors)
empty_column = pd.DataFrame([""] * len(self.sensor_df.name))[0]
return self.sensor_df[functools.reduce(np.logical_or, [((self.sensor_df.get(f, empty_column).values != "") | (self.sensor_df.name == f)) for f in sensors])]
return self.sensor_df[functools.reduce(
np.logical_or, [((self.sensor_df.get(f, empty_column).values != "") | (self.sensor_df.name == f)) for f in sensors])]
else:
return self.sensor_df
def dlc_variables(self, dlc):
dlc_row = self.dlc_df[self.dlc_df['name'] == dlc]
def get_lst(x):
if isinstance(x, pd.Series):
x = x.iloc[0]
......@@ -178,14 +178,18 @@ class DLCHighLevel(object):
def distribution(self, value_key, dist_key, row):
values = self.dlc_df[value_key][row]
if ":" in values:
if ":" in str(values):
start, step, stop = [float(eval(v, globals(), self.__dict__)) for v in values.lower().split(":")]
values = np.arange(start, stop + step, step)
else:
try:
values = [(eval(v, globals(), self.__dict__)) for v in str(values).lower().replace("/", ",").split(",")]
except SyntaxError:
values = [(eval(v.lstrip('0'), globals(), self.__dict__)) for v in str(values).lower().replace("/", ",").split(",")]
try:
values = [(eval(v.lstrip('0'), globals(), self.__dict__))
for v in str(values).lower().replace("/", ",").split(",")]
except Exception:
values = str(values).lower().replace("/", ",").split(",")
dist = self.dlc_df[dist_key][row]
if str(dist).lower() == "weibull" or str(dist).lower() == "rayleigh":
......@@ -200,16 +204,18 @@ class DLCHighLevel(object):
else:
return float(v) / 100
dist = [fmt(v) for v in str(self.dlc_df[dist_key][row]).replace("/", ",").split(",")]
assert len(values) == len(dist), "Number of %s-values (%d)!= number of %s-values(%d)" % (value_key, len(values), dist_key, len(dist))
assert len(values) == len(dist), "Number of %s-values (%d)!= number of %s-values(%d)" % (value_key,
len(values), dist_key, len(dist))
return OrderedDict(zip(map(self.format_tag_value, values), dist))
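# The value cells parsed by distribution() follow two conventions: a
# "start:step:stop" range, or a "/"- or ","-separated list whose dist cell then
# holds matching percentages. A standalone sketch of the same parsing
# (parse_value_cell is a hypothetical helper, without the eval of named
# variables done above):
import numpy as np

def parse_value_cell(cell):
    if ":" in str(cell):
        start, step, stop = [float(v) for v in str(cell).split(":")]
        return np.arange(start, stop + step, step)
    return [float(v) for v in str(cell).replace("/", ",").split(",")]

print(parse_value_cell("4:2:24"))    # [ 4.  6. ... 24.]
print(parse_value_cell("0/90/180"))  # [0.0, 90.0, 180.0]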
def fatigue_distribution(self):
fatigue_dist = {}
for row, load in self.dlc_df['load'].iteritems():
for row, load in self.dlc_df['load'].items():
if "F" not in str(load).upper():
continue
dlc = self.dlc_df[self.dist_value_keys[0][1]][row]
fatigue_dist[str(dlc)] = [self.distribution(value_key, dist_key, row) for dist_key, value_key in self.dist_value_keys]
fatigue_dist[str(dlc)] = [self.distribution(value_key, dist_key, row)
for dist_key, value_key in self.dist_value_keys]
return fatigue_dist
def files_dict(self, files=None):
......@@ -235,9 +241,11 @@ class DLCHighLevel(object):
if isinstance(files, list):
pass
elif not hasattr(self, "res_folder") or self.res_folder == "":
files = glob.glob(os.path.join(self.res_path, "*"+ext)) + glob.glob(os.path.join(self.res_path, "*/*"+ext))
if len(files)==0:
raise Exception('No *%s files found in:\n%s or\n%s'%(ext, self.res_path, os.path.join(self.res_path, "*/")))
files = glob.glob(os.path.join(self.res_path, "*" + ext)) + \
glob.glob(os.path.join(self.res_path, "*/*" + ext))
if len(files) == 0:
raise Exception('No *%s files found in:\n%s or\n%s' %
(ext, self.res_path, os.path.join(self.res_path, "*/")))
else:
files = []
for dlc_id in fatigue_dlcs:
......@@ -246,15 +254,16 @@ class DLCHighLevel(object):
folder = self.res_folder % dlc_id
else:
folder = self.res_folder
dlc_files = (glob.glob(os.path.join(self.res_path , folder, "*"+ext)))
if len(dlc_files)==0:
raise Exception('DLC%s included in fatigue analysis, but no *%s files found in:\n%s'%(dlc_id, ext, os.path.join(self.res_path , folder)))
dlc_files = (glob.glob(os.path.join(self.res_path, folder, "*" + ext)))
if len(dlc_files) == 0:
raise Exception('DLC%s included in fatigue analysis, but no *%s files found in:\n%s' %
(dlc_id, ext, os.path.join(self.res_path, folder)))
files.extend(dlc_files)
keys = list(zip(*self.dist_value_keys))[1]
fmt = self.format_tag_value
tags = [[fmt(tag.replace(key, "")) for tag, key in zip(os.path.basename(f).split("_"), keys)] for f in files]
tags = [[fmt(tag.replace(key, "")) for tag, key in zip(os.path.basename(f).lower().split("_"), keys)] for f in files]
dlc_tags = list(zip(*tags))[0]
files_dict = {dlc_tag:{} for dlc_tag in dlc_tags}
files_dict = {dlc_tag: {} for dlc_tag in dlc_tags}
for tag_row, f in zip(tags, files):
d = files_dict[tag_row[0]]
for tag in tag_row[1:]:
......@@ -306,10 +315,12 @@ class DLCHighLevel(object):
dlc_id = str(dlc_id)
fmt = self.format_tag_value
def tag_prop_lst(dist_lst):
if len(dist_lst) == 0:
return [[]]
return [[(fmt(tag), prop)] + tl for tl in tag_prop_lst(dist_lst[1:]) for tag, prop in dist_lst[0].items()]
return [[(fmt(tag), prop)] + tl for tl in tag_prop_lst(dist_lst[1:])
for tag, prop in dist_lst[0].items()]
def files_from_tags(self, f_dict, tags):
if len(tags) == 0:
......@@ -320,7 +331,7 @@ class DLCHighLevel(object):
if self.dist_value_keys[-len(tags)][1] == "wdir":
try:
return files_from_tags(self, f_dict[tags[0] % 360], tags[1:])
except:
except Exception:
pass
raise
......@@ -330,7 +341,8 @@ class DLCHighLevel(object):
files = (files_from_tags(self, files_dict, tags))
except KeyError:
if self.fail_on_resfile_not_found:
raise FileNotFoundError("Result files for %s not found" % (", ".join(["%s='%s'" % (dv[1], t) for dv, t in zip(self.dist_value_keys, tags)])))
raise FileNotFoundError("Result files for %s not found" % (
", ".join(["%s='%s'" % (dv[1], t) for dv, t in zip(self.dist_value_keys, tags)])))
else:
continue
if files:
......@@ -341,19 +353,21 @@ class DLCHighLevel(object):
return fh_lst
def dlc_lst(self, load='all'):
dlc_lst = np.array(self.dlc_df['dlc'])[np.array([load == 'all' or load.lower() in d.lower() for d in self.dlc_df['load']])]
dlc_lst = np.array(self.dlc_df['dlc'])[np.array(
[load == 'all' or load.lower() in d.lower() for d in self.dlc_df['load']])]
return [v.lower().replace('dlc', '') for v in dlc_lst]
@cache_function
def psf(self):
return {dlc: float((psf, 1)[psf == ""]) for dlc, psf in zip(self.dlc_df['dlc'], self.dlc_df['psf']) if dlc != ""}
return {dlc: float((psf, 1)[psf == ""])
for dlc, psf in zip(self.dlc_df['dlc'], self.dlc_df['psf']) if dlc != ""}
if __name__ == "__main__":
dlc_hl = DLCHighLevel(r'X:\DTU10MW\Q0010\DLC_post_betas1.xlsx')
#print (DLCHighLevelInputFile(r'C:\mmpe\Projects\DLC.xlsx').sensor_info(0, 0, 1)['Name'])
#print (dlc_dict()['64'])
#print (dlc_hl.fatigue_distribution()['64'])
print (dlc_hl.file_hour_lst(r"X:\DTU10MW/Q0010/res/"))
# dlc_hl = DLCHighLevel(r'X:\DTU10MW\Q0010\DLC_post_betas1.xlsx')
# #print (DLCHighLevelInputFile(r'C:\mmpe\Projects\DLC.xlsx').sensor_info(0, 0, 1)['Name'])
# #print (dlc_dict()['64'])
# #print (dlc_hl.fatigue_distribution()['64'])
# print(dlc_hl.file_hour_lst(r"X:\DTU10MW/Q0010/res/"))
dlc = DLCHighLevel(r'C:\Users\mmpe\Downloads\Post Processing v7 - FATIGUE.xlsx', fail_on_resfile_not_found=False)
print(dlc.file_hour_lst())
......@@ -3,12 +3,6 @@ Created on 09/10/2014
@author: MMPE
'''
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import unittest
from wetb.dlc.high_level import DLCHighLevel, Weibull, Weibull_IEC
import os
......
from wetb.hawc2.htc_file import HTCFile
from wetb.hawc2.tests import test_files
import os
def main():
if __name__ == '__main__':
# ======================================================================
# load existing htc file
# ======================================================================
path = os.path.dirname(test_files.__file__) + "/simulation_setup/DTU10MWRef6.0/"
htc = HTCFile(path + "htc/DTU_10MW_RWT.htc")
# ======================================================================
# modify wind speed and turbulence intensity
# ======================================================================
htc.wind.wsp = 10
# access wind section and change turbulence intensity
wind = htc.wind
wind.tint = .1
#=======================================================================
# print contents
#=======================================================================
print(htc) # print htc file
print(htc.keys()) # print htc sections
print(wind) # print wind section
#=======================================================================
# change tilt angle
#=======================================================================
orientation = htc.new_htc_structure.orientation
# Two ways to access the relative orientation between towertop and shaft
# 1) Knowing that it is the second relative section:
rel_tt_shaft = orientation.relative__2
# 2) Knowing that the section contains a field "body1" with value "towertop"
rel_tt_shaft = orientation.get_subsection_by_name(name='towertop', field='body1')
rel_tt_shaft.body2_eulerang__2 = 6, 0, 0
print(rel_tt_shaft.body2_eulerang__2)
# ======================================================================
# set time, name and save
# ======================================================================
# set time of simulation, first output section and wind.scale_time_start
htc.set_time(start=5, stop=10, step=0.1)
# change name of logfile, animation, visualization and first output section
htc.set_name("tmp_wsp10_tint0.1_tilt6")
htc.save() # Save the modified htc file as "tmp_wsp10_tint0.1_tilt6"
# ======================================================================
# run simulation
# ======================================================================
# htc.simulate("<path2hawc2>/hawc2mb.exe")
main()
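# The HTCFile calls demonstrated above (wind.wsp, set_name, save) compose into
# small parameter sweeps; a sketch reusing the same DTU 10 MW test model:
from wetb.hawc2.htc_file import HTCFile
from wetb.hawc2.tests import test_files
import os

model_path = os.path.dirname(test_files.__file__) + "/simulation_setup/DTU10MWRef6.0/"
for wsp in [6, 10, 14]:
    htc = HTCFile(model_path + "htc/DTU_10MW_RWT.htc")
    htc.wind.wsp = wsp
    htc.set_name("tmp_wsp%d" % wsp)
    htc.save()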
"""
Created on Wed Oct 10 12:47:10 2018
@author: dave
"""
import os
from wetb.prepost import windIO
from wetb.hawc2 import Hawc2io
from wetb.prepost import hawcstab2
if __name__ == '__main__':
# =============================================================================
# READ HAWC2 RESULT FILE
# =============================================================================
# METHOD A
fname = '../wetb/hawc2/tests/test_files/hawc2io/Hawc2ascii.sel'
res = Hawc2io.ReadHawc2(fname)
sig = res.ReadAll()
# METHOD B
path, file = os.path.dirname(fname), os.path.basename(fname)
res = windIO.LoadResults(path, file)
sig = res.sig
sel = res.ch_details
# result in dataframe with unique channel names (instead of indices)
sig_df = res.sig2df()
ch_df = res.ch_df
# =============================================================================
# READ HAWCStab2 files
# =============================================================================
res = hawcstab2.results()
fname = '../wetb/prepost/tests/data/campbell_diagram.cmb'
df_cmb = res.load_cmb_df(fname)
fname = '../wetb/prepost/tests/data/dtu10mw_v1_defl_u10000.ind'
df_ind = res.load_ind(fname)
fname = '../wetb/prepost/tests/data/dtu10mw.opt'
df_opt = res.load_operation(fname)
fname = '../wetb/prepost/tests/data/dtu10mw_v1.pwr'
df_pwr, units = res.load_pwr_df(fname)
fname = '../wetb/prepost/tests/data/controller_input_quadratic.txt'
tuning = hawcstab2.ReadControlTuning()
tuning.read_parameters(fname)
# tuning parameters are saved as attributes
tuning.pi_gen_reg1.K
tuning.pi_gen_reg2.I
tuning.pi_gen_reg2.Kp
tuning.pi_gen_reg2.Ki
tuning.pi_gen_reg2.Kd
tuning.pi_pitch_reg3.Kp
tuning.pi_pitch_reg3.Ki
tuning.pi_pitch_reg3.Kd
tuning.pi_pitch_reg3.K1
tuning.pi_pitch_reg3.K2
tuning.aero_damp.Kp2
tuning.aero_damp.Ko1
tuning.aero_damp.Ko2
# or you can get them as a dictionary
tune_tags = tuning.parameters2tags()
'''
Created on 03/09/2015
@author: MMPE
'''
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from io import open
from builtins import map
from builtins import range
from builtins import chr
from future import standard_library
standard_library.install_aliases()
import os
import numpy as np
import struct
from itertools import takewhile
def load_output(filename):
"""Load a FAST binary or ascii output file
......@@ -45,25 +32,37 @@ def load_output(filename):
return load_binary_output(filename)
return load_ascii_output(filename)
def load_ascii_output(filename):
with open(filename) as f:
info = {}
info['name'] = os.path.splitext(os.path.basename(filename))[0]
try:
header = [f.readline() for _ in range(8)]
info['description'] = header[4].strip()
info['attribute_names'] = header[6].split()
info['attribute_units'] = [unit[1:-1] for unit in header[7].split()] #removing "()"
data = np.array([line.split() for line in f.readlines()]).astype(np.float)
return data, info
except (ValueError, AssertionError):
raise
# Header is whatever is before the keyword `time`
in_header = True
header = []
while in_header:
l = f.readline()
if not l:
raise Exception('Error finding the end of FAST out file header. Keyword Time missing.')
in_header = (l + ' dummy').lower().split()[0] != 'time'
if in_header:
header.append(l)
else:
info['description'] = header
info['attribute_names'] = l.split()
info['attribute_units'] = [unit[1:-1] for unit in f.readline().split()]
# Data, up to end of file or empty line (potential comment line at the end)
data = np.array([l.strip().split() for l in takewhile(
lambda x: len(x.strip()) > 0, f.readlines())]).astype(float)
return data, info
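# The (data, info) pair returned above maps naturally onto a pandas DataFrame;
# a sketch (pandas and the test-file path are assumptions, not part of this
# module):
import pandas as pd
from wetb.fast.fast_io import load_output

data, info = load_output('tests/test_files/DTU10MW.out')
df = pd.DataFrame(data, columns=info['attribute_names'])
print(df.shape, df.columns[0])  # the first channel is the time column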
def load_binary_output(filename, use_buffer=True):
"""
03/09/15: Ported from ReadFASTbinary.m by Mads M Pedersen, DTU Wind
24/10/18: Low memory/buffered version by E. Branlard, NREL
def load_binary_output(filename):
"""Ported from ReadFASTbinary.m by Mads M Pedersen, DTU Wind
Info about ReadFASTbinary.m:
% Author: Bonnie Jonkman, National Renewable Energy Laboratory
% (c) 2012, National Renewable Energy Laboratory
......@@ -71,87 +70,156 @@ def load_binary_output(filename):
% Edited for FAST v7.02.00b-bjj 22-Oct-2012
"""
def fread(fid, n, type):
fmt, nbytes = {'uint8': ('B', 1), 'int16':('h', 2), 'int32':('i', 4), 'float32':('f', 4), 'float64':('d', 8)}[type]
fmt, nbytes = {
'uint8': ('B', 1),
'int16': ('h', 2),
'int32': ('i', 4),
'float32': ('f', 4),
'float64': ('d', 8)}[type]
return struct.unpack(fmt * n, fid.read(nbytes * n))
FileFmtID_WithTime = 1 #% File identifiers used in FAST
def freadRowOrderTableBuffered(fid, n, type_in, nCols, nOff=0, type_out='float64'):
"""
Reads a row-ordered table from a binary file.
Read `n` data of type `type_in`, assumed to be a row-ordered table of `nCols` columns.
Memory usage is optimized by allocating the data only once.
Buffered reading is done for improved performance (in particular for 32-bit Python).
`nOff` allows for additional column space at the beginning of the storage table.
Typically, `nOff=1` provides a column at the beginning to store the time vector.
@author E.Branlard, NREL
"""
fmt, nbytes = {'uint8': ('B', 1), 'int16': ('h', 2), 'int32': ('i', 4),
'float32': ('f', 4), 'float64': ('d', 8)}[type_in]
nLines = int(n / nCols)
GoodBufferSize = 4096 * 40
nLinesPerBuffer = int(GoodBufferSize / nCols)
BufferSize = nCols * nLinesPerBuffer
nBuffer = int(n / BufferSize)
# Allocation of data
data = np.zeros((nLines, nCols + nOff), dtype=type_out)
# Reading
try:
nIntRead = 0
nLinesRead = 0
while nIntRead < n:
nIntToRead = min(n - nIntRead, BufferSize)
nLinesToRead = int(nIntToRead / nCols)
Buffer = np.array(struct.unpack(fmt * nIntToRead, fid.read(nbytes * nIntToRead)))
Buffer = Buffer.reshape(-1, nCols)
data[nLinesRead:(nLinesRead + nLinesToRead), nOff:(nOff + nCols)] = Buffer
nLinesRead = nLinesRead + nLinesToRead
nIntRead = nIntRead + nIntToRead
except Exception:
raise Exception('Read only %d of %d values in file: %s' % (nIntRead, n, filename))
return data
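# The buffering above replaces one huge struct.unpack with fixed-size chunks;
# a self-contained illustration of the same idea, with io.BytesIO standing in
# for the result file and a toy 5x3 int16 table:
import io
import struct
import numpy as np

nCols, nLines = 3, 5
fid = io.BytesIO(struct.pack('h' * nCols * nLines, *range(nCols * nLines)))
data = np.zeros((nLines, nCols))
nRead = 0
while nRead < nCols * nLines:
    n = min(nCols * nLines - nRead, 2 * nCols)  # two table rows per buffer
    buf = np.array(struct.unpack('h' * n, fid.read(2 * n))).reshape(-1, nCols)
    data[nRead // nCols:nRead // nCols + len(buf)] = buf
    nRead += n
print(data)  # rows 0..4 of the packed table, read two rows at a time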
FileFmtID_WithTime = 1 # % File identifiers used in FAST
FileFmtID_WithoutTime = 2
LenName = 10 #; % number of characters per channel name
LenUnit = 10 #; % number of characters per unit name
FileFmtID_NoCompressWithoutTime = 3
FileFmtID_ChanLen_In = 4 # time channel and channel length is not included
with open(filename, 'rb') as fid:
FileID = fread(fid, 1, 'int16') #; % FAST output file format, INT(2)
FileID = fread(fid, 1, 'int16')[0] # ; % FAST output file format, INT(2)
if FileID not in [FileFmtID_WithTime, FileFmtID_WithoutTime,
FileFmtID_ChanLen_In, FileFmtID_NoCompressWithoutTime]:
raise Exception('FileID not supported {}. Is it a FAST binary file?'.format(FileID))
NumOutChans = fread(fid, 1, 'int32')[0] #; % The number of output channels, INT(4)
NT = fread(fid, 1, 'int32')[0] #; % The number of time steps, INT(4)
if FileID == FileFmtID_WithTime:
TimeScl = fread(fid, 1, 'float64') #; % The time slopes for scaling, REAL(8)
TimeOff = fread(fid, 1, 'float64') #; % The time offsets for scaling, REAL(8)
if FileID == FileFmtID_ChanLen_In:
LenName = fread(fid, 1, 'int16')[0] # Number of characters in channel names and units
else:
TimeOut1 = fread(fid, 1, 'float64') #; % The first time in the time series, REAL(8)
TimeIncr = fread(fid, 1, 'float64') #; % The time increment, REAL(8)
LenName = 10 # default number of characters per channel name
NumOutChans = fread(fid, 1, 'int32')[0] # ; % The number of output channels, INT(4)
NT = fread(fid, 1, 'int32')[0] # ; % The number of time steps, INT(4)
if FileID == FileFmtID_WithTime:
TimeScl = fread(fid, 1, 'float64') # ; % The time slopes for scaling, REAL(8)
TimeOff = fread(fid, 1, 'float64') # ; % The time offsets for scaling, REAL(8)
else:
TimeOut1 = fread(fid, 1, 'float64') # ; % The first time in the time series, REAL(8)
TimeIncr = fread(fid, 1, 'float64') # ; % The time increment, REAL(8)
ColScl = fread(fid, NumOutChans, 'float32') #; % The channel slopes for scaling, REAL(4)
ColOff = fread(fid, NumOutChans, 'float32') #; % The channel offsets for scaling, REAL(4)
if FileID == FileFmtID_NoCompressWithoutTime:
ColScl = np.ones(NumOutChans)
ColOff = np.zeros(NumOutChans)
else:
ColScl = fread(fid, NumOutChans, 'float32') # ; % The channel slopes for scaling, REAL(4)
ColOff = fread(fid, NumOutChans, 'float32') # ; % The channel offsets for scaling, REAL(4)
LenDesc = fread(fid, 1, 'int32')[0] #; % The number of characters in the description string, INT(4)
DescStrASCII = fread(fid, LenDesc, 'uint8') #; % DescStr converted to ASCII
LenDesc = fread(fid, 1, 'int32')[0] # ; % The number of characters in the description string, INT(4)
DescStrASCII = fread(fid, LenDesc, 'uint8') # ; % DescStr converted to ASCII
DescStr = "".join(map(chr, DescStrASCII)).strip()
ChanName = [] # initialize the ChanName cell array
for iChan in range(NumOutChans + 1):
ChanNameASCII = fread(fid, LenName, 'uint8') #; % ChanName converted to numeric ASCII
ChanNameASCII = fread(fid, LenName, 'uint8') # ; % ChanName converted to numeric ASCII
ChanName.append("".join(map(chr, ChanNameASCII)).strip())
ChanUnit = [] # initialize the ChanUnit cell array
for iChan in range(NumOutChans + 1):
ChanUnitASCII = fread(fid, LenUnit, 'uint8') #; % ChanUnit converted to numeric ASCII
ChanUnitASCII = fread(fid, LenName, 'uint8') # ; % ChanUnit converted to numeric ASCII
ChanUnit.append("".join(map(chr, ChanUnitASCII)).strip()[1:-1])
# %-------------------------
# % get the channel time series
# %-------------------------
nPts = NT * NumOutChans #; % number of data points in the file
nPts = NT * NumOutChans # ; % number of data points in the file
if FileID == FileFmtID_WithTime:
PackedTime = fread(fid, NT, 'int32') #; % read the time data
PackedTime = fread(fid, NT, 'int32') # ; % read the time data
cnt = len(PackedTime)
if cnt < NT:
raise Exception('Could not read entire %s file: read %d of %d time values' % (filename, cnt, NT))
PackedData = fread(fid, nPts, 'int16') #; % read the channel data
cnt = len(PackedData)
if cnt < nPts:
raise Exception('Could not read entire %s file: read %d of %d values' % (filename, cnt, nPts))
# %-------------------------
# % Scale the packed binary to real data
# %-------------------------
#
data = np.array(PackedData).reshape(NT, NumOutChans)
data = (data - ColOff) / ColScl
if use_buffer:
# Reading data using buffers, and allowing an offset for time column (nOff=1)
if FileID == FileFmtID_NoCompressWithoutTime:
data = freadRowOrderTableBuffered(fid, nPts, 'float64', NumOutChans, nOff=1, type_out='float64')
else:
data = freadRowOrderTableBuffered(fid, nPts, 'int16', NumOutChans, nOff=1, type_out='float64')
else:
# NOTE: unpacking huge data not possible on 32bit machines
if FileID == FileFmtID_NoCompressWithoutTime:
PackedData = fread(fid, nPts, 'float64') # ; % read the channel data
else:
PackedData = fread(fid, nPts, 'int16') # ; % read the channel data
cnt = len(PackedData)
if cnt < nPts:
raise Exception('Could not read entire %s file: read %d of %d values' % (filename, cnt, nPts))
data = np.array(PackedData).reshape(NT, NumOutChans)
del PackedData
if FileID == FileFmtID_WithTime:
time = (np.array(PackedTime) - TimeOff) / TimeScl;
time = (np.array(PackedTime) - TimeOff) / TimeScl
else:
time = TimeOut1 + TimeIncr * np.arange(NT)
data = np.concatenate([time.reshape(NT, 1), data], 1)
# %-------------------------
# % Scale the packed binary to real data
# %-------------------------
if use_buffer:
# Scaling Data
for iCol in range(NumOutChans):
data[:, iCol + 1] = (data[:, iCol + 1] - ColOff[iCol]) / ColScl[iCol]
# Adding time column
data[:, 0] = time
else:
# NOTE: memory expensive due to time conversion, and concatenation
data = (data - ColOff) / ColScl
data = np.concatenate([time.reshape(NT, 1), data], 1)
info = {'name': os.path.splitext(os.path.basename(filename))[0],
'description': DescStr,
'attribute_names': ChanName,
'attribute_units': ChanUnit}
return data, info
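# The unpacking above recovers physical values as (packed - ColOff) / ColScl
# per channel; a two-line numeric sketch (slope/offset values made up):
import numpy as np
packed = np.array([-32768, 0, 32767], dtype=np.int16)  # full int16 range
ColScl, ColOff = 100.0, -5.0
print((packed - ColOff) / ColScl)  # [-327.63  0.05  327.72]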
'''
Created on 03/09/2015
@author: MMPE
'''
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import unittest
from wetb.fast.fast_io import load_output
from tests import npt
import pytest
import numpy as np
from wetb.fast.fast_io import load_output, load_binary_output
import os
testfilepath = os.path.join(os.path.dirname(__file__), 'test_files/') # test file path
class TestFastIO(unittest.TestCase):
def test_load_output(self):
data, info = load_output(testfilepath + 'DTU10MW.out')
self.assertAlmostEqual(data[4, 3], 4.295E-04)
self.assertEqual(info['name'], "DTU10MW")
self.assertEqual(info['attribute_names'][1], "RotPwr")
self.assertEqual(info['attribute_units'][1], "kW")
def test_load_binary(self):
data, info = load_output(testfilepath + 'test_binary.outb')
self.assertEqual(info['name'], 'test_binary')
self.assertEqual(info['description'], 'Modified by mwDeriveSensors on 27-Jul-2015 16:32:06')
self.assertEqual(info['attribute_names'][4], 'RotPwr')
self.assertEqual(info['attribute_units'][7], 'deg/s^2')
self.assertAlmostEqual(data[10, 4], 138.822277739535)
def test_load_output2(self):
data, info = load_output(testfilepath + 'DTU10MW.out')
self.assertEqual(info['name'], "DTU10MW")
self.assertEqual(info['attribute_names'][1], "RotPwr")
self.assertEqual(info['attribute_units'][1], "kW")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testload_output']
unittest.main()
def test_load_output():
data, info = load_output(testfilepath + 'DTU10MW.out')
npt.assert_equal(data[4, 3], 4.295E-04)
npt.assert_equal(info['name'], "DTU10MW")
npt.assert_equal(info['attribute_names'][1], "RotPwr")
npt.assert_equal(info['attribute_units'][1], "kW")
def test_load_binary():
data, info = load_output(testfilepath + 'test_binary.outb')
npt.assert_equal(info['name'], 'test_binary')
npt.assert_equal(info['description'], 'Modified by mwDeriveSensors on 27-Jul-2015 16:32:06')
npt.assert_equal(info['attribute_names'][4], 'RotPwr')
npt.assert_equal(info['attribute_units'][7], 'deg/s^2')
npt.assert_almost_equal(data[10, 4], 138.822277739535)
def test_load_binary_buffered():
# The old method was not using a buffer and was also memory expensive
# Now use_buffer is set to true by default
import numpy as np
data, info = load_binary_output(testfilepath + 'test_binary.outb', use_buffer=True)
data_old, info_old = load_binary_output(testfilepath + 'test_binary.outb', use_buffer=False)
npt.assert_equal(info['name'], info_old['name'])
np.testing.assert_array_equal(data[0, :], data_old[0, :])
np.testing.assert_array_equal(data[-1, :], data_old[-1, :])
@pytest.mark.parametrize('fid,tol', [(2, 1), (3, 1e-4)])
@pytest.mark.parametrize('buffer', [True, False])
def test_load_bindary_fid(fid, tol, buffer):
data2, info2 = load_output(testfilepath + '5MW_Land_BD_DLL_WTurb_fid04.outb')
data, info = load_binary_output(testfilepath + '5MW_Land_BD_DLL_WTurb_fid%02d.outb' % fid, use_buffer=buffer)
for k, v in info2.items():
if k not in {'name', 'description'}:
npt.assert_array_equal(info[k], v)
r = data.max(0) - data.min(0) + 1e-20
npt.assert_array_less(np.abs(data - data2).max(0), r * tol)
def test_load_output2():
data, info = load_output(testfilepath + 'DTU10MW.out')
npt.assert_equal(info['name'], "DTU10MW")
npt.assert_equal(info['attribute_names'][1], "RotPwr")
npt.assert_equal(info['attribute_units'][1], "kW")
def test_load_output3():
# This file has an extra comment at the end
data, info = load_output(testfilepath + 'FASTOut_Hydro.out')
npt.assert_almost_equal(data[3, 1], -1.0E+01)
File added
File added
File added
These predictions were generated by HydroDyn on 19-Oct-2018 at 10:15:15.
Time Wave1Elev
(sec) (m)
0.0 0.0e+01
1.0 1.0e+01
2.0 0.0e+01
3.0 -1.0e+01
4.0 0.0e+01
This output file was closed on 19-Oct-2018 at 10:15:16.
......@@ -3,13 +3,6 @@ Created on 13/10/2014
@author: MMPE
'''
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
def bearing_damage(angle_moment_lst, m=3, thresshold=0.1):
"""Function ported from Matlab.
......
......@@ -14,13 +14,7 @@ or
- 'rainflow_astm' (based on the c-implementation by Adam Nieslony found at the MATLAB Central File Exchange
http://www.mathworks.com/matlabcentral/fileexchange/3026)
'''
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
import warnings
standard_library.install_aliases()
import numpy as np
from wetb.fatigue_tools.rainflowcounting import rainflowcount
......@@ -71,7 +65,8 @@ def eq_load(signals, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=1, rainflow_func=ra
return [[np.nan] * len(np.atleast_1d(m))] * len(np.atleast_1d(neq))
def eq_load_and_cycles(signals, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=[10 ** 6, 10 ** 7, 10 ** 8], rainflow_func=rainflow_windap):
def eq_load_and_cycles(signals, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=[
10 ** 6, 10 ** 7, 10 ** 8], rainflow_func=rainflow_windap):
"""Calculate combined fatigue equivalent load
Parameters
......@@ -102,12 +97,13 @@ def eq_load_and_cycles(signals, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=[10 ** 6
Edges of the amplitude bins
"""
cycles, ampl_bin_mean, ampl_bin_edges, _, _ = cycle_matrix(signals, no_bins, 1, rainflow_func)
if 0: #to be similar to windap
if 0: # to be similar to windap
ampl_bin_mean = (ampl_bin_edges[:-1] + ampl_bin_edges[1:]) / 2
cycles, ampl_bin_mean = cycles.flatten(), ampl_bin_mean.flatten()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
eq_loads = [[((np.nansum(cycles * ampl_bin_mean ** _m) / _neq) ** (1. / _m)) for _m in np.atleast_1d(m)] for _neq in np.atleast_1d(neq)]
eq_loads = [[((np.nansum(cycles * ampl_bin_mean ** _m) / _neq) ** (1. / _m))
for _m in np.atleast_1d(m)] for _neq in np.atleast_1d(neq)]
return eq_loads, cycles, ampl_bin_mean, ampl_bin_edges
......@@ -152,17 +148,18 @@ def cycle_matrix(signals, ampl_bins=10, mean_bins=10, rainflow_func=rainflow_win
"""
if isinstance(signals[0], tuple):
weights, ampls, means = np.array([(np.zeros_like(ampl)+weight,ampl,mean) for weight, signal in signals for ampl,mean in rainflow_func(signal[:]).T], dtype=np.float64).T
weights, ampls, means = np.array([(np.zeros_like(ampl) + weight, ampl, mean) for weight,
signal in signals for ampl, mean in rainflow_func(signal[:]).T], dtype=np.float64).T
else:
ampls, means = rainflow_func(signals[:])
weights = np.ones_like(ampls)
if isinstance(ampl_bins, int):
ampl_bins = np.linspace(0, 1, num=ampl_bins + 1) * ampls[weights>0].max()
ampl_bins = np.linspace(0, 1, num=ampl_bins + 1) * ampls[weights > 0].max()
cycles, ampl_edges, mean_edges = np.histogram2d(ampls, means, [ampl_bins, mean_bins], weights=weights)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ampl_bin_sum = np.histogram2d(ampls, means, [ampl_bins, mean_bins], weights=weights * ampls)[0]
ampl_bin_mean = np.nanmean(ampl_bin_sum / np.where(cycles,cycles,np.nan),1)
ampl_bin_mean = np.nanmean(ampl_bin_sum / np.where(cycles, cycles, np.nan), 1)
mean_bin_sum = np.histogram2d(ampls, means, [ampl_bins, mean_bins], weights=weights * means)[0]
mean_bin_mean = np.nanmean(mean_bin_sum / np.where(cycles, cycles, np.nan), 1)
cycles = cycles / 2 # to get full cycles
......@@ -213,19 +210,46 @@ def cycle_matrix2(signal, nrb_amp, nrb_mean, rainflow_func=rainflow_windap):
return cycles, ampl_edges, mean_edges
def eq_loads_from_Markov(markov_matrix, m_list, neq=1e7):
"""
Calculate the damage equivalent loads from the cycles and amplitudes
of a Markov matrix for different Woehler slopes.
Parameters
----------
markov_matrix : array (Nbins x 2)
Array containing the number of cycles and mean ampltude for each bin.
Can have extra leading dimensions such as Nsensors x Ndimensions
m_list : list (Nwoehlerslopes)
List containing different Woehler slopes.
neq : int or float, optional
Number of equivalent load cycles. The default is 1e7.
Returns
-------
Array (Nwoehlerslopes)
Array containing the equivalent loads for each Woehler slope and
for each extra leading dimension if any
"""
cycles = markov_matrix[..., 0]
amplitudes = markov_matrix[..., 1]
eq_loads = np.array([((np.nansum(cycles*amplitudes**m, axis=-1)/neq)**(1/m))
for m in m_list])
eq_loads = np.transpose(eq_loads, list(range(1, eq_loads.ndim)) + [0])
return eq_loads
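# Worked example for eq_loads_from_Markov (bin values chosen for illustration):
# with bins (cycles, amplitude) = (1000, 5) and (10, 20), the m=3 equivalent
# load at neq=1e7 is ((1000 * 5**3 + 10 * 20**3) / 1e7)**(1 / 3) ~ 0.274.
markov_matrix = np.array([[1000., 5.], [10., 20.]])  # Nbins x (cycles, ampl)
print(eq_loads_from_Markov(markov_matrix, m_list=[3, 10], neq=1e7))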
if __name__ == "__main__":
signal1 = np.array([-2.0, 0.0, 1.0, 0.0, -3.0, 0.0, 5.0, 0.0, -1.0, 0.0, 3.0, 0.0, -4.0, 0.0, 4.0, 0.0, -2.0])
signal2 = signal1 * 1.1
# equivalent load for default wohler slopes
print (eq_load(signal1, no_bins=50, neq=17, rainflow_func=rainflow_windap))
print (eq_load(signal1, no_bins=50, neq=17, rainflow_func=rainflow_astm))
print(eq_load(signal1, no_bins=50, neq=17, rainflow_func=rainflow_windap))
print(eq_load(signal1, no_bins=50, neq=17, rainflow_func=rainflow_astm))
# Cycle matrix with 4 amplitude bins and 4 mean value bins
print (cycle_matrix(signal1, 4, 4, rainflow_func=rainflow_windap))
print (cycle_matrix(signal1, 4, 4, rainflow_func=rainflow_astm))
print(cycle_matrix(signal1, 4, 4, rainflow_func=rainflow_windap))
print(cycle_matrix(signal1, 4, 4, rainflow_func=rainflow_astm))
# Cycle matrix where signal1 and signal2 contributes with 50% each
print (cycle_matrix([(.5, signal1), (.5, signal2)], 4, 8, rainflow_func=rainflow_astm))
print(cycle_matrix([(.5, signal1), (.5, signal2)], 4, 8, rainflow_func=rainflow_astm))
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import cython
from numba import njit
import numpy as np
# cimport numpy as np
@cython.locals(p=cython.int, q=cython.int, f=cython.int, flow=list, k=cython.int, n=cython.int, ptr=cython.int)
def pair_range_amplitude(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
@njit(cache=True) # jit faster than previous cython compiled extension
def pair_range_amplitude(x):
"""
Returns a list of half-cycle-amplitudes
x: Peak-Trough sequence (integer list of local minima and maxima)
......@@ -75,11 +67,8 @@ def pair_range_amplitude(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
return flow
@cython.locals(p=cython.int, q=cython.int, f=cython.int, flow=list, k=cython.int, n=cython.int, ptr=cython.int)
def pair_range_from_to(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
@njit(cache=True)
def pair_range_from_to(x):
"""
Returns a from_level-to_level-matrix of half-cycles
x: Peak-Trough sequence (integer list of local minima and maxima)
......@@ -112,9 +101,9 @@ def pair_range_from_to(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
if q == n:
f = 1
while p >= 4:
#print S[p - 3:p + 1]
#print S[p - 2], ">", S[p - 3], ", ", S[p - 1], ">=", S[p - 3], ", ", S[p], ">=", S[p - 2], (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2])
#print S[p - 2], "<", S[p - 3], ", ", S[p - 1], "<=", S[p - 3], ", ", S[p], "<=", S[p - 2], (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
# print S[p - 3:p + 1]
# print S[p - 2], ">", S[p - 3], ", ", S[p - 1], ">=", S[p - 3], ", ", S[p], ">=", S[p - 2], (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2])
# print S[p - 2], "<", S[p - 3], ", ", S[p - 1], "<=", S[p - 3], ", ", S[p], "<=", S[p - 2], (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
#print (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or \
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
......@@ -134,12 +123,13 @@ def pair_range_from_to(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
if p == q:
break
else:
#print S[q], "to", S[q + 1]
# print S[q], "to", S[q + 1]
A[S[q], S[q + 1]] += 1
return A
@cython.locals(p=cython.int, q=cython.int, f=cython.int, flow=list, k=cython.int, n=cython.int, ptr=cython.int)
def pair_range_amplitude_mean(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
@njit(cache=True)
def pair_range_amplitude_mean(x):
"""
Returns a list of (amplitude, mean) tuples of half-cycles
x: Peak-Trough sequence (integer list of local minima and maxima)
......@@ -165,7 +155,7 @@ def pair_range_amplitude_mean(x): # cpdef pair_range(np.ndarray[long,ndim=1] x
p += 1
q += 1
# read
# read
S[p] = x[ptr]
ptr += 1
......
import cython
import numpy as np
cimport numpy as np
import cython
import numpy as np
# cimport numpy as np
@cython.locals(p=cython.int, q=cython.int, f=cython.int, flow=list, k=cython.int, n=cython.int, ptr=cython.int)
def pair_range_amplitude(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
"""
Returns a list of half-cycle-amplitudes
x: Peak-Trough sequence (integer list of local minima and maxima)
This routine is implemented according to
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
except that a list of half-cycle-amplitudes is returned instead of a from_level-to_level-matrix
"""
x = x - np.min(x)
k = np.max(x)
n = x.shape[0]
S = np.zeros(n + 1)
#A = np.zeros(k+1)
flow = []
S[1] = x[0]
ptr = 1
p = 1
q = 1
f = 0
# phase 1
while True:
p += 1
q += 1
# read
S[p] = x[ptr]
ptr += 1
if q == n:
f = 1
while p >= 4:
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) \
or\
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
ampl = abs(S[p - 2] - S[p - 1])
# A[ampl]+=2 #Two half cycles
flow.append(ampl)
flow.append(ampl)
S[p - 2] = S[p]
p -= 2
else:
break
if f == 0:
pass
else:
break
# phase 2
q = 0
while True:
q += 1
if p == q:
break
else:
ampl = abs(S[q + 1] - S[q])
# A[ampl]+=1
flow.append(ampl)
return flow
@cython.locals(p=cython.int, q=cython.int, f=cython.int, flow=list, k=cython.int, n=cython.int, ptr=cython.int)
def pair_range_from_to(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
"""
Returns a from_level-to_level-matrix of half-cycles
x: Peak-Trough sequence (integer list of local minima and maxima)
This routine is implemented according to
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
"""
x = x - np.min(x)
k = np.max(x)
n = x.shape[0]
S = np.zeros(n + 1)
A = np.zeros((k + 1, k + 1))
S[1] = x[0]
ptr = 1
p = 1
q = 1
f = 0
# phase 1
while True:
p += 1
q += 1
# read
S[p] = x[ptr]
ptr += 1
if q == n:
f = 1
while p >= 4:
#print S[p - 3:p + 1]
#print S[p - 2], ">", S[p - 3], ", ", S[p - 1], ">=", S[p - 3], ", ", S[p], ">=", S[p - 2], (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2])
#print S[p - 2], "<", S[p - 3], ", ", S[p - 1], "<=", S[p - 3], ", ", S[p], "<=", S[p - 2], (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
#print (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or \
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
A[S[p - 2], S[p - 1]] += 1
A[S[p - 1], S[p - 2]] += 1
S[p - 2] = S[p]
p -= 2
else:
break
if f == 1:
break # q==n
# phase 2
q = 0
while True:
q += 1
if p == q:
break
else:
#print S[q], "to", S[q + 1]
A[S[q], S[q + 1]] += 1
return A
@cython.locals(p=cython.int, q=cython.int, f=cython.int, flow=list, k=cython.int, n=cython.int, ptr=cython.int)
def pair_range_amplitude_mean(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
"""
Returns a list of (amplitude, mean) tuples of half-cycles
x: Peak-Trough sequence (integer list of local minima and maxima)
This routine is implemented according to
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
except that a list of (amplitude, mean) tuples is returned instead of a from_level-to_level-matrix
"""
x = x - np.min(x)
k = np.max(x)
n = x.shape[0]
S = np.zeros(n + 1)
ampl_mean = []
A = np.zeros((k + 1, k + 1))
S[1] = x[0]
ptr = 1
p = 1
q = 1
f = 0
# phase 1
while True:
p += 1
q += 1
# read
S[p] = x[ptr]
ptr += 1
if q == n:
f = 1
while p >= 4:
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) \
or\
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
# Extract two intermediate half cycles
ampl = abs(S[p - 2] - S[p - 1])
mean = (S[p - 2] + S[p - 1]) / 2
ampl_mean.append((ampl, mean))
ampl_mean.append((ampl, mean))
S[p - 2] = S[p]
p -= 2
else:
break
if f == 0:
pass
else:
break
# phase 2
q = 0
while True:
q += 1
if p == q:
break
else:
ampl = abs(S[q + 1] - S[q])
mean = (S[q + 1] + S[q]) / 2
ampl_mean.append((ampl, mean))
return ampl_mean
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import cython
import numpy as np
#cimport numpy as np
from numba.core.decorators import njit
@cython.locals(BEGIN=cython.int, MINZO=cython.int, MAXZO=cython.int, ENDZO=cython.int, \
R=cython.int, L=cython.int, i=cython.int, p=cython.int, f=cython.int)
def peak_trough(x, R): #cpdef np.ndarray[long,ndim=1] peak_trough(np.ndarray[long,ndim=1] x, int R):
@njit(cache=True) # jit faster than previous cython compiled extension
def peak_trough(x, R):
"""
Returns list of local maxima/minima.
......@@ -25,12 +18,12 @@ def peak_trough(x, R): #cpdef np.ndarray[long,ndim=1] peak_trough(np.ndarray[lo
MINZO = 1
MAXZO = 2
ENDZO = 3
S = np.zeros(x.shape[0] + 1, dtype=np.int)
S = np.full(x.shape[0] + 1, 0) # np.zeros(...).astype(int) not supported by numba
L = x.shape[0]
goto = BEGIN
while 1:
while True:
if goto == BEGIN:
trough = x[0]
peak = x[0]
......
import cython
import numpy as np
cimport numpy as np
import cython
import numpy as np
cimport numpy as np
@cython.locals(BEGIN=cython.int, MINZO=cython.int, MAXZO=cython.int, ENDZO=cython.int, \
R=cython.int, L=cython.int, i=cython.int, p=cython.int, f=cython.int)
cpdef np.ndarray[long,ndim=1] peak_trough(np.ndarray[long,ndim=1] x, int R):
"""
Returns list of local maxima/minima.
x: 1-dimensional numpy array containing signal
R: Thresshold (minimum difference between succeeding min and max
This routine is implemented directly as described in
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
"""
BEGIN = 0
MINZO = 1
MAXZO = 2
ENDZO = 3
S = np.zeros(x.shape[0] + 1, dtype=np.int)
L = x.shape[0]
goto = BEGIN
while 1:
if goto == BEGIN:
trough = x[0]
peak = x[0]
i = 0
p = 1
f = 0
while goto == BEGIN:
i += 1
if i == L:
goto = ENDZO
continue
else:
if x[i] > peak:
peak = x[i]
if peak - trough >= R:
S[p] = trough
goto = MAXZO
continue
elif x[i] < trough:
trough = x[i]
if peak - trough >= R:
S[p] = peak
goto = MINZO
continue
elif goto == MINZO:
f = -1
while goto == MINZO:
i += 1
if i == L:
goto = ENDZO
continue
else:
if x[i] < trough:
trough = x[i]
else:
if x[i] - trough >= R:
p += 1
S[p] = trough
peak = x[i]
goto = MAXZO
continue
elif goto == MAXZO:
f = 1
while goto == MAXZO:
i += 1
if i == L:
goto = ENDZO
continue
else:
if x[i] > peak:
peak = x[i]
else:
if peak - x[i] >= R:
p += 1
S[p] = peak
trough = x[i]
goto = MINZO
continue
elif goto == ENDZO:
n = p + 1
if abs(f) == 1:
if f == 1:
S[n] = peak
else:
S[n] = trough
else:
S[n] = (trough + peak) / 2
S = S[1:n + 1]
return S
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from future import standard_library
standard_library.install_aliases()
import numpy as np
from wetb.fatigue_tools.rainflowcounting import peak_trough
from wetb.fatigue_tools.rainflowcounting import pair_range
from wetb.fatigue_tools.rainflowcounting import rainflowcount_astm
def check_signal(signal):
......@@ -25,7 +19,7 @@ def check_signal(signal):
raise TypeError("Signal contains no variation")
def rainflow_windap(signal, levels=255., thresshold=(255 / 50)):
def rainflow_windap(signal, levels=255., thresshold=(255 / 50), peak_trough=peak_trough, pair_range=pair_range):
"""Windap equivalent rainflow counting
......@@ -62,7 +56,7 @@ def rainflow_windap(signal, levels=255., thresshold=(255 / 50)):
>>> ampl, mean = rainflow_windap(signal)
"""
check_signal(signal)
#type <double> is required by <find_extreme> and <rainflow>
# type <double> is required by <find_extreme> and <rainflow>
signal = signal.astype(np.double)
if np.all(np.isnan(signal)):
return None
......@@ -71,17 +65,12 @@ def rainflow_windap(signal, levels=255., thresshold=(255 / 50)):
if np.nanmax(signal) > 0:
gain = np.nanmax(signal) / levels
signal = signal / gain
signal = np.round(signal).astype(np.int)
signal = np.round(signal).astype(int)
# If possible the module is compiled using cython otherwise the python implementation is used
#Convert to list of local minima/maxima where difference > thresshold
# Convert to list of local minima/maxima where difference > thresshold
sig_ext = peak_trough.peak_trough(signal, thresshold)
#rainflow count
# rainflow count
ampl_mean = pair_range.pair_range_amplitude_mean(sig_ext)
ampl_mean = np.array(ampl_mean)
......@@ -90,8 +79,7 @@ def rainflow_windap(signal, levels=255., thresshold=(255 / 50)):
return ampl_mean.T
def rainflow_astm(signal):
def rainflow_astm(signal, rainflowcount_astm=rainflowcount_astm):
"""Matlab equivalent rainflow counting
Calculate the amplitude and mean values of half cycles in signal
......@@ -124,15 +112,10 @@ def rainflow_astm(signal):
# type <double> is reuqired by <find_extreme> and <rainflow>
signal = signal.astype(np.double)
# Import find extremes and rainflow.
# If possible the module is compiled using cython otherwise the python implementation is used
from wetb.fatigue_tools.rainflowcounting.rainflowcount_astm import find_extremes, rainflowcount
# Remove points which is not local minimum/maximum
sig_ext = find_extremes(signal)
sig_ext = rainflowcount_astm.find_extremes(signal)
# rainflow count
ampl_mean = np.array(rainflowcount(sig_ext))
ampl_mean = np.array(rainflowcount_astm.rainflowcount(sig_ext))
return np.array(ampl_mean).T
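# Usage sketch for the two counters above, reusing the signal from the
# __main__ example in fatigue.py (module path inferred from the imports above):
import numpy as np
from wetb.fatigue_tools.rainflowcounting.rainflowcount import rainflow_windap, rainflow_astm

signal = np.array([-2., 0., 1., 0., -3., 0., 5., 0., -1., 0., 3., 0., -4., 0., 4., 0., -2.])
ampl, mean = rainflow_astm(signal)
ampl_w, mean_w = rainflow_windap(signal)
print(ampl, mean)  # peak-to-peak amplitudes and means of the counted half cycles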
......@@ -13,31 +13,25 @@ from cy_rainflowcount import find_extremes,rainflow
ext = find_extremes(np.array([-2,0,1,0,-3,0,5,0,-1,0,3,0,-4,0,4,0,-2]).astype(np.double))
print rainflow(ext)
'''
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import numpy as np
from numba.core.decorators import njit
def find_extremes(signal): #cpdef find_extremes(np.ndarray[double,ndim=1] signal):
@njit(cache=True) # jit faster than previous cython compiled extension
def find_extremes(signal):
"""return indexes of local minima and maxima plus first and last element of signal"""
#cdef int pi, i
# sign of gradient
sign_grad = np.int8(np.sign(np.diff(signal)))
sign_grad = np.sign(np.diff(signal))
# remove plateaus(sign_grad==0) by sign_grad[plateau_index]=sign_grad[plateau_index-1]
plateau_indexes, = np.where(sign_grad == 0)
if len(plateau_indexes) > 0 and plateau_indexes[0] == 0:
# first element is a plateau
if len(plateau_indexes) == len(sign_grad):
# All values are equal to crossing level!
return np.array([0])
# All values are equal to crossing level!
return np.array([0.])
# set first element = first element which is not a plateau and delete plateau index
i = 0
......@@ -47,19 +41,19 @@ def find_extremes(signal): #cpdef find_extremes(np.ndarray[double,ndim=1] signa
plateau_indexes = np.delete(plateau_indexes, 0)
for pi in plateau_indexes.tolist():
for pi in plateau_indexes:
sign_grad[pi] = sign_grad[pi - 1]
extremes, = np.where(np.r_[1, (sign_grad[1:] * sign_grad[:-1] < 0), 1])
extremes = np.full(len(sign_grad) + 1, True)
extremes[1:-1] = sign_grad[1:] * sign_grad[:-1] < 0
#extremes, = np.where(np.concatenate([np.array([True]), (sign_grad[1:] * sign_grad[:-1] < 0), np.array([True])]))
e = signal[extremes]
return e
return signal[extremes]
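# Quick check of find_extremes, reusing the signal from the module docstring:
sig = np.array([-2, 0, 1, 0, -3, 0, 5, 0, -1, 0, 3, 0, -4, 0, 4, 0, -2]).astype(np.double)
print(find_extremes(sig))  # local minima/maxima plus the first and last element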
def rainflowcount(sig): #cpdef rainflowcount(np.ndarray[double,ndim=1] sig):
"""Cython compilable rain ampl_mean count without time analysis
This implementation is based on the c-implementation by Adam Nieslony found at
@njit(cache=True)
def rainflowcount(sig):
"""This implemementation is based on the c-implementation by Adam Nieslony found at
the MATLAB Central File Exchange http://www.mathworks.com/matlabcentral/fileexchange/3026
References
......@@ -75,14 +69,13 @@ def rainflowcount(sig): #cpdef rainflowcount(np.ndarray[double,ndim=1] sig):
Copyright (c) 1999-2002 by Adam Nieslony
Ported to Cython compilable Python by Mads M Pedersen
Ported to Cython compilable Python by Mads M Pedersen; the numba jit version later proved faster
In addition peak amplitude is changed to peak to peak amplitude
"""
#cdef int sig_ptr, index
#cdef double ampl
a = []
sig_ptr = 0
ampl_mean = []
......@@ -91,7 +84,7 @@ def rainflowcount(sig): #cpdef rainflowcount(np.ndarray[double,ndim=1] sig):
sig_ptr += 1
while len(a) > 2 and abs(a[-3] - a[-2]) <= abs(a[-2] - a[-1]):
ampl = abs(a[-3] - a[-2])
mean = (a[-3] + a[-2]) / 2;
mean = (a[-3] + a[-2]) / 2
if len(a) == 3:
del a[0]
if ampl > 0:
......@@ -103,7 +96,7 @@ def rainflowcount(sig): #cpdef rainflowcount(np.ndarray[double,ndim=1] sig):
ampl_mean.append((ampl, mean))
for index in range(len(a) - 1):
ampl = abs(a[index] - a[index + 1])
mean = (a[index] + a[index + 1]) / 2;
mean = (a[index] + a[index + 1]) / 2
if ampl > 0:
ampl_mean.append((ampl, mean))
return ampl_mean
import cython
import numpy as np
cimport numpy as np
'''
Created on 27/02/2013
@author: mmpe
How to use:
import_cython("cy_rainflowcount",'cy_rainflowcount.py','')
from cy_rainflowcount import find_extremes,rainflow
ext = find_extremes(np.array([-2,0,1,0,-3,0,5,0,-1,0,3,0,-4,0,4,0,-2]).astype(np.double))
print rainflow(ext)
'''
import numpy as np
cpdef find_extremes(np.ndarray[double,ndim=1] signal):
"""return indexes of local minima and maxima plus first and last element of signal"""
cdef int pi, i
# sign of gradient
sign_grad = np.int8(np.sign(np.diff(signal)))
# remove plateaus(sign_grad==0) by sign_grad[plateau_index]=sign_grad[plateau_index-1]
plateau_indexes, = np.where(sign_grad == 0)
if len(plateau_indexes) > 0 and plateau_indexes[0] == 0:
# first element is a plateau
if len(plateau_indexes) == len(sign_grad):
# All values are equal to crossing level!
return np.array([0])
# set first element = first element which is not a plateau and delete plateau index
i = 0
while sign_grad[i] == 0:
i += 1
sign_grad[0] = sign_grad[i]
plateau_indexes = np.delete(plateau_indexes, 0)
for pi in plateau_indexes.tolist():
sign_grad[pi] = sign_grad[pi - 1]
extremes, = np.where(np.r_[1, (sign_grad[1:] * sign_grad[:-1] < 0), 1])
return signal[extremes]
cpdef rainflowcount(np.ndarray[double,ndim=1] sig):
"""Cython compilable rain ampl_mean count without time analysis
This implementation is based on the c-implementation by Adam Nieslony found at
the MATLAB Central File Exchange http://www.mathworks.com/matlabcentral/fileexchange/3026
References
----------
Adam Nieslony, "Determination of fragments of multiaxial service loading
strongly influencing the fatigue of machine components,"
Mechanical Systems and Signal Processing 23, no. 8 (2009): 2712-2721.
and is based on the following standard:
ASTM E 1049-85 (Reapproved 1997), Standard practices for cycle counting in
fatigue analysis, in: Annual Book of ASTM Standards, vol. 03.01, ASTM,
Philadelphia, 1999, pp. 710-718.
Copyright (c) 1999-2002 by Adam Nieslony
Ported to Cython compilable Python by Mads M Pedersen
In addition peak amplitude is changed to peak to peak amplitude
"""
cdef int sig_ptr, index
cdef double ampl
a = []
sig_ptr = 0
ampl_mean = []
for _ in range(len(sig)):
a.append(sig[sig_ptr])
sig_ptr += 1
while len(a) > 2 and abs(a[-3] - a[-2]) <= abs(a[-2] - a[-1]):
ampl = abs(a[-3] - a[-2])
mean = (a[-3] + a[-2]) / 2;
if len(a) == 3:
del a[0]
if ampl > 0:
ampl_mean.append((ampl, mean))
elif len(a) > 3:
del a[-3:-1]
if ampl > 0:
ampl_mean.append((ampl, mean))
ampl_mean.append((ampl, mean))
for index in range(len(a) - 1):
ampl = abs(a[index] - a[index + 1])
mean = (a[index] + a[index + 1]) / 2;
if ampl > 0:
ampl_mean.append((ampl, mean))
return ampl_mean
......@@ -3,13 +3,7 @@ Created on 16/07/2013
@author: mmpe
'''
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from wetb import gtsdf
standard_library.install_aliases()
import unittest
......