Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • toolbox/WindEnergyToolbox
  • tlbl/WindEnergyToolbox
  • cpav/WindEnergyToolbox
  • frza/WindEnergyToolbox
  • borg/WindEnergyToolbox
  • mmpe/WindEnergyToolbox
  • ozgo/WindEnergyToolbox
  • dave/WindEnergyToolbox
  • mmir/WindEnergyToolbox
  • wluo/WindEnergyToolbox
  • welad/WindEnergyToolbox
  • chpav/WindEnergyToolbox
  • rink/WindEnergyToolbox
  • shfe/WindEnergyToolbox
  • shfe1/WindEnergyToolbox
  • acdi/WindEnergyToolbox
  • angl/WindEnergyToolbox
  • wliang/WindEnergyToolbox
  • mimc/WindEnergyToolbox
  • wtlib/WindEnergyToolbox
  • cmos/WindEnergyToolbox
  • fabpi/WindEnergyToolbox
22 results
Show changes
Showing
with 3447 additions and 182 deletions
[metadata]
name = wetb
summary = Wind Energy Toolbox
author = DTU Wind Energy
author-email = mmpe@dtu.dk
license = GPLv3
home-page = https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox
description-file = README
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = Development Status :: 4 - Beta,
Programming Language :: Python,
Programming Language :: Python :: 2.7,
Programming Language :: Python :: 3,
Programming Language :: Python :: 3.3,
Programming Language :: Python :: 3.4,
Programming Language :: Python :: 3.5,
Programming Language :: Python :: 3.6,
Environment :: Console,
Intended Audience :: Education,
Intended Audience :: Science/Research,
License :: OSI Approved :: GNU General Public License v3 (GPLv3),
Operating System :: OS Independent,
Operating System :: POSIX :: Linux,
Operating System :: Unix,
Operating System :: MacOS,
Operating System :: Microsoft :: Windows
Topic :: Scientific/Engineering :: Mathematics
[entry_points]
# Add here console scripts like:
# console_scripts =
# hello_world = wetb.module:function
# as well as other entry_points.
[files]
# Add here 'data_files', 'packages' or 'namespace_packages'.
# Additional data files are defined as key value pairs of source and target:
packages =
wetb
# data_files =
# share/wetb_docs = docs/*
[extras]
# Add here additional requirements for extra features, like:
# PDF =
# ReportLab>=1.2
# RXP
#ALL =
# django
# cookiecutter
[test]
# py.test options when running `python setup.py test`
#addopts = tests
[tool:pytest]
# Options for py.test:
# Specify command line options as you would do when invoking py.test directly.
# e.g. --cov-report html (or xml) for html/xml output or --junitxml junit.xml
# in order to write a coverage file that can be read by Jenkins.
#addopts =
# --cov wetb --cov-report term-missing
# --verbose
python_files = WindEnergyToolbox/wetb/*
[aliases]
docs = build_sphinx
[bdist_wheel]
# Use this option if your package is pure-python
universal = 0
[build_sphinx]
# Options for Sphinx build
source_dir = docs
build_dir = docs/_build
[pbr]
# Let pbr run sphinx-apidoc
autodoc_tree_index_modules = True
# autodoc_tree_excludes = ...
# Let pbr itself generate the apidoc
# autodoc_index_modules = True
# autodoc_exclude_modules = ...
# Convert warnings to errors
# warnerrors = True
[devpi:upload]
# Options for the devpi: PyPI server and packaging tool
# VCS export must be deactivated since we are using setuptools-scm
no-vcs = 1
formats = bdist_wheel
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for wetb.
This file was generated with PyScaffold 2.5, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import os
import sys
from setuptools import setup
try:
    from pypandoc import convert_file

    # Convert the Markdown README to reST for use as long_description on PyPI.
    def read_md(f):
        return convert_file(f, 'rst', format='md')
except ImportError:
    print("warning: pypandoc module not found, could not convert Markdown to RST")

    # Fallback: return the raw Markdown text unchanged. A context manager is
    # used so the file handle is closed deterministically (the original
    # lambda left the handle open until garbage collection).
    def read_md(f):
        with open(f, 'r') as fobj:
            return fobj.read()
import numpy as np
from distutils.extension import Extension
from Cython.Distutils import build_ext
def setup_package():
    """Build the Cython extension list and invoke setuptools.setup().

    Uses pyscaffold for packaging metadata (see setup.cfg) and converts the
    Markdown README via the module-level ``read_md`` helper.
    """
    # Cython extension specs: (package, [pyx module basenames]) pairs.
    cython_modules = [
        ('wetb.fatigue_tools.rainflowcounting',
         ['pair_range', 'peak_trough', 'rainflowcount_astm']),
        ('wetb.signal.filters', ['cy_filters']),
    ]
    extensions = []
    for package, names in cython_modules:
        pkg_dir = package.replace(".", "/")
        for name in names:
            extensions.append(
                Extension('%s.%s' % (package, name),
                          [os.path.join(pkg_dir, name) + '.pyx'],
                          include_dirs=[np.get_include()]))

    # Only pull in sphinx when a documentation command was actually requested.
    needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
    sphinx = ['sphinx'] if needs_sphinx else []

    setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
          cmdclass={'build_ext': build_ext},
          ext_modules=extensions,
          use_pyscaffold=True,
          long_description=read_md('README.md'))


if __name__ == "__main__":
    setup_package()
import numpy as np
npt = np.testing
\ No newline at end of file
import os
import sys
from unittest import mock
import pytest
import matplotlib.pyplot as plt
def run_module_main(module):
    """Execute ``module.main()`` as if the module were run as a script.

    Used as a smoke test: the call must complete without raising. On posix
    systems without a display the test is xfailed instead of run.

    Parameters
    ----------
    module : module
        An imported module that defines a ``main`` callable.
    """
    # check that all main module examples run without errors
    if os.name == 'posix' and "DISPLAY" not in os.environ:
        pytest.xfail("No display")

    def no_show(*args, **kwargs):
        pass
    plt.show = no_show  # disable plt show that requires the user to close the plot

    def no_print(s):
        pass
    try:
        # Run with __name__ patched so `if __name__ == "__main__":` guards fire,
        # and with the module's print silenced.
        # NOTE(review): mock.patch.object(module, "print", ...) requires the
        # module object to expose a "print" attribute — confirm the example
        # modules do (otherwise this raises AttributeError).
        with mock.patch.object(module, "__name__", "__main__"):
            with mock.patch.object(module, "print", no_print):
                getattr(module, 'main')()
    except Exception as e:
        # Re-raise the same exception type with the module name appended,
        # preserving the original traceback for debugging.
        raise type(e)(str(e) +
                      ' in %s.main' % module.__name__).with_traceback(sys.exc_info()[2])
import importlib
import os
import pkgutil
import warnings
import mock
import pytest
import matplotlib.pyplot as plt
import sys
from wetb import examples
from tests.run_main import run_module_main
def get_main_modules():
    """Import every submodule of ``wetb.examples`` and return those that
    define a ``main`` attribute.

    Returns
    -------
    list of module
        The imported example modules exposing ``main``.
    """
    main_modules = []
    prefix = examples.__name__ + '.'
    for _, modname, _ in pkgutil.walk_packages(examples.__path__, prefix):
        with warnings.catch_warnings():
            # examples may warn at import time; keep collection quiet
            warnings.simplefilter("ignore")
            module = importlib.import_module(modname)
            if 'main' in dir(module):
                main_modules.append(module)
    return main_modules
def print_main_modules():
    """Print the names of all example modules that define main(), one per line."""
    names = [module.__name__ for module in get_main_modules()]
    print("\n".join(names))
# One test case per example module exposing main(); each must run cleanly.
@pytest.mark.parametrize("module", get_main_modules())
def test_main(module):
    run_module_main(module)
if __name__ == '__main__':
print_main_modules()
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
test = "TEST"
try:
import pkg_resources
__version__ = pkg_resources.safe_version(pkg_resources.get_distribution(__name__).version)
except:
__version__ = 'unknown'
__version__ = '0.0.10'
# try:
# import pkg_resources
# __version__ = pkg_resources.safe_version(pkg_resources.get_distribution(__name__).version)
# except:
# __version__ = 'unknown'
This diff is collapsed.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 11:07:50 2021
@author: dave
"""
# from os.path import join as pjoin
import numpy as np
from lxml import objectify#, etree)
# from matplotlib import pyplot as plt
# import pandas as pd
# import wetb
from wetb.prepost import misc
# from wetb.hawc2 import (HTCFile, AEFile, PCFile, StFile)
# TODO: this dictionary utility could go to a dict sub-class in misc?
def find_next_unique_key(d, key):
    """Return ``key`` suffixed with ``__k`` for the smallest k >= 2 not in ``d``.

    Fix: the original returned after the first loop iteration, so when
    ``key__2`` was already present it handed back the original, still
    colliding, key. Now the loop keeps probing until a free suffix is found.

    Parameters
    ----------
    d : dict
        Dictionary whose keys must stay unique.
    key : str
        Base key that already occurs in ``d``.

    Returns
    -------
    str
        ``f'{key}__{k}'`` for the first free k (starting at 2).
    """
    k = 1
    while True:
        k += 1
        candidate = key + f'__{k}'
        if candidate not in d:
            return candidate
# TODO: this dictionary utility could go to a dict sub-class in misc?
def add_new_unique_key(key, values, d):
    """
    Add key:values to dictionary d. If key already occurs, append __x
    where x is the number of previous occurrences of key(__x).

    Parameters
    ----------
    key : str
        Candidate key.
    values : list
        Values stored (wrapped in a list) under the possibly renamed key.
    d : dict
        Target dictionary, modified in place.

    Returns
    -------
    key : str
        The key actually used after de-duplication.
    d : dict
        The same dictionary object, for chaining.

    """
    unique_key = find_next_unique_key(d, key) if key in d else key
    d[unique_key] = [values]
    return unique_key, d
class ReadBladedProject:
    """Reader for a BLADED project (.prj, XML) file.

    The XML is parsed with lxml.objectify; additionally the non-XML
    ``BladedData`` payload is decoded into the dictionary ``self.bd``.
    """

    def __init__(self, fname):
        # fname: path to the BLADED project file (UTF-8 XML).
        with open(fname, encoding='utf-8') as fobj:
            # re-encode to bytes — presumably because objectify.fromstring
            # rejects str input carrying an XML encoding declaration; confirm
            xml_str = fobj.read().encode('utf-8')

        self.bd, self.xmlroot = self.parse_bladeddata(xml_str)
        self.set_attr_and_check()

        # some things are just a little different
        # TMASS has a list of materials and their properties
        # get rid of the quotes
        # tmp = [el.replace("'", '') for el in self.bd['TMASS']['MATERIAL'][0]]
        # self.bd['TMASS']['MATERIAL'] = tmp
        unique_mat = set(self.get_key('TMASS', 'MATERIAL').flatten().tolist())
        # map each unique tower material name to its property table
        self.tow_mat_prop = {k:self.get_key('TMASS', k) for k in unique_mat}
        # material_props = {}
        # for k in unique_mat:
        #     material_props[k.replace("'", '')] = self.get_key('TMASS', k)

    def parse_bladeddata(self, xml_str):
        """
        The XML field BladedData contains what seems like the main core input
        data for BLADED, and formatted in some structured way.

        Parameters
        ----------
        xml_str : bytes
            UTF-8 encoded content of the project file.

        Returns
        -------
        bd : dict
            Parsed BladedData: key -> list of value rows, or (for
            MSTART/MEND sub-sections) key -> dict. Duplicate keys get a
            ``__x`` suffix appended.
        xmlroot : lxml.objectify.ObjectifiedElement
            Root of the parsed XML tree.

        """
        # root = etree.fromstring(xml)
        # elems = root.getchildren()
        # bladeddata = elems[1].text
        xmlroot = objectify.fromstring(xml_str)
        # the non-xml formatted BLADED model is situated in a non-xml field
        # called BladedData
        bd = {}
        mstart = None
        for i, line in enumerate(xmlroot.BladedData.text.split('\n')):
            # TODO: values embedded in double quotes (") can contain entire
            # MSTART/MEND sub-sections (so embedded sections)
            # split replace tabs with spaces, split on spaces, remove empty
            linels = misc.remove_items(line.replace('\t', ' ').split(' '), '')
            # commas can also be separators, in addition to spaces
            linels2 = []
            for k in linels:
                linels2.extend(k.split(','))
            linels = misc.remove_items(linels2, '')
            # ignore empty lines
            if len(linels) < 1:
                continue
            # entries can be numbers if the previous key contains multiple data points
            try:
                float(linels[0])
                el0isnum = True
            except ValueError:
                el0isnum = False
            # start of a sub-section that contains (non-unique) keys as well
            if linels[0].upper().startswith('MSTART'):
                mtag = linels[-1]
                mstart = {}
            # at the end of the sub-section add the sub-section to the main dict
            elif linels[0].upper().startswith('MEND'):
                # FIXME: implement MSTART sections embedded in double quoted values
                try:
                    # if the section key is not unique, make it so by appending __X
                    if mtag in bd:
                        mtag = find_next_unique_key(bd, mtag)
                    bd[mtag] = mstart
                except UnboundLocalError:
                    # MEND without a preceding MSTART in scope
                    print('warning: ignored embedded mstart/mend section')
                    print(f'at line: {i+1}')
                mstart = None
            # if we are under a sub-section
            elif mstart is not None:
                # if the line contains a keyword
                if not el0isnum:
                    tag, mstart = add_new_unique_key(linels[0], linels[1:], mstart)
                # line is datapoint that needs to be added to key that occured before
                else:
                    mstart[tag].append(linels)
            # add numerical values to key that occured before
            elif el0isnum:
                bd[tag].append(linels)
            else:
                tag, bd = add_new_unique_key(linels[0], linels[1:], bd)

        return bd, xmlroot

    def get_key(self, key1, key2=False):
        """
        Get key from the BladedData CDATA section and format to int32 or
        float32 numpy arrays if possible without precision loss.

        Parameters
        ----------
        key1 : str
            Top-level key (or MSTART section name).
        key2 : str, optional
            Key within the MSTART section ``key1``. The default is False.

        Returns
        -------
        numpy.array
            Values from key1/key2 formatted as a numpy array. Converted to
            numpy.int32, numpy.float32 if possible without precision loss,
            otherwise an object array is returned.

        """
        if key1 not in self.bd:
            raise KeyError(f'{key1} not found in BLADED file.')
        if key2 is not False:
            if key2 not in self.bd[key1]:
                raise KeyError(f'{key2} not found in MSTART {key1} of BLADED file.')
            data = self.bd[key1][key2]
        else:
            data = self.bd[key1]

        # in case we defined a mstart key
        if isinstance(data, dict):
            return data

        # i ,j = len(data), len(data[0])
        # FIXME: this is a very expensive way of converting it, but it might
        # not matter at all since very little model data is actually considered
        data_arr = np.array(data)
        try:
            data_arr = data_arr.astype(np.int32)
        except ValueError:
            try:
                data_arr = data_arr.astype(np.float32)
            except ValueError:
                # leave as object array (e.g. text values)
                pass
        return data_arr

        # return np.array(data, dtype=np.float32)
        # if isinstance(data[0], list) and len(data[0]) == 1:
        #     data = float(data[0])
        # if isinstance(data[0], list) and len(data[0]) > 1:
        #     data_arr = np.array(data, dtype=np.float32)

    def set_attr_and_check(self):
        """Check that BGEOMMB, BMASSMB, BSTIFFMB has indeed the same node
        repeated twice every time, and expose each key as a lower-case
        attribute holding the de-duplicated (every other column) values.
        """
        # self.bd['BGEOMMB'].keys(), but only those relevant
        keysg = ['RJ', 'DIST', 'REF_X', 'REF_Y', 'CHORD', 'TWIST', 'CE_X',
                 'CE_Y', 'BTHICK', 'FOIL', 'MOVING']
        nbe = self.get_key('BGEOMMB', 'NBE')[0,0]
        # self.bd['BMASSMB'].keys(), but only those relevant
        keysm = ['CM_X', 'CM_Y', 'MASS', 'SINER', 'RGRATIO', 'BETA_M']
        # self.bd['BSTIFFMB'].keys(), but only those relevant
        keyss = ['EIFLAP', 'EIEDGE', 'BETA_S', 'GJ', 'CS_X', 'CS_Y', 'GAFLAP',
                 'GAEDGE']
        mkeys = ['BGEOMMB', 'BMASSMB', 'BSTIFFMB']
        for mkey, keys in zip(mkeys, [keysg, keysm, keyss]):
            for key in keys:
                res = self.get_key(mkey, key)
                try:
                    # columns come in repeated pairs: even/odd entries must match
                    assert np.allclose(res[0,0::2], res[0,1::2])
                except TypeError:
                    # allclose doesn't make sense for text arrays
                    assert np.compare_chararrays(res[0,0::2], res[0,1::2],
                                                 '==', True).all()
                assert res.shape[1]==nbe
                if hasattr(self, key.lower()):
                    raise UserWarning(key, 'already exists')
                setattr(self, key.lower(), res[0,0::2])

    def print_xml_tree(self, fname):
        """For discovery purposes: print full tree + values/text

        Writes every element path with its attribute values and text to
        ``fname + '.structure.value'``, skipping the BladedData payload.

        Parameters
        ----------
        fname : str
            Base name of the output file.

        Returns
        -------
        None.

        """
        # def print_rec(root):
        #     # if hasattr(root, 'getparent'):
        #     #     print(root.getparent().tag.title, end='.')
        #     #     print_rec(root.getparent())
        #     for el in root.getchildren():
        #         print(print_rec(el))

        # def print_root_tree(root):
        #     root.getparent()
        #     print()

        # tree = etree.fromstring(xml_str)
        # els = tree.xpath('/')
        # for el in els:
        #     print(el)

        # tree = etree.fromstring(xml_str)
        # xmlroot = objectify.fromstring(xml_str)
        # with open(fname+'.structure', 'w') as f:
        #     for line in xmlroot.descendantpaths():
        #         f.write(line + '\n')

        # Recursive XML parsing python using ElementTree
        # https://stackoverflow.com/q/28194703/3156685
        roottree = self.xmlroot.getroottree()

        def print_tree_recursive(root):
            # writes: /path/to/element : attr values [: text]
            # print(roottree.getpath(root), end=' : ')
            # print(root.tag.title())
            f.write(roottree.getpath(root) + ' : ')
            f.writelines(root.values())
            if root.text is not None:
                f.write(' : ' + root.text)
            f.write('\n')
            for elem in root.getchildren():
                print_tree_recursive(elem)

        with open(fname+'.structure.value', 'w') as f:
            for el in self.xmlroot.getchildren():
                # skip the (huge) non-xml BladedData payload
                if el.tag.title()!='Bladeddata':
                    print_tree_recursive(el)
# def get_frequencies(self):
# """
# """
# blades = self.xmlroot.NewGUI.Turbine.Blades.FlexibilityModel
# blades.Settings.ModesWithDampingDefined
# blades.PartModes # with lots of BladeModeContainer
# blades.WholeBladeModes
# blades.WholeBladeModes.WholeBladeMode
# blades.WholeBladeModes.WholeBladeMode.Name
# blades.WholeBladeModes.WholeBladeMode.Frequency
# blades.WholeBladeModes.WholeBladeMode.Damping
# blades.WholeBladeModes.WholeBladeMode.Components
# len(blades.WholeBladeModes.WholeBladeMode.Components.getchildren())
# # 60 elements
# for wholeblademode in blades.WholeBladeModes.iterchildren():
# print(wholeblademode.Name)
# print(wholeblademode.Frequency, wholeblademode.Damping)
# tower = self.xmlroot.NewGUI.Turbine.SupportStructure.FlexibilityModel
# for towermode in tower.Modes.getchildren():
# print(towermode.Description)
# towermode.Frequency
# towermode.Damping
This diff is collapsed.
1 number of sets, Nset
-----------------
#1 Nset number 1
================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
r [0] m [1] x_cg [2] y_cg [3] ri_x [4] ri_y [5] x_sh [6] y_sh [7] E [8] G [9] I_x [10] I_y [11] K [12] k_x [13] k_y [14] A [15] pitch [16] x_e [17] y_e [18]
================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
$1 2 subset number 1
0.000000000000000e+00 0.500000000000000e+00 0.000000000000000e+00 0.000000000000000e+00 0.500000000000000e+00 0.500000000000000e+00 0.000000000000000e+00 0.000000000000000e+00 1.000000000000000e+14 1.000000000000000e+13 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 0.000000000000000e+00 0.000000000000000e+00 0.000000000000000e+00
1.000000000000000e+00 0.500000000000000e+00 0.000000000000000e+00 0.000000000000000e+00 0.500000000000000e+00 0.500000000000000e+00 0.000000000000000e+00 0.000000000000000e+00 1.000000000000000e+14 1.000000000000000e+13 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 1.000000000000000e+00 0.000000000000000e+00 0.000000000000000e+00 0.000000000000000e+00
......@@ -5,10 +5,6 @@ Created on Thu Aug 04 09:24:51 2016
@author: tlbl
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
......
......@@ -5,12 +5,6 @@ Created on Thu Aug 04 11:09:43 2016
@author: tlbl
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import unittest
from wetb.control import control
......
This diff is collapsed.
import numpy as np
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import PatternFill
import matplotlib.pyplot as plt
import os
from wetb.dlb.dlb_postprocs import get_DLC
def dataarray_to_excel(dataarray, path):
    """
    Generate excel file from a DataArray.

    Parameters
    ----------
    dataarray : xarray.DataArray
        DataArray containing data from the DLB.
    path : str
        Path to the new excel file to be created

    Returns
    -------
    None.

    """
    # Flatten to a long-format table (coordinates become columns, data in 'value').
    flat_table = dataarray.to_dataframe('value').reset_index()
    flat_table.to_excel(path, index=False)
def DLB_extreme_loads_to_excel(DLB_extreme_loads, path):
    """
    Generate excel report of DLB extreme loads.

    One sheet per sensor, with the driving case group in a 'group' column,
    and the driver cells highlighted in grey.

    Parameters
    ----------
    DLB_extreme_loads : xarray.DataArray
        DataArray containing the DLB extreme loading matrices for each load sensor.
    path : str
        Path to the new excel file to be created

    Returns
    -------
    None.

    """
    with pd.ExcelWriter(path, engine="openpyxl") as writer:
        for sensor in DLB_extreme_loads.coords['sensor_name'].values:
            df = DLB_extreme_loads.sel(sensor_name=sensor).to_pandas()
            df['group'] = DLB_extreme_loads.sel(sensor_name=sensor)['group'].values
            df.to_excel(writer, sheet_name=sensor, index_label='driver')
    # Re-open the written workbook to apply cell highlighting.
    workbook = load_workbook(path)
    highlight = PatternFill(start_color="C0C0C0", end_color="C0C0C0", fill_type="solid")
    for sensor in DLB_extreme_loads.coords['sensor_name'].values:
        # NOTE(review): hard-coded layout — assumes 12 driver rows with the
        # diagonal min/max pairs, plus fixed cells (14,8) and (15,10); confirm
        # against the sheet produced by to_pandas() above.
        for i in range(12):
            workbook[sensor].cell(row=i + 2, column=int(i/2) + 2).fill = highlight
        workbook[sensor].cell(row=14, column=8).fill = highlight
        workbook[sensor].cell(row=15, column=10).fill = highlight
    workbook.save(path)
def plot_DLB_directional_extreme_loads(DLB_extreme_loads, folder, extension='.png'):
    """
    Plot the DLB directional extreme loads and save them to a given folder.

    One scatter plot per sensor: each directional extreme load is projected
    onto (Mx, My) components and annotated with its driving DLC.

    Parameters
    ----------
    DLB_extreme_loads : xarray.DataArray ('sensor_name', 'angle')
        DataArray containing the extreme loads of the DLB.
    folder : str
        Path to the folder where the plots will be saved
    extension: str, optional
        Extension of the plot files. The default is '.png'.

    Returns
    -------
    None.

    """
    angles = DLB_extreme_loads.coords['angle'].values
    for i in range(DLB_extreme_loads.shape[0]):
        # project each directional load onto x (Mx) and y (My) components
        x = [float(DLB_extreme_loads[i, j])*np.cos(np.deg2rad(angles[j]))
             for j in range(len(angles))]
        y = [float(DLB_extreme_loads[i, j])*np.sin(np.deg2rad(angles[j]))
             for j in range(len(angles))]
        # driving design load case label per angle
        DLC = [get_DLC(DLB_extreme_loads[i, j].group.values[()]) for j in range(len(angles))]
        plt.scatter(x, y)
        # draw a spoke from the origin to every load point
        [plt.plot([0, x[k]], [0, y[k]], color='black') for k in range(len(x))]
        plt.xlabel('Mx')
        plt.ylabel('My')
        plt.title(DLB_extreme_loads[i].coords['sensor_name'].values[()])
        plt.axhline(0, color='black',linewidth=1)
        plt.axvline(0, color='black',linewidth=1)
        # symmetric axes with a 20 % margin around the largest magnitude
        plt.xlim(-max(abs(min(x)), abs(max(x)))*1.2, max(abs(min(x)), abs(max(x)))*1.2)
        plt.ylim(-max(abs(min(y)), abs(max(y)))*1.2, max(abs(min(y)), abs(max(y)))*1.2)
        for j in range(len(x)):
            plt.annotate(DLC[j], (x[j], y[j]), textcoords="offset points", xytext=(0,10), ha='center')
        plt.savefig(os.path.join(folder, 'Extreme_' + DLB_extreme_loads[i].coords['sensor_name'].values[()] + extension))
        plt.show()
def plot_DLB_directional_equivalent_loads(DLB_fatigue_loads, folder, extension='.png'):
    """
    Plot the DLB directional equivalent loads and save them to a given folder.

    One plot per sensor, with one scatter series per Woehler exponent m.

    Parameters
    ----------
    DLB_fatigue_loads : xarray.DataArray ('sensor_name', 'angle', 'm')
        DataArray containing the fatigue loads of the DLB. It matches the
        output from get_DLB_fatigue_loads
    folder : str
        Path to the folder where the plots will be saved
    extension: str, optional
        Extension of the plot files. The default is '.png'.

    Returns
    -------
    None.

    """
    m_list = DLB_fatigue_loads.coords['m'].values
    angles = DLB_fatigue_loads.coords['angle'].values
    for i in range(DLB_fatigue_loads.shape[0]):
        for j in range(len(m_list)):
            # project the equivalent load at each angle onto Mx/My components
            x = [float(DLB_fatigue_loads[i, k, j])*np.cos(np.deg2rad(angles[k]))
                 for k in range(len(angles))]
            y = [float(DLB_fatigue_loads[i, k, j])*np.sin(np.deg2rad(angles[k]))
                 for k in range(len(angles))]
            plt.scatter(x, y, label='m = ' + str(m_list[j]))
            # draw a spoke from the origin to every load point
            [plt.plot([0, x[k]], [0, y[k]], color='black') for k in range(len(angles))]
        plt.xlabel('Mx')
        plt.ylabel('My')
        plt.title(DLB_fatigue_loads[i].coords['sensor_name'].values[()])
        plt.axhline(0, color='black',linewidth=1)
        plt.axvline(0, color='black',linewidth=1)
        # NOTE(review): the limits use x/y from the LAST m series only —
        # presumably all m series share a similar scale; confirm
        plt.xlim(-max(abs(min(x)), abs(max(x)))*1.2, max(abs(min(x)), abs(max(x)))*1.2)
        plt.ylim(-max(abs(min(y)), abs(max(y)))*1.2, max(abs(min(y)), abs(max(y)))*1.2)
        plt.legend()
        plt.savefig(os.path.join(folder, 'Fatigue_' + DLB_fatigue_loads[i].coords['sensor_name'].values[()] + extension))
        plt.show()
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
File added
from wetb.dlb import iec61400_1
import pytest
from wetb.dlb.hawc2_iec_dlc_writer import HAWC2_IEC_DLC_Writer
import os
from wetb.hawc2.tests import test_files
import shutil
from tests import npt, run_main
from wetb.hawc2.htc_file import HTCFile
from tests.run_main import run_module_main
from wetb.dlb.iec61400_1 import DTU_IEC61400_1_Ref_DLB
path = os.path.dirname(test_files.__file__) + '/simulation_setup/DTU10MWRef6.0/htc/tmp/'
def clean_up():
    """Remove the generated htc tmp directory, if present."""
    if not os.path.isdir(path):
        return
    shutil.rmtree(path)
@pytest.fixture(autouse=True)
def run_around_tests():
    """Ensure a clean tmp htc folder before and after every test.

    ``pytest.yield_fixture`` is deprecated since pytest 3.0 and removed in
    pytest 6.2; a plain ``pytest.fixture`` supports yield fixtures directly.
    """
    clean_up()
    yield
    clean_up()
@pytest.fixture
def writer():
    # DLC writer based on the DTU 10 MW reference turbine master htc file.
    return HAWC2_IEC_DLC_Writer(path + '../DTU_10MW_RWT.htc', diameter=127)
def test_main():
    # smoke test: the iec61400_1 module's main() must run without errors
    run_main.run_module_main(iec61400_1)
def test_DLC12(writer):
    # Build the reference DLB and pick the DLC12 case table.
    dlc12 = DTU_IEC61400_1_Ref_DLB(iec_wt_class='1A', Vin=4, Vout=26, Vr=10, D=180, z_hub=90,
                                   Vmaint=18, controller='dtu_we_controller',
                                   generator_servo='generator_servo', pitch_servo='servo_with_limits',
                                   best_azimuth=180)['DLC12']
    assert len(dlc12) == 216  # 12 wsp, 3 wdir, 6 seeds
    # write only two representative cases to keep the test fast
    writer.from_pandas(dlc12[::24][:2])
    writer.write_all(path)
    npt.assert_array_equal(sorted(os.listdir(path + "DLC12")),
                           ['DLC12_wsp04_wdir350_s1001.htc', 'DLC12_wsp06_wdir000_s1101.htc'])
    htc = HTCFile(path + "DLC12/DLC12_wsp04_wdir350_s1001.htc")
    assert htc.wind.wsp[0] == 4
    # wdir 350 is written as a -10 deg windfield rotation
    npt.assert_array_equal(htc.wind.windfield_rotations.values, [-10, 0, 0])
    assert htc.wind.turb_format[0] == 1
    assert htc.wind.mann.create_turb_parameters[3] == 1001
def test_DLC21(writer):
    # Build the reference DLB and pick the DLC21 case table.
    dlc = DTU_IEC61400_1_Ref_DLB(iec_wt_class='1A', Vin=4, Vout=26, Vr=10, D=180, z_hub=90,
                                 Vmaint=18, controller='dtu_we_controller',
                                 generator_servo='generator_servo', pitch_servo='servo_with_limits',
                                 best_azimuth=180)['DLC21']
    assert len(dlc) == 144  # 12 wsp, 3 wdir, 4 seeds
    # write only two representative cases to keep the test fast
    writer.from_pandas(dlc[::16][:2])
    writer.write_all(path)
    npt.assert_array_equal(sorted(os.listdir(path + "DLC21")),
                           ['DLC21_wsp04_wdir350_s1001.htc', 'DLC21_wsp06_wdir000_s1101.htc'])
    htc = HTCFile(path + "DLC21/DLC21_wsp04_wdir350_s1001.htc")
    assert htc.wind.wsp[0] == 4
    npt.assert_array_equal(htc.wind.windfield_rotations.values, [-10, 0, 0])
    assert htc.wind.turb_format[0] == 1
    assert htc.wind.mann.create_turb_parameters[3] == 1001
    # NOTE(review): constant 7 = 110 presumably the generator servo event
    # time for this fault case — confirm against the controller interface
    assert htc.dll.get_subsection_by_name('generator_servo', 'name').init.constant__7.values == [7, 110]
def test_DLC22y(writer):
    # Build the reference DLB and pick the DLC22y (yaw error) case table.
    dlc = DTU_IEC61400_1_Ref_DLB(iec_wt_class='1A', Vin=4, Vout=26, Vr=10, D=180, z_hub=90,
                                 Vmaint=18, controller='dtu_we_controller',
                                 generator_servo='generator_servo', pitch_servo='servo_with_limits',
                                 best_azimuth=180)['DLC22y']
    assert len(dlc) == 276  # 12 wsp, 23 wdir, 1 seeds
    # write only two representative cases to keep the test fast
    writer.from_pandas(dlc[::24][:2])
    writer.write_all(path)
    npt.assert_array_equal(sorted(os.listdir(path + "DLC22y")),
                           ['DLC22y_wsp04_wdir015_s1001.htc', 'DLC22y_wsp06_wdir030_s1101.htc'])
    htc = HTCFile(path + "DLC22y/DLC22y_wsp04_wdir015_s1001.htc")
    assert htc.wind.wsp[0] == 4
    # wdir 15 is written directly as a +15 deg windfield rotation
    npt.assert_array_equal(htc.wind.windfield_rotations.values, [15, 0, 0])
    assert htc.wind.turb_format[0] == 1
    assert htc.wind.mann.create_turb_parameters[3] == 1001