[run]
omit =
pyconturb/tests/*
pyconturb/io/*
pyconturb/_hawcres_io.py
pyconturb/_version.py
[report]
fail_under = 70
\ No newline at end of file
# debugging files
# repo-specific ignore files
DELETEME*
junk/*
*.bts
*.bin
pyconturb/tests/fast_sims/*
# Byte-compiled / optimized / DLL files
__pycache__/
......
......@@ -36,5 +36,6 @@ Contents:
installation
overview
notebooks/outputs
reference_guide
examples
\ No newline at end of file
......@@ -84,7 +84,7 @@
"\n",
"Now let's define our custom functions for the spatial variation of the mean wind speed. Note that the wind speed function must be of the form\n",
"```\n",
"wsp_values = wsp_func(y, z, **kwargs)\n",
"wsp_values = wsp_func(spat_df, **kwargs)\n",
"```\n",
"where `kwargs` is a dictionary of the keyword arguments for the profile function. (It can also include unused keyword arguments.)"
]
......@@ -95,11 +95,12 @@
"metadata": {},
"outputs": [],
"source": [
"def wake_wsp(y, z, cntr_pos=[0, 90], rad=50, u_inf=10, max_def=0.5, **kwargs):\n",
"def wake_wsp(spat_df, cntr_pos=[0, 90], rad=50, u_inf=10, max_def=0.5, **kwargs):\n",
" \"\"\"Non-realistic wake deficit.\n",
" rad is the wake of the wake, u_inf is undisturbed inflow, and max_def is the max. deficit.\"\"\"\n",
" y, z = spat_df.loc[['y', 'z']].values\n",
" dist_to_cntr = np.sqrt((y - cntr_pos[0])**2 + (z - cntr_pos[1])**2) # distance to center point\n",
" freestream = constant_profile(y, z, u_ref=u_inf) # no wake deficit\n",
" freestream = constant_profile(spat_df, u_ref=u_inf) # no wake deficit\n",
" wake_def = max_def * np.sin(np.pi/2 * (rad - dist_to_cntr) / rad) # sinusoidal\n",
" wake_def = wake_def * (dist_to_cntr < rad) # deficit is not outside of rad\n",
" return np.array(freestream - wake_def) # must return array regardless of input"
......@@ -122,7 +123,7 @@
"plot_vals = [([0, 119], 90, 0.5), ([0, 119], 30, 0.5), ([-30, 80], 90, 0.5), ([60, 130], 90, 2)]\n",
"for iax, (cnt, r, mdef) in enumerate(plot_vals):\n",
" ax = axs[iax//2, iax%2]\n",
" wsp_values = wake_wsp(spat_df.loc['y'], spat_df.loc['z'], cntr_pos=cnt, rad=r, u_inf=u_inf, max_def=mdef)\n",
" wsp_values = wake_wsp(spat_df, cntr_pos=cnt, rad=r, u_inf=u_inf, max_def=mdef)\n",
" cnt = ax.contourf(y, z, wsp_values.reshape(y.size, z.size).T)\n",
" ax.axis('equal')\n",
" plt.colorbar(cnt, ax=ax);\n",
......@@ -137,7 +138,7 @@
"\n",
"Just like we did for the mean wind speed, now let us define a function for the turbulence standard deviation as a function of spatial location (and turbulence component). Note that the sig function must be of the form\n",
"```\n",
"sig_values = sig_func(k, y, z, **kwargs)\n",
"sig_values = sig_func(spat_df, **kwargs)\n",
"```\n",
"where `kwargs` is a dictionary of the keyword arguments for the profile function. (It can also include unused keyword arguments.)"
]
......@@ -148,12 +149,13 @@
"metadata": {},
"outputs": [],
"source": [
"def wake_sig(k, y, z, cntr_pos=[0, 90], rad=50, sig_inf=1.2, max_perc=0.20, **kwargs):\n",
"def wake_sig(spat_df, cntr_pos=[0, 90], rad=50, sig_inf=1.2, max_perc=0.20, **kwargs):\n",
" \"\"\"Non-realistic wake turbulence. sig_inf is the undisturbed standard deviation and max_perc is the\n",
" maximum percentage increase of sigma at the center.\"\"\"\n",
" y, z = spat_df.loc[['y', 'z']].values\n",
" dist_to_cntr = np.sqrt((y - cntr_pos[0])**2 + (z - cntr_pos[1])**2) # distance to center point\n",
" mask = (dist_to_cntr < rad) # points that fall within the wake\n",
" wake_sig = sig_inf * constant_profile(y, z, u_ref=1) # freestream sig\n",
" wake_sig = sig_inf * constant_profile(spat_df, u_ref=1) # freestream sig\n",
" wake_sig[mask] += max_perc*sig_inf * np.sin(np.pi/2 * (rad - dist_to_cntr[mask]) / rad)\n",
" return np.array(wake_sig) # must return array regardless of input"
]
......@@ -168,8 +170,8 @@
"plot_vals = [([-60, 160], 1.2, 0.2), ([10, 100], 3.5, 0.4)]\n",
"for iax, (cntr, sig, mperc) in enumerate(plot_vals):\n",
" ax = axs[iax]\n",
" sig_values = wake_sig(spat_df.loc['k'], spat_df.loc['y'], spat_df.loc['z'],\n",
" cntr_pos=cntr, rad=rad, sig_inf=sig, max_perc=mperc)\n",
" sig_values = wake_sig(spat_df, cntr_pos=cntr, rad=rad, sig_inf=sig,\n",
" max_perc=mperc)\n",
" cnt = ax.contourf(y, z, sig_values.reshape(y.size, z.size).T)\n",
" ax.axis('equal')\n",
" plt.colorbar(cnt, ax=ax);\n",
......
......@@ -59,6 +59,24 @@
"con_tc.index = con_tc.index.map(lambda x: float(x) if (x not in 'kxyz') else x) # index cleaning"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We also need to create the spatial dataframe that defines the area to be simulated. Due to dimensions assumed in the plotting function used in this example, we won't use `gen_spat_grid` but will rather construct the dataframe manually."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y, Z = np.meshgrid([-50, 50], np.linspace(10, 140)) # 2d arrays for contours\n",
"k, x = 0*Y.flatten(), 0*Y.flatten()\n",
"spat_df = pd.DataFrame([k, x, Y.flatten(), Z.flatten()], index=['k', 'x', 'y', 'z'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
......@@ -78,14 +96,13 @@
"metadata": {},
"outputs": [],
"source": [
"# define the points we want to query and call data_profile\n",
"y, z = np.meshgrid([-50, 50], np.linspace(10, 140))\n",
"wsp_intp = data_profile(y.flatten(), z.flatten(), con_tc=con_tc)\n",
"# get the data profile points for interpolation\n",
"wsp_intp = data_profile(spat_df, con_tc=con_tc)\n",
"# calculate the values fron con_tc for comparison\n",
"yp, zp = np.zeros(6), con_tc.loc['z'].unique()\n",
"wspp = con_tc.get_time().filter(regex='u_').mean().values\n",
"# plot the interpolated values\n",
"plot_interp(yp, zp, wspp, y, z, wsp_intp)"
"plot_interp(yp, zp, wspp, Y, Z, wsp_intp)"
]
},
{
......@@ -104,17 +121,17 @@
"outputs": [],
"source": [
"# define the points we want to query and call data_sig\n",
"y, z = np.meshgrid([-50, 50], np.linspace(10, 140))\n",
"plt.figure(1, figsize=(15, 4))\n",
"for k in range(3): # loop through components\n",
" ks = k * np.ones(y.size)\n",
" sig_intp = data_sig(ks, y.flatten(), z.flatten(), con_tc=con_tc)\n",
" kspat_df = spat_df.copy(deep=True) # copy spat_df\n",
" kspat_df.loc['k'] = k # assign component\n",
" sig_intp = data_sig(kspat_df, con_tc=con_tc)\n",
" # calculate the values fron con_tc for comparison\n",
" yp, zp = np.zeros(6), con_tc.loc['z'].unique()\n",
" sigp = con_tc.get_time().filter(regex=f'{\"uvw\"[k]}_').std().values\n",
" # plot the interpolated values\n",
" plt.subplot(1, 3, 1 + k)\n",
" plot_interp(yp, zp, sigp, y, z, sig_intp)\n",
" plot_interp(yp, zp, sigp, Y, Z, sig_intp)\n",
" plt.title(f'Component {\"uvw\"[k]} sig')"
]
},
......@@ -135,20 +152,27 @@
"source": [
"# define the points we want to query and call data_spectrum\n",
"f, f_idx = 0.05, 5 # frequency to look at and its index in the freq vector\n",
"y, z = np.meshgrid([-50, 50], np.linspace(10, 140))\n",
"plt.figure(1, figsize=(14, 10))\n",
"for k in range(3): # loop through components\n",
" ks = k * np.ones(y.size)\n",
" spc_intp = data_spectrum(f, ks, y.flatten(), z.flatten(), con_tc=con_tc)\n",
" kspat_df = spat_df.copy(deep=True) # copy spat_df\n",
" kspat_df.loc['k'] = k # assign component\n",
" spc_intp = data_spectrum(f, kspat_df, con_tc=con_tc)\n",
" # calculate the values fron con_tc for comparison\n",
" yp, zp = np.zeros(6), con_tc.loc['z'].unique()\n",
" mags = 2*np.abs(np.fft.rfft(con_tc.get_time().filter(regex=f'{\"uvw\"[k]}_'), axis=0))**2\n",
" spcp = mags[f_idx, :]\n",
" # plot the interpolated values\n",
" plt.subplot(2, 2, 1 + k)\n",
" plot_interp(yp, zp, spcp, y, z, spc_intp, fmt='%.2e')\n",
" plot_interp(yp, zp, spcp, Y, Z, spc_intp, fmt='%.2e')\n",
" plt.title(f'Component {\"uvw\"[k]} spectrum, f = {f} Hz')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see that the variation with height of the interpolated spectrum matches, as desired. The scaling between the original and interpolated spectra is off, but this is because the spectra should be scaled according to the standard deviation, which we do not do in this example for brevity."
]
}
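{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following sketch (our own addition, not part of PyConTurb's API) rescales each interpolated spectrum so that its integrated variance matches an assumed target standard deviation, `sig_target`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: rescale interpolated spectra so integrated variance matches a target\n",
"time_df = con_tc.get_time()\n",
"df = 1 / (time_df.index[-1] + time_df.index[1]) # freq. step, as in data_spectrum\n",
"freqs = np.arange(1, time_df.shape[0] // 2 + 1) * df # positive frequencies\n",
"spc_all = data_spectrum(freqs, kspat_df, con_tc=con_tc) # (n_f, n_sp)\n",
"sig_target = 1.2 # [m/s] assumed target standard deviation\n",
"spc_scaled = spc_all * sig_target**2 / (spc_all.sum(axis=0) * df) # rescaled spectra"
]
}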
],
"metadata": {
......
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Output formats"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once a turbulence box has been generated, it can be converted to either a HAWC2 turbulence binary file (.bin) or a full-field TurbSim binary file (.bts).\n",
"\n",
"Here is a quick example that generates turbulence, saves it to file and then reloads it.\n",
"\n",
"**Note**: These read/write functions are only applicable to 3D turbulence boxes generated on a grid with a specific column naming convention/order. Please either use `gen_spat_grid` combined with `gen_turb` or be very sure that your column ordering/naming in your dataframe is consistent with PyConTurb's ordering."
]
},
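{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make that naming convention concrete, here is a minimal sketch (the `spat_df_demo`/`turb_df_demo` names are our own) that prints the column names produced by `gen_spat_grid` and `gen_turb`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: inspect the column naming/order the read/write functions expect\n",
"from pyconturb import gen_spat_grid, gen_turb\n",
"spat_df_demo = gen_spat_grid(0, [50, 70]) # two grid points\n",
"turb_df_demo = gen_turb(spat_df_demo, T=20, dt=1, u_ref=12)\n",
"print(turb_df_demo.columns.tolist()) # -> ['u_p0', 'v_p0', 'w_p0', 'u_p1', ...]"
]
},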
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import needed functions\n",
"from pyconturb import gen_spat_grid, gen_turb\n",
"from pyconturb.io import df_to_h2turb, h2turb_to_df, df_to_bts, bts_to_df\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create the turbulence box\n",
"nt, dt = 100, 0.2\n",
"spat_df = gen_spat_grid(0, [50, 70]) # two points\n",
"turb_df = gen_turb(spat_df, T=nt*dt, dt=dt, u_ref=12)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### HAWC2 turbulence format\n",
"\n",
"Because the HAWC2 binary format does not include all necessary information to save/reconstruct the turbulence box (e.g., the grid points), we must also pass `spat_df` into the functions. There are 3 output files that have endings \"u.bin\", \"v.bin\" and \"u.bin\". The `prefix` keyword argument allows you to prepend a string in from of the endings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# save in HAWC2 format (u.bin, v.bin, w.bin)\n",
"path, prefix = '.', 'h2turb_' # path is directory, prefix is prepended to \"u.bin\", \"v.bin\", \"w.bin\"\n",
"df_to_h2turb(turb_df, spat_df, path, prefix=prefix) # save file\n",
"\n",
"# reload file\n",
"h2_df = h2turb_to_df(spat_df, path, prefix=prefix, nt=nt, dt=dt)\n",
"\n",
"# compare the original and reloaded turbulence files\n",
"plt.plot(turb_df.u_p1, label='Original')\n",
"plt.plot(h2_df.u_p1, label='Reloaded')\n",
"plt.legend();"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### TurbSim turbulence format\n",
"\n",
"Unlike the HAWC2 format, the full-field TurbSim binary format stores all three components as well as all information required to reconstruct the turbulence box. However, we are still required to pass in `spat_df` on saving, because we need to encode the spatial information in `spat_df` into the output binary file.\n",
"\n",
"Because TurbSim requires specifying a hub height and corresponding wind speed, the write function allows you to specify those values directly. If no coordinates are given, the center of the grid is used."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# save in TurbSim format (path.bts)\n",
"path = 'ts.bts' # path is name of file (with or without ending)\n",
"df_to_bts(turb_df, spat_df, path, uzhub=None) # assume hub height is in center of box\n",
"\n",
"# reload file\n",
"ts_df = bts_to_df(path)\n",
"\n",
"# compare the original and reloaded turbulence files\n",
"plt.plot(turb_df.u_p1, label='Original')\n",
"plt.plot(ts_df.u_p1, label='Reloaded')\n",
"plt.legend();"
]
},
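{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer to set the hub values yourself, `uzhub` takes a `(uhub, zhub)` pair. A minimal sketch with arbitrary values:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: save with an explicit hub wind speed and height (arbitrary values)\n",
"df_to_bts(turb_df, spat_df, 'ts_hub.bts', uzhub=(12.0, 60.0))"
]
}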
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
......@@ -76,6 +76,15 @@ Here is a quick, step-by-step guide for fixing a bug:
(``git checkout master``, then ``git pull origin master``).
List of contributors
----------------------
The following lovely people have made contributions to PyConTurb:
* Emmanuel Branlard
* Jenni Rinker
Theory
-------
......
"""Input-output module for files related to HAWC2
"""Reading HAWC2 results files (.set and .dat)
Notes
-----
Much of this was copied, modified, and/or cleaned from DTU's Wind Energy
Toolbox.
Author
------
Jenni Rinker
rink@dtu.dk
Toolbox. https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox
"""
import os
......
......@@ -11,7 +11,6 @@ import scipy.interpolate as sciint
_spat_rownames = ['k', 'x', 'y', 'z'] # row names of spatial df
_DEF_KWARGS = {'u_ref': 0, 'z_ref': 90, 'alpha': 0.2, 'turb_class': 'A',
'l_c': 340.2} # lc for coherence ***NOTE THESE OVERWRITE FUNCTION DEFS
_HAWC2_BIN_FMT = '<f' # HAWC2 binary turbulence datatype
_HAWC2_TURB_COOR = {'u': -1, 'v': -1, 'w': 1} # hawc2 turb xyz to uvw
......@@ -81,28 +80,6 @@ def combine_spat_con(spat_df, con_tc, drop_duplicates=True, decimals=10):
return comb_df
def df_to_h2turb(turb_df, spat_df, path, prefix=''):
"""ksec3d-style turbulence dataframe to binary files for hawc2
Notes
-----
* The turbulence must have been generated on a y-z grid.
* The naming convention must be 'u_p0', 'v_p0', 'w_p0', 'u_p1', etc.,
where the point indices proceed vertically along z before horizontally
along y.
"""
nx = turb_df.shape[0] # turbulence dimensions for reshaping
ny = len(set(spat_df.loc['y'].values))
nz = len(set(spat_df.loc['z'].values))
# make and save binary files for all three components
for c in 'uvw':
arr = turb_df.filter(regex=f'{c}_', axis=1).values.reshape((nx, ny, nz))
bin_path = os.path.join(path, f'{prefix}{c}.bin')
with open(bin_path, 'wb') as bin_fid:
arr.astype(np.dtype(_HAWC2_BIN_FMT)).tofile(bin_fid)
return
def gen_spat_grid(y, z, comps=[0, 1, 2]):
"""Generate spat_df (all turbulent components and grid defined by x and z)
......@@ -130,30 +107,6 @@ def get_freq(**kwargs):
return t, freq
def h2turb_to_arr(spat_df, path):
"""raw-load a hawc2 turbulent binary file to numeric array"""
ny, nz = pd.unique(spat_df.loc['y']).size, pd.unique(spat_df.loc['z']).size
bin_arr = np.fromfile(path, dtype=np.dtype(_HAWC2_BIN_FMT))
nx = bin_arr.size // (ny * nz)
if (nx * ny * nz) != bin_arr.size:
raise ValueError('Binary file size does not match spat_df!')
bin_arr.shape = (nx, ny, nz)
return bin_arr
def h2turb_to_df(spat_df, path, prefix=''):
"""load a hawc2 binary file into a pandas datafram with transform to uvw"""
turb_df = pd.DataFrame()
for c in 'uvw':
comp_path = os.path.join(path, f'{prefix}{c}.bin')
arr = h2turb_to_arr(spat_df, comp_path)
nx, ny, nz = arr.shape
comp_df = pd.DataFrame(arr.reshape(nx, ny*nz)).add_prefix(f'{c}_p')
turb_df = turb_df.join(comp_df, how='outer')
turb_df = turb_df[[f'{c}_p{i}' for i in range(2) for c in 'uvw']]
return turb_df
def is_line(points):
"""see if the points given in points fall along a single line"""
# first rotate by first pair to remove issue with vertical lines
......
# version: 1.0 First pass at PyConTurb
# 2.0 Switch to uvw, other changes
# 2.5 Fixed bug in custom profiles
__version__ = '2.5.dev1'
__release__ = '2.5.dev1'
__version__ = '2.6.dev1'
__release__ = '2.6.dev1'
......@@ -10,19 +10,19 @@ def get_coh_mat(freq, spat_df, coh_model='iec', dtype=np.float64, **kwargs):
"""Create coherence matrix for given frequencies and coherence model
"""
if coh_model == 'iec': # IEC coherence model
if 'ed' not in kwargs.keys(): # add IEC ed to kwargs if not passed in
kwargs['ed'] = 3
coh_mat = get_iec_coh_mat(freq, spat_df, dtype=dtype, **kwargs)
elif coh_model == '3d': # 3D coherence model
coh_mat = get_3d_coh_mat(freq, spat_df, dtype=dtype, **kwargs)
coh_mat = get_iec_coh_mat(freq, spat_df, dtype=dtype, coh_model=coh_model,
**kwargs)
elif coh_model == 'iec3d':
coh_mat = get_iec3d_coh_mat(freq, spat_df, dtype=dtype, coh_model=coh_model,
**kwargs)
else: # unknown coherence model
raise ValueError(f'Coherence model "{coh_model}" not recognized.')
raise ValueError(f'Coherence model "{coh_model}" not recognized!')
return coh_mat
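# Example usage (sketch; the spat_df layout mirrors this repo's tests):
#   spat_df = pd.DataFrame([[0, 0], [0, 0], [0, 0], [0, 1]],
#                          index=_spat_rownames, columns=['u_p0', 'u_p1'])
#   coh_mat = get_coh_mat([0.5, 1.0], spat_df, coh_model='iec', u_ref=10, l_c=340.2)
#   coh_mat.shape # -> (2, 2, 2), i.e. (n_f, n_sp, n_sp)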
def chunker(iterable,nPerChunks):
""" Return list of nPerChunks elements of an iterable """
def chunker(iterable, nPerChunks):
"""Return list of nPerChunks elements of an iterable"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, nPerChunks))
......@@ -30,64 +30,60 @@ def chunker(iterable,nPerChunks):
return
yield chunk
def get_iec_coh_mat(freq, spat_df, dtype=np.float64, **kwargs):
"""Create IEC 61400-1 Ed. 3 coherence matrix for given frequencies
def get_iec_coh_mat(freq, spat_df, dtype=np.float64, ed=4, **kwargs):
"""Create IEC 61400-1 Ed. 3/4 coherence matrix for given frequencies
"""
# preliminaries
if kwargs['ed'] != 3: # only allow edition 3
raise ValueError('Only edition 3 is permitted.')
if any([k not in kwargs.keys() for k in ['u_ref', 'l_c']]): # check kwargs
if ed not in [3, 4]: # only allow editions 3 or 4
raise ValueError('Only editions 3 or 4 are permitted.')
if any([k not in kwargs for k in ['u_ref', 'l_c']]): # check kwargs
raise ValueError('Missing keyword arguments for IEC coherence model')
freq = np.array(freq).reshape(1, -1)
n_f, n_s = freq.size, spat_df.shape[1]
# misc storage
xyz = spat_df.loc[['x', 'y', 'z']].values.astype(float)
coh_mat = np.repeat(np.atleast_3d(np.eye((n_s),dtype=dtype)), n_f, axis=2) # TODO use sparse matrix
exp_constant = np.sqrt( (1/ kwargs['u_ref'] * freq)**2 + (0.12 / kwargs['l_c'])**2).astype(dtype)
Icomp = np.arange(n_s)[spat_df.iloc[0, :].values==0] # Selecting only u-components
freq = np.array(freq).reshape(-1, 1) # nf x 1
nf, ns = freq.size, spat_df.shape[1]
# intermediate variables
yz = spat_df.loc[['y', 'z']].values.astype(float)
coh_mat = np.repeat(np.eye((ns), dtype=dtype)[None, :, :], nf, axis=0) # TODO use sparse matrix
exp_constant = np.sqrt((freq / kwargs['u_ref'])**2 + (0.12 / kwargs['l_c'])**2).astype(dtype) # nf x 1
i_comp = np.arange(ns)[spat_df.iloc[0, :].values == 0] # Selecting only u-components
# loop through number of combinations, nPerChunks at a time to reduce memory impact
for ii_jj in chunker(itertools.combinations(Icomp, 2), nPerChunks=10000):
for ii_jj in chunker(itertools.combinations(i_comp, 2), nPerChunks=10000):
# get indices of point-pairs
ii = np.array([tup[0] for tup in ii_jj])
jj = np.array([tup[1] for tup in ii_jj])
# calculate distances and coherences
r = np.sqrt((xyz[1, ii] - xyz[1, jj])**2 + (xyz[2, ii] - xyz[2, jj])**2)
coh_values = np.exp(-12 * np.abs(r.reshape(-1, 1)) * exp_constant)
r = np.sqrt((yz[0, ii] - yz[0, jj])**2 + (yz[1, ii] - yz[1, jj])**2) # nchunk
coh_values = np.exp(-12 * np.abs(r) * exp_constant) # nf x nchunk
# Previous method (same math, different numerics)
# coh_values = np.exp(-12 *
# np.sqrt((r.reshape(-1, 1) / kwargs['u_ref'] * freq)**2
# + (0.12 * r.reshape(-1, 1) / kwargs['l_c'])**2))
coh_mat[ii, jj, :] = coh_values
coh_mat[jj, ii, :] = np.conj(coh_values)
coh_mat[:, ii, jj] = coh_values # nf x nchunk
coh_mat[:, jj, ii] = np.conj(coh_values)
return coh_mat
def get_3d_coh_mat(freq, spat_df, dtype=np.float64, **kwargs):
"""Create coherence matrix with 3d coherence for given frequencies
def get_iec3d_coh_mat(freq, spat_df, dtype=np.float64, **kwargs):
"""Create IEC-flavor coherence but for all 3 components
"""
if any([k not in kwargs.keys() for k in ['u_ref', 'l_c']]): # check kwargs
# preliminaries
if any([k not in kwargs for k in ['u_ref', 'l_c']]): # check kwargs
raise ValueError('Missing keyword arguments for IEC coherence model')
freq = np.array(freq).reshape(-1, 1) # nf x 1
nf, ns = freq.size, spat_df.shape[1]
# intermediate variables
freq = np.array(freq).reshape(1, -1)
n_f, n_s = freq.size, spat_df.shape[1]
# misc storage
xyz = spat_df.loc[['x', 'y', 'z']].values
coh_mat = np.repeat(np.atleast_3d(np.eye((n_s),dtype=dtype)), n_f, axis=2)
# loop through the three components
for (k, lc_scale) in [(0, 1), (1, 2.7 / 8.1), (2, 0.66 / 8.1)]:
Icomp = np.arange(n_s)[spat_df.iloc[0, :].values==k] # Selecting only 1 component
l_c = kwargs['l_c'] * lc_scale
exp_constant = np.sqrt( (1/ kwargs['u_ref'] * freq)**2 + (0.12 / l_c)**2).astype(dtype)
yz = spat_df.loc[['y', 'z']].values.astype(float)
coh_mat = np.repeat(np.eye((ns), dtype=dtype)[None, :, :], nf, axis=0) # TODO use sparse matrix
for ic in range(3):
Lc = kwargs['l_c'] * [1, 2.7/8.1, 0.66/8.1][ic] # per-component length scale
exp_constant = np.sqrt((freq / kwargs['u_ref'])**2 + (0.12 / Lc)**2).astype(dtype) # nf x 1
i_comp = np.arange(ns)[spat_df.iloc[0, :].values == ic] # select only component ic
# loop through number of combinations, nPerChunks at a time to reduce memory impact
for ii_jj in chunker(itertools.combinations(Icomp, 2), nPerChunks=10000):
for ii_jj in chunker(itertools.combinations(i_comp, 2), nPerChunks=10000):
# get indices of point-pairs
ii = np.array([tup[0] for tup in ii_jj])
jj = np.array([tup[1] for tup in ii_jj])
r = np.sqrt((xyz[1, ii] - xyz[1, jj])**2 + (xyz[2, ii] - xyz[2, jj])**2)
coh_values = np.exp(-12 * np.abs(r.reshape(-1, 1)) * exp_constant)
# calculate distances and coherences
r = np.sqrt((yz[0, ii] - yz[0, jj])**2 + (yz[1, ii] - yz[1, jj])**2)
coh_values = np.exp(-12 * np.abs(r) * exp_constant)
# Previous method (same math, different numerics)
#coh_values = np.exp(-12 *
# np.sqrt((r.reshape(-1, 1) / kwargs['u_ref'] * freq)**2
# + (0.12 * r.reshape(-1, 1) / l_c)**2))
coh_mat[ii, jj, :] = coh_values
coh_mat[jj, ii, :] = np.conj(coh_values)
coh_mat[:, ii, jj] = coh_values # nf x nchunk
coh_mat[:, jj, ii] = np.conj(coh_values)
return coh_mat
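# Example usage (sketch): same call as above, but the per-component length scales
# mean v- and w-point pairs also get nonzero coherence:
#   coh_mat = get_coh_mat(0.5, spat_df, coh_model='iec3d', u_ref=10, l_c=340.2)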
"""Input-output for PyConTurb files
"""
import os
from struct import pack, unpack
import time
import numpy as np
import pandas as pd
from pyconturb._version import __version__
_HAWC2_BIN_FMT = '<f' # HAWC2 binary turbulence datatype
_TS_BIN_FMT = '<h4l12fl' # TurbSim binary format
def bts_to_df(path):
"""Load TurbSim-style .bts file to pyconturb dataframe"""
if path.endswith('.bts'): # remove file extension, will be added back later
path = path[:-4]
u_scl = np.zeros(3, np.float32)
u_off = np.zeros(3, np.float32)
with open(path + '.bts', 'rb') as fl:
(_, # periodic (7) or non periodic (8), unused
nz,
ny,
_, # no. tower pts below box, unused
nt,
dz,
dy,
dt,
uhub,
zhub,
z0,
u_scl[0],
u_off[0],
u_scl[1],
u_off[1],
u_scl[2],
u_off[2],
strlen) = unpack(_TS_BIN_FMT, fl.read(70))
desc_str = fl.read(strlen) # skip these bytes
nbt = 3 * ny * nz * nt
dat = np.rollaxis(np.frombuffer(fl.read(2 * nbt), dtype=np.int16).astype(
np.float32).reshape([3, ny, nz, nt], order='F'), 2, 1) # 3 x ny x nz x nt
dat -= u_off[:, None, None, None]
dat /= u_scl[:, None, None, None]
# create pyconturb-flavor dataframe
t = np.arange(nt) * dt
turb_df = pd.DataFrame(index=t)
for i, c in enumerate('uvw'):
arr = np.transpose(dat[i], (2, 0, 1)) # nt x ny x nz
comp_df = pd.DataFrame(arr.reshape(nt, ny*nz),
index=t).add_prefix(f'{c}_p')
turb_df = turb_df.join(comp_df, how='outer')
return turb_df
def df_to_bts(turb_df, spat_df, path, uzhub=None, periodic=True):
"""pyconturb-style turbulence dataframe to TurbSim-style binary file
Code modified based on `turbsim` in PyTurbSim:
https://github.com/lkilcher/pyTurbSim/blob/master/pyts/io/write.py
Notes
-----
* The turbulence must have been generated on a y-z grid.
* The naming convention must be 'u_p0', 'v_p0', 'w_p0', 'u_p1', etc.,
where the point indices proceed vertically along z before horizontally
along y.
"""
if path.endswith('.bts'): # remove file extension, will be added back later
path = path[:-4]
# format-specific constants
intmin = -32768 # minimum integer
intrng = 65535 # range of integers
# calculate intermediate parameters
y = np.sort(np.unique(spat_df.loc['y'].values))
z = np.sort(np.unique(spat_df.loc['z'].values))
ny = y.size # no. of y points in grid
nz = z.size # no. of z points in grid
nt = turb_df.shape[0] # no. of time steps
if y.size == 1:
dy = 0
else:
dy = np.mean(y[1:] - y[:-1]) # mean spacing to smooth out rounding errors
if z.size == 1:
dz = 0
else:
dz = np.mean(z[1:] - z[:-1]) # mean spacing to smooth out rounding errors
dt = turb_df.index[-1] / (turb_df.shape[0] - 1) # time step
if uzhub is None: # default is center of grid
zhub = z[z.size // 2] # halfway up
u_df = turb_df.filter(regex=f'u_', axis=1).values
uhub = u_df[:, u_df.shape[1] // 2].mean() # mean of center of grid
else:
uhub, zhub = uzhub
# convert pyconturb dataframe to pyturbsim format (3 x ny x nz x nt)
ts = np.empty((3, ny, nz, nt))
for i, c in enumerate('uvw'):
arr = turb_df.filter(regex=f'{c}_', axis=1).values.reshape((nt, ny, nz))
ts[i] = np.transpose(arr, (1, 2, 0)) # reorder to ny, nz, nt
# initialize output arrays
u_minmax = np.empty((3, 2), dtype=np.float32) # mins and maxs of time series
u_off = np.empty((3), dtype=np.float32) # offsets of each time series
u_scl = np.empty((3), dtype=np.float32) # scales of each time series
desc_str = 'generated by PyConTurb v%s, %s.' % (
__version__,
time.strftime('%b %d, %Y, %H:%M (%Z)', time.localtime())) # description
# calculate the scales and offsets of each time series
out = np.empty((3, ny, nz, nt), dtype=np.int16)
for ind in range(3):
u_minmax[ind] = ts[ind].min(), ts[ind].max()
if np.isclose(u_minmax[ind][0], u_minmax[ind][1]):
u_scl[ind] = 1
else:
u_scl[ind] = intrng / np.diff(u_minmax[ind])
u_off[ind] = intmin - u_scl[ind] * u_minmax[ind, 0]
out[ind] = (ts[ind] * u_scl[ind] + u_off[ind]).astype(np.int16)
with open(path + '.bts', 'wb') as fl:
# write the header
fl.write(pack(_TS_BIN_FMT,
[7, 8][periodic], # 7 is not periodic, 8 is periodic
nz,
ny,
0, # assuming 0 tower points below grid
nt,
dz,
dy,
dt,
uhub,
zhub,
z[0],
u_scl[0],
u_off[0],
u_scl[1],
u_off[1],
u_scl[2],
u_off[2],
len(desc_str)))
fl.write(desc_str.encode(encoding='UTF-8'))
# Swap the y and z indices so that fortran-order writing agrees with the file format.
# Also, we swap the order of z-axis to agree with the file format.
# Write the data so that the first index varies fastest (F order).
# The indexes vary in the following order:
# component (fastest), y-index, z-index, time (slowest).
fl.write(np.rollaxis(out, 2, 1).tobytes(order='F')) # tobytes: tostring is deprecated
def df_to_h2turb(turb_df, spat_df, path, prefix=''):
"""pyconturb-style turbulence dataframe to binary files for hawc2
Notes
-----
* The turbulence must have been generated on a y-z grid.
* The naming convention must be 'u_p0', 'v_p0', 'w_p0', 'u_p1', etc.,
where the point indices proceed vertically along z before horizontally
along y.
"""
nx = turb_df.shape[0] # turbulence dimensions for reshaping
ny = len(set(spat_df.loc['y'].values))
nz = len(set(spat_df.loc['z'].values))
# make and save binary files for all three components
for c in 'uvw':
arr = turb_df.filter(regex=f'{c}_', axis=1).values.reshape((nx, ny, nz))
bin_path = os.path.join(path, f'{prefix}{c}.bin')
with open(bin_path, 'wb') as bin_fid:
arr.astype(np.dtype(_HAWC2_BIN_FMT)).tofile(bin_fid)
return
def h2turb_to_arr(spat_df, path):
"""Raw-load a hawc2 turbulent binary file to numeric array"""
ny, nz = pd.unique(spat_df.loc['y']).size, pd.unique(spat_df.loc['z']).size
bin_arr = np.fromfile(path, dtype=np.dtype(_HAWC2_BIN_FMT))
nx = bin_arr.size // (ny * nz)
if (nx * ny * nz) != bin_arr.size:
raise ValueError('Binary file size does not match spat_df!')
bin_arr.shape = (nx, ny, nz)
return bin_arr
def h2turb_to_df(spat_df, path, nt=600, dt=1, prefix=''):
"""load a hawc2 binary file into a pandas datafram with transform to uvw"""
t = np.arange(nt) * dt
turb_df = pd.DataFrame(index=t)
for c in 'uvw':
comp_path = os.path.join(path, f'{prefix}{c}.bin')
arr = h2turb_to_arr(spat_df, comp_path)
nx, ny, nz = arr.shape
comp_df = pd.DataFrame(arr.reshape(nx, ny*nz),
index=t).add_prefix(f'{c}_p')
turb_df = turb_df.join(comp_df, how='outer')
turb_df = turb_df[[f'{c}_p{i}' for i in range(ny*nz) for c in 'uvw']]
return turb_df
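# Example round trip (sketch; the path, prefix and dt values here are assumptions):
#   df_to_h2turb(turb_df, spat_df, '.', prefix='demo_') # writes demo_u.bin, etc.
#   h2_df = h2turb_to_df(spat_df, '.', nt=turb_df.shape[0], dt=0.2, prefix='demo_')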
"""Input-output module for files with measurements
Author
------
Jenni Rinker
rink@dtu.dk
"""
import os
import numpy as np
import pandas as pd
from pyconturb.core.helpers import rotate_time_series
# hard-code parameters from .set file format
_v52_mast_hts = np.array([18, 31, 44, 57, 70]) # msmt hts for risø v52 mast
def v52_pickle_to_condata(path):
"""Convert pickled dataframe from V52 met mast to condata for gen_turb
Arguments
---------
path : str
Path to pickled dataframe
Returns
-------
con_data : dict
Collection of 'con_spat_df', which has the spatial information of the
constraining points, and 'con_turb_df', which has the constraining
time series.
"""
# spatial df
con_spat_df = pd.DataFrame(columns=['k', 'p_id', 'x', 'y', 'z'],
index=range(len(_v52_mast_hts) * 3))
for i_ht, ht in enumerate(_v52_mast_hts):
con_spat_df.loc[i_ht*3, :] = ['vxt', f'p{i_ht}', 0, 0, ht]
con_spat_df.loc[i_ht*3 + 1, :] = ['vyt', f'p{i_ht}', 0, 0, ht]
con_spat_df.loc[i_ht*3 + 2, :] = ['vzt', f'p{i_ht}', 0, 0, ht]
# load raw data from pickle
time_df = pd.read_pickle(path)
time = np.arange(time_df.shape[0]) * 600. / time_df.shape[0]
# conturb df
conturb_df = pd.DataFrame(index=time,
columns=con_spat_df.k + '_' + con_spat_df.p_id)
for i_ht, ht in enumerate(_v52_mast_hts):
x, y, z = [time_df[f'{c}_{ht:.0f}m'] for c in ['X', 'Y', 'Z']]
u, v, w = rotate_time_series(x.values, y.values, z.values)
conturb_df[f'vxt_p{i_ht}'] = -u
conturb_df[f'vyt_p{i_ht}'] = -v
conturb_df[f'vzt_p{i_ht}'] = w
# assemble both to dictionary
con_data = {'con_spat_df': con_spat_df,
'con_turb_df': conturb_df}
return con_data
# -*- coding: utf-8 -*-
"""Command-line script for converting V52 met mast file to HAWC2 turbulence box
Usage
-----
$ python metmast_to_h2turb.py ny nz sy sz zc sim_type path_mmast path_turbdir
(see comments in script for what these mean)
"""
import sys
import numpy as np
from pyconturb.core.simulation import gen_turb
from pyconturb.core.helpers import df_to_hawc2, gen_spat_grid
from pyconturb.io.data_conversion import v52_pickle_to_condata
if __name__ == '__main__':
inp_args = sys.argv # get list of input arguments
# default parameters for optional inputs
seed = None # random seed
turb_name = 'turb_' # prefix for turbulence box name when saved
mem_gb = 0.20 # amount of memory to use in gigabytes
verbose = False # print out arguments during simulation [-]
T_sim = 600 # length of time to simulate [s]
dt_sim = 0.10 # desired simulation time step [s]
i_hub = 2 # index of hub height measurement [-]
scale = True # scale spectra to ensure correct std dev [-]
# load values from command line
ny = int(inp_args[1]) # no. points in y direction
nz = int(inp_args[2]) # no. points in z direction
sy = float(inp_args[3]) # lateral box size [m]
sz = float(inp_args[4]) # vertical box size [m]
zc = float(inp_args[5]) # height of box center [m]
sim_type = inp_args[6] # unconstr or constr simltn ('unc' or 'con')
path_metmast = inp_args[7] # path to met mast pickle file
path_turb = inp_args[8] # path to directory to save turb (no trailing \)
# calculate coherence length scale from box center
l_c = 8.1 * (42 * (zc > 60) + 0.7 * zc * (zc <= 60))
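# (Lambda_1 = 42 m for zc > 60 m, else 0.7*zc; l_c = 8.1*Lambda_1 per IEC 61400-1)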
# create spatial dataframe for pyconturb
y = np.linspace(-sy/2, sy/2, ny)
z = np.linspace(-sz/2, sz/2, nz) + zc
spat_df = gen_spat_grid(y, z)
# calculate hub-height wind speed for both unc and con cases
# (note this is only used for coherence calculations -- the mean wind speed
# profile is defined in the htc file so the turb box is zero-mean)
con_data = v52_pickle_to_condata(path_metmast)
con_spat_df = con_data['con_spat_df']
con_turb_df = con_data['con_turb_df']
v_hub = -np.mean(con_turb_df[f'vxt_p{i_hub:.0f}'].values)
# calculate reference turbulence intensity
sig_u = np.std(con_turb_df[f'vxt_p{i_hub:.0f}'].values)
i_ref = sig_u / (0.75 * v_hub + 5.6) # eq. 11 in IEC 61400-1
# assign keyword arguments
kwargs = {'v_hub': v_hub, 'ed': 3, 'l_c': l_c, 'z_hub': zc,
'T': T_sim, 'dt': dt_sim, 'i_ref': i_ref}
# assign values for unconstrained case
if sim_type == 'unc':
con_data = None
coh_model, spc_model, wsp_model = 'iec', 'kaimal', 'none'
# assign values for constrained case
elif sim_type == 'con':
kwargs['method'] = 'z_interp' # how to get spectral values @ other hts
coh_model, spc_model, wsp_model = 'iec', 'data', 'none'
# resample constraint data to match dt_sim and T_sim
t_sim = np.arange(0, T_sim, dt_sim) # new time vector
con_turb_df = con_turb_df.reindex(index=t_sim) # add nans
con_turb_df = con_turb_df.interpolate(
method='linear').fillna(method='bfill') # interpolate nans
con_turb_df = con_turb_df.loc[t_sim] # pull out only sim times
# throw error if unrecognized input
else:
raise ValueError(f'Unrecognized simulation type "{sim_type}"')
# simulate turbulence
turb_df = gen_turb(spat_df, con_data=con_data,
coh_model=coh_model, spc_model=spc_model,
wsp_model=wsp_model, scale=scale,
seed=seed, mem_gb=mem_gb, verbose=verbose,
**kwargs)
# save in hawc2 format
df_to_hawc2(turb_df, spat_df, path_turb,
prefix=turb_name)
......@@ -14,7 +14,7 @@ def get_sig_values(spat_df, sig_func, **kwargs):
The ``sig_func`` must be a function of the form::
sig_values = sig_func(k, y, z, **kwargs)
sig_values = sig_func(spat_df, **kwargs)
where ``spat_df`` is a pandas.DataFrame of spatial information. You can use
the functions built into PyConTurb (see below) or define your own custom
......@@ -38,10 +38,10 @@ def get_sig_values(spat_df, sig_func, **kwargs):
[m/s] Turbulence standard deviation(s) for the given spatial locations(s)
/component(s). Dimension is ``(n_sp,)``.
"""
return sig_func(spat_df.loc['k'], spat_df.loc['y'], spat_df.loc['z'], **kwargs)
return sig_func(spat_df, **kwargs)
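# Example (sketch): any callable with signature sig_func(spat_df, **kwargs) works,
# e.g., a constant standard deviation of 1.2 m/s at every point/component:
#   my_sig = lambda spat_df, **kwargs: 1.2 * np.ones(spat_df.shape[1])
#   sig_values = get_sig_values(spat_df, my_sig)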
def data_sig(k, y, z, con_tc=None, **kwargs):
def data_sig(spat_df, con_tc=None, **kwargs):
"""Turbulence standard deviation interpolated from a TimeConstraint object.
See the Examples and/or Reference Guide for details on the interpolator logic or for
......@@ -50,14 +50,11 @@ def data_sig(k, y, z, con_tc=None, **kwargs):
Parameters
----------
k : array-like
[-] Integer indicator of the turbulence component. 0=u, 1=v, 2=w.
y : array-like
[m] Location of point(s) in the lateral direction. Can be int/float,
np.array or pandas.Series.
z : array-like
[m] Location of point(s) in the vertical direction. Can be int/float,
np.array or pandas.Series.
spat_df : pandas.DataFrame
Spatial information on the points to simulate. Must have rows
``[k, x, y, z]``, and each of the ``n_sp`` columns corresponds
to a different spatial location and turbulence component (u, v or
w).
con_tc : pyconturb.TimeConstraint
[-] Constraint object. Must have correct format; see documentation on
PyConTurb's TimeConstraint object for more details.
......@@ -72,10 +69,10 @@ def data_sig(k, y, z, con_tc=None, **kwargs):
"""
if con_tc is None:
raise ValueError('No data provided!')
k, y, z = np.array(k, dtype=int), np.array(y), np.array(z)
k, y, z = spat_df.loc[['k', 'y', 'z']].values
out_array = np.empty_like(y, dtype=float)
for kval in np.unique(k): # loop over passed-in components
out_mask = (k == kval)
out_mask = np.isclose(k, kval)
con_mask = (con_tc.loc['k'] == kval).values
ypts = con_tc.iloc[2, con_mask].values.astype(float)
zpts = con_tc.iloc[3, con_mask].values.astype(float)
......@@ -84,16 +81,16 @@ def data_sig(k, y, z, con_tc=None, **kwargs):
return out_array
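# Example (sketch): interpolated sig for a single u-point at (y, z) = (0, 75),
# assuming con_tc is an already-loaded TimeConstraint:
#   spat_df = pd.DataFrame([[0], [0], [0], [75]], index=['k', 'x', 'y', 'z'])
#   sig_u = data_sig(spat_df, con_tc=con_tc)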
def iec_sig(k, y, z, turb_class=_DEF_KWARGS['turb_class'], **kwargs):
def iec_sig(spat_df, turb_class=_DEF_KWARGS['turb_class'], **kwargs):
"""Turbulence standard deviation as specified in IEC 61400-1 Ed. 3.
Parameters
----------
k : array-like
[-] Integer indicator of the turbulence component. 0=u, 1=v, 2=w.
y : array-like
[m] Location of point(s) in the lateral direction. Can be int/float,
np.array or pandas.Series.
spat_df : pandas.DataFrame
Spatial information on the points to simulate. Must have rows
``[k, x, y, z]``, and each of the ``n_sp`` columns corresponds
to a different spatial location and turbulence component (u, v or
w).
......@@ -111,6 +108,8 @@ def iec_sig(k, y, z, turb_class=_DEF_KWARGS['turb_class'], **kwargs):
kwargs = {**{'turb_class': turb_class}, **kwargs} # add dflts if not given
assert kwargs['turb_class'].lower() in 'abc', 'Invalid or no turbulence class!'
i_ref = {'a': 0.16, 'b': 0.14, 'c': 0.12}[kwargs['turb_class'].lower()]
k = spat_df.loc[['k']].values.astype(int)
y, z = spat_df.loc[['y', 'z']].values
sig1 = i_ref * (0.75 * kwargs['u_ref'] + 5.6) # std dev in u
sig_k = sig1 * np.asarray(1.0 * (k == 0) + 0.8 * (k == 1) + 0.5 * (k == 2))
return np.array(sig_k, dtype=float)
return np.array(sig_k, dtype=float).squeeze()
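# Example (sketch): class-A sig for the u, v and w components at one point:
#   spat_df = pd.DataFrame([[0, 1, 2], [0]*3, [0]*3, [90]*3],
#                          index=['k', 'x', 'y', 'z'])
#   iec_sig(spat_df, turb_class='a', u_ref=10) # -> approx. [2.096, 1.677, 1.048]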
......@@ -39,7 +39,7 @@ def gen_turb(spat_df, T=600, dt=1, con_tc=None, coh_model='iec',
Optional constraining data for the simulation. The TimeConstraint object is built
into PyConTurb; see documentation for more details.
coh_model : str, optional
Spatial coherence model specifier. Default is IEC 61400-1.
Spatial coherence model specifier. Default is IEC 61400-1, Ed. 4.
wsp_func : function, optional
Function to specify spatial variation of mean wind speed. See details
in `Mean wind speed profiles` section.
......@@ -81,12 +81,9 @@ def gen_turb(spat_df, T=600, dt=1, con_tc=None, coh_model='iec',
if verbose:
print('Beginning turbulence simulation...')
# if con_data passed in, raise an error (option is deprecated)
if ('con_data' in kwargs) and (con_tc is None): # don't use con_data please
warnings.warn('The con_data option is deprecated and will be removed in future' +
' versions. Please see the documentation for how to specify' +
' time constraints.',
DeprecationWarning, stacklevel=2)
con_tc = TimeConstraint().from_con_data(kwargs['con_data'])
if 'con_data' in kwargs:
raise ValueError('The "con_data" option is deprecated and can no longer be used.'
' Please see documentation for updated usage.')
# if asked to interpolate but no data given, raise an error
if (((interp_data == 'all') or isinstance(interp_data, list)) and (con_tc is None)):
raise ValueError('If "interp_data" is not "none", constraints must be given!')
......@@ -166,8 +163,9 @@ def gen_turb(spat_df, T=600, dt=1, con_tc=None, coh_model='iec',
**kwargs)
# assemble "sigma" matrix, which is coh matrix times mag arrays
sigma = np.einsum('i,j->ij', all_mags[i_f, :],
all_mags[i_f, :]) * all_coh_mat[:, :, i_f % nf_chunk]
coh_mat = all_coh_mat[i_f % nf_chunk] # ns x ns
sigma = (all_mags[i_f].reshape(-1, 1) *
all_mags[i_f].reshape(1, -1)) * coh_mat # ns x ns
# get cholesky decomposition of sigma matrix
cor_mat = np.linalg.cholesky(sigma)
......
......@@ -16,10 +16,9 @@ def get_spec_values(f, spat_df, spec_func, **kwargs):
The ``spec_func`` must be a function of the form::
spec_values = spec_func(f, k, y, z, **kwargs)
spec_values = spec_func(f, spat_df, **kwargs)
where f, k, y and z can be floats, np.arrays or pandas.Series. You can use
the functions built into PyConTurb (see below) or define your own custom
You can use the functions built into PyConTurb (see below) or define your own custom
function. The output is assumed to be in (m^2/s^2)/Hz = m^2/s. There is no
need to scale to the correct variance -- the spectrum is scaled during simulation
in order to produce the standard deviation specified by ``sig_func``.
......@@ -45,10 +44,10 @@ def get_spec_values(f, spat_df, spec_func, **kwargs):
[m^2/s] PSD values for the given spatial locations(s)/component(s).
Dimension is ``(n_f, n_sp,)``.
"""
return spec_func(f, spat_df.loc['k'], spat_df.loc['y'], spat_df.loc['z'], **kwargs)
return spec_func(f, spat_df, **kwargs)
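# Example (sketch): a custom spec_func returning the required (n_f, n_sp) shape,
# here a flat (white) spectrum at every point/component:
#   def my_spec(f, spat_df, **kwargs):
#       return np.ones((np.size(f), spat_df.shape[1]))
#   spec_values = get_spec_values([0.1, 0.2], spat_df, my_spec)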
def data_spectrum(f, k, y, z, con_tc=None, **kwargs):
def data_spectrum(f, spat_df, con_tc=None, **kwargs):
"""Power spectrum interpolated from a TimeConstraint object.
See the Examples and/or Reference Guide for details on the interpolator logic or for
......@@ -59,15 +58,11 @@ def data_spectrum(f, k, y, z, con_tc=None, **kwargs):
----------
f : array-like
[Hz] Frequency(s) for which PSD is to be calculated. Size is ``n_f``.
k : array-like
[-] Integer indicator of the turbulence component. 0=u, 1=v, 2=w. Size is
``n_sp``.
y : array-like
[m] Location of point(s) in the lateral direction. Can be int/float,
np.array or pandas.Series.
z : array-like
[m] Location of point(s) in the vertical direction. Can be int/float,
np.array or pandas.Series.
spat_df : pandas.DataFrame
Spatial information on the points to simulate. Must have rows
``[k, x, y, z]``, and each of the ``n_sp`` columns corresponds
to a different spatial location and turbulence component (u, v or
w).
con_tc : pyconturb.TimeConstraint
[-] Constraint object. Must have correct format; see documentation on
PyConTurb's TimeConstraint object for more details.
......@@ -85,12 +80,13 @@ def data_spectrum(f, k, y, z, con_tc=None, **kwargs):
# get time array and isolate/sanitize useful variables
time_df = con_tc.get_time()
df, nt = 1 / (time_df.index[-1] + time_df.index[1]), time_df.shape[0]
f_reqs, k = np.array(f, ndmin=1), np.array(k, ndmin=1, dtype=int)
y, z = np.array(y, ndmin=1, dtype=float), np.array(z, ndmin=1, dtype=float)
f_reqs = np.array(f, ndmin=1)
k, y, z = spat_df.loc[['k', 'y', 'z']].values
# initialize spectral values
spec_values = np.empty((f_reqs.size, y.size), dtype=float)
for kval in np.unique(k): # loop over the unique requested k values
spec_mask = (k == kval) # cols in spec_values corr. to this kval
spec_mask = np.isclose(k, kval) # cols in spec_values corr. to this kval
con_mask = (con_tc.loc['k'] == kval).values
ypts = con_tc.iloc[2, con_mask].values.astype(float)
zpts = con_tc.iloc[3, con_mask].values.astype(float)
......@@ -105,7 +101,7 @@ def data_spectrum(f, k, y, z, con_tc=None, **kwargs):
return spec_values
def kaimal_spectrum(f, k, y, z, u_ref=_DEF_KWARGS['u_ref'], **kwargs):
def kaimal_spectrum(f, spat_df, u_ref=_DEF_KWARGS['u_ref'], **kwargs):
"""Kaimal PSD as specified in IEC 61400-1 Ed. 3.
f is (n_f,); spat_df has n_sp columns; u_ref is float or int. Returns (n_f, n_sp).
No std scaling -- that's done with the magnitudes.
......@@ -114,14 +110,11 @@ def kaimal_spectrum(f, k, y, z, u_ref=_DEF_KWARGS['u_ref'], **kwargs):
----------
f : array-like
[Hz] Frequency(s) for which PSD is to be calculated. Size is ``n_f``.
k : array-like
[-] Integer indicator of the turbulence component. 0=u, 1=v, 2=w.
y : array-like
[m] Location of point(s) in the lateral direction. Can be int/float,
np.array or pandas.Series.
z : array-like
[m] Location of point(s) in the vertical direction. Can be int/float,
np.array or pandas.Series.
spat_df : pandas.DataFrame
Spatial information on the points to simulate. Must have rows
``[k, x, y, z]``, and each of the ``n_sp`` columns corresponds
to a different spatial location and turbulence component (u, v or
w).
u_ref : int/float, optional
[m/s] Mean wind speed at reference height.
**kwargs
......@@ -134,10 +127,10 @@ def kaimal_spectrum(f, k, y, z, u_ref=_DEF_KWARGS['u_ref'], **kwargs):
Dimension is ``(n_f, n_sp,)``.
"""
kwargs = {**{'u_ref': u_ref}, **kwargs} # add dflts if not given
k, y, z = [np.asarray(x) for x in (k, y, z)] # in case pd.series passed in
k, y, z = spat_df.loc[['k', 'y', 'z']].values # extract rows as arrays
f = np.reshape(f, (-1, 1)) # convert to column array
lambda_1 = 0.7 * z * (z < 60) + 42 * (z >= 60) # length scale changes with z
l_k = lambda_1 * (8.1 * (k == 0) + 2.7 * (k == 1) + 0.66 * (k == 2))
l_k = lambda_1 * (8.1 * np.isclose(k, 0) + 2.7 * np.isclose(k, 1) + 0.66 * np.isclose(k, 2))
tau = np.reshape((l_k / kwargs['u_ref']), (1, -1)) # L_k / U. row vector
spec_values = (4 * tau) / np.power(1. + 6 * tau * f, 5. / 3.) # Kaimal 1972
return spec_values.astype(float) # pandas causes object issues, ensure float
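# Example (sketch): Kaimal PSD for a single u-point at z = 90 m:
#   spat_df = pd.DataFrame([[0], [0], [0], [90]], index=['k', 'x', 'y', 'z'])
#   kaimal_spectrum([0.05, 0.1], spat_df, u_ref=10).shape # -> (2, 1)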
......@@ -17,116 +17,109 @@ from pyconturb.coherence import get_coh_mat
from pyconturb._utils import gen_spat_grid, _spat_rownames
def test_main_default():
"""Check the default value for get_coherence
# ========================== tests for get_coh_mat ==========================
def test_get_coh_mat_default():
"""Check the default value is IEC Ed. 3, standard coh
"""
# given
spat_df = pd.DataFrame([[0, 0], [0, 0], [0, 0], [0, 1]], index=_spat_rownames, columns=['u_p0', 'u_p1'])
freq = 1
kwargs = {'u_ref': 1, 'l_c': 1}
coh_theory = np.array([[1, 5.637379774e-6],
[5.637379774e-6, 1]])
coh_theo = np.array([[1, 5.637379774e-6],
[5.637379774e-6, 1]])
# when
coh = get_coh_mat(freq, spat_df, **kwargs)[:, :, 0]
coh_mat = get_coh_mat(freq, spat_df, **kwargs)
coh_out = coh_mat[0]
# then
np.testing.assert_allclose(coh, coh_theory)
np.testing.assert_allclose(coh_out, coh_theo)
def test_main_badcohmodel():
def test_get_coh_mat_badcohmodel():
"""Should raise an error if a wrong coherence model is passed in
"""
# given
spat_df = pd.DataFrame([[0], [0], [0], [0]], index=_spat_rownames, columns=['u_p0'])
freq = 1
coh_model = 'garbage'
# when & then
with pytest.raises(ValueError):
get_coh_mat(spat_df, freq, coh_model='garbage')
get_coh_mat(freq, spat_df, coh_model=coh_model)
# ========================== tests for get_iec_coh_mat ==========================
def test_iec_badedition():
"""IEC coherence should raise an error if any edn other than 3 is given
def test_get_iec_coh_mat_badedition():
"""should raise an error if any edn other than 3 or 4 is given
"""
# given
spat_df = pd.DataFrame([[0], [0], [0], [0]], index=_spat_rownames, columns=['u_p0'])
freq, coh_model = 1, 'iec'
kwargs = {'ed': 4, 'u_ref': 12, 'l_c': 340.2}
# when & then
with pytest.raises(ValueError):
get_coh_mat(spat_df, freq, **kwargs)
def test_iec_missingkwargs():
"""IEC coherence should raise an error if missing parameter(s)
"""
# given
spat_df = pd.DataFrame([[0, 0], [0, 0], [0, 0], [0, 1]], index=_spat_rownames, columns=['u_p0', 'u_p1'])
freq, kwargs, coh_model = 1, {'ed': 3, 'u_ref': 12}, 'iec'
# when & then
with pytest.raises(ValueError):
get_coh_mat(freq, spat_df, coh_model=coh_model, **kwargs)
# when
eds = [3, 4, 5]
for ed in eds:
kwargs = {'ed': ed, 'u_ref': 12, 'l_c': 340.2}
if ed > 4: # expect error
with pytest.raises(ValueError):
get_coh_mat(freq, spat_df, coh_model=coh_model, **kwargs)
else:
get_coh_mat(freq, spat_df, coh_model=coh_model, **kwargs)
def test_3d_missingkwargs():
def test_get_iec_coh_mat_missingkwargs():
"""IEC coherence should raise an error if missing parameter(s)
"""
# given
spat_df = pd.DataFrame([[0, 0], [0, 0], [0, 0], [0, 1]], index=_spat_rownames, columns=['u_p0', 'u_p1'])
freq, kwargs, coh_model = 1, {'ed': 3, 'u_ref': 12}, '3d'
freq, kwargs, coh_model = 1, {'ed': 3, 'u_ref': 12}, 'iec' # missing l_c
# when & then
with pytest.raises(ValueError):
get_coh_mat(freq, spat_df, coh_model=coh_model, **kwargs)
def test_iec_value():
"""Verify that the value of IEC coherence matches theory
def test_get_iec_coh_mat_value_dtype():
"""Verify that the value and datatype of IEC coherence matches theory
"""
# 1: same comp, 2: diff comp
for comp2, coh2 in [(0, 0.0479231144), (1, 0)]:
# given
spat_df = pd.DataFrame([[0, comp2], [0, 0], [0, 0], [0, 1]],
index=_spat_rownames, columns=['u_p0', 'x_p1'])
freq = 0.5
kwargs = {'ed': 3, 'u_ref': 2, 'l_c': 3, 'coh_model': 'iec'}
coh_theory = np.array([[1., coh2], [coh2, 1.]])
# when
coh = get_coh_mat(freq, spat_df, **kwargs)[:, :, 0]
# then
np.testing.assert_allclose(coh, coh_theory)
def test_3d_value():
"""Verify that the value of 3d coherence matches theory"""
# 1: same comp, 2: diff comp
for comp, coh2 in [(0, 0.0479231144), (1, 0.0358754554), (2, 0.0013457414)]:
# given (for both sets of functions)
spat_df = pd.DataFrame([[0, 1, 2, 0, 1, 2],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1]],
index=_spat_rownames)
# -------------------- standard IEC (no 3d), mult freqs --------------------
coh_model = 'iec'
dtypes = [np.float64, np.float32]
for dtype in dtypes:
# given
spat_df = pd.DataFrame([[comp, comp], [0, 0], [0, 0], [0, 1]],
index=_spat_rownames, columns=['x_p0', 'y_p1'])
freq = 0.5
kwargs = {'u_ref': 2, 'l_c': 3, 'coh_model': '3d'}
coh_theory = np.array([[1., coh2], [coh2, 1.]])
freq, u_ref, l_c = [0.5, 1], 2, 3
coh_theo = np.tile(np.eye(6), (2, 1, 1))
for i in range(2):
coh_theo[i, 0, 3] = np.exp(-12*np.sqrt((freq[i]/u_ref)**2+(0.12/l_c)**2))
coh_theo[i, 3, 0] = np.exp(-12*np.sqrt((freq[i]/u_ref)**2+(0.12/l_c)**2))
kwargs = {'ed': 3, 'u_ref': u_ref, 'l_c': l_c, 'coh_model': coh_model}
# when
coh = get_coh_mat(freq, spat_df, **kwargs)[:, :, 0]
coh_out = get_coh_mat(freq, spat_df, dtype=dtype, **kwargs)
# then
np.testing.assert_allclose(coh, coh_theory, atol=1e-6)
np.testing.assert_allclose(coh_out, coh_theo, atol=1e-6)
assert coh_out.dtype == dtype
@pytest.mark.slow # mark this as a slow test
@pytest.mark.skipci # don't run in CI
def test_verify_iec_sim_coherence():
"""check that the simulated box has the right coherence
def test_get_iec_coh_mat_verify_coherence():
"""check that the simulated box has the right coherence (requires many simulations)
"""
# given
y, z = [0], [70, 80]
spat_df = gen_spat_grid(y, z)
kwargs = {'u_ref': 10, 'turb_class': 'B', 'l_c': 340.2, 'z_ref': 70,
'T': 300, 'dt': 100}
'T': 300, 'dt': 100, 'ed': 3}
coh_model = 'iec'
n_real = 1000 # number of realizations in ensemble
coh_thresh = 0.12 # coherence threshold
# get theoretical coherence
idcs = np.triu_indices(spat_df.shape[1], k=1)
coh_theo = get_coh_mat(1 / kwargs['T'], spat_df, coh_model=coh_model,
**kwargs)[idcs].flatten()
**kwargs)[0][idcs]
# when
ii_jj = [(i, j) for (i, j) in
itertools.combinations(np.arange(spat_df.shape[1]), 2)] # pairwise indices
......@@ -145,12 +138,63 @@ def test_verify_iec_sim_coherence():
# then
assert max_coh_diff < coh_thresh
# ========================== tests for get_iec3d_coh_mat ==========================
def test_get_iec3d_coh_mat_value_dtype():
"""Verify that the value and datatype of 3d coherence matches theory
"""
spat_df = pd.DataFrame([[0, 1, 2, 0, 1, 2],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1]],
index=_spat_rownames)
# -------------------- IEC 3d, single freq --------------------
coh_model = 'iec3d'
dtypes = [np.float64, np.float32]
for dtype in dtypes:
# given
freqs, u_ref, l_c = 0.5, 2, 3
kwargs = {'ed': 3, 'u_ref': u_ref, 'l_c': l_c, 'coh_model': coh_model}
coh_theo = np.eye(6)
coh_theo[0, 3] = np.exp(-12*np.sqrt((freqs/u_ref)**2+(0.12/l_c)**2))
coh_theo[3, 0] = coh_theo[0, 3]
coh_theo[1, 4] = np.exp(-12*np.sqrt((freqs/u_ref)**2+(0.12/l_c*8.1/2.7)**2))
coh_theo[4, 1] = coh_theo[1, 4]
coh_theo[2, 5] = np.exp(-12*np.sqrt((freqs/u_ref)**2+(0.12/l_c*8.1/0.66)**2))
coh_theo[5, 2] = coh_theo[2, 5]
# when
coh_mat = get_coh_mat(freqs, spat_df, dtype=dtype, **kwargs)
coh_out = coh_mat[0]
# then
np.testing.assert_allclose(coh_out, coh_theo, atol=1e-6)
assert coh_out.dtype == dtype
def test_get_iec3d_coh_mat_missingkwargs():
"""IEC coherence should raise an error if missing parameter(s)
"""
# given
spat_df = pd.DataFrame([[0, 0], [0, 0], [0, 0], [0, 1]], index=_spat_rownames, columns=['u_p0', 'u_p1'])
freq, kwargs, coh_model = 1, {'ed': 3, 'u_ref': 12}, 'iec3d' # missing l_c
# when & then
with pytest.raises