
Commit ba96ce57 authored by Emmanuel Branlard

Merge remote-tracking branch 'upstream/master' into f/dtype

parents 763d9f4a 062967f8
Pipeline #13870 passed with stage in 1 minute and 13 seconds
[run]
omit =
pyconturb/tests/*
pyconturb/io/*
pyconturb/_hawcres_io.py
pyconturb/_version.py
[report]
fail_under = 70
\ No newline at end of file
# debugging files
# repo-specific ignores files
DELETEME*
junk/*
*.bts
*.bin
pyconturb/tests/fast_sims/*
# Byte-compiled / optimized / DLL files
__pycache__/
......
@@ -36,5 +36,6 @@ Contents:
installation
overview
notebooks/outputs
reference_guide
examples
\ No newline at end of file
@@ -84,7 +84,7 @@
"\n",
"Now let's define our custom functions for the spatial variation of the mean wind speed. Note that the wind speed function must be of the form\n",
"```\n",
"wsp_values = wsp_func(y, z, **kwargs)\n",
"wsp_values = wsp_func(spat_df, **kwargs)\n",
"```\n",
"where `kwargs` is a dictionary of the keyword arguments for the profile function. (It can also include unused keyword arguments.)"
]
@@ -95,11 +95,12 @@
"metadata": {},
"outputs": [],
"source": [
"def wake_wsp(y, z, cntr_pos=[0, 90], rad=50, u_inf=10, max_def=0.5, **kwargs):\n",
"def wake_wsp(spat_df, cntr_pos=[0, 90], rad=50, u_inf=10, max_def=0.5, **kwargs):\n",
" \"\"\"Non-realistic wake deficit.\n",
" rad is the wake of the wake, u_inf is undisturbed inflow, and max_def is the max. deficit.\"\"\"\n",
" y, z = spat_df.loc[['y', 'z']].values\n",
" dist_to_cntr = np.sqrt((y - cntr_pos[0])**2 + (z - cntr_pos[1])**2) # distance to center point\n",
" freestream = constant_profile(y, z, u_ref=u_inf) # no wake deficit\n",
" freestream = constant_profile(spat_df, u_ref=u_inf) # no wake deficit\n",
" wake_def = max_def * np.sin(np.pi/2 * (rad - dist_to_cntr) / rad) # sinusoidal\n",
" wake_def = wake_def * (dist_to_cntr < rad) # deficit is not outside of rad\n",
" return np.array(freestream - wake_def) # must return array regardless of input"
@@ -122,7 +123,7 @@
"plot_vals = [([0, 119], 90, 0.5), ([0, 119], 30, 0.5), ([-30, 80], 90, 0.5), ([60, 130], 90, 2)]\n",
"for iax, (cnt, r, mdef) in enumerate(plot_vals):\n",
" ax = axs[iax//2, iax%2]\n",
" wsp_values = wake_wsp(spat_df.loc['y'], spat_df.loc['z'], cntr_pos=cnt, rad=r, u_inf=u_inf, max_def=mdef)\n",
" wsp_values = wake_wsp(spat_df, cntr_pos=cnt, rad=r, u_inf=u_inf, max_def=mdef)\n",
" cnt = ax.contourf(y, z, wsp_values.reshape(y.size, z.size).T)\n",
" ax.axis('equal')\n",
" plt.colorbar(cnt, ax=ax);\n",
@@ -137,7 +138,7 @@
"\n",
"Just like we did for the mean wind speed, now let us define a function for the turbulence standard deviation as a function of spatial location (and turbulence component). Note that the sig function must be of the form\n",
"```\n",
"sig_values = sig_func(k, y, z, **kwargs)\n",
"sig_values = sig_func(spat_df, **kwargs)\n",
"```\n",
"where `kwargs` is a dictionary of the keyword arguments for the profile function. (It can also include unused keyword arguments.)"
]
@@ -148,12 +149,13 @@
"metadata": {},
"outputs": [],
"source": [
"def wake_sig(k, y, z, cntr_pos=[0, 90], rad=50, sig_inf=1.2, max_perc=0.20, **kwargs):\n",
"def wake_sig(spat_df, cntr_pos=[0, 90], rad=50, sig_inf=1.2, max_perc=0.20, **kwargs):\n",
" \"\"\"Non-realistic wake turbulence. sig_inf is the undisturbed standard deviation and max_perc is the\n",
" maximum percentage increase of sigma at the center.\"\"\"\n",
" y, z = spat_df.loc[['y', 'z']].values\n",
" dist_to_cntr = np.sqrt((y - cntr_pos[0])**2 + (z - cntr_pos[1])**2) # distance to center point\n",
" mask = (dist_to_cntr < rad) # points that fall within the wake\n",
" wake_sig = sig_inf * constant_profile(y, z, u_ref=1) # freestream sig\n",
" wake_sig = sig_inf * constant_profile(spat_df, u_ref=1) # freestream sig\n",
" wake_sig[mask] += max_perc*sig_inf * np.sin(np.pi/2 * (rad - dist_to_cntr[mask]) / rad)\n",
" return np.array(wake_sig) # must return array regardless of input"
]
@@ -168,8 +170,8 @@
"plot_vals = [([-60, 160], 1.2, 0.2), ([10, 100], 3.5, 0.4)]\n",
"for iax, (cntr, sig, mperc) in enumerate(plot_vals):\n",
" ax = axs[iax]\n",
" sig_values = wake_sig(spat_df.loc['k'], spat_df.loc['y'], spat_df.loc['z'],\n",
" cntr_pos=cntr, rad=rad, sig_inf=sig, max_perc=mperc)\n",
" sig_values = wake_sig(spat_df, cntr_pos=cntr, rad=rad, sig_inf=sig,\n",
" max_perc=mperc)\n",
" cnt = ax.contourf(y, z, sig_values.reshape(y.size, z.size).T)\n",
" ax.axis('equal')\n",
" plt.colorbar(cnt, ax=ax);\n",
......
@@ -59,6 +59,24 @@
"con_tc.index = con_tc.index.map(lambda x: float(x) if (x not in 'kxyz') else x) # index cleaning"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We also need to create the spatial dataframe that defines the area to be simulated. Due to dimensions assumed in the plotting function used in this example, we won't use `gen_spat_grid` but will rather construct the dataframe manually."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y, Z = np.meshgrid([-50, 50], np.linspace(10, 140)) # 2d arrays for contours\n",
"k, x = 0*Y.flatten(), 0*Y.flatten()\n",
"spat_df = pd.DataFrame([k, x, Y.flatten(), Z.flatten()], index=['k', 'x', 'y', 'z'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -78,14 +96,13 @@
"metadata": {},
"outputs": [],
"source": [
"# define the points we want to query and call data_profile\n",
"y, z = np.meshgrid([-50, 50], np.linspace(10, 140))\n",
"wsp_intp = data_profile(y.flatten(), z.flatten(), con_tc=con_tc)\n",
"# get the data profile points for interpolation\n",
"wsp_intp = data_profile(spat_df, con_tc=con_tc)\n",
"# calculate the values fron con_tc for comparison\n",
"yp, zp = np.zeros(6), con_tc.loc['z'].unique()\n",
"wspp = con_tc.get_time().filter(regex='u_').mean().values\n",
"# plot the interpolated values\n",
"plot_interp(yp, zp, wspp, y, z, wsp_intp)"
"plot_interp(yp, zp, wspp, Y, Z, wsp_intp)"
]
},
{
@@ -104,17 +121,17 @@
"outputs": [],
"source": [
"# define the points we want to query and call data_sig\n",
"y, z = np.meshgrid([-50, 50], np.linspace(10, 140))\n",
"plt.figure(1, figsize=(15, 4))\n",
"for k in range(3): # loop through components\n",
" ks = k * np.ones(y.size)\n",
" sig_intp = data_sig(ks, y.flatten(), z.flatten(), con_tc=con_tc)\n",
" kspat_df = spat_df.copy(deep=True) # copy spat_df\n",
" kspat_df.loc['k'] = k # assign component\n",
" sig_intp = data_sig(kspat_df, con_tc=con_tc)\n",
" # calculate the values fron con_tc for comparison\n",
" yp, zp = np.zeros(6), con_tc.loc['z'].unique()\n",
" sigp = con_tc.get_time().filter(regex=f'{\"uvw\"[k]}_').std().values\n",
" # plot the interpolated values\n",
" plt.subplot(1, 3, 1 + k)\n",
" plot_interp(yp, zp, sigp, y, z, sig_intp)\n",
" plot_interp(yp, zp, sigp, Y, Z, sig_intp)\n",
" plt.title(f'Component {\"uvw\"[k]} sig')"
]
},
@@ -135,20 +152,27 @@
"source": [
"# define the points we want to query and call data_spectrum\n",
"f, f_idx = 0.05, 5 # frequency to look at and its index in the freq vector\n",
"y, z = np.meshgrid([-50, 50], np.linspace(10, 140))\n",
"plt.figure(1, figsize=(14, 10))\n",
"for k in range(3): # loop through components\n",
" ks = k * np.ones(y.size)\n",
" spc_intp = data_spectrum(f, ks, y.flatten(), z.flatten(), con_tc=con_tc)\n",
" kspat_df = spat_df.copy(deep=True) # copy spat_df\n",
" kspat_df.loc['k'] = k # assign component\n",
" spc_intp = data_spectrum(f, kspat_df, con_tc=con_tc)\n",
" # calculate the values fron con_tc for comparison\n",
" yp, zp = np.zeros(6), con_tc.loc['z'].unique()\n",
" mags = 2*np.abs(np.fft.rfft(con_tc.get_time().filter(regex=f'{\"uvw\"[k]}_'), axis=0))**2\n",
" spcp = mags[f_idx, :]\n",
" # plot the interpolated values\n",
" plt.subplot(2, 2, 1 + k)\n",
" plot_interp(yp, zp, spcp, y, z, spc_intp, fmt='%.2e')\n",
" plot_interp(yp, zp, spcp, Y, Z, spc_intp, fmt='%.2e')\n",
" plt.title(f'Component {\"uvw\"[k]} spectrum, f = {f} Hz')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see that the variation with height of the interpolated spectrum matches, as desired. The scaling between the original and interpolated spectra is off, but this is because the spectra should be scaled according to the standard deviation, which we do not do in this example for brevity."
]
}
],
"metadata": {
......
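An aside on the scaling note that closes the notebook above: a minimal sketch of the missing rescaling, assuming synthetic stand-ins for the interpolated spectrum and standard deviation (none of these names are PyConTurb API), is to scale the spectrum so that its integrated power recovers the target variance.

```
# Hedged sketch, not part of this commit: rescale an interpolated spectrum so
# that its integrated power matches the interpolated variance sig**2.
import numpy as np

nf, df = 64, 0.05                      # no. of frequencies and spacing [Hz], assumed
spc_intp = np.random.rand(nf)          # stand-in for an interpolated spectrum
sig_intp = 1.2                         # stand-in for the interpolated std. dev.

var_raw = np.sum(spc_intp) * df        # variance implied by the raw spectrum
spc_scaled = spc_intp * sig_intp**2 / var_raw
assert np.isclose(np.sum(spc_scaled) * df, sig_intp**2)
```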
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Output formats"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once a turbulence box has been generated, it can be converted to either a HAWC2 turbulence binary file (.bin) or a full-field TurbSim binary file (.bts).\n",
"\n",
"Here is a quick example that generates turbulence, saves it to file and then reloads it.\n",
"\n",
"**Note**: These read/write functions are only applicable to 3D turbulence boxes generated on a grid with a specific column naming convention/order. Please either use `gen_spat_grid` combined with `gen_turb` or be very sure that your column ordering/naming in your dataframe is consistent with PyConTurb's ordering."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import needed functions\n",
"from pyconturb import gen_spat_grid, gen_turb\n",
"from pyconturb.io import df_to_h2turb, h2turb_to_df, df_to_bts, bts_to_df\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create the turbulence box\n",
"nt, dt = 100, 0.2\n",
"spat_df = gen_spat_grid(0, [50, 70]) # two points\n",
"turb_df = gen_turb(spat_df, T=nt*dt, dt=dt, u_ref=12)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### HAWC2 turbulence format\n",
"\n",
"Because the HAWC2 binary format does not include all necessary information to save/reconstruct the turbulence box (e.g., the grid points), we must also pass `spat_df` into the functions. There are 3 output files that have endings \"u.bin\", \"v.bin\" and \"u.bin\". The `prefix` keyword argument allows you to prepend a string in from of the endings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# save in HAWC2 format (u.bin, v.bin, w.bin)\n",
"path, prefix = '.', 'h2turb_' # path is directory, prefix is prepended to \"u.bin\", \"v.bin\", \"w.bin\"\n",
"df_to_h2turb(turb_df, spat_df, path, prefix=prefix) # save file\n",
"\n",
"# reload file\n",
"h2_df = h2turb_to_df(spat_df, path, prefix=prefix, nt=nt, dt=dt)\n",
"\n",
"# compare the original and reloaded turbulence files\n",
"plt.plot(turb_df.u_p1, label='Original')\n",
"plt.plot(h2_df.u_p1, label='Reloaded')\n",
"plt.legend();"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### TurbSim turbulence format\n",
"\n",
"Unlike the HAWC2 format, the full-field TurbSim binary format stores all three components as well as all information required to reconstruct the turbulence box. However, we are still required to pass in `spat_df` on saving, because we need to encode the spatial information in `spat_df` into the output binary file.\n",
"\n",
"Because TurbSim requires specifying a hub height and corresponding wind speed, the write function allows you to specify those values directly. If no coordinates are given, the center of the grid is used."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# save in TurbSim format (path.bts)\n",
"path = 'ts.bts' # path is name of file (with or without ending)\n",
"df_to_bts(turb_df, spat_df, path, uzhub=None) # assume hub height is in center of box\n",
"\n",
"# reload file\n",
"ts_df = bts_to_df(path)\n",
"\n",
"# compare the original and reloaded turbulence files\n",
"plt.plot(turb_df.u_p1, label='Original')\n",
"plt.plot(ts_df.u_p1, label='Reloaded')\n",
"plt.legend();"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
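As a follow-up to the visual comparisons in the notebook above, here is a hedged, self-contained sketch that checks the HAWC2 round trip numerically rather than by eye; it uses only functions shown in this commit, and the tolerance is an assumption to absorb float32 round-off in the binary files.

```
# Hedged sketch: numeric round-trip check for the HAWC2 binary I/O above.
import numpy as np
from pyconturb import gen_spat_grid, gen_turb
from pyconturb.io import df_to_h2turb, h2turb_to_df

nt, dt = 100, 0.2
spat_df = gen_spat_grid(0, [50, 70])                   # two points, as above
turb_df = gen_turb(spat_df, T=nt*dt, dt=dt, u_ref=12)
df_to_h2turb(turb_df, spat_df, '.', prefix='h2turb_')  # writes u/v/w .bin files
h2_df = h2turb_to_df(spat_df, '.', prefix='h2turb_', nt=nt, dt=dt)
assert np.allclose(turb_df['u_p1'], h2_df['u_p1'], atol=1e-4)  # float32 round-off
```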
@@ -76,6 +76,15 @@ Here is a quick, step-by-step guide for fixing a bug:
(``git checkout master``, then ``git pull origin master``).
List of contributors
----------------------
The following lovely people have made contributions to PyConTurb:
* Emmanuel Branlard
* Jenni Rinker
Theory
-------
......
"""Input-output module for files related to HAWC2
"""Reading HAWC2 results files (.set and .dat)
Notes
-----
Much of this was copied, modified, and/or cleaned from DTU's Wind Energy
Toolbox.
Author
------
Jenni Rinker
rink@dtu.dk
Toolbox. https://gitlab.windenergy.dtu.dk/toolbox/WindEnergyToolbox
"""
import os
......
@@ -11,7 +11,6 @@ import scipy.interpolate as sciint
_spat_rownames = ['k', 'x', 'y', 'z'] # row names of spatial df
_DEF_KWARGS = {'u_ref': 0, 'z_ref': 90, 'alpha': 0.2, 'turb_class': 'A',
'l_c': 340.2} # lc for coherence ***NOTE THESE OVERWRITE FUNCTION DEFS
_HAWC2_BIN_FMT = '<f' # HAWC2 binary turbulence datatype
_HAWC2_TURB_COOR = {'u': -1, 'v': -1, 'w': 1} # hawc2 turb xyz to uvw
@@ -81,28 +80,6 @@ def combine_spat_con(spat_df, con_tc, drop_duplicates=True, decimals=10):
return comb_df
def df_to_h2turb(turb_df, spat_df, path, prefix=''):
"""ksec3d-style turbulence dataframe to binary files for hawc2
Notes
-----
* The turbulence must have been generated on a y-z grid.
* The naming convention must be 'u_p0', 'v_p0, 'w_p0', 'u_p1', etc.,
where the point indices proceed vertically along z before horizontally
along y.
"""
nx = turb_df.shape[0] # turbulence dimensions for reshaping
ny = len(set(spat_df.loc['y'].values))
nz = len(set(spat_df.loc['z'].values))
# make and save binary files for all three components
for c in 'uvw':
arr = turb_df.filter(regex=f'{c}_', axis=1).values.reshape((nx, ny, nz))
bin_path = os.path.join(path, f'{prefix}{c}.bin')
with open(bin_path, 'wb') as bin_fid:
arr.astype(np.dtype(_HAWC2_BIN_FMT)).tofile(bin_fid)
return
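The point-ordering convention in the docstring above (indices run along z first, then y) is easy to get wrong, so here is a minimal sketch with toy data, assuming nothing beyond numpy/pandas, of how a column such as u_p1 lands in the (nx, ny, nz) array that df_to_h2turb writes.

```
# Hedged illustration of the 'u_p0', 'v_p0', ... column convention (toy data).
import numpy as np
import pandas as pd

nx, ny, nz = 4, 2, 3                    # time steps, y points, z points
cols = [f'{c}_p{i}' for i in range(ny*nz) for c in 'uvw']
turb_df = pd.DataFrame(np.random.rand(nx, 3*ny*nz), columns=cols)
arr = turb_df.filter(regex='u_', axis=1).values.reshape((nx, ny, nz))
# the point index runs along z first, then y, so u_p1 maps to (y0, z1)
assert np.allclose(arr[:, 0, 1], turb_df['u_p1'])
```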
def gen_spat_grid(y, z, comps=[0, 1, 2]):
"""Generate spat_df (all turbulent components and grid defined by x and z)
@@ -130,30 +107,6 @@ def get_freq(**kwargs):
return t, freq
def h2turb_to_arr(spat_df, path):
"""raw-load a hawc2 turbulent binary file to numeric array"""
ny, nz = pd.unique(spat_df.loc['y']).size, pd.unique(spat_df.loc['z']).size
bin_arr = np.fromfile(path, dtype=np.dtype(_HAWC2_BIN_FMT))
nx = bin_arr.size // (ny * nz)
if (nx * ny * nz) != bin_arr.size:
raise ValueError('Binary file size does not match spat_df!')
bin_arr.shape = (nx, ny, nz)
return bin_arr
def h2turb_to_df(spat_df, path, prefix=''):
"""load a hawc2 binary file into a pandas datafram with transform to uvw"""
turb_df = pd.DataFrame()
for c in 'uvw':
comp_path = os.path.join(path, f'{prefix}{c}.bin')
arr = h2turb_to_arr(spat_df, comp_path)
nx, ny, nz = arr.shape
comp_df = pd.DataFrame(arr.reshape(nx, ny*nz)).add_prefix(f'{c}_p')
turb_df = turb_df.join(comp_df, how='outer')
turb_df = turb_df[[f'{c}_p{i}' for i in range(2) for c in 'uvw']]
return turb_df
def is_line(points):
"""see if the points given in points fall along a single line"""
# first rotate by first pair to remove issue with vertical lines
......
# version: 1.0 First pass at PyConTurb
# 2.0 Switch to uvw, other changes
# 2.5 Fixed bug in custom profiles
__version__ = '2.5.dev1'
__release__ = '2.5.dev1'
__version__ = '2.6.dev1'
__release__ = '2.6.dev1'
@@ -10,19 +10,19 @@ def get_coh_mat(freq, spat_df, coh_model='iec', dtype=np.float64, **kwargs):
"""Create coherence matrix for given frequencies and coherence model
"""
if coh_model == 'iec': # IEC coherence model
if 'ed' not in kwargs.keys(): # add IEC ed to kwargs if not passed in
kwargs['ed'] = 3
coh_mat = get_iec_coh_mat(freq, spat_df, dtype=dtype, **kwargs)
elif coh_model == '3d': # 3D coherence model
coh_mat = get_3d_coh_mat(freq, spat_df, dtype=dtype, **kwargs)
coh_mat = get_iec_coh_mat(freq, spat_df, dtype=dtype, coh_model=coh_model,
**kwargs)
elif coh_model == 'iec3d':
coh_mat = get_iec3d_coh_mat(freq, spat_df, dtype=dtype, coh_model=coh_model,
**kwargs)
else: # unknown coherence model
raise ValueError(f'Coherence model "{coh_model}" not recognized.')
raise ValueError(f'Coherence model "{coh_model}" not recognized!')
return coh_mat
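To make the dispatcher above concrete, a hedged usage sketch follows. The import path pyconturb.coherence and the numeric inputs are assumptions; the expected shape reflects the (nf, ns, ns) layout introduced in this commit.

```
# Hedged usage sketch for get_coh_mat; import path and values are assumed.
import numpy as np
from pyconturb import gen_spat_grid
from pyconturb.coherence import get_coh_mat  # assumed module location

spat_df = gen_spat_grid([0, 10], [80, 90])   # 2 x 2 grid, components u, v, w
freq = np.array([0.05, 0.1, 0.2])            # frequencies [Hz]
coh_mat = get_coh_mat(freq, spat_df, coh_model='iec3d', u_ref=10, l_c=340.2)
print(coh_mat.shape)                         # expect (3, 12, 12) = (nf, ns, ns)
```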
def chunker(iterable,nPerChunks):
""" Return list of nPerChunks elements of an iterable """
def chunker(iterable, nPerChunks):
"""Return list of nPerChunks elements of an iterable"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, nPerChunks))
@@ -30,64 +30,60 @@ def chunker(iterable,nPerChunks):
return
yield chunk
def get_iec_coh_mat(freq, spat_df, dtype=np.float64, **kwargs):
"""Create IEC 61400-1 Ed. 3 coherence matrix for given frequencies
def get_iec_coh_mat(freq, spat_df, dtype=np.float64, ed=4, **kwargs):
"""Create IEC 61400-1 Ed. 3/4 coherence matrix for given frequencies
"""
# preliminaries
if kwargs['ed'] != 3: # only allow edition 3
raise ValueError('Only edition 3 is permitted.')
if any([k not in kwargs.keys() for k in ['u_ref', 'l_c']]): # check kwargs
if ed not in [3, 4]: # only allow editions 3 or 4
raise ValueError('Only editions 3 or 4 are permitted.')
if any([k not in kwargs for k in ['u_ref', 'l_c']]): # check kwargs
raise ValueError('Missing keyword arguments for IEC coherence model')
freq = np.array(freq).reshape(1, -1)
n_f, n_s = freq.size, spat_df.shape[1]
# misc storage
xyz = spat_df.loc[['x', 'y', 'z']].values.astype(float)
coh_mat = np.repeat(np.atleast_3d(np.eye((n_s),dtype=dtype)), n_f, axis=2) # TODO use sparse matrix
exp_constant = np.sqrt( (1/ kwargs['u_ref'] * freq)**2 + (0.12 / kwargs['l_c'])**2).astype(dtype)
Icomp = np.arange(n_s)[spat_df.iloc[0, :].values==0] # Selecting only u-components
freq = np.array(freq).reshape(-1, 1) # nf x 1
nf, ns = freq.size, spat_df.shape[1]
# intermediate variables
yz = spat_df.loc[['y', 'z']].values.astype(float)
coh_mat = np.repeat(np.eye((ns), dtype=dtype)[None, :, :], nf, axis=0) # TODO use sparse matrix
exp_constant = np.sqrt( (1/ kwargs['u_ref'] * freq)**2 + (0.12 / kwargs['l_c'])**2).astype(dtype) # nf x 1
i_comp = np.arange(ns)[spat_df.iloc[0, :].values == 0] # Selecting only u-components
# loop through number of combinations, nPerChunks at a time to reduce memory impact
for ii_jj in chunker(itertools.combinations(Icomp, 2), nPerChunks=10000):
for ii_jj in chunker(itertools.combinations(i_comp, 2), nPerChunks=10000):
# get indices of point-pairs
ii = np.array([tup[0] for tup in ii_jj])
jj = np.array([tup[1] for tup in ii_jj])
# calculate distances and coherences
r = np.sqrt((xyz[1, ii] - xyz[1, jj])**2 + (xyz[2, ii] - xyz[2, jj])**2)
coh_values = np.exp(-12 * np.abs(r.reshape(-1, 1)) * exp_constant)
r = np.sqrt((yz[0, ii] - yz[0, jj])**2 + (yz[1, ii] - yz[1, jj])**2) # nchunk
coh_values = np.exp(-12 * np.abs(r) * exp_constant) # nf x nchunk
# Previous method (same math, different numerics)
# coh_values = np.exp(-12 *
# np.sqrt((r.reshape(-1, 1) / kwargs['u_ref'] * freq)**2
# + (0.12 * r.reshape(-1, 1) / kwargs['l_c'])**2))
coh_mat[ii, jj, :] = coh_values
coh_mat[jj, ii, :] = np.conj(coh_values)
coh_mat[:, ii, jj] = coh_values # nf x nchunk
coh_mat[:, jj, ii] = np.conj(coh_values)
return coh_mat
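For reference, the loop above implements the IEC 61400-1 exponential coherence model, written here with the separation r_ij factored out of the square root exactly as in the code (the commented-out "previous method" keeps r_ij inside the root; the math is identical):

```
\gamma_{ij}(f) = \exp\!\left[-12\, r_{ij}\,
    \sqrt{\left(\frac{f}{u_{\mathrm{ref}}}\right)^{2}
        + \left(\frac{0.12}{l_c}\right)^{2}}\,\right],
\qquad
r_{ij} = \sqrt{(y_i - y_j)^2 + (z_i - z_j)^2}
```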
def get_3d_coh_mat(freq, spat_df, dtype=np.float64, **kwargs):
"""Create coherence matrix with 3d coherence for given frequencies
def get_iec3d_coh_mat(freq, spat_df, dtype=np.float64, **kwargs):
"""Create IEC-flavor coherence but for all 3 components
"""
if any([k not in kwargs.keys() for k in ['u_ref', 'l_c']]): # check kwargs
# preliminaries
if any([k not in kwargs for k in ['u_ref', 'l_c']]): # check kwargs
raise ValueError('Missing keyword arguments for IEC coherence model')
freq = np.array(freq).reshape(-1, 1) # nf x 1
nf, ns = freq.size, spat_df.shape[1]
# intermediate variables
freq = np.array(freq).reshape(1, -1)
n_f, n_s = freq.size, spat_df.shape[1]
# misc storage
xyz = spat_df.loc[['x', 'y', 'z']].values
coh_mat = np.repeat(np.atleast_3d(np.eye((n_s),dtype=dtype)), n_f, axis=2)
# loop through the three components
for (k, lc_scale) in [(0, 1), (1, 2.7 / 8.1), (2, 0.66 / 8.1)]:
Icomp = np.arange(n_s)[spat_df.iloc[0, :].values==k] # Selecting only 1 component
l_c = kwargs['l_c'] * lc_scale
exp_constant = np.sqrt( (1/ kwargs['u_ref'] * freq)**2 + (0.12 / l_c)**2).astype(dtype)
yz = spat_df.loc[['y', 'z']].values.astype(float)
coh_mat = np.repeat(np.eye((ns), dtype=dtype)[None, :, :], nf, axis=0) # TODO use sparse matrix
for ic in range(3):
Lc = kwargs['l_c'] * [1, 2.7/8.1, 0.66/8.1][ic]
exp_constant = np.sqrt( (1/ kwargs['u_ref'] * freq)**2 + (0.12 / Lc)**2).astype(dtype)
i_comp = np.arange(ns)[spat_df.iloc[0, :].values == ic] # selecting only component ic
# loop through number of combinations, nPerChunks at a time to reduce memory impact
for ii_jj in chunker(itertools.combinations(Icomp, 2), nPerChunks=10000):
for ii_jj in chunker(itertools.combinations(i_comp, 2), nPerChunks=10000):
# get indices of point-pairs
ii = np.array([tup[0] for tup in ii_jj])
jj = np.array([tup[1] for tup in ii_jj])
r = np.sqrt((xyz[1, ii] - xyz[1, jj])**2 + (xyz[2, ii] - xyz[2, jj])**2)
coh_values = np.exp(-12 * np.abs(r.reshape(-1, 1)) * exp_constant)
# calculate distances and coherences
r = np.sqrt((yz[0, ii] - yz[0, jj])**2 + (yz[1, ii] - yz[1, jj])**2)
coh_values = np.exp(-12 * np.abs(r) * exp_constant)
# Previous method (same math, different numerics)
#coh_values = np.exp(-12 *
# np.sqrt((r.reshape(-1, 1) / kwargs['u_ref'] * freq)**2
# + (0.12 * r.reshape(-1, 1) / l_c)**2))
coh_mat[ii, jj, :] = coh_values
coh_mat[jj, ii, :] = np.conj(coh_values)
coh_mat[:, ii, jj] = coh_values # nf x nchunk
coh_mat[:, jj, ii] = np.conj(coh_values)
return coh_mat
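Background on the hard-coded factors [1, 2.7/8.1, 0.66/8.1] in the loop above (stated from the IEC 61400-1 Kaimal model, not from this diff): the per-component integral scales are

```
L_u = 8.1\,\Lambda_1, \qquad L_v = 2.7\,\Lambda_1, \qquad L_w = 0.66\,\Lambda_1
```

so with l_c defaulting to 340.2 m = 8.1 Λ₁ (Λ₁ = 42 m), dividing by 8.1 recovers the v and w scales.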
"""Input-output for PyConTurb files
"""
import os
from struct import pack, unpack
import time
import numpy as np
import pandas as pd
from pyconturb._version import __version__
_HAWC2_BIN_FMT = '<f' # HAWC2 binary turbulence datatype
_TS_BIN_FMT = '<h4l12fl' # TurbSim binary format
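# '<h4l12fl' layout (little-endian): one int16 file ID (periodic (7) or
# non-periodic (8)), four int32s (nz, ny, no. tower points, nt), twelve
# float32s (dz, dy, dt, uhub, zhub, z0, then scale/offset pairs for u, v, w),
# and a final int32 (length of the description string that follows; inferred
# from the TurbSim full-field format, not shown in this excerpt)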
def bts_to_df(path):
"""Load TurbSim-style .bts file to pyconturb dataframe"""
if path.endswith('.bts'): # remove file extension, will be added back later
path = path[:-4]
u_scl = np.zeros(3, np.float32)
u_off = np.zeros(3, np.float32)
with open(path + '.bts', 'rb') as fl:
(_, # periodic (7) or non periodic (8), unused
nz,
ny,
_, # no. tower pts below box, unused
nt,
dz,
dy,
dt,
uhub,
zhub,
z0,
u_scl[0],
u_off[0],
u_scl[1],
u_off[1],
u_scl[2],
u_off[2],