Commit fc4eb21d authored by David Verelst

Merge branch 'master' of gitlab.windenergy.dtu.dk:toolbox/WindEnergyToolbox

parents 962badd9 4d8959bd
Showing changed files with 575 additions and 10 deletions
@@ -27,7 +27,7 @@ from Cython.Distutils import build_ext
def setup_package():
ex_info = [('wetb.fatigue_tools.rainflowcounting', ['pair_range', 'peak_trough', 'rainflowcount_astm']),
('wetb.signal_tools.filters', ['cy_filters'])]
('wetb.signal.filters', ['cy_filters'])]
extlist = [Extension('%s.%s' % (module, n),
[os.path.join(module.replace(".","/"), n)+'.pyx'],
include_dirs=[np.get_include()]) for module, names in ex_info for n in names]
......
@@ -14,7 +14,7 @@ from io import open
from builtins import str
from future import standard_library
from wetb.utils.process_exec import pexec
from wetb.utils.cluster_tools.cluster_resource import unix_path, unix_path_old
from wetb.utils.cluster_tools.cluster_resource import unix_path_old
standard_library.install_aliases()
from collections import OrderedDict
......
@@ -388,6 +388,8 @@ class Simulation(object):
def set_id(self, *args, **kwargs):
pass
def progress_callback(self,*args, **kwargs):
pass
class UpdateSimStatusThread(Thread):
def __init__(self, simulation, interval=1):
......
@@ -16,12 +16,11 @@ import time
from wetb.hawc2 import log_file
from wetb.hawc2.log_file import LogInfo, LogFile
from wetb.hawc2.simulation import ERROR, ABORTED
from wetb.utils.cluster_tools import pbsjob
from wetb.utils.cluster_tools.cluster_resource import LocalResource, \
SSHPBSClusterResource, unix_path
SSHPBSClusterResource
from wetb.utils.cluster_tools import pbsjob
from wetb.utils.cluster_tools.pbsjob import SSHPBSJob, NOT_SUBMITTED, DONE
from wetb.utils.cluster_tools.ssh_client import SSHClient
from wetb.utils.timing import print_time
from wetb.hawc2.htc_file import fmt_path
import numpy as np
@@ -249,7 +248,8 @@ class GormSimulationResource(PBSClusterSimulationResource):
def __init__(self, username, password, wine_cmd="WINEARCH=win32 WINEPREFIX=~/.wine32 wine"):
init_cmd = """export PATH=/home/python/miniconda3/bin:$PATH
source activate wetb_py3"""
PBSClusterSimulationResource.__init__(self, "gorm.risoe.dk", username, password, 22, 25, 100, init_cmd, wine_cmd, "python")
from wetb.utils.cluster_tools.ssh_client import SSHClient
PBSClusterSimulationResource.__init__(self, SSHClient('gorm.risoe.dk', username, password, 22), 25, 100, init_cmd, wine_cmd, "python")
class PBSClusterSimulationHost(SimulationHost):
......
'''
Created on 30/06/2016
@author: MMPE
'''
from scipy.interpolate.interpolate import interp1d
import numpy as np
from wetb.signal.fit import bin_fit
def rms(a, b):
"""Calculate the Root-Mean-Squared Error of two value sets
Parameters
---------
a : array_like
First value set
b : array_like
Second value set
Returns
-------
y : float
Root mean squared error of a and b
"""
a, b = [np.array(ab[:]) for ab in [a, b]]
if a.shape != b.shape:
raise ValueError("Dimensions differ: %s!=%s" % (a.shape, b.shape))
if len(a) == 0:
return np.nan
return np.sqrt(np.nanmean((a - b) ** 2))
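A minimal usage sketch for rms (the import path follows the test module later in this commit; the sample values are illustrative only):

import numpy as np
from wetb.signal.error_measures import rms

a = np.array([1.0, 2.0, np.nan, 4.0])
b = np.array([1.1, 1.9, 3.0, 4.2])
print(rms(a, b))  # NaN pairs are ignored because the error is based on np.nanmean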
def rms2fit(x, y, fit_func=bin_fit):
"""
Calculate the rms error of the points (xi, yi) relative to a fitted mean curve
The mean curve is obtained from fit_func, by default wetb.signal.fit.bin_fit
Useful for calculating e.g. power curve scatter
Parameters
---------
x : array_like
x values
y : array_like
y values
fit_func : function, optional
Function applied to (x, y) to obtain the mean curve; must return (bin_x, fit_function). Default is bin_fit
Returns
-------
err : float
Mean error of points compared to mean curve
f : function
Interpolation function
"""
x, y = np.array(x[:]), np.array(y[:])
_, fit = fit_func(x,y)
return rms(fit(x),y), fit
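A minimal sketch of how rms2fit can be used, e.g. to quantify power curve scatter (synthetic data, default bin_fit settings assumed):

import numpy as np
from wetb.signal.error_measures import rms2fit

wsp = np.random.uniform(4, 25, 1000)                     # synthetic wind speeds
power = 100 * wsp ** 2 + np.random.normal(0, 500, 1000)  # synthetic, noisy power
err, fit = rms2fit(wsp, power)         # rms of the scatter around the binned mean curve
print(err)
print(fit(np.array([6., 12., 20.])))   # evaluate the fitted mean curve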
def rms2fit_old(x, y, bins=10, kind='cubic', fit_func=np.nanmean, normalize_with_slope=False):
"""
Calculate the rms error of the points (xi, yi) relative to the mean curve
The mean curve is computed by:\n
- Divide x into the specified number of bins\n
- Remove empty bins\n
- Calculate the mean of x and y in each bin (y via fit_func)\n
- Interpolate between the bin mean values (interpolation kind given by the kind argument)\n
- Extrapolate to the minimum and maximum value of x using the slope of the first and last line segment\n
Useful for calculating e.g. power curve scatter
Parameters
---------
x : array_like
x values
y : array_like
y values
bins : int or array_like, optional
If int: Number of control points for the mean curve, default is 10\n
If array_like: Bin edges
kind : str or int, optional
Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear',
'quadratic','cubic' where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation
of first, second or third order) or as an integer specifying the order of the spline
interpolator to use. Default is 'cubic'.
fit_func : function, optional
Function to apply on each bin to find control points for fit
normalize_with_slope : boolean, optional
If True, the mean error in each bin is normalized with the slope of the corresponding line segment
Returns
-------
err : float
Mean error of points compared to mean curve
f : function
Interpolation function
"""
x, y = np.array(x[:]), np.array(y[:])
if isinstance(bins, int):
bins = np.linspace(np.nanmin(x), np.nanmax(x) + 1e-10, bins + 1)
digitized = np.digitize(x, bins)
digitized[np.isnan(x) | np.isnan(y)] = -1
masks = [digitized == i for i in range(1, len(bins))]
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bin_x = np.array([np.nanmean(x[mask]) for mask in masks])
bin_y = np.array([fit_func(y[mask]) for mask in masks])
bin_count = np.array([np.sum(mask) for mask in masks])
bin_x_fit, bin_y = [b[bin_count >= 1] for b in [bin_x, bin_y]]
#extrapolate to first and last value of x
if bin_x_fit[0] > np.nanmin(x):
bin_y = np.r_[bin_y[0] - (bin_x_fit[0] - np.nanmin(x)) * (bin_y[1] - bin_y[0]) / (bin_x_fit[1] - bin_x_fit[0]), bin_y]
bin_x_fit = np.r_[np.nanmin(x), bin_x_fit]
if bin_x_fit[-1] < np.nanmax(x):
bin_y = np.r_[bin_y, bin_y[-1] + (np.nanmax(x) - bin_x_fit[-1]) * (bin_y[-1] - bin_y[-2]) / (bin_x_fit[-1] - bin_x_fit[-2]) ]
bin_x_fit = np.r_[bin_x_fit, np.nanmax(x)]
#Create mean function
f = lambda x : interp1d(bin_x_fit, bin_y, kind)(x[:])
#calculate error of segment
digitized = np.digitize(x, bin_x[bin_count > 0])
bin_err = np.array([rms(y[digitized == i], f(x[digitized == i])) for i in range(1, len(bin_x_fit))])
if normalize_with_slope:
slopes = np.diff(bin_y) / np.diff(bin_x_fit)
return np.nanmean(bin_err / np.abs(slopes)), f
return np.sqrt(np.nanmean(bin_err ** 2)), f
def rms2mean(x, y, bins=10, kind='cubic', normalize_with_slope=False):
"""
Calculate the rms error of the points (xi, yi) relative to the mean curve
The mean curve is computed with wetb.signal.fit.bin_fit:\n
- Divide x into the specified number of bins\n
- Set bins with fewer than 3 observations to NaN (bin_fit default bin_min_count)\n
- Calculate the mean of x and y in each bin\n
- Interpolate between the bin mean values (interpolation kind given by the kind argument)\n
- Ignore observations outside the first and last bin mean (bin_fit default lower_upper='discard')\n
Useful for calculating e.g. power curve scatter
Parameters
---------
x : array_like
x values
y : array_like
y values
bins : int or array_like, optional
If int: Number of control points for the mean curve, default is 10\n
If array_like: Bin edges
kind : str or int, optional
Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear',
'quadratic','cubic' where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation
of first, second or third order) or as an integer specifying the order of the spline
interpolator to use. Default is 'cubic'.
normalize_with_slope : boolean, optional
If True, the mean error in each bin is normalized with the slope of the corresponding line segment (note: not used by the current implementation)
Returns
-------
err : float
Mean error of points compared to mean curve
f : function
Interpolation function
"""
return rms2fit(x, y, lambda x,y : bin_fit(x, y, bins, kind))
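rms2mean simply forwards bins and kind to bin_fit; a hypothetical call, reusing the synthetic wsp/power arrays from the rms2fit sketch above:

from wetb.signal.error_measures import rms2mean

err_mean, f_mean = rms2mean(wsp, power, bins=20, kind='cubic')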
def bootstrap_comparison(x, y, kind=1, N=15, M=100):
f_lst = []
y_lst = []
x_min, x_max = max(np.percentile(x, 2), np.sort(x)[2]), min(np.percentile(x, 98), np.sort(x)[-2])
y_arr = np.empty((M, N * 10)) + np.NaN
inside = 0
for i in range(M):
indexes = np.random.randint(0, len(x) - 1, len(x))
while x[indexes].min() > x_min or x[indexes].max() < x_max:
indexes = np.random.randint(0, len(x) - 1, len(x))
#indexes = np.arange(i, len(x), M)
_, f = rms2fit(x[indexes], y[indexes], lambda x,y : bin_fit(x,y, kind=kind, bins=N))
x_ = np.linspace(x_min, x_max, N * 10)
y_ = (f(x_))
if i > 10:
if np.all(y_ < np.nanmax(y_arr, 0)) and np.all(y_ > np.nanmin(y_arr, 0)):
inside += 1
if inside == 5:
#print ("break", i)
#break
pass
y_arr[i, :] = y_
return (np.mean(np.std(y_arr, 0)), x_, y_arr[~np.isnan(y_arr[:, 0])])
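bootstrap_comparison has no docstring; from the body it resamples (x, y) with replacement M times, bin-fits each resample with N bins of order kind, and returns the mean standard deviation of the fitted curves over a common x-grid, together with that grid and the individual curves. A hypothetical call, again reusing the synthetic wsp/power data from above:

from wetb.signal.error_measures import bootstrap_comparison

spread, x_grid, curves = bootstrap_comparison(wsp, power, kind=1, N=15, M=100)
print(spread)  # average vertical spread of the bootstrapped mean curves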
@@ -4,8 +4,8 @@ Created on 13/07/2016
@author: MMPE
'''
import numpy as np
from wetb.signal_tools.filters.first_order import low_pass
from wetb.signal_tools.filters import replacer
from wetb.signal.filters.first_order import low_pass
from wetb.signal.filters import replacer
replace_by_nan = replacer.replace_by_nan
......
@@ -4,7 +4,7 @@ Created on 10/01/2015
@author: mmpe
'''
import numpy as np
from wetb.signal_tools.filters import cy_filters
from wetb.signal.filters import cy_filters
def low_pass(input, delta_t, tau, method=1):
if isinstance(tau, (int, float)):
......
d = None
d = dir()
from wetb.signal.fit._linear_fit import *
from wetb.signal.fit._bin_fit import *
from wetb.signal.fit._fourier_fit import *
__all__ = sorted([m for m in set(dir()) - set(d)])
import numpy as np
from scipy.interpolate.interpolate import interp1d
def bin_fit(x,y, bins=10, kind='linear', bin_func=np.nanmean, bin_min_count=3, lower_upper='discard'):
"""Fit observations based on bin statistics
Parameters
---------
x : array_like
x observations
y : array_like
y observations
bins : int, array_like or (int, int)
if int: <bins> bins evenly distributed on the x-axis
if (xbins, ybins): <xbins> bins evenly distributed on the x-axis plus <ybins> bins evenly distributed on the y-axis (mapped back to x-positions via an inverse y->x fit)\n
Note that y-bins only make sense if every y-value maps to a single x-value
kind : int or string
Interpolation kind (string) or spline order (int), passed to scipy.interpolate.interp1d
bin_func : function, optional
Statistic function to apply on bins, default is nanmean
bin_min_count : int, optional
Minimum number of observations in bins to include
Default is 3
lower_upper : str, int, (str,str), (int,int)
How to handle observations below the first and above the last bin value. Can be:\n
- "discard": the returned fit function maps x-values below the first / above the last bin mean to NaN\n
- "extrapolate": extend the fit to min(x) and max(x) using the slope of the first / last segment\n
- int: set f(min(x)) / f(max(x)) to the mean of y for the first / last <int> observations (sorted by x)
Returns
-------
bin_x, fit_function
"""
x, y = np.array(x[:]), np.array(y[:])
if isinstance(bins, int):
bins = np.linspace(np.nanmin(x), np.nanmax(x) + 1e-10, bins + 1)
elif isinstance(bins, tuple) and len(bins)==2 and isinstance(bins[0], int) and isinstance(bins[1], int):
xbins, ybins = bins
if xbins>0:
xbinsx = np.linspace(np.nanmin(x), np.nanmax(x) + 1e-10, xbins + 1)
else:
xbinsx = []
if ybins>0:
x1, f1 = bin_fit(y,x, kind=1, bins=ybins)
xbinsy = f1(x1)
else:
xbinsy = []
#x2, f2 = bin_fit(x,y, kind=1, bins=xbins)
bins = sorted(np.r_[xbinsx, xbinsy ])
digitized = np.digitize(x, bins)
digitized[np.isnan(x) | np.isnan(y)] = -1
masks = [digitized == i for i in range(1, len(bins))]
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bin_x = np.array([np.nanmean(x[mask]) for mask in masks])
bin_y = np.array([bin_func(y[mask]) for mask in masks])
bin_count = np.array([np.sum(mask) for mask in masks])
#bin_x_fit, bin_y = [b[bin_count >= bin_min_count] for b in [bin_x, bin_y]]
bin_x_fit = bin_x
m = np.isnan(bin_x_fit)
bin_x_fit[m] = ((bins[:-1]+bins[1:])/2)[m]
bin_y_fit = bin_y.copy()
bin_y_fit[bin_count<bin_min_count]= np.nan
if isinstance(lower_upper, (str, int)):
lower = upper = lower_upper
else:
lower, upper = lower_upper
#Add value to min(x)
if bin_x_fit[0] > np.nanmin(x):
if lower =='extrapolate':
bin_y_fit = np.r_[bin_y_fit[0] - (bin_x_fit[0] - np.nanmin(x)) * (bin_y_fit[1] - bin_y_fit[0]) / (bin_x_fit[1] - bin_x_fit[0]), bin_y_fit]
bin_x_fit = np.r_[np.nanmin(x), bin_x_fit]
elif lower=="discard":
pass
elif isinstance(lower, int):
bin_y_fit = np.r_[np.mean(y[~np.isnan(x)][np.argsort(x[~np.isnan(x)])[:lower]]), bin_y_fit]
bin_x_fit = np.r_[np.nanmin(x), bin_x_fit]
else:
raise NotImplementedError("Argument for handling lower observations, %s, not implemented"%lower)
#add value to max(x)
if bin_x_fit[-1] < np.nanmax(x):
if upper == 'extrapolate':
bin_y_fit = np.r_[bin_y_fit, bin_y_fit[-1] + (np.nanmax(x) - bin_x_fit[-1]) * (bin_y_fit[-1] - bin_y_fit[-2]) / (bin_x_fit[-1] - bin_x_fit[-2]) ]
bin_x_fit = np.r_[bin_x_fit, np.nanmax(x)]
elif upper=="discard":
pass
elif isinstance(upper, int):
bin_y_fit = np.r_[bin_y_fit, np.mean(y[~np.isnan(x)][np.argsort(x[~np.isnan(x)])[-upper:]])]
bin_x_fit = np.r_[bin_x_fit, np.nanmax(x)]
else:
raise NotImplementedError("Argument for handling upper observations, %s, not implemented"%upper)
#Create mean function
def fit(x):
x = x[:].copy().astype(np.float)
x[x<bin_x_fit[0]] = np.nan
x[x>bin_x_fit[-1]] = np.nan
return interp1d(bin_x_fit, bin_y_fit, kind)(x[:])
return bin_x_fit, fit
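A minimal usage sketch for bin_fit with synthetic data; extrapolation is requested explicitly because the default lower_upper='discard' maps out-of-range values to NaN:

import numpy as np
from wetb.signal.fit import bin_fit

x = np.random.uniform(4, 25, 2000)             # synthetic observations
y = 3 * x ** 2 + np.random.normal(0, 30, 2000)
bin_x, fit = bin_fit(x, y, bins=20, kind='linear', lower_upper='extrapolate')
print(fit(np.array([5., 12., 24.])))           # the fitted mean curve at a few points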
def perpendicular_bin_fit(x, y, bins = 30, fit_func=None, bin_min_count=3, plt=None):
"""Fit a curve to the values, (x,y) using bins that are perpendicular to an initial fit
Parameters
---------
x : array_like
x observations
y : array_like
y observations
bins : int
Number of perpendicular bins
fit_func : function(x, y) -> (bin_x, fit_function) or None
Initial fit function
If None, bin_fit with the same number of bins is used
bin_min_count : int, optional
Minimum number of observations in bins to include
Default is 3
plt : pyplot or None
If a pyplot module is given, the fitting process is plotted on it
Returns
-------
fit_x, fit_y
"""
if fit_func is None:
fit_func = lambda x,y : bin_fit(x, y, bins, bin_func=np.nanmean)
x,y = [v[~np.isnan(x)&~np.isnan(y)] for v in [x,y]]
bfx,f = fit_func(x, y)
bfy = f(bfx)
bfx, bfy = [v[~np.isnan(bfx)&~np.isnan(bfy)] for v in [bfx,bfy]]
if plt:
x_range, y_range = [v.max()-v.min() for v in [x,y]]
plt.ylim([y.min()-y_range*.1, y.max()+y_range*.1])
plt.xlim([x.min()-x_range*.1, x.max()+x_range*.1])
# divide curve into N segments of same normalized curve length
xg, xo = np.nanmax(bfx)-np.nanmin(bfx), np.nanmin(bfx)
yg, yo = np.nanmax(bfy)-np.nanmin(bfy), np.nanmin(bfy)
nbfx = (bfx-xo)/xg
nbfy = (bfy-yo)/yg
l = np.cumsum(np.sqrt(np.diff(nbfx)**2+np.diff(nbfy)**2))
nx, ny = [np.interp(np.linspace(l[0], l[-1], bins), l, (xy[1:]+xy[:-1])/2) for xy in [nbfx,nbfy]]
last = (-1,0)
pc = []
used = np.zeros_like(x).astype(np.bool)
for i in range(0,len(nx)):
i1,i2 = max(0,i-1), min(len(nx)-1,i+1)
a =-(nx[i2]-nx[i1])/ (ny[i2]-ny[i1])
b = (ny[i]-(a*nx[i]))*yg+yo
a *=yg/xg
x_ = [np.nanmin(x), np.nanmax(x)]
m1 = np.sign(last[0])*y < np.sign(last[0])*((x-xo)*last[0]+last[1])
m2 = np.sign(a)*y>np.sign(a)*(a*(x-xo)+b)
m = m1&m2&~used
if plt:
plt.plot(x_, ((a)*(x_-xo))+b)
plt.plot(x[m], y[m],'.')
if np.sum(m)>=bin_min_count:
pc.append((np.median(x[m]), np.median(y[m])))
used = used|m
last = (a,b)
#bfx,bfy = zip(*pc)
if plt:
pbfx, pbfy = np.array(pc).T
plt.plot(bfx,bfy, 'orange', label='initial_fit')
plt.plot(pbfx, pbfy, 'gray', label="perpendicular fit")
plt.legend()
#PlotData(None, bfx,bfy)
return np.array(pc).T
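A hypothetical follow-up to the bin_fit sketch above: perpendicular_bin_fit refines the initial curve by re-binning the observations perpendicular to it, which helps where the curve is steep (e.g. a power curve around rated wind speed); pass a pyplot module as plt to watch the intermediate bins:

from wetb.signal.fit import perpendicular_bin_fit

fit_x, fit_y = perpendicular_bin_fit(x, y, bins=30)   # medians of the perpendicular bins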
'''
Created on 07/07/2015
@author: MMPE
'''
import numpy as np
from wetb.signal.fit import bin_fit
def fourier_fit(y, max_nfft, x=None):
"""Approximate a signal, y, with Fourier fit"""
d = np.arange(360)
return d, lambda deg : np.interp(deg%360, d, F2x(x2F(y, max_nfft, x)))
def fourier_fit_old(y, nfft):
F = np.zeros(len(y), dtype=np.complex)
F[:nfft + 1] = x2F(y, nfft)[:nfft + 1]
return np.fft.ifft(F) * len(F)
def F2x(F_coefficients):
"""Compute signal from Fourier coefficients"""
F = np.zeros(360, dtype=np.complex)
nfft = len(F_coefficients) // 2
F[:nfft + 1] = np.conj(F_coefficients[:nfft + 1])
F[1:nfft + 1] += (F_coefficients[-nfft:][::-1])
return np.real(np.fft.ifft(F) * len(F))
def x2F(y, max_nfft, x=None):
"""Compute Fourier coefficients from signal (signal may contain NANs)"""
d = np.arange(360)
if x is not None:
x,fit = bin_fit(x,y, d)
y = fit(d)
nfft = min(max_nfft, len(y) // 2 + 1)
n = len(y)
N = nfft * 2 + 1
theta = np.linspace(0, 2 * np.pi, n + 1)[:n]
theta[np.isnan(y)] = np.nan
a = np.empty((nfft * 2 + 1, nfft * 2 + 1))
b = np.empty(nfft * 2 + 1)
A0_lst = lambda dF : 2 * np.nansum(1 * dF)
A_lst = lambda dF : [2 * np.nansum(np.cos(i * theta) * dF) for i in range(1, nfft + 1)]
B_lst = lambda dF : [2 * np.nansum(np.sin(i * theta) * dF) for i in range(1, nfft + 1)]
row = lambda dF : np.r_[A0_lst(dF), A_lst(dF), B_lst(dF)]
for i in range(nfft + 1):
a[i, :] = row(np.cos(i * theta))
b[i] = 2 * np.nansum(y * np.cos(i * theta))
for i, r in enumerate(range(nfft + 1, nfft * 2 + 1), 1):
a[r, :] = row(np.sin(i * theta))
b[r] = 2 * np.nansum(y * np.sin(i * theta))
AB = np.linalg.solve(a, b)
F = np.zeros(n, dtype=np.complex)
F = np.r_[AB[0], (AB[1:nfft + 1] + 1j * AB[nfft + 1:]), np.zeros(nfft) ]
return F
def rx2F(y, max_nfft, x=None):
"""Convert non-complex signal, y, to single sided Fourier components, that satifies x(t) = sum(X(cos(iw)+sin(iw)), i=0..N)"""
d = np.arange(360)
if x is not None:
x,fit = bin_fit(x,y, d)
y = fit(d)
F = np.fft.rfft(y) / len(y)
F[1:-1] *= 2 # add negative side
F = np.conj(F)
return F[:max_nfft + 1]
def rF2x(rF):
"""Convert single sided Fourier components, that satisfies x(t) = sum(X(cos(iw)+sin(iw)), i=0..N) to non-complex signal"""
rF = np.conj(rF)
rF[1:] /= 2
rF = np.r_[rF, np.zeros(181 - len(rF), dtype=np.complex)]
return np.fft.irfft(rF) * 360
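A hedged round-trip sketch for the single-sided helpers rx2F and rF2x (assuming this module is the _fourier_fit imported by wetb.signal.fit): a 360-sample signal over one revolution is reduced to its first harmonics and rebuilt:

import numpy as np
from wetb.signal.fit import rx2F, rF2x

theta = np.linspace(0, 2 * np.pi, 360, endpoint=False)
y = 5 + 2 * np.cos(theta) + 0.5 * np.sin(3 * theta)
F = rx2F(y, max_nfft=4)    # mean value plus harmonics 1..4
y_rebuilt = rF2x(F)        # back to a 360-sample signal
print(np.allclose(y, y_rebuilt))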
'''
Created on 22. mar. 2017
@author: mmpe
'''
import numpy as np
def linear_fit(x,y):
from scipy.stats import linregress
slope, intercept, r_value, p_value, std_err = linregress(x,y)
return np.array([x.min(), x.max()]), lambda x : np.array(x)*slope+intercept , (slope, intercept)
\ No newline at end of file
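A minimal sketch for linear_fit, which returns the x-extent, a prediction function and the (slope, intercept) pair (illustrative data):

import numpy as np
from wetb.signal.fit import linear_fit

x = np.array([1., 2., 3., 4.])
y = np.array([2.1, 3.9, 6.2, 7.8])
x_ends, predict, (slope, intercept) = linear_fit(x, y)
print(slope, intercept)
print(predict(x_ends))   # fitted line evaluated at min(x) and max(x)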
File moved
File moved
'''
Created on 20/07/2016
@author: MMPE
'''
import os
import unittest
import numpy as np
from wetb.signal.error_measures import rms2fit_old, rms2fit
from wetb.signal.fit import bin_fit
tfp = os.path.join(os.path.dirname(__file__), 'test_files/')
class Test(unittest.TestCase):
# def test_rms2mean(self):
# data = np.load(tfp + "wsp_power.npy")
# print (data.shape)
# wsp = data[:, 1].flatten()
# power = data[:, 0].flatten()
#
# import matplotlib.pyplot as plt
# plt.plot(wsp, power, '.')
# x = np.linspace(wsp.min(), wsp.max(), 100)
# err, f = rms2mean(wsp, power)
# plt.plot(x, f(x), label='rms2mean, err=%.1f' % err)
# err, f = rms2fit(wsp, power, bins=20, kind=3, fit_func=np.median)
# plt.plot(x, f(x), label='rms2median, err=%.1f' % err)
# print (list(x))
# print (list(f(x)))
# plt.legend()
# plt.show()
def test_rms2fit(self):
x = np.array([10.234302313156817, 13.98517783627376, 7.7902362498947921, 11.08597865379001, 8.430623529700588, 12.279982848438033, 33.89151260027775, 12.095047111211629, 13.731371675689642, 14.858309846006723, 15.185588405617654])
y = np.array([28.515665187174477, 46.285328159179684, 17.763652093098958, 32.949007991536462, 20.788106673177083, 38.819226477864589, 96.53278479817709, 38.479684539388025, 46.072654127604167, 51.875484233398439, 53.379342967122398])
err, fit = rms2fit_old(x, y, kind=1, bins=15)
err2, fit2 = rms2fit(x, y, fit_func=lambda x,y: bin_fit(x,y, kind=1, bins=15, bin_min_count=1, lower_upper='extrapolate'))
self.assertAlmostEqual(err, 0.306,2)
if 0:
import matplotlib.pyplot as plt
plt.plot(x,y, '.')
x_ = np.linspace(x.min(), x.max(), 100)
plt.plot(x_, fit(x_), label='rms2fit, err=%.5f' % err)
plt.plot(x_, fit2(x_), label='rms2fit, err=%.5f' % err2)
plt.legend()
plt.show()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_rms2mean']
unittest.main()
File added
File added