tag_dict = self.cases[case]
# group all values loaded from the tag_dict here to keep an overview.
# these are the directories to SAVE the results/logs/turb files to.
# load all relevant dir settings: the result/logfile/turbulence/zip;
# they are then also available for the starting() and ending() parts
hawc2_exe = tag_dict['[hawc2_exe]']
self.results_dir = tag_dict['[res_dir]']
self.eigenfreq_dir = tag_dict['[eigenfreq_dir]']
self.logs_dir = tag_dict['[log_dir]']
self.animation_dir = tag_dict['[animation_dir]']
self.TurbDirName = tag_dict['[turb_dir]']
self.TurbDb = tag_dict['[turb_db_dir]']
self.wakeDb = tag_dict['[wake_db_dir]']
self.meandDb = tag_dict['[meand_db_dir]']
self.WakeDirName = tag_dict['[wake_dir]']
self.MeanderDirName = tag_dict['[meander_dir]']
self.ModelZipFile = tag_dict['[model_zip]']
self.htc_dir = tag_dict['[htc_dir]']
self.hydro_dir = tag_dict['[hydro_dir]']
self.mooring_dir = tag_dict['[mooring_dir]']
self.model_path = tag_dict['[run_dir]']
self.turb_base_name = tag_dict['[turb_base_name]']
self.wake_base_name = tag_dict['[wake_base_name]']
self.meand_base_name = tag_dict['[meand_base_name]']
self.pbs_queue_command = tag_dict['[pbs_queue_command]']
self.walltime = tag_dict['[walltime]']
self.dyn_walltime = tag_dict['[auto_walltime]']
# create the pbs_out_dir if necessary
try:
path = tag_dict['[run_dir]'] + tag_dict['[pbs_out_dir]']
if not os.path.exists(path):
os.makedirs(path)
self.pbs_out_dir = tag_dict['[pbs_out_dir]']
except (KeyError, OSError):
pass
# create pbs_in subdirectories if necessary
try:
path = tag_dict['[run_dir]'] + tag_dict['[pbs_in_dir]']
if not os.path.exists(path):
os.makedirs(path)
self.pbs_in_dir = tag_dict['[pbs_in_dir]']
except (KeyError, OSError):
pass
try:
self.copyback_files = tag_dict['[copyback_files]']
self.copyback_frename = tag_dict['[copyback_frename]']
except KeyError:
pass
try:
self.copyto_generic = tag_dict['[copyto_generic]']
self.copyto_files = tag_dict['[copyto_files]']
except KeyError:
pass
# related to dynamically setting the walltime
duration = float(tag_dict['[time_stop]'])
dt = float(tag_dict['[dt_sim]'])
self.nr_time_steps.append(duration/dt)
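# e.g. (illustrative values) [time_stop]=600.0 s with [dt_sim]=0.02 s
# gives 30000 time steps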
self.duration.append(float(tag_dict['[duration]']))
self.t0.append(float(tag_dict['[t0]']))
if self.verbose:
print('htc_dir in pbs.create:')
print(self.htc_dir)
print(self.model_path)
# we only start a new case if the previous one has ended;
# the very first case has to start with starting()
if ended:
count1 = 1
# # when jobs depend on other jobs (constant node loading)
# if self.que_jobdeps:
# jobid = self.pref + str(self.jobid_list[i-1])
# jobid_dep = self.pref + str(self.jobid_deps[i-1])
# else:
# jobid = self.pref + str(count2)
# jobid_dep = None
if self.short_job_names:
jobid = self.pref + str(count2)
else:
jobid = tag_dict['[case_id]']
if self.pbs_fname_appendix and self.short_job_names:
# define the path for the new pbs script
pbs_in_fname = '%s_%s.p' % (tag_dict['[case_id]'], jobid)
else:
pbs_in_fname = '%s.p' % (tag_dict['[case_id]'])
pbs_path = self.model_path + self.pbs_in_dir + pbs_in_fname
# Start a new pbs script, we only need the tag_dict here
self.starting(tag_dict, jobid)
ended = False
# -----------------------------------------------------------------
# WRITING THE ACTUAL JOB PARAMETERS
# output the current scratch directory
self.pbs += "pwd\n"
# zip file has been copied to the node before (in start_pbs())
# unzip now in the node
self.pbs += "/usr/bin/unzip " + self.ModelZipFile + '\n'
# create all directories, especially relevant if there are case
# dependent sub directories that are not present in the ZIP file
self.pbs += "mkdir -p " + self.htc_dir + '\n'
self.pbs += "mkdir -p " + self.results_dir + '\n'
self.pbs += "mkdir -p " + self.logs_dir + '\n'
self.pbs += "mkdir -p " + self.TurbDirName + '\n'
if self.WakeDirName:
self.pbs += "mkdir -p " + self.WakeDirName + '\n'
if self.MeanderDirName:
self.pbs += "mkdir -p " + self.MeanderDirName + '\n'
if self.hydro_dir:
self.pbs += "mkdir -p " + self.hydro_dir + '\n'
# create the eigen analysis dir just in case that is necessary
if self.eigenfreq_dir:
self.pbs += 'mkdir -p %s \n' % self.eigenfreq_dir
# and copy the htc file to the node
self.pbs += "cp -R $PBS_O_WORKDIR/" + self.htc_dir \
+ case +" ./" + self.htc_dir + '\n'
# if there is a turbulence file data base dir, copy from there
if self.TurbDb:
turb_dir_src = os.path.join('$PBS_O_WORKDIR', self.TurbDb)
else:
turb_dir_src = os.path.join('$PBS_O_WORKDIR', self.TurbDirName)
# the original behaviour makes assumptions on the turbulence box
# names: turb_base_name_xxx_u.bin, turb_base_name_xxx_v.bin
if self.turb_base_name is not None:
turb_src = os.path.join(turb_dir_src, self.turb_base_name)
self.pbs += "cp -R %s*.bin %s \n" % (turb_src, self.TurbDirName)
# more generally, literally define the names of the boxes for u,v,w
# components
elif '[turb_fname_u]' in tag_dict:
turb_u = os.path.join(turb_dir_src, tag_dict['[turb_fname_u]'])
turb_v = os.path.join(turb_dir_src, tag_dict['[turb_fname_v]'])
turb_w = os.path.join(turb_dir_src, tag_dict['[turb_fname_w]'])
self.pbs += "cp %s %s \n" % (turb_u, self.TurbDirName)
self.pbs += "cp %s %s \n" % (turb_v, self.TurbDirName)
self.pbs += "cp %s %s \n" % (turb_w, self.TurbDirName)
# if there is a wake file database dir, copy from there
if self.wakeDb and self.WakeDirName:
wake_dir_src = os.path.join('$PBS_O_WORKDIR', self.wakeDb)
elif self.WakeDirName:
wake_dir_src = os.path.join('$PBS_O_WORKDIR', self.WakeDirName)
if self.wake_base_name is not None:
wake_src = os.path.join(wake_dir_src, self.wake_base_name)
self.pbs += "cp -R %s*.bin %s \n" % (wake_src, self.WakeDirName)
# if there is a meander file database dir, copy from there
if self.meandDb and self.MeanderDirName:
meand_dir_src = os.path.join('$PBS_O_WORKDIR', self.meandDb)
elif self.MeanderDirName:
meand_dir_src = os.path.join('$PBS_O_WORKDIR', self.MeanderDirName)
if self.meand_base_name is not None:
meand_src = os.path.join(meand_dir_src, self.meand_base_name)
self.pbs += "cp -R %s*.bin %s \n" % (meand_src, self.MeanderDirName)
# copy and rename input files with a given versioned name to the
# required non-unique generic version
for fname, fgen in zip(self.copyto_files, self.copyto_generic):
self.pbs += "cp -R $PBS_O_WORKDIR/%s ./%s \n" % (fname, fgen)
# the hawc2 execution commands via wine
param = (self.wine, hawc2_exe, self.htc_dir+case, self.wine_appendix)
self.pbs += "%s %s ./%s %s &\n" % param
#self.pbs += "wine get_mac_adresses" + '\n'
# self.pbs += "cp -R ./*.mac $PBS_O_WORKDIR/." + '\n'
# -----------------------------------------------------------------
# and we end when the CPUs per node are full
if int(count1/self.maxcpu) == 1:
# write the end part of the pbs script
self.ending(pbs_path)
ended = True
# print progress:
replace = ((i/self.maxcpu), (i_tot/self.maxcpu), self.walltime)
print('pbs script %3i/%i walltime=%s' % replace)
count2 += 1
i += 1
# the next cpu
count1 += 1
# it could be that the last node was not fully loaded. In that case
# we have not had a successful ending, and we still need to finish
if not ended:
# write the end part of the pbs script
self.ending(pbs_path)
# progress printing
replace = ( (i/self.maxcpu), (i_tot/self.maxcpu), self.walltime )
print('pbs script %3i/%i walltime=%s, partially loaded' % replace)
# print 'pbs progress, script '+format(i/self.maxcpu,'2.0f')\
# + '/' + format(i_tot/self.maxcpu, '2.0f') \
# + ' partially loaded...'
def starting(self, tag_dict, jobid):
"""
First part of the pbs script
"""
# a new clean pbs script!
self.pbs = ''
self.pbs += "### Standard Output" + ' \n'
case_id = tag_dict['[case_id]']
# PBS job name
self.pbs += "#PBS -N %s \n" % (jobid)
self.pbs += "#PBS -o ./" + self.pbs_out_dir + case_id + ".out" + '\n'
# self.pbs += "#PBS -o ./pbs_out/" + jobid + ".out" + '\n'
self.pbs += "### Standard Error" + ' \n'
self.pbs += "#PBS -e ./" + self.pbs_out_dir + case_id + ".err" + '\n'
# self.pbs += "#PBS -e ./pbs_out/" + jobid + ".err" + '\n'
self.pbs += '#PBS -W umask=003\n'
self.pbs += "### Maximum wallclock time format HOURS:MINUTES:SECONDS\n"
# self.pbs += "#PBS -l walltime=" + self.walltime + '\n'
self.pbs += "#PBS -l walltime=[walltime]\n"
if self.qsub == 'time':
self.pbs += "#PBS -a [start_time]" + '\n'
elif self.qsub == 'depend':
# set job dependencies, job_id refers to the PBS job_id, which is only
# assigned to a job at the moment it gets qsubbed into the queue
self.pbs += "[nodeps]PBS -W depend=afterany:[job_id]\n"
# if self.que_jobdeps:
# self.pbs += "#PBS -W depend=afterany:%s\n" % jobid_dep
# else:
# self.pbs += "#PBS -a [start_time]" + '\n'
# in the case of Gorm each job has a different scratch dir. If we set
# maxcpu to 12 they all share the same scratch dir, and that case would
# have to be handled differently
# specify the number of nodes and cpus per node required
if self.maxcpu > 1:
# Number of nodes and cpus per node (ppn)
lnodes = int(math.ceil(len(self.cases)/float(self.maxcpu)))
lnodes = 1
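# note: the hard-coded lnodes = 1 above overrides the computed value,
# forcing all jobs of this script onto a single node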
self.pbs += "#PBS -l nodes=%i:ppn=%i\n" % (lnodes, self.maxcpu)
else:
self.pbs += "#PBS -l nodes=1:ppn=1\n"
# Number of nodes and cpus per node (ppn)
self.pbs += "### Queue name" + '\n'
# queue names for Thyra are as follows:
# short walltime queue (shorter than an hour): '#PBS -q xpresq'
# or otherwise for longer jobs: '#PBS -q workq'
self.pbs += self.pbs_queue_command + '\n'
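# the header written so far looks like (values are illustrative):
# #PBS -N dlc12_wsp10_s101
# #PBS -o ./pbs_out/dlc12_wsp10_s101.out
# #PBS -e ./pbs_out/dlc12_wsp10_s101.err
# #PBS -W umask=003
# #PBS -l walltime=[walltime]
# #PBS -l nodes=1:ppn=12
# #PBS -q workq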
self.pbs += "### Create scratch directory and copy data to it \n"
# output the current directory
self.pbs += "cd $PBS_O_WORKDIR" + '\n'
self.pbs += 'echo "current working dir (pwd):"\n'
self.pbs += "pwd \n"
# The batch system on Gorm allows more than one job per node.
# Because of this the scratch directory name includes both the
# user name and the job ID, that is /scratch/$USER/$PBS_JOBID
# if not scratch, make the dir
if self.node_run_root != '/scratch':
self.pbs += 'mkdir -p %s/$USER\n' % self.node_run_root
self.pbs += 'mkdir -p %s/$USER/$PBS_JOBID\n' % self.node_run_root
# copy the zip files to the scratch dir on the node
self.pbs += "cp -R ./" + self.ModelZipFile + \
' %s/$USER/$PBS_JOBID\n' % (self.node_run_root)
self.pbs += '\n\n'
self.pbs += 'echo ""\n'
self.pbs += 'echo "Execute commands on scratch nodes"\n'
self.pbs += 'cd %s/$USER/$PBS_JOBID\n' % self.node_run_root
# # also copy all the HAWC2 exe's to the scratch dir
# self.pbs += "cp -R %s/* ./\n" % self.wine_dir
# # custom name hawc2 exe
# self.h2_new = tag_dict['[hawc2_exe]'] + '-' + jobid + '.exe'
# self.pbs += "mv %s.exe %s\n" % (tag_dict['[hawc2_exe]'], self.h2_new)
def ending(self, pbs_path):
"""
Last part of the pbs script, including command to write script to disc
COPY BACK: from node to
"""
self.pbs += "### wait for jobs to finish \n"
self.pbs += "wait\n"
self.pbs += 'echo ""\n'
self.pbs += 'echo "Copy back from scratch directory" \n'
for i in range(1,self.maxcpu+1,1):
# navigate to the cpu dir on the node
# The batch system on Gorm allows more than one job per node.
# Because of this the scratch directory name includes both the
# user name and the job ID, that is /scratch/$USER/$PBS_JOBID
# NB! This is different from Thyra!
self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
# create the log, res etc dirs in case they do not exist
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.results_dir + "\n"
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.logs_dir + "\n"
if self.animation_dir:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.animation_dir + "\n"
if self.copyback_turb and self.TurbDb:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.TurbDb + "\n"
elif self.copyback_turb:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.TurbDirName + "\n"
if self.copyback_turb and self.wakeDb:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.wakeDb + "\n"
elif self.WakeDirName:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.WakeDirName + "\n"
if self.copyback_turb and self.meandDb:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.meandDb + "\n"
elif self.MeanderDirName:
self.pbs += "mkdir -p $PBS_O_WORKDIR/" + self.MeanderDirName + "\n"
# and copy the results and log files from the node to the
# Thyra home dir
self.pbs += "cp -R " + self.results_dir + \
". $PBS_O_WORKDIR/" + self.results_dir + ".\n"
self.pbs += "cp -R " + self.logs_dir + \
". $PBS_O_WORKDIR/" + self.logs_dir + ".\n"
if self.animation_dir:
self.pbs += "cp -R " + self.animation_dir + \
". $PBS_O_WORKDIR/" + self.animation_dir + ".\n"
if self.eigenfreq_dir:
# just in case the eig dir has subdirs for the results, only
# select the base path and cp -r will take care of the rest
p1 = self.eigenfreq_dir.split('/')[0]
self.pbs += "cp -R %s/. $PBS_O_WORKDIR/%s/. \n" % (p1, p1)
# for eigen analysis with floater, modes are in root
eig_dir_sys = '%ssystem/' % self.eigenfreq_dir
self.pbs += 'mkdir -p $PBS_O_WORKDIR/%s \n' % eig_dir_sys
self.pbs += "cp -R mode* $PBS_O_WORKDIR/%s. \n" % eig_dir_sys
# for all *.bin files on the node, only copy the turbulence files
# back if they do not exist in the target directory yet
cmd = 'for i in `ls *.bin`; do if [ -e $PBS_O_WORKDIR/%s$i ]; '
cmd += 'then echo "$i exists no copyback"; else echo "$i copyback"; '
cmd += 'cp $i $PBS_O_WORKDIR/%s; fi; done\n'
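# rendered example with [turb_db_dir] = 'turb/' (illustrative):
# for i in `ls *.bin`; do if [ -e $PBS_O_WORKDIR/turb/$i ]; then echo
# "$i exists no copyback"; else echo "$i copyback";
# cp $i $PBS_O_WORKDIR/turb/; fi; done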
# copy back turbulence file?
# browse to the node turb dir
self.pbs += '\necho ""\n'
self.pbs += 'echo "COPY BACK TURB IF APPLICABLE"\n'
if self.TurbDirName:
self.pbs += 'cd %s\n' % self.TurbDirName
if self.copyback_turb and self.TurbDb:
tmp = (self.TurbDb, self.TurbDb)
self.pbs += cmd % tmp
elif self.copyback_turb:
tmp = (self.TurbDirName, self.TurbDirName)
self.pbs += cmd % tmp
if self.TurbDirName:
# and back to normal model root
self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
if self.WakeDirName:
self.pbs += 'cd %s\n' % self.WakeDirName
if self.copyback_turb and self.wakeDb:
tmp = (self.wakeDb, self.wakeDb)
self.pbs += cmd % tmp
elif self.copyback_turb and self.WakeDirName:
tmp = (self.WakeDirName, self.WakeDirName)
self.pbs += cmd % tmp
if self.WakeDirName:
# and back to normal model root
self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
if self.MeanderDirName:
self.pbs += 'cd %s\n' % self.MeanderDirName
if self.copyback_turb and self.meandDb:
tmp = (self.meandDb, self.meandDb)
self.pbs += cmd % tmp
elif self.copyback_turb and self.MeanderDirName:
tmp = (self.MeanderDirName, self.MeanderDirName)
self.pbs += cmd % tmp
if self.MeanderDirName:
# and back to normal model root
self.pbs += "cd %s/$USER/$PBS_JOBID\n" % self.node_run_root
self.pbs += 'echo "END COPY BACK TURB"\n'
self.pbs += 'echo ""\n\n'
# copy back any other kind of file specified
if len(self.copyback_frename) == 0:
self.copyback_frename = self.copyback_files
for fname, fnew in zip(self.copyback_files, self.copyback_frename):
self.pbs += "cp -R %s $PBS_O_WORKDIR/%s \n" % (fname, fnew)
# check what is left
self.pbs += 'echo ""\n'
self.pbs += 'echo "following files are on the node (find .):"\n'
self.pbs += 'find .\n'
# # and delete it all (but that is not allowed)
# self.pbs += 'cd ..\n'
# self.pbs += 'ls -lah\n'
# self.pbs += 'echo $PBS_JOBID\n'
# self.pbs += 'rm -r $PBS_JOBID \n'
# Delete the batch file at the end. However, is this possible since
# the batch file is still open at this point????
# self.pbs += "rm "
# base walltime on the longest simulation in the batch
nr_time_steps = max(self.nr_time_steps)
# TODO: take into account the difference between time steps with
# and without output. This penalty also depends on the number of
# output channels. So from 0 until t0 we have no penalty,
# from t0 until t0+duration we have the output penalty.
# always add a predefined lead time to account for startup losses
tmax = int(nr_time_steps*self.secperiter*self.iterperstep + self.tlead)
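# note that fromtimestamp() below interprets tmax as seconds since the
# epoch in local time: the resulting HH:MM:SS string is only correct for
# runs shorter than 24 hours and on machines with a zero UTC offset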
if self.dyn_walltime:
dt_seconds = datetime.datetime.fromtimestamp(tmax)
self.walltime = dt_seconds.strftime('%H:%M:%S')
self.pbs = self.pbs.replace('[walltime]', self.walltime)
else:
self.pbs = self.pbs.replace('[walltime]', self.walltime)
# and reset the nr_time_steps list for the next pbs job file
self.nr_time_steps = []
self.t0 = []
self.duration = []
# TODO: add logfile checking support directly here. In that way each
# node will do the logfile checking and statistics calculations right
# after the simulation. Figure out a way how to merge the data from
# all the different cases afterwards
self.pbs += "exit\n"
if self.verbose:
print('writing pbs script to path: ' + pbs_path)
# and write the script to a file:
write_file(pbs_path, self.pbs, 'w')
# make the string empty again to free memory
self.pbs = ''
def check_results(self, cases):
"""
Cross-check if all simulations on the list have produced both result and
log files. Combine with ErrorLogs to identify which errors occur where.
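
Returns a dict with the failed cases; its length is zero when all cases
have both a log file and non-empty result files.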
"""
cases_fail = {}
print('checking if all log and result files are present...', end='')
# check for each case if we have results and a log file
for cname, case in cases.items():
run_dir = case['[run_dir]']
res_dir = case['[res_dir]']
log_dir = case['[log_dir]']
cname_ = cname.replace('.htc', '')
f_log = os.path.join(run_dir, log_dir, cname_)
f_res = os.path.join(run_dir, res_dir, cname_)
if not os.path.exists(f_log + '.log'):
cases_fail[cname] = copy.copy(cases[cname])
continue
try:
size_sel = os.stat(f_res + '.sel').st_size
size_dat = os.stat(f_res + '.dat').st_size
except OSError:
size_sel = 0
size_dat = 0
if size_sel < 5 or size_dat < 5:
cases_fail[cname] = copy.copy(cases[cname])
print('done!')
# length will be zero if there are no failures
return cases_fail
# TODO: rewrite the error log analysis to something better. Take a different
# approach: start from the case and see if the results are present. Then we
# also have the tags_dict available when log-checking a certain case
"""
Analyse all HAWC2 log files in any given directory
==================================================
Usage:
logs = ErrorLogs()
logs.MsgList : list with the messages to be checked. Add more if required
logs.ResultFile : name of the result file (default is ErrorLog.csv)
logs.PathToLogs : the directory where the log files reside; the
ResultFile will be saved in the same directory.
It is also possible to give the path of a specific
file, in which case the logfile will not be saved. Save
when all required messages are analysed with save()
logs.check() to analyse all the logfiles and create the ResultFile
logs.save() to save after single file analysis
logs.MsgListLog : [ [case, line nr, error1, line nr, error2, ....], [], ...]
holding the error messages, empty if no error message was found.
It will survive as long as the logs object exists. Keep in
mind that processing many files with many error types (as defined
in MsgList) might lead to an increase in memory usage.
logs.MsgListLog2 : dict(key=case, value=[found_error, exit_correct])
where found_error and exit_correct are booleans. found_error only
indicates whether or not any error message has been found.
All files in the specified folder (PathToLogs) will be evaluated.
When any item present in MsgList occurs, the line number of its first
occurrence will be written to the ResultFile.
If more messages are required, add them to MsgList.
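
Example (paths are illustrative):
logs = ErrorLogs()
logs.PathToLogs = 'model/logfiles/'
logs.check()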
"""
# TODO: move to the HAWC2 plugin for cases
def __init__(self, silent=False, cases=None):
self.silent = silent
# specify folder which contains the log files
self.PathToLogs = ''
self.ResultFile = 'ErrorLog.csv'
self.cases = cases
# the total message list log:
self.MsgListLog = []
# a smaller version, just indication if there are errors:
self.MsgListLog2 = dict()
# specify which messages to look for. The number tracks the order;
# this makes it easier to view afterwards in a spreadsheet:
# every error will have its own column
# error messages that appear during initialisation
self.err_init = {}
self.err_init[' *** ERROR *** Error in com'] = len(self.err_init)
self.err_init[' *** ERROR *** in command '] = len(self.err_init)
# *** WARNING *** A comma "," is written within the command line
self.err_init[' *** WARNING *** A comma ",'] = len(self.err_init)
# *** ERROR *** Not correct number of parameters
self.err_init[' *** ERROR *** Not correct '] = len(self.err_init)
# *** INFO *** End of file reached
self.err_init[' *** INFO *** End of file r'] = len(self.err_init)
# *** ERROR *** No line termination in command line
self.err_init[' *** ERROR *** No line term'] = len(self.err_init)
# *** ERROR *** MATRIX IS NOT DEFINITE
self.err_init[' *** ERROR *** MATRIX IS NO'] = len(self.err_init)
# *** ERROR *** There are unused relative
self.err_init[' *** ERROR *** There are un'] = len(self.err_init)
# *** ERROR *** Error finding body based
self.err_init[' *** ERROR *** Error findin'] = len(self.err_init)
# *** ERROR *** In body actions
self.err_init[' *** ERROR *** In body acti'] = len(self.err_init)
# *** ERROR *** Command unknown
self.err_init[' *** ERROR *** Command unkn'] = len(self.err_init)
# *** ERROR *** ERROR - More bodies than elements on main_body: tower
self.err_init[' *** ERROR *** ERROR - More'] = len(self.err_init)
# *** ERROR *** The program will stop
self.err_init[' *** ERROR *** The program '] = len(self.err_init)
# *** ERROR *** Unknown begin command in topologi.
self.err_init[' *** ERROR *** Unknown begi'] = len(self.err_init)
# *** ERROR *** Not all needed topologi main body commands present
self.err_init[' *** ERROR *** Not all need'] = len(self.err_init)
# *** ERROR *** opening timoschenko data file
self.err_init[' *** ERROR *** opening tim'] = len(self.err_init)
# *** ERROR *** Error opening AE data file
self.err_init[' *** ERROR *** Error openin'] = len(self.err_init)
# *** ERROR *** Requested blade _ae set number not found in _ae file
self.err_init[' *** ERROR *** Requested bl'] = len(self.err_init)
# Error opening PC data file
self.err_init[' Error opening PC data file'] = len(self.err_init)
# *** ERROR *** error reading mann turbulence
self.err_init[' *** ERROR *** error readin'] = len(self.err_init)
# *** INFO *** The DLL subroutine
self.err_init[' *** INFO *** The DLL subro'] = len(self.err_init)
# ** WARNING: FROM ESYS ELASTICBAR: No keyword
self.err_init[' ** WARNING: FROM ESYS ELAS'] = len(self.err_init)
# *** ERROR *** DLL ./control/killtrans.dll could not be loaded - error!
self.err_init[' *** ERROR *** DLL'] = len(self.err_init)
# *** ERROR *** The DLL subroutine
self.err_init[' *** ERROR *** The DLL subr'] = len(self.err_init)
# *** WARNING *** Shear center x location not in elastic center, set to zero
self.err_init[' *** WARNING *** Shear cent'] = len(self.err_init)
# Turbulence file ./xyz.bin does not exist
self.err_init[' Turbulence file '] = len(self.err_init)
self.err_init[' *** WARNING ***'] = len(self.err_init)
self.err_init[' *** ERROR ***'] = len(self.err_init)
self.err_init[' WARNING'] = len(self.err_init)
self.err_init[' ERROR'] = len(self.err_init)
# error messages that appear during simulation
self.err_sim = {}
# *** ERROR *** Wind speed requested inside
self.err_sim[' *** ERROR *** Wind speed r'] = len(self.err_sim)
# Maximum iterations exceeded at time step:
self.err_sim[' Maximum iterations exceede'] = len(self.err_sim)
# Solver seems not to converge:
self.err_sim[' Solver seems not to conver'] = len(self.err_sim)
# *** ERROR *** Out of x bounds:
self.err_sim[' *** ERROR *** Out of x bou'] = len(self.err_sim)
# *** ERROR *** Out of limits in user defined shear field - limit value used
self.err_sim[' *** ERROR *** Out of limit'] = len(self.err_sim)
# TODO: error message from a non existing channel output/input
# add more messages if required...
self.init_cols = len(self.err_init)
self.sim_cols = len(self.err_sim)
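# each message ends up as a pair of columns in the ResultFile written
# by save(): the line (or iteration) numbers where it occurred and the
# matched message itself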
# TODO: save this not as a csv text string but as a df_dict, and save as
# Excel and DataFrame!
def check(self, appendlog=False, save_iter=False):
# MsgListLog = []
# load all the files in the given path
FileList = []
for files in os.walk(self.PathToLogs):
FileList.append(files)
# if a file path is given instead of a directory,
# the generated FileList will be empty!
single_file = False
try:
NrFiles = len(FileList[0][2])
# input was a single file:
except IndexError:
NrFiles = 1
# simulate one entry on FileList[0][2], give it the file name
# and save the directory in self.PathToLogs
tmp = self.PathToLogs.split(os.path.sep)[-1]
# cut out the file name from the directory
self.PathToLogs = self.PathToLogs.replace(tmp, '')
FileList.append([ [],[],[tmp] ])
single_file = True
i = 1
# walk through the files present in the folder path
for fname in FileList[0][2]:
fname_lower = fname.lower()
# progress indicator
if NrFiles > 1:
if not self.silent:
print('progress: ' + str(i) + '/' + str(NrFiles))
# open the current log file
f_log = os.path.join(self.PathToLogs, str(fname_lower))
with open(f_log, 'r') as f:
lines = f.readlines()
# keep track of the messages already found in this file
tempLog = []
tempLog.append(fname)
exit_correct, found_error = False, False
# create empty list item for the different messages and line
# number. Include one column for non identified messages
for j in range(self.init_cols + self.sim_cols + 1):
tempLog.append('')
tempLog.append('')
# if there is a cases object, see how many time steps we expect
if self.cases is not None:
case = self.cases[fname.replace('.log', '.htc')]
dt = float(case['[dt_sim]'])
time_steps = float(case['[time_stop]']) / dt
iterations = np.ndarray((int(time_steps)+1, 3), dtype=np.float32)
else:
iterations = np.ndarray( (len(lines),3), dtype=np.float32 )
dt = False
iterations[:,0:2] = -1
iterations[:,2] = 0
# keep track of the time_step number
time_step, init_block = -1, True
# check for messages in the current line
# for speed: delete from message watch list if message is found
for j, line in enumerate(lines):
# all IDs of errors are 27 characters long
msg = line[:27]

# remove the line terminator, this seems to take 2 characters
# on PY2, but only one in PY3
line = line.replace('\n', '')
# keep track of the number of iterations
if line[:12] == ' Global time':
time_step += 1
iterations[time_step,0] = float(line[14:40])

# for PY2, new line is 2 characters, for PY3 it is one char
iterations[time_step,1] = int(line[-6:])
# time step is the first time stamp
if not dt:
dt = float(line[15:40])
# no need to look for messages if global time is mentioned
continue
elif line[:20] == ' Starting simulation':
init_block = False
elif init_block:
# if string is shorter, we just get a shorter string.
# checking presence in dict is faster compared to checking
# the length of the string
if msg in self.err_init:
col_nr = self.err_init[msg]
# 2nd item is the column position of the message

tempLog[2*(col_nr+1)] = line
# line number of the message
tempLog[2*col_nr+1] += '%i, ' % j
found_error = True
# find errors that can occur during simulation
elif msg in self.err_sim:
col_nr = self.err_sim[msg] + self.init_cols
# 2nd item is the column position of the message

tempLog[2*(col_nr+1)] = line
# in case stuff already goes wrong on the first time step
if time_step == -1:
time_step = 0
# line number of the message
tempLog[2*col_nr+1] += '%i, ' % time_step
found_error = True
iterations[time_step,2] = 1
# method of last resort, we have no idea what message
elif line[:10] == ' *** ERROR' or line[:11] == ' ** WARNING':

tempLog[-2] = line
# line number of the message
tempLog[-1] = j
found_error = True
# in case stuff already goes wrong on the first time step
if time_step == -1:
time_step = 0
iterations[time_step,2] = 1
# simulation and simulation output time
if self.cases is not None:
t_stop = float(case['[time_stop]'])
duration = float(case['[duration]'])
else:
t_stop = -1
duration = -1
# see if the last line holds the sim time
if line[:15] == ' Elapsed time :':
exit_correct = True

elapsed_time = float(line[15:-1])
tempLog.append( elapsed_time )
# in some cases, Elapsed time is not given, and the last message
# might be: " Closing of external type2 DLL"
elif line[:20] == ' Closing of external':
exit_correct = True
elapsed_time = iterations[time_step,0]
tempLog.append( elapsed_time )
elif np.allclose(iterations[time_step,0], t_stop):
exit_correct = True
elapsed_time = iterations[time_step,0]
tempLog.append( elapsed_time )
else:
elapsed_time = -1
tempLog.append('')
# give the last recorded time step
tempLog.append('%1.11f' % iterations[time_step,0])
# simulation and simulation output time
tempLog.append('%1.01f' % t_stop)
tempLog.append('%1.04f' % (t_stop/elapsed_time))
tempLog.append('%1.01f' % duration)
# as last element, add the total number of iterations
itertotal = np.nansum(iterations[:,1])
tempLog.append('%i' % itertotal)
# the delta t used for the simulation
if dt:
tempLog.append('%1.7f' % dt)
else:
tempLog.append('failed to find dt')
# number of time steps
tempLog.append('%i' % len(iterations) )
# if the simulation didn't end correctly, the elapsed_time doesn't
# exist. Add the average and maximum nr of iterations per step
# or, if only the structural and eigen analysis is done, we have 0
try:
ratio = float(elapsed_time)/float(itertotal)
tempLog.append('%1.6f' % ratio)
except (UnboundLocalError, ZeroDivisionError, ValueError):
tempLog.append('')
# when there are no time steps (structural analysis only)
try:
tempLog.append('%1.2f' % iterations[:,1].mean() )
tempLog.append('%1.2f' % iterations[:,1].max() )
except ValueError:
tempLog.append('')
tempLog.append('')
# save the iterations in the results folder
if save_iter:
fiter = fname.replace('.log', '.iter')
fmt = ['%12.06f', '%4i', '%4i']
if self.cases is not None:
fpath = os.path.join(case['[run_dir]'], case['[iter_dir]'])
# in case it has subdirectories
for tt in [3,2,1]:
tmp = os.path.sep.join(fpath.split(os.path.sep)[:-tt])
if not os.path.exists(tmp):
os.makedirs(tmp)
if not os.path.exists(fpath):
os.makedirs(fpath)
np.savetxt(fpath + fiter, iterations, fmt=fmt)
else:
np.savetxt(os.path.join(self.PathToLogs, fiter), iterations,
fmt=fmt)
# append the messages found in the current file to the overview log
self.MsgListLog.append(tempLog)
self.MsgListLog2[fname] = [found_error, exit_correct]
i += 1
# # if no messages are found for the current file, then say so:
# if len(MsgList2) == len(self.MsgList):
# tempLog[-1] = 'NO MESSAGES FOUND'
# if we have only one file, don't save the log file to disk. It is
# expected that when analysing many different single files, saving
# each time would slow the script down
if single_file:
# now we make it available over the object to save and let it grow
# over many analyses
# self.MsgListLog = copy.copy(MsgListLog)
pass
else:
self.save(appendlog=appendlog)
def save(self, appendlog=False):
# write the results in a file, start with a header
contents = 'file name;' + 'lnr;msg;'*(self.init_cols)
contents += 'iter_nr;msg;'*(self.sim_cols)
contents += 'lnr;msg;'
# and add headers for elapsed time, nr of iterations, and sec/iteration
contents += 'Elapsed time;last time step;Simulation time;'
contents += 'real sim time;Sim output time;'
contents += 'total iterations;dt;nr time steps;'
contents += 'seconds/iteration;average iterations/time step;'
contents += 'maximum iterations/time step;\n'
for k in self.MsgListLog:
for n in k:
contents = contents + str(n) + ';'
# at the end of each line, new line symbol
contents = contents + '\n'
# write csv file to disk, append to facilitate more logfile analysis
fname = os.path.join(self.PathToLogs, str(self.ResultFile))
if not self.silent:
print('Error log analysis saved at:')
print(fname)
if appendlog:
mode = 'a'
else:
mode = 'w'
with open(fname, mode) as f:
f.write(contents)
"""
Second generation ModelData function. The HawcPy version is crappy, buggy
and not mutch of use in the optimisation context.
"""
class st_headers(object):
"""
Indices to the respective parameters in the HAWC2 st data file
"""
r = 0
m = 1
x_cg = 2
y_cg = 3
ri_x = 4
ri_y = 5
x_sh = 6
y_sh = 7
E = 8
G = 9
Ixx = 10
Iyy = 11
I_p = 12
k_x = 13
k_y = 14
A = 15
pitch = 16
x_e = 17
y_e = 18
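# usage sketch (assuming st_arr is an ndarray(n, 19) data block as
# loaded from an st file): st_arr[:, st_headers.m] selects the mass
# per length column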
def __init__(self, verbose=False, silent=False):
self.verbose = verbose
self.silent = silent
# define the column width for printing
self.col_width = 13
# formatting and precision
self.float_hi = 9999.9999
self.float_lo = 0.01
self.prec_float = ' 9.05f'
self.prec_exp = ' 8.04e'
self.prec_loss = 0.01
#0 1 2 3 4 5 6 7 8 9 10 11
#r m x_cg y_cg ri_x ri_y x_sh y_sh E G I_x I_y
#12 13 14 15 16 17 18
#I_p/K k_x k_y A pitch x_e y_e
# 19 cols
self.st_column_header_list = ['r', 'm', 'x_cg', 'y_cg', 'ri_x', \
'ri_y', 'x_sh', 'y_sh', 'E', 'G', 'I_x', 'I_y', 'J', 'k_x', \
'k_y', 'A', 'pitch', 'x_e', 'y_e']
self.st_column_header_list_latex = ['r','m','x_{cg}','y_{cg}','ri_x',\
'ri_y', 'x_{sh}','y_{sh}','E', 'G', 'I_x', 'I_y', 'J', 'k_x', \
'k_y', 'A', 'pitch', 'x_e', 'y_e']
# make the column header
self.column_header_line = 19 * self.col_width * '=' + '\n'
for k in self.st_column_header_list:
self.column_header_line += k.rjust(self.col_width)
self.column_header_line += '\n' + (19 * self.col_width * '=') + '\n'
def fromline(self, line, separator=' '):
# TODO: move this to the global function space (dav-general-module)
"""
split a line, ignore any blank spaces, and return a list with only
the values (no empty entries)
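
e.g. fromline('  1.0\t2.0  3.0\n') returns ['1.0', '2.0', '3.0']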
"""
# remove all tabs, new lines, etc? (\t, \r, \n)
line = line.replace('\t',' ').replace('\n','').replace('\r','')
# strip leading and trailing spaces
line = line.strip()
line = line.split(separator)
values = []
for k in range(len(line)):
if len(line[k]) > 0: #and k == item_nr:
values.append(line[k])
# break
return values
def load_st(self, file_path, file_name):
"""
Now a better format: st_dict has following key/value pairs
'nset' : total number of sets in the file (int).
This should be autocalculated every time when writing
a new file.
'007-000-0' : set number line in one peace
'007-001-a' : comments for set-subset nr 07-01 (str)
'007-001-b' : subset nr and number of data points, should be
autocalculate every time you generate a file
'007-001-d' : data for set-subset nr 07-01 (ndarray(n,19))
NOW WE ONLY CONSIDER SUBSET COMMENTS, SET COMMENTS, HOW ARE THEY
TREATED NOW??
st_dict is for easy remaking the same file. We need a different format
for easy reading the comments as well. For that we have the st_comments
"""
# TODO: store this in an HDF5 format! This is perfect for that.
# read all the lines of the file into memory
self.st_path, self.st_file = file_path, file_name
with open(os.path.join(file_path, file_name)) as FILE:
lines = FILE.readlines()
subset = False
st_dict = dict()
st_comments = dict()
for i, line in enumerate(lines):
# convert line to a space separated list
line_list = self.fromline(line)
# see if the first character is marking something
if i == 0:
# it is possible that the NSET line is not defined
parts = line.split(' ')
try:
parts.remove(' ') # raises ValueError when not found
except ValueError:
pass
# we don't care what is on the nset line, just capture if
# there are any comment lines
set_nr = 0
subset_nr = 0
st_dict['000-000-0'] = line
# marks the start of a set
if line[0] == '#':