-
Notifications
You must be signed in to change notification settings - Fork 0
/
generate_training_data.py
69 lines (52 loc) · 3.31 KB
/
generate_training_data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
"""
Author: Andreas Bott
year: 2024
mail: [email protected] or [email protected]
This script generates random samples of power plant and demand inputs and corresponding grid states. The samples are
stored in csv files which are numbered consecutively.
csv file format:
columns: [Q1, Q2, Q3, Q4, Q0], [T1, T2, T3, T4, T0], [[grid state vector == (T, mf, p, T_end)]]
In order to generate samples faster, an importance-sampling-approach is used which will be published in the paper:
"Efficient Training of Learning-Based Thermal Power Flow for 4th Generation District Heating Grids"
by Andreas Bott, Mario Beykirch, and Florian Steinke
currently under review at the journal "Energy", preprint available at: https://arxiv.org/abs/2403.11877
The script is designed to be run in parallel, where each instance computes a fraction of the samples.
:param n_samples: number of samples generated by all instances
:param n_instances: number of instances running in parallel
:param instant: integer index distinguishing the parallel instances; expected to be passed as sys.argv[1]
"""
import sys
import lib_dnn_soc.DNN_lib as NN_lib
import lib_dnn_soc.utility as util
import time
def __main__(results_file, grid_settings, n_samples, save_file, verbose=True, grid_identifier=None):
    """
    Generate `n_samples` training samples for one grid and write them to csv files.

    :param results_file: name of the log file, created under ./results/
    :param grid_settings: grid configuration (see Settings.py)
    :param n_samples: number of samples generated by this instance
    :param save_file: callable mapping a sample index to the target csv path
    :param verbose: if True, log messages are also printed
    :param grid_identifier: name of the grid to set up; defaults to the
        module-level ``grid_identifier`` set in the __main__ guard. (The
        original code read that global implicitly, which breaks when this
        function is imported elsewhere — made explicit and injectable here.)
    """
    if grid_identifier is None:
        # Backward-compatible fallback to the module-level name the original relied on.
        grid_identifier = globals()['grid_identifier']
    Results = util.ResFile(name=results_file, path='./results/', print=verbose)
    Results.log(f'Run DNN training script for {grid_identifier}, current time: {time.ctime()}')
    SE, d_prior_dist, T_prior_dist, cycles, grid = util.setup_training(grid_identifier, grid_settings)
    # setup importance sampler, i.e. compute proxy distribution
    # NOTE(review): `save_file` (not `results_file`) is forwarded as the sampler's
    # results_file argument — looks like a possible mix-up; confirm it is intended.
    IS = NN_lib.ImportanceSampler(d_prior_dist, T_prior_dist, SE, cycles, grid, results_file=save_file)
    IS.setup()
    # generate training samples
    IS.generate_training_samples(n_samples, include_slack=True, file_spec=save_file, verbose=verbose)
    Results.log(f'Finished generating {n_samples} training data for {grid_identifier}, current time: {time.ctime()}')
if __name__ == '__main__':
    grid_identifier = 'ladder5'
    results_file = 'results.out'
    from Settings import grid_settings, DNN_settings

    n_instances = 1  # number of simultaneous runs, each computing a fraction of the samples
    n_samples = DNN_settings['n_training'] + DNN_settings['n_val'] + DNN_settings['n_test']
    n_per_file = 2000  # samples stored per csv file

    # In order to avoid overwriting existing files, n_samples / (n_per_file * n_instances)
    # must be an integer. Raise instead of assert: asserts are stripped under `python -O`.
    if n_samples % (n_per_file * n_instances) != 0:
        raise ValueError(f'n_samples / (n_per_file * n_runs) should be an integer; '
                         f'n_samples: {n_samples}, n_per_file: {n_per_file}, '
                         f'n_runs: {n_instances}')

    # number of samples computed by this instance (floor division keeps it an int,
    # avoiding the original float round-trip through int(n_samples / n_instances))
    n_samples //= n_instances
    # instance id: 0 when run without arguments, otherwise sys.argv[1]
    instant = 0 if len(sys.argv) < 2 else int(sys.argv[1])

    # Choose save file dynamically to fill up files subsequently.
    # The starting file index depends on the instance number.
    def save_file(idx):
        """Return the csv path for sample index `idx` of this instance."""
        zerofile = instant * (n_samples // n_per_file)  # first file index owned by this instance
        folder = f'DNN_model/data/{grid_identifier}_{grid_settings["d_prior_type"]}/'
        file = f'{grid_identifier}_{idx // n_per_file + zerofile}.csv'
        return folder + file

    __main__(results_file, grid_settings, n_samples, save_file, verbose=True)