small.yml.j2
{% import "examples/configs/librispeech/sentencepiece/sp.yml.j2" as decoder_config with context %}
{{decoder_config}}
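# The import above loads the sentencepiece decoder settings as decoder_config with the current
# template context; the expression on the previous line expands that template inline here, and
# its vocabulary size is reused below as decoder_config.vocabsize.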

model_config:
  class_name: tensorflow_asr.models.transducer.conformer>Conformer
  config:
    name: conformer
    speech_config:
      sample_rate: 16000
      frame_ms: 25
      stride_ms: 10
      num_feature_bins: 80
      feature_type: log_mel_spectrogram
      augmentation_config:
        feature_augment:
          time_masking:
            prob: 1.0
            num_masks: 10
            mask_factor: -1
            p_upperbound: 0.05
            mask_value: 0
          freq_masking:
            prob: 1.0
            num_masks: 1
            mask_factor: 27
            mask_value: 0
    encoder_subsampling:
      class_name: tensorflow_asr.models.layers.subsampling>Conv2dSubsampling
      config:
        filters: [144, 144]
        kernels: [3, 3]
        strides: [2, 2]
        paddings: ["causal", "causal"]
        norms: ["batch", "batch"]
        activations: ["swish", "swish"]
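    # With two stride-2 convolutions, the subsampling block presumably reduces the time axis
    # by a factor of 4, i.e. one encoder frame per ~40 ms of audio given the 10 ms stride above.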
    encoder_ffm_residual_factor: 0.5
    encoder_mhsam_residual_factor: 1.0
    encoder_convm_residual_factor: 1.0
    encoder_dmodel: 144
    encoder_num_blocks: 16
    encoder_head_size: 36 # == dmodel // num_heads
    encoder_num_heads: 4
    encoder_mha_type: relmha
    encoder_interleave_relpe: True
    encoder_use_attention_causal_mask: False
    encoder_use_attention_auto_mask: True
    encoder_mhsam_use_attention_bias: True
    encoder_kernel_size: 32
    encoder_dropout: 0.1
    encoder_padding: causal
    encoder_memory_length: null
    prediction_label_encode_mode: embedding
    prediction_embed_dim: 320
    prediction_num_rnns: 1
    prediction_rnn_units: 320
    prediction_rnn_type: lstm
    prediction_rnn_implementation: 2
    prediction_rnn_unroll: False
    prediction_layer_norm: True
    prediction_projection_units: 0
    joint_dim: 320
    prejoint_encoder_linear: True
    prejoint_prediction_linear: True
    postjoint_linear: False
    joint_activation: tanh
    joint_mode: add
    blank: 0
    vocab_size: {{decoder_config.vocabsize}}
    kernel_regularizer:
      class_name: l2
      config:
        l2: 1e-6
    bias_regularizer:
      class_name: l2
      config:
        l2: 1e-6

learning_config:
  optimizer_config:
    class_name: Custom>Adam
    config:
      learning_rate:
        class_name: tensorflow_asr.optimizers.schedules>TransformerSchedule
        config:
          dmodel: 144
          warmup_steps: 10000
          max_lr: 0.05/(144**0.5)
          min_lr: 1e-6
          scale: 2.0
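      # TransformerSchedule presumably follows the standard Transformer warmup schedule,
      # lr = scale * dmodel**-0.5 * min(step**-0.5, step * warmup_steps**-1.5), kept within
      # [min_lr, max_lr]; with dmodel = 144, the max_lr expression 0.05/(144**0.5) is about 4.17e-3.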
      beta_1: 0.9
      beta_2: 0.98
      epsilon: 1e-9
      weight_decay: 1e-6

  gwn_config:
    predict_net_step: 20000
    predict_net_stddev: 0.075
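  # gwn_config likely configures Gaussian weight noise on the prediction network: noise with
  # stddev 0.075 is presumably added to its weights once training passes step 20000, a common
  # RNN-T regularization technique.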

  gradn_config: null

  batch_size: 2
  ga_steps: 16
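  # ga_steps appears to be gradient-accumulation steps, so each optimizer update presumably
  # sees an effective batch of batch_size * ga_steps = 2 * 16 = 32 examples per replica.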
  num_epochs: 300

  callbacks:
    - class_name: tensorflow_asr.callbacks>TerminateOnNaN
      config: {}
    - class_name: tensorflow_asr.callbacks>ModelCheckpoint
      config:
        filepath: {{modeldir}}/checkpoints/{epoch:02d}.h5
        save_best_only: False
        save_weights_only: True
        save_freq: epoch
    - class_name: tensorflow_asr.callbacks>BackupAndRestore
      config:
        backup_dir: {{modeldir}}/states
        save_freq: epoch
        delete_checkpoint: False
    - class_name: tensorflow_asr.callbacks>TensorBoard
      config:
        log_dir: {{modeldir}}/tensorboard
        histogram_freq: 0
        write_graph: False
        write_images: False
        write_steps_per_second: False
        update_freq: batch
        profile_batch: 0
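# modeldir is supplied as a Jinja2 variable when this template is rendered (presumably by the
# training entrypoint), while the {epoch:02d} placeholder in the checkpoint filepath is left
# for Keras's ModelCheckpoint callback to format at save time.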