Commit 913c535 by khanhld
Parent(s): a4e6fdc

modify config file to match model implementation

Files changed (1):
  1. config.yaml (+7 -11)

--- a/config.yaml
+++ b/config.yaml
@@ -12,22 +12,18 @@ encoder_conf:
   dropout_rate: 0.1
   positional_dropout_rate: 0.1
   attention_dropout_rate: 0.1
-  input_layer: 'depthwise' # encoder input type, you can chose conv2d, conv2d6 and conv2d8
+  input_layer: 'dw_striding' # encoder input type, you can chose conv2d, conv2d6 and conv2d8
   normalize_before: true
   cnn_module_kernel: 15
   use_cnn_module: true
   activation_type: 'swish'
-  pos_enc_layer_type: 'stream_rel_pos'
-  selfattention_layer_type: 'stream_rel_selfattn'
+  pos_enc_layer_type: 'chunk_rel_pos'
+  selfattention_layer_type: 'chunk_rel_seflattn'
   causal: false
   use_dynamic_chunk: false
-  use_limited_chunk: false
-  use_context_hint_chunk: false
-  right_context_probs: [0.75]
-  right_context_sizes: [128, 128, 128]
-  limited_decoding_chunk_sizes: [64, 128, 256]
-  limited_left_chunk_sizes: [128, 256, 128]
+  dynamic_chunk_sizes: [64, 128, 256]
+  dynamic_right_context_sizes: [128, 128, 128]
+  dynamic_left_context_sizes: [128, 256, 128]
   cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster
   use_dynamic_left_chunk: false
-  use_dynamic_conv: true
-  freeze_subsampling_layer: false
+  dynamic_conv: true
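
The three new dynamic_* lists read as parallel entries: index i of each list describes one candidate (chunk size, right context, left context) combination for chunk-based streaming attention. Below is a minimal sketch, assuming a WeNet-style dynamic-chunk training setup, of how such parallel lists could be consumed; the function name `sample_chunk_config` and the uniform per-batch sampling strategy are illustrative assumptions, not the model's actual implementation.

```python
import random
from typing import Sequence, Tuple

# Values copied from the updated config.yaml above.
DYNAMIC_CHUNK_SIZES = (64, 128, 256)
DYNAMIC_RIGHT_CONTEXT_SIZES = (128, 128, 128)
DYNAMIC_LEFT_CONTEXT_SIZES = (128, 256, 128)


def sample_chunk_config(
    chunk_sizes: Sequence[int] = DYNAMIC_CHUNK_SIZES,
    right_context_sizes: Sequence[int] = DYNAMIC_RIGHT_CONTEXT_SIZES,
    left_context_sizes: Sequence[int] = DYNAMIC_LEFT_CONTEXT_SIZES,
) -> Tuple[int, int, int]:
    """Draw one (chunk, right context, left context) triple.

    The three config lists are treated as parallel entries, so index i of
    each list describes one candidate streaming configuration.  A real
    training loop would use the drawn triple to build the chunk-based
    attention mask for the current batch (hypothetical usage).
    """
    assert len(chunk_sizes) == len(right_context_sizes) == len(left_context_sizes)
    i = random.randrange(len(chunk_sizes))
    return chunk_sizes[i], right_context_sizes[i], left_context_sizes[i]


if __name__ == "__main__":
    chunk, right_ctx, left_ctx = sample_chunk_config()
    print(f"chunk={chunk}, right_context={right_ctx}, left_context={left_ctx}")
```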