forked from Atul-Anand-Jha/Speaker-Identification-Python
modeltraining.py
import cPickle
import numpy as np
from scipy.io.wavfile import read
from sklearn.mixture import GMM
from featureextraction import extract_features
#from speakerfeatures import extract_features
import warnings
warnings.filterwarnings("ignore")
#path to training data
# source = "development_set/"
source = "trainingData/"
#path where the trained speaker models will be saved
# dest = "speaker_models/"
# train_file = "development_set_enroll.txt"
dest = "Speakers_models/"
train_file = "trainingDataPath.txt"
file_paths = open(train_file,'r')
count = 1
# Extract features for each speaker (15 training files per speaker)
features = np.asarray(())
for path in file_paths:
    path = path.strip()
    print path

    # read the audio
    sr, audio = read(source + path)

    # extract 40-dimensional MFCC & delta-MFCC features
    vector = extract_features(audio, sr)

    if features.size == 0:
        features = vector
    else:
        features = np.vstack((features, vector))

    # once the features of all 15 files of a speaker are concatenated,
    # train that speaker's model (the original script used count == 5)
    if count == 15:
        gmm = GMM(n_components=16, n_iter=200, covariance_type='diag', n_init=3)
        gmm.fit(features)

        # dump the trained Gaussian mixture model
        picklefile = path.split("-")[0] + ".gmm"
        cPickle.dump(gmm, open(dest + picklefile, 'w'))
        print '+ modeling completed for speaker:', picklefile, " with data point = ", features.shape

        features = np.asarray(())
        count = 0
    count = count + 1
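
The cPickle module and sklearn.mixture.GMM used above exist only on Python 2 with scikit-learn older than 0.20; GMM has since been replaced by GaussianMixture, whose iteration count is named max_iter rather than n_iter. As a minimal sketch, assuming Python 3 and a current scikit-learn, the per-speaker training and dump step would look roughly as follows (features, dest and picklefile are the same variables the script builds above):

import pickle
from sklearn.mixture import GaussianMixture

# same hyper-parameters as the GMM call above; n_iter becomes max_iter
gmm = GaussianMixture(n_components=16, max_iter=200,
                      covariance_type='diag', n_init=3)
gmm.fit(features)          # features: stacked MFCC + delta-MFCC vectors

# pickles must be written in binary mode on Python 3
with open(dest + picklefile, 'wb') as f:
    pickle.dump(gmm, f)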
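
The saved .gmm files are only half of the pipeline: they are meant to be loaded back and scored against an unknown recording, with the speaker chosen as the model giving the highest average log-likelihood. A minimal identification sketch under the same assumptions as above (Python 3, current scikit-learn, the same featureextraction module); the test file name test.wav is illustrative, and Speakers_models/ is the directory the training script writes to:

import os
import pickle
import numpy as np
from scipy.io.wavfile import read
from featureextraction import extract_features

model_dir = "Speakers_models/"
model_files = [f for f in os.listdir(model_dir) if f.endswith(".gmm")]
models = [pickle.load(open(os.path.join(model_dir, f), 'rb')) for f in model_files]
speakers = [os.path.splitext(f)[0] for f in model_files]

# score one unknown recording against every speaker model
sr, audio = read("test.wav")            # illustrative test file
vector = extract_features(audio, sr)

# GaussianMixture.score returns the mean log-likelihood of the samples
log_likelihood = np.array([gmm.score(vector) for gmm in models])
winner = np.argmax(log_likelihood)
print("identified speaker:", speakers[winner])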