Kernel: Python 3

# Exemplar Example

##### Exemplar vectors: Distributed representations
Lexical identity: N units = number of distinct lexical items (see parameters nHF_words, nLF_words below)
Place of articulation: 2 units (labial vs. velar)
Voicing: 2 units (voiced vs. voiceless)
VOT: 1 unit

##### Probe vectors
Same as exemplars, except no specification for VOT
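For concreteness, here is a minimal sketch of one exemplar vector and its corresponding probe. It assumes the 9-word lexicon constructed below, where every exemplar carries the place/voicing pattern `[1, 0, 0, 1]` (labial) or `[0, 1, 0, 1]` (velar); the particular word and VOT value are hypothetical.

```python
import numpy as np

n_words = 9                                  # total lexical items, as in the lexicon built below
lexical_identity = np.eye(n_words)[0]        # one-hot identity for a hypothetical first word
place_and_voicing = np.array([1, 0, 0, 1])   # labial pattern used for labial words below
vot = np.array([65.0])                       # stored phonetic detail (ms), illustrative value

exemplar = np.concatenate((lexical_identity, place_and_voicing, vot))  # 14 units
probe = np.concatenate((lexical_identity, place_and_voicing))          # 13 units: VOT unspecified

print(exemplar.shape, probe.shape)  # (14,) (13,)
```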

##### Constraints
$a \equiv$ vector of activations of exemplars
Faithfulness: +1 for each matching element in the distributed representation, weighted by activation of the exemplar; sum over all exemplars
Quantization harmony: $H^0_{\cal Q} + H^1_{\cal Q}$
$\hspace{.25in} H^0_{\cal Q} \equiv -\sum a^2 (1 - a)^2$
$\hspace{.25in} H^1_{\cal Q} \equiv -\left(\sum a^2 - 1\right)^2$
Unit harmony: $H_1 \equiv -\frac{1}{2} \beta \left(a - \frac{1}{2}\right)^2 + \frac{\beta}{8}$
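As a quick sanity check on what the quantization terms reward, the toy snippet below (not part of the original notebook; the vectors are made up) evaluates $H^0_{\cal Q}$ and $H^1_{\cal Q}$ exactly as defined above: a one-hot activation vector incurs no penalty, while a graded blend is penalized by both terms.

```python
import numpy as np

def h_Q0(a):
    # -sum a^2 (1-a)^2: penalizes activations that are neither 0 nor 1
    return -np.sum(a**2 * (1 - a)**2)

def h_Q1(a):
    # -(sum a^2 - 1)^2: penalizes total squared activation different from 1
    return -(np.sum(a**2) - 1)**2

one_hot = np.array([1.0, 0.0, 0.0])  # a single fully active exemplar
blend   = np.array([0.7, 0.5, 0.5])  # a graded blend of exemplars

print(h_Q0(one_hot), h_Q1(one_hot))  # both zero: no penalty
print(h_Q0(blend), h_Q1(blend))      # both negative: penalized
```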

##### Output
Optimal blend of exemplars
Predicted VOT: weighted average of exemplar VOTs
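A minimal sketch of that weighted average, with made-up activations and VOTs (the notebook's `predict_VOT` below does the same computation and then averages the result with the mean VOT of the phonetic implementation):

```python
import numpy as np

activations   = np.array([0.9, 0.4, 0.1])     # hypothetical optimal blend of three exemplars
exemplar_vots = np.array([65.0, 70.0, 80.0])  # their stored VOTs (ms)

# Activation-weighted average of the exemplar VOTs
predicted_vot = np.sum(activations * exemplar_vots) / np.sum(activations)
print(predicted_vot)  # 67.5
```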

```python
# Load Python libraries
%matplotlib inline
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
```
```python
# Populate exemplar lexicon
nHF_words = 2         # number of high frequency words at each POA
nLF_words = 2         # number of low frequency words at each POA
nHF_exemplars = 5     # number of exemplars per HF word
nLF_exemplars = 1     # number of exemplars per LF word
mean_labial_VOT = 65  # phonetic implementation of VOT for labials
mean_velar_VOT = 80   # phonetic implementation of VOT for velars
VOT_sd = 5

total_Words = 2*(nHF_words + nLF_words) + 1  # 1 novel labial word
total_Exemplars = 2*nHF_words*nHF_exemplars + 2*nLF_words*nLF_exemplars + 1  # 1 novel labial exemplar
exemplar_dimensions = total_Words + 5

lexical_idents = np.identity(total_Words)
lexical_list = []   # list of each unique word type (no VOT stored)
exemplar_list = []  # list of each exemplar (with VOT)

# HF labial exemplars
for i in range(nHF_words):
    lexical_list.append(np.concatenate((lexical_idents[i, 0:total_Words], [1, 0, 0, 1])))
    for j in range(nHF_exemplars):
        exemplar_list.append(np.concatenate((lexical_idents[i, 0:total_Words],
                                             [1, 0, 0, 1, np.random.normal(mean_labial_VOT, VOT_sd)])))

# HF velar exemplars
for i in range(nHF_words):
    lexical_list.append(np.concatenate((lexical_idents[i + nHF_words, 0:total_Words], [0, 1, 0, 1])))
    for j in range(nHF_exemplars):
        exemplar_list.append(np.concatenate((lexical_idents[i + nHF_words, 0:total_Words],
                                             [0, 1, 0, 1, np.random.normal(mean_velar_VOT, VOT_sd)])))

# LF labial exemplars
for i in range(nLF_words):
    lexical_list.append(np.concatenate((lexical_idents[i + 2*nHF_words, 0:total_Words], [1, 0, 0, 1])))
    for j in range(nLF_exemplars):
        exemplar_list.append(np.concatenate((lexical_idents[i + 2*nHF_words, 0:total_Words],
                                             [1, 0, 0, 1, np.random.normal(mean_labial_VOT, VOT_sd)])))

# LF velar exemplars
for i in range(nLF_words):
    lexical_list.append(np.concatenate((lexical_idents[i + nLF_words + 2*nHF_words, 0:total_Words], [0, 1, 0, 1])))
    for j in range(nLF_exemplars):
        exemplar_list.append(np.concatenate((lexical_idents[i + nLF_words + 2*nHF_words, 0:total_Words],
                                             [0, 1, 0, 1, np.random.normal(mean_velar_VOT, VOT_sd)])))

# Novel labial word (single exemplar)
lexical_list.append(np.concatenate((lexical_idents[total_Words - 1], [1, 0, 0, 1])))
exemplar_list.append(np.concatenate((lexical_idents[total_Words - 1],
                                     [1, 0, 0, 1, np.random.normal(mean_labial_VOT, VOT_sd)])))

# Convert lists to arrays
lexical_array = np.asarray(lexical_list)
exemplars_array = np.asarray(exemplar_list)

# For comparison of exemplars to a probe, omit the VOT dimension
exemplars_probe_dimensions = np.delete(exemplars_array, -1, 1)

print(exemplars_array)
```
```
[[ 1.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  62.77551111]
 [ 1.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  72.84136104]
 [ 1.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  75.21589845]
 [ 1.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  69.7088384 ]
 [ 1.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  71.31556986]
 [ 0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  61.30914024]
 [ 0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  68.24221288]
 [ 0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  69.6642167 ]
 [ 0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  59.23671182]
 [ 0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  68.12493114]
 [ 0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  1.  79.0174813 ]
 [ 0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  1.  74.28347632]
 [ 0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  1.  71.85512138]
 [ 0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  1.  79.75213782]
 [ 0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  1.  0.  1.  82.37959887]
 [ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  1.  0.  1.  75.86342071]
 [ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  1.  0.  1.  71.26733935]
 [ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  1.  0.  1.  80.0526634 ]
 [ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  1.  0.  1.  86.87251432]
 [ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  1.  0.  1.  84.02777846]
 [ 0.  0.  0.  0.  1.  0.  0.  0.  0.  1.  0.  0.  1.  60.45854098]
 [ 0.  0.  0.  0.  0.  1.  0.  0.  0.  1.  0.  0.  1.  56.86387184]
 [ 0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  1.  0.  1.  82.37533183]
 [ 0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  0.  1.  78.92324344]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  1.  1.  0.  0.  1.  68.42341449]]
```
```python
# KEYWORD ARGUMENTS:
#   probe: vector of input (lexical identity, place of articulation, voicing)
#   a: current state of network (in exemplar space)
def h_resonance_0(probe, a):
    # matrix of activations of each exemplar; number of columns = size of each probe (exemplar minus VOT)
    activation_matrix = np.tile(a, (exemplar_dimensions - 1, 1)).T
    # multiply each element of each exemplar by that exemplar's activation
    exemplars_weighted = np.multiply(exemplars_probe_dimensions, activation_matrix)
    # resonance = dot product of probe with each weighted exemplar
    resonance_with_exemplars = np.dot(exemplars_weighted, probe)
    return np.sum(resonance_with_exemplars)  # total resonance is the sum over dot products

def h_1(a):
    b = np.array(a)
    beta = 1
    # quadratic unit-harmony penalty plus a constant offset (the offset does not affect the optimum)
    return -np.sum(0.5 * beta * (b - 0.5)**2) + (beta*2) / 8

def h_Q0(a):
    b = np.array(a)
    return -np.sum(b**2 * (1 - b)**2)

def h_Q1(a):
    b = np.array(a)
    return -((np.sum(b**2) - 1)**2)

def h_exemplar(probe, a, q=1):
    # KEYWORD ARGUMENTS:
    #   probe: vector of input (lexical identity, place of articulation, voicing)
    #   a: current state of network (in exemplar space)
    #   q: relative strength of the quantization constraints
    H_0 = h_resonance_0(probe, a)
    H_1 = h_1(a)
    H_Q0 = h_Q0(a)
    H_Q1 = h_Q1(a)
    return H_0 + H_1 + q*(H_Q1 + H_Q0)

def predict_VOT(a, probe):
    # matrix of activations of each exemplar; number of columns = size of each exemplar
    activation_matrix = np.tile(a, (exemplar_dimensions, 1)).T
    # multiply each element of each exemplar by that exemplar's activation
    exemplars_weighted = np.multiply(exemplars_array, activation_matrix)
    # exemplar VOT is the activation-weighted mean of the stored VOTs
    exemplar_vot = np.sum(exemplars_weighted[:, exemplar_dimensions - 1]) / np.sum(a)
    if probe[total_Words] == 1:  # if labial
        SR_vot = mean_labial_VOT
    else:
        SR_vot = mean_velar_VOT
    # output of the hybrid system combines the phonetic implementation of the SR and the exemplar VOT
    return (exemplar_vot + SR_vot) / 2
```
```python
import scipy.optimize as optim
```
```python
# Run a single example
probe = lexical_array[0]  # first word
init_guess = np.zeros(total_Exemplars)
q_0 = 5.0
result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)
print('Activation of exemplars:\n', result_min.x)
print('Predicted VOT:', predict_VOT(result_min.x, probe))
print('\n\nActivation of exemplars scaled\n', result_min.x / np.amax(result_min.x))
resonance = np.dot(exemplars_probe_dimensions, probe)  # resonance = dot product of probe with each exemplar
print('Resonance (=similarity) scaled\n', resonance / np.amax(resonance))
```
```
Activation of exemplars:
 [ 0.41338741  0.41338741  0.4133871   0.41338715  0.41338715  0.20645068
   0.20645084  0.20645068  0.20645085  0.20645145  0.10291774  0.10291774
   0.10291694  0.10291677  0.10291774  0.10291774  0.10291729  0.10291677
   0.10291797  0.10291774  0.20645072  0.20645035  0.10291775  0.10291718
   0.20645153]
Predicted VOT: 67.6920858634


Activation of exemplars scaled
 [ 1.          1.          0.99999925  0.99999935  0.99999935  0.4994121
   0.4994125   0.4994121   0.49941252  0.49941398  0.24896196  0.24896196
   0.24896003  0.2489596   0.24896196  0.24896196  0.24896086  0.2489596
   0.24896252  0.24896196  0.49941219  0.49941132  0.24896198  0.2489606
   0.49941415]
Resonance (=similarity) scaled
 [ 1.          1.          1.          1.          1.          0.66666667
   0.66666667  0.66666667  0.66666667  0.66666667  0.33333333  0.33333333
   0.33333333  0.33333333  0.33333333  0.33333333  0.33333333  0.33333333
   0.33333333  0.33333333  0.66666667  0.66666667  0.33333333  0.33333333
   0.66666667]
```
```python
q_0 = 5.0
init_guess = np.zeros(total_Exemplars)

# Get baseline (pre-test) predicted VOTs for each set of words
hf_labial_vots = []
for i in range(nHF_words):
    probe = lexical_array[i]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    hf_labial_vots.append(predict_VOT(result_min.x, probe))
pretest_hf_labial = np.mean(hf_labial_vots)
print('HF labial mean VOT:', pretest_hf_labial)  # print predicted VOT

lf_labial_vots = []
for i in range(nLF_words):
    probe = lexical_array[i + 2*nHF_words]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    lf_labial_vots.append(predict_VOT(result_min.x, probe))
pretest_lf_labial = np.mean(lf_labial_vots)
print('LF labial VOT:', pretest_lf_labial)  # print predicted VOT

probe = lexical_array[total_Words - 1]  # novel labial
result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
pretest_novel_labial = predict_VOT(result_min.x, probe)
print('Novel labial VOT:', pretest_novel_labial)  # print predicted VOT

hf_velar_vots = []
for i in range(nHF_words):
    probe = lexical_array[i + nHF_words]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    hf_velar_vots.append(predict_VOT(result_min.x, probe))
pretest_hf_velar = np.mean(hf_velar_vots)
print('\nHF velar VOT:', pretest_hf_velar)  # print predicted VOT

lf_velar_vots = []
for i in range(nLF_words):
    probe = lexical_array[i + nLF_words + 2*nHF_words]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    lf_velar_vots.append(predict_VOT(result_min.x, probe))
pretest_lf_velar = np.mean(lf_velar_vots)
print('LF velar VOT:', pretest_lf_velar)  # print predicted VOT
```
```
HF labial mean VOT: 67.4280655322
LF labial VOT: 67.1264705831
Novel labial VOT: 67.5333359583

HF velar VOT: 77.7034552416
LF velar VOT: 77.6256811169
```
```python
additional_exemplars = nHF_exemplars  # number of additional exemplars for each labial word
vot_increase = 50                     # amount to increase VOT

# Update phonetic encoding
mean_labial_VOT += vot_increase
mean_velar_VOT += vot_increase

total_Exemplars += additional_exemplars * (nHF_words + nLF_words)

# Add to HF labial exemplars
for i in range(nHF_words):
    for j in range(additional_exemplars):
        exemplar_list.append(np.concatenate((lexical_idents[i, 0:total_Words],
                                             [1, 0, 0, 1, np.random.normal(mean_labial_VOT + vot_increase, 20)])))

# Add to LF labial exemplars
for i in range(nLF_words):
    for j in range(additional_exemplars):
        exemplar_list.append(np.concatenate((lexical_idents[i + 2*nHF_words, 0:total_Words],
                                             [1, 0, 0, 1, np.random.normal(mean_labial_VOT + vot_increase, 20)])))

exemplars_array = np.asarray(exemplar_list)

# For comparison of exemplars to a probe, omit the VOT dimension
exemplars_probe_dimensions = np.delete(exemplars_array, -1, 1)
```
```python
init_guess = np.zeros(total_Exemplars)

# Get predicted VOTs for each set after adding exemplars
# Compare to baseline VOTs
hf_labial_vots = []
for i in range(nHF_words):
    probe = lexical_array[i]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    hf_labial_vots.append(predict_VOT(result_min.x, probe))
posttest_hf_labial = np.mean(hf_labial_vots)
print('HF labial mean VOT:', posttest_hf_labial)  # print predicted VOT
print('Change:', posttest_hf_labial - pretest_hf_labial)

lf_labial_vots = []
for i in range(nLF_words):
    probe = lexical_array[i + 2*nHF_words]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    lf_labial_vots.append(predict_VOT(result_min.x, probe))
posttest_lf_labial = np.mean(lf_labial_vots)
print('LF labial VOT:', posttest_lf_labial)  # print predicted VOT
print('Change:', posttest_lf_labial - pretest_lf_labial)

probe = lexical_array[total_Words - 1]  # novel labial
result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
posttest_novel_labial = predict_VOT(result_min.x, probe)
print('Novel labial VOT:', posttest_novel_labial)  # print predicted VOT
print('Change:', posttest_novel_labial - pretest_novel_labial)

hf_velar_vots = []
for i in range(nHF_words):
    probe = lexical_array[i + nHF_words]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    hf_velar_vots.append(predict_VOT(result_min.x, probe))
posttest_hf_velar = np.mean(hf_velar_vots)
print('\n\nHF velar VOT:', posttest_hf_velar)  # print predicted VOT
print('Change:', posttest_hf_velar - pretest_hf_velar)

lf_velar_vots = []
for i in range(nLF_words):
    probe = lexical_array[i + nLF_words + 2*nHF_words]  # set probe
    result_min = optim.minimize(lambda x: -h_exemplar(probe, x, q_0), init_guess)  # optimize
    lf_velar_vots.append(predict_VOT(result_min.x, probe))
posttest_lf_velar = np.mean(lf_velar_vots)
print('LF velar VOT:', posttest_lf_velar)  # print predicted VOT
print('Change:', posttest_lf_velar - pretest_lf_velar)
```
```
HF labial mean VOT: 116.476592049
Change: 49.0485265166
LF labial VOT: 117.758528675
Change: 50.6320580915
Novel labial VOT: 116.129849704
Change: 48.5965137456


HF velar VOT: 116.14059377
Change: 38.4371385289
LF velar VOT: 117.097122993
Change: 39.471441876
```