Commit 4e2bffb6 authored by Gros Gabriel

recent commit

parent 2dbb0e9a
@@ -579,3 +579,155 @@ def show_loss_path(tau_est,tau_star,tau_min,tau_max,nb_values,m,n,num_steps =100
def show_conv_speed(tau_est, tau_star, m, n, num_steps=1000, eta=0.1):
    """
    We take r = 2 and let a1 = a2 = 1/2.
    We want to look at the landscape of the loss function L(theta) in this configuration.
    Here we do uniform sampling with N = 2n+1 centered frequencies.
    Then, we run a gradient descent from theta_est to (we hope) theta_star.
    """
    assert len(tau_star) == 2, "Issue: r != 2"
    freqs = sample_uniform_frequencies(n)
    means = tau_star
    weights = [1/2, 1/2]
    r = len(means)
    N = 2*n + 1
    # The block below is unused here because we assume there is no noise
"""
# Generate batch data and compute its FCE
data = generate_data(m, means, weights)
freqs = sample_uniform_frequencies(n) # shape (N_freq, 1)
baseline_freqs = sample_uniform_frequencies(n)
"""
# Compute the empirical characteristic function once for the batch
#Y = FCE(data, freqs) # shape (N_freq, 1) #Noise coming from data generation
# Initial ESPRIT estimation: theta_est = [a_est (r,), tau_est (r,)]
###theta_est = ESPRIT_(Y, r, fourier, baseline_freqs, freqs)
#theta_est = np.array([1/2,1/2,4.9,5])
#theta_est [0], theta_est[1] = 1/2,1/2 #ICI AUCUNE ESTIMATION SUR LES WEIGHTS
    a_est = np.array([1/2, 1/2])
    a_star = np.array([1/2, 1/2])
    tau_est = np.array(tau_est)
    theta_est = np.concatenate((a_est, tau_est))
    theta_star = np.concatenate((a_star, np.array(tau_star)))
    a_est = a_est.reshape(-1, 1)
    a_star = a_star.reshape(-1, 1)
    # vitesse_conv ("convergence speed") tracks the estimation error along the descent
    vitesse_conv = [permute_and_compute_norm(theta_est, theta_star, r)[0]]
    # Precompute the "G" matrix: diagonal with entries given by the kernel's Fourier transform at freqs.
    G_diag = fourier(freqs).flatten()  # shape (N_freq,)
    G_mat = np.diag(G_diag)  # shape (N_freq, N_freq)
    # Dictionary function Phi: for a given tau vector (r,), returns a matrix (N_freq x r)
    def Phi(tau):
        return np.exp(-2j * np.pi * np.outer(freqs.flatten(), tau))
    # Model function: Y_model = G * Phi(tau) * a.
    def Y_model(a, tau):
        return G_mat @ (Phi(tau) @ a)
    Y = Y_model(a_star, tau_star)  # No noise assumed: perfect data, i.e. the limit m -> +inf
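    # Y has shape (N_freq, 1); it plays the role of the FCE in the noiseless limit.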
    # Compute K''(0): for K(t) = sum_f |ĝ(f)|^2 exp(2i*pi*f*t), we have
    # K''(0) = -(2*pi)^2 * sum_f |ĝ(f)|^2 * f^2.
    f_vals = freqs.flatten()
    Kpp0 = -(2 * np.pi)**2 * np.sum(np.abs(fourier(freqs).flatten())**2 * (f_vals**2))
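    # Kpp0 < 0 (it is -(2*pi)^2 times a sum of nonnegative terms), so -Kpp0 > 0
    # and the preconditioner P_tau used below is well defined.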
    # Run batch gradient descent updates on the fixed FCE Y
    for step in range(num_steps):
        # Compute the current model prediction
        Y_estimated = Y_model(a_est, tau_est)
        # Residual: difference between model prediction and observed FCE
        r_vec = Y_estimated - Y  # shape (N_freq, 1)
        # Gradient with respect to amplitudes: grad_a = (G*Phi(tau))^H * r
        # (computed for reference but unused below, since the amplitudes are kept fixed)
        grad_a = (G_mat @ Phi(tau_est)).conj().T @ r_vec  # shape (r, 1)
        # Gradient with respect to tau: since d/dtau_j exp(-2i*pi*f*tau_j)
        # = -2i*pi*f * exp(-2i*pi*f*tau_j), we use Lambda = diag(-2i*pi*f_k)
        # on the uniform grid f_k = (k - n)/N.
        diag_elements = -2j * np.pi * (np.arange(N) - n) / N
        Lambda = np.diag(diag_elements)
        grad_tau = np.real((a_est.conj() * (Lambda @ G_mat @ Phi(tau_est)).conj().T) @ r_vec)  # shape (r, 1)
        # Preconditioner for the tau-update:
        P_tau = np.diag((a_est.flatten()**2) / (-Kpp0))
        # P_tau = 1
        # Gradient descent update with learning rate eta
        # a_est = a_est - eta * grad_a  # we do NOT update a_est because it is assumed known
        tau_est = tau_est - eta * (P_tau @ grad_tau).flatten()
        theta_est = np.concatenate((a_est.flatten(), tau_est))
        vitesse_conv.append(permute_and_compute_norm(theta_est, theta_star, r)[0])
    # Final estimated parameters (concatenated amplitudes and centroids)
    theta_est_final = np.concatenate((a_est.flatten(), tau_est))
    # True parameters vector: [weights, means] (identical to theta_star above)
    theta_star = np.concatenate((np.array(weights), np.array(means)))
    norm, theta_est_final_prime = permute_and_compute_norm(theta_est_final, theta_star, r)
    plt.plot(vitesse_conv)
    plt.yscale('log')
    plt.xscale('log')
    plt.show()
    return norm, theta_est_final, theta_est_final_prime
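
# A minimal usage sketch (hypothetical values; assumes sample_uniform_frequencies,
# fourier, and permute_and_compute_norm are defined earlier in this module):
#
#     norm, theta_final, theta_final_perm = show_conv_speed(
#         tau_est=[0.45, 0.60], tau_star=[0.50, 0.55], m=1000, n=50)
#
# The log-log plot of vitesse_conv then shows the rate of convergence of the descent.
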
def precond_diag(a_k, freqs, r, n):
    """
    Build the adaptive diagonal preconditioner matrix.
    """
    K_second_0 = 0
    N = 2*n + 1
    L = fourier(freqs).flatten()
    for k in range(N):
        # The frequency factor must be squared here, matching the K''(0) formula
        # used in show_conv_speed: K''(0) = -(2*pi)^2 * sum_k |ĝ(f_k)|^2 * f_k^2.
        K_second_0 += (abs(L[k])**2) * ((k - n)/N)**2
    K_second_0 = (-4*np.pi**2) * K_second_0
    # Only the tau-block (last r diagonal entries) is preconditioned;
    # the amplitude block is left as the identity.
    P = np.eye(2*r)
    for i in range(r):
        P[i+r, i+r] = (1/np.sqrt(-K_second_0)) * (1/a_k[i])**2
    return P
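
# A minimal sketch of how such a preconditioner would be applied (hypothetical
# names: grad_theta is the stacked gradient [grad_a; grad_tau] of shape (2r,)):
#
#     P = precond_diag(a_k, freqs, r, n)
#     theta = theta - eta * (P @ grad_theta)
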
def precond_full(a_k, freqs, r, n):
    """
    Build the adaptive full preconditioner matrix.
    """
    Ak = np.diag(np.concatenate((np.ones(r), np.array(a_k))))