CLN: specify nonsparse rs steps as "if local trapping"
Jacob-Stevens-Haas committed May 30, 2024
1 parent 3436aed commit 2d141d9
Showing 2 changed files with 31 additions and 22 deletions.
4 changes: 4 additions & 0 deletions .gitattributes
@@ -2,3 +2,7 @@
 
 # Interpret Jupyter notebooks as Python
 *.ipynb linguist-language=Python
+
+*.ipynb diff=jupyternotebook
+
+*.ipynb merge=jupyternotebook
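
Note: the two added attribute lines route *.ipynb files to a git diff driver and a merge driver, each named jupyternotebook. That driver name matches what nbdime's git integration registers (for example via nbdime config-git --enable); the driver configuration itself is not part of this commit, so treat that as an assumption. A minimal sketch, in Python, for checking that the attributes resolve as intended using only the stock git check-attr command; the notebook path is a placeholder:

import subprocess

# Illustrative check, run from the repository root.  The notebook path is a
# placeholder, not a file touched by this commit.
out = subprocess.run(
    ["git", "check-attr", "diff", "merge", "--", "examples/some_notebook.ipynb"],
    capture_output=True,
    text=True,
    check=True,
).stdout
# Expected output once the attributes apply:
#   examples/some_notebook.ipynb: diff: jupyternotebook
#   examples/some_notebook.ipynb: merge: jupyternotebook
print(out)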
49 changes: 27 additions & 22 deletions pysindy/optimizers/trapping_sr3.py
@@ -606,6 +606,31 @@ def _update_coef_sparse_rs(
 
         return self._update_coef_cvxpy(xi, cost, var_len, coef_prev, self.eps_solver)
 
+    def _update_coef_nonsparse_rs(
+        self, n_tgts, n_features, Pmatrix, A, coef_prev, xTx, xTy
+    ):
+        pTp = np.dot(Pmatrix.T, Pmatrix)
+        H = xTx + pTp / self.eta
+        P_transpose_A = np.dot(Pmatrix.T, A.flatten())
+
+        if self.method == "local":
+            # notice reshaping PQ here requires fortran-ordering
+            PQ = np.tensordot(self.mod_matrix, self.PQ_, axes=([1], [0]))
+            PQ = np.reshape(PQ, (n_tgts * n_tgts * n_tgts, n_tgts * n_features), "F")
+            PQTPQ = np.dot(PQ.T, PQ)
+            PQ = np.reshape(
+                self.PQ_, (n_tgts, n_tgts, n_tgts, n_tgts * n_features), "F"
+            )
+            PQ = np.tensordot(self.mod_matrix, PQ, axes=([1], [0]))
+            PQ_ep = PQ + np.transpose(PQ, [1, 2, 0, 3]) + np.transpose(PQ, [2, 0, 1, 3])
+            PQ_ep = np.reshape(
+                PQ_ep, (n_tgts * n_tgts * n_tgts, n_tgts * n_features), "F"
+            )
+            PQTPQ_ep = np.dot(PQ_ep.T, PQ_ep)
+            H += PQTPQ / self.alpha + PQTPQ_ep / self.beta
+
+        return self._solve_nonsparse_relax_and_split(H, xTy, P_transpose_A, coef_prev)
+
     def _solve_m_relax_and_split(self, r, N, m_prev, m, A, coef_sparse, tk_previous):
         """
         If using the relaxation formulation of trapping SINDy, solves the
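
The core of the new _update_coef_nonsparse_rs helper is the method == "local" branch: the constraint tensor PQ_ is contracted with mod_matrix, flattened with Fortran ordering to shape (n_tgts**3, n_tgts * n_features), and, in a second pass, symmetrized over its three target axes before the Gram matrices PQTPQ and PQTPQ_ep are added to H. Below is a standalone shape-check sketch with toy sizes; the 5-D shape used for PQ_ is an inference (any array with n_tgts**4 * n_features entries reshapes identically), and an identity matrix stands in for mod_matrix:

import numpy as np

# Toy sizes, purely for illustration
n_tgts, n_features = 3, 9

mod_matrix = np.eye(n_tgts)  # stand-in for the optimizer attribute
PQ_ = np.random.rand(n_tgts, n_tgts, n_tgts, n_tgts, n_features)  # assumed shape

# First pass: contract with mod_matrix, then flatten in Fortran order
PQ = np.tensordot(mod_matrix, PQ_, axes=([1], [0]))
PQ = np.reshape(PQ, (n_tgts * n_tgts * n_tgts, n_tgts * n_features), "F")
PQTPQ = np.dot(PQ.T, PQ)

# Second pass: symmetrize over the three target axes, then flatten
PQ4 = np.reshape(PQ_, (n_tgts, n_tgts, n_tgts, n_tgts * n_features), "F")
PQ4 = np.tensordot(mod_matrix, PQ4, axes=([1], [0]))
PQ_ep = PQ4 + np.transpose(PQ4, [1, 2, 0, 3]) + np.transpose(PQ4, [2, 0, 1, 3])
PQ_ep = np.reshape(PQ_ep, (n_tgts * n_tgts * n_tgts, n_tgts * n_features), "F")
PQTPQ_ep = np.dot(PQ_ep.T, PQ_ep)

# Both Gram matrices are square in the flattened coefficient dimension,
# which is what lets them be added to H.
assert PQTPQ.shape == (n_tgts * n_features, n_tgts * n_features)
assert PQTPQ_ep.shape == (n_tgts * n_features, n_tgts * n_features)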
@@ -762,28 +787,8 @@ def _reduce(self, x, y):
                 # if threshold = 0, there is analytic expression
                 # for the solve over the coefficients,
                 # which is coded up here separately
-                pTp = np.dot(Pmatrix.T, Pmatrix)
-                # notice reshaping PQ here requires fortran-ordering
-                PQ = np.tensordot(self.mod_matrix, self.PQ_, axes=([1], [0]))
-                PQ = np.reshape(
-                    PQ, (n_tgts * n_tgts * n_tgts, n_tgts * n_features), "F"
-                )
-                PQTPQ = np.dot(PQ.T, PQ)
-                PQ = np.reshape(
-                    self.PQ_, (n_tgts, n_tgts, n_tgts, n_tgts * n_features), "F"
-                )
-                PQ = np.tensordot(self.mod_matrix, PQ, axes=([1], [0]))
-                PQ_ep = (
-                    PQ + np.transpose(PQ, [1, 2, 0, 3]) + np.transpose(PQ, [2, 0, 1, 3])
-                )
-                PQ_ep = np.reshape(
-                    PQ_ep, (n_tgts * n_tgts * n_tgts, n_tgts * n_features), "F"
-                )
-                PQTPQ_ep = np.dot(PQ_ep.T, PQ_ep)
-                H = xTx + pTp / self.eta + PQTPQ / self.alpha + PQTPQ_ep / self.beta
-                P_transpose_A = np.dot(Pmatrix.T, A.flatten())
-                coef_sparse = self._solve_nonsparse_relax_and_split(
-                    H, xTy, P_transpose_A, coef_prev
+                coef_sparse = self._update_coef_nonsparse_rs(
+                    n_tgts, n_features, Pmatrix, A, coef_prev, xTx, xTy
                 )
 
             # If problem over xi becomes infeasible, break out of the loop
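
One behavioral point visible in the diff: the deleted inline block always added PQTPQ / self.alpha and PQTPQ_ep / self.beta to H, whereas the new helper adds them only when self.method == "local", which is the clean-up the commit title describes. The internals of _solve_nonsparse_relax_and_split are not shown in this commit, so the sketch below only illustrates the kind of closed-form threshold = 0 update the comment above refers to; shapes are inferred from the surrounding operations, and a plain np.linalg.solve stands in for whatever the real helper does:

import numpy as np

# Toy stand-ins.  Shapes are inferred from the diff (Pmatrix.T @ A.flatten()
# must be conformable with xTy); the final solve is an assumption, not the
# library's actual _solve_nonsparse_relax_and_split.
rng = np.random.default_rng(0)
n_tgts, n_features = 3, 9
NF = n_tgts * n_features
eta = 1.0e5

M = rng.standard_normal((NF, NF))
xTx = M.T @ M                                  # symmetric, (NF, NF)
xTy = rng.standard_normal(NF)
Pmatrix = rng.standard_normal((n_tgts * n_tgts, NF))
A = rng.standard_normal((n_tgts, n_tgts))

# Global-trapping case of the refactored helper: no PQ terms enter H
pTp = Pmatrix.T @ Pmatrix
H = xTx + pTp / eta
P_transpose_A = Pmatrix.T @ A.flatten()

# Assumed ridge-style analytic update for threshold = 0:
#   H @ xi = xTy + P^T A / eta
xi = np.linalg.solve(H, xTy + P_transpose_A / eta)
coef_sparse = xi.reshape(n_tgts, n_features)   # illustrative layout only
print(coef_sparse.shape)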