ulf1/oxyba · linreg_ols_svd.py
python logo
def linreg_ols_svd(y, X, rcond=1e-15):
    """Linear Regression, OLS, inv by SVD

    Parameters
    ----------
    y : ndarray
        Target vector with N observations.
    X : ndarray
        NxM design matrix (N observations, M features/variables).
    rcond : float
        Cut-off ratio passed to numpy's lstsq; singular values smaller
        than rcond times the largest singular value are treated as zero.

    Returns
    -------
    beta : ndarray or None
        Estimated coefficients, or None if the solver did not converge
        or X is numerically rank-deficient.

    Properties
    ----------
    * Numpy's lstsq is based on LAPACK's _gelsd which applies SVD
    * SVD inverse might be slow (complex Landau O)
    * speed might decline during forward selection
    * no overhead or other computations

    Example:
    --------
        beta = linreg_ols_svd(y, X)
    """
    import numpy as np
    try:  # solve OLS formula
        beta, _, rank, singu = np.linalg.lstsq(b=y, a=X, rcond=rcond)
    except np.linalg.LinAlgError:
        print("LinAlgError: computation does not converge.")
        return None

    # BUGFIX: singular values returned by lstsq are always non-negative,
    # so the old check `np.any(singu < 0.0)` could never fire.  What the
    # error message describes is rank deficiency (a singular value cut
    # off by rcond), which lstsq reports via the effective rank.
    if rank < X.shape[1]:
        print("Error: A singular value of X is numerically not well-behaved.")
        return None

    # return estimated model parameters
    return beta
Similar code snippets
1.
ulf1/oxyba · linreg_ols_pinv.py
Match rating: 62.9% · See similar code snippets
python logo
def linreg_ols_pinv(y, X, rcond=1e-15):
    """Linear Regression, OLS, by multiplying with Pseudoinverse

    Computes ``pinv(X'X) @ (X'y)`` where the Moore-Penrose
    pseudoinverse is obtained via SVD (singular values below
    ``rcond`` times the largest are zeroed).  Returns the
    coefficient vector, or None if the SVD fails to converge.
    """
    import numpy as np
    try:  # multiply with inverse to compute coefficients
        gram = np.dot(X.T, X)
        moment = np.dot(X.T, y)
        return np.dot(np.linalg.pinv(gram, rcond=rcond), moment)
    except np.linalg.LinAlgError:
        print("LinAlgError: SVD does not converge")
        return None
2.
ulf1/oxyba · linreg_ols_lu.py
Match rating: 61.09% · See similar code snippets
python logo
def linreg_ols_lu(y, X):
    """Linear Regression, OLS, by solving linear equations and LU decomposition

    Solves the normal equations ``(X'X) beta = X'y`` directly with
    numpy's linear-equation solver.

    Properties
    ----------
    * based on LAPACK's _gesv what applies LU decomposition
    * avoids using python's inverse functions
    * should be stable
    * no overhead or other computations

    Example:
    --------
        beta = linreg_ols_lu(y,X)

    Links:
    ------
    * http://oxyba.de/docs/linreg_ols_lu

    """
    import numpy as np
    try:  # set up and solve the normal equations
        gram = np.dot(X.T, X)
        moment = np.dot(X.T, y)
        return np.linalg.solve(gram, moment)
    except np.linalg.LinAlgError:
        print("LinAlgError: X*X' is singular or not square.")
        return None
3.
ulf1/oxyba · linreg_ridge_lu.py
Match rating: 60.02% · See similar code snippets
python logo
def linreg_ridge_lu(y, X, lam=0.01):
    """Ridge Regression by solving the penalized normal equations.

    Solves ``(X'X + lam*I) beta = X'y`` with numpy's LU-based
    linear-equation solver.  ``lam`` is the L2 penalty weight.
    Returns the coefficient vector, or None if the penalized
    Gram matrix is singular or not square.
    """
    import numpy as np
    try:
        n_features = X.shape[1]
        penalized_gram = np.dot(X.T, X) + lam * np.eye(n_features)
        moment = np.dot(X.T, y)
        return np.linalg.solve(penalized_gram, moment)
    except np.linalg.LinAlgError:
        print("LinAlgError: X*X'+lam*I is singular or not square.")
        return None
4.
ulf1/oxyba · linreg_ols_qr.py
Match rating: 54.65% · See similar code snippets
python logo
def linreg_ols_qr(y, X):
    """Linear Regression, OLS, inverse by QR Factoring

    QR-factors the Gram matrix ``X'X = QR`` and solves the
    triangular system ``R beta = Q' (X'y)`` instead of forming
    an explicit inverse of R.

    Parameters
    ----------
    y : ndarray
        Target vector with N observations.
    X : ndarray
        NxM design matrix.

    Returns
    -------
    beta : ndarray or None
        Estimated coefficients, or None if factoring failed.
    """
    import numpy as np
    try:  # QR-factor the Gram matrix, then back-substitute
        q, r = np.linalg.qr(np.dot(X.T, X))
        # Solving r @ beta = q.T @ (X'y) avoids the extra cost and
        # rounding error of explicitly computing inv(r).
        return np.linalg.solve(r, np.dot(q.T, np.dot(X.T, y)))
    except np.linalg.LinAlgError:
        print("LinAlgError: Factoring failed")
        return None
5.
PaulHancock/Aegean · fitting.py
Match rating: 49.13% · See similar code snippets
python logo
def covar_errors(params, data, errs, B, C=None):
    r"""
    Take a set of parameters that were fit with lmfit, and replace the errors
    with the 1\sigma errors calculated using the covariance matrix.


    Parameters
    ----------
    params : lmfit.Parameters
        Model

    data : 2d-array
        Image data

    errs : 2d-array ?
        Image noise.

    B : 2d-array
        B matrix.

    C : 2d-array
        C matrix. Optional. If supplied then Bmatrix will not be used.

    Returns
    -------
    params : lmfit.Parameters
        Modified model.
    """

    # only finite pixels contribute to the Jacobian
    mask = np.where(np.isfinite(data))

    # calculate the proper parameter errors and copy them across.
    if C is not None:
        try:
            J = lmfit_jacobian(params, mask[0], mask[1], errs=errs)
            covar = np.transpose(J).dot(inv(C)).dot(J)
            onesigma = np.sqrt(np.diag(inv(covar)))
        # NOTE: np.linalg.linalg was a private alias removed in NumPy 2.0;
        # np.linalg.LinAlgError is the public, stable name.
        except (np.linalg.LinAlgError, ValueError):
            # C-matrix path failed -> fall back to the B-matrix path below
            C = None

    if C is None:
        try:
            J = lmfit_jacobian(params, mask[0], mask[1], B=B, errs=errs)
            covar = np.transpose(J).dot(J)
            onesigma = np.sqrt(np.diag(inv(covar)))
        except (np.linalg.LinAlgError, ValueError):
            # sentinel value -2 marks errors that could not be computed
            onesigma = [-2] * len(mask[0])

    # copy the 1-sigma values onto the varying parameters of each component
    for i in range(params['components'].value):
        prefix = "c{0}_".format(i)
        j = 0
        for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']:
            if params[prefix + p].vary:
                params[prefix + p].stderr = onesigma[j]
                j += 1

    return params
6.
gplepage/gvar · linalg.py
Match rating: 47.64% · See similar code snippets
python logo
def svd(a, compute_uv=True, rcond=None):
    r""" svd decomposition of matrix ``a`` containing |GVar|\s.

    Args:
        a: Two-dimensional matrix/array of numbers
            and/or :class:`gvar.GVar`\s.
        compute_uv (bool): It ``True`` (default), returns
            tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
            where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
            and ``vT @ vT.T = 1``, and ``s`` is the list of singular
            values. Only ``s`` is returned if ``compute_uv=False``.
        rcond (float): Singular values whose difference is smaller than
            ``rcond`` times their sum are assumed to be degenerate for
            calculating variances for ``u`` and ``vT``.
            Default (``rcond=None``) is ``max(M,N)`` times machine precision.

    Returns:
        Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
        where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
        and ``vT @ vT.T = 1``, and ``s`` is the list of singular
        values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)``
        and ``vT.shape=(K,M)`` where ``K`` is the number of
        nonzero singular values (``len(s)==K``).
        If ``compute_uv==False`` only ``s`` is returned.

    Raises:
        ValueError: If matrix is not two-dimensional.
    """
    a = numpy.asarray(a)
    # plain numeric matrices go straight to numpy's SVD
    if a.dtype != object:
        return numpy.linalg.svd(a, compute_uv=compute_uv)
    # object dtype => entries are GVars; split into mean and deviation parts
    amean = gvar.mean(a)
    if amean.ndim != 2:
        raise ValueError(
            'matrix must have dimension 2: actual shape = ' + str(a.shape)
            )
    if rcond is None:
        # default degeneracy cutoff: machine epsilon scaled by matrix size
        rcond = numpy.finfo(float).eps * max(a.shape)
    da = a - amean
    # SVD of the mean matrix; u0, s0, v0T are ordinary float arrays
    u0,s0,v0T = numpy.linalg.svd(amean, compute_uv=True, full_matrices=True)
    k = min(a.shape)
    # singular values corrected to first order in the deviation da
    # (s_i ~ s0_i + u0_i' . da . v0_i)
    s = s0 + [
        u0[:, i].dot(da.dot(v0T[i, :])) for i in range(k)
        ]
    if compute_uv:
        # promote to object arrays so GVar corrections can be accumulated
        u = numpy.array(u0, dtype=object)
        vT = numpy.array(v0T, dtype=object)
        # u first
        # daaT is the first-order change of a.a' induced by da
        daaT = da.dot(a.T) + a.dot(da.T)
        s02 = numpy.zeros(daaT.shape[0], float)
        s02[:len(s0)] = s0 ** 2
        for j in range(s02.shape[0]):
            for i in range(k):
                if i == j:
                    continue
                ds2 = s02[i]  - s02[j]
                # skip (near-)degenerate pairs per the rcond cutoff; the
                # correction below would blow up as ds2 -> 0
                if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
                    continue
                # first-order mixing of u0 columns (perturbation-style
                # correction -- TODO confirm against gvar docs)
                u[:, i] +=  u0[:, j]  * u0[:, j].dot(daaT.dot(u0[:, i])) / ds2
        # v next
        # daTa is the first-order change of a'.a induced by da
        daTa = da.T.dot(a) + a.T.dot(da)
        s02 = numpy.zeros(daTa.shape[0], float)
        s02[:len(s0)] = s0 ** 2
        for j in range(s02.shape[0]):
            for i in range(k):
                if i == j:
                    continue
                ds2 = s02[i]  - s02[j]
                if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
                    continue
                # analogous first-order mixing of v0T rows
                vT[i, :] +=  v0T[j, :]  * v0T[j, :].dot(daTa.dot(v0T[i, :])) / ds2
        # keep only the k columns/rows matching the k singular values
        return u[:,:k], s, vT[:k, :]
    else:
        return s
7.
SheffieldML/GPy · state_space_main.py
Match rating: 47.1% · See similar code snippets
python logo
def R_isrk(self, k):
        """
        Function returns the inverse square root of R matrix on step k.

        Parameters
        ----------
        k : int
            Time-step index; used to look up which R slice applies.

        Returns
        -------
        inv_square_root : ndarray
            Inverse square root of R (scalar sqrt for the 1-D case,
            SVD-based factor otherwise).
        """
        # map step k to the index of the R slice that is active at that step
        # (assumes self.index row self.R_time_var_index holds these indices
        #  -- TODO confirm against class definition, not visible here)
        ind = int(self.index[self.R_time_var_index, k])
        R = self.R[:, :, ind]

        if (R.shape[0] == 1):  # 1-D case handle simpler. No storage
            # of the result, just compute it each time.
            inv_square_root = np.sqrt(1.0/R)
        else:
            if self.svd_each_time:
                # recompute the SVD on every call (no caching)

                (U, S, Vh) = sp.linalg.svd(R, full_matrices=False,
                                           compute_uv=True, overwrite_a=False,
                                           check_finite=True)

                # U scaled column-wise by 1/sqrt(S); serves as the
                # inverse-square-root factor of R
                inv_square_root = U * 1.0/np.sqrt(S)
            else:
                # cache results per R-slice index in self.R_square_root
                if ind in self.R_square_root:
                    inv_square_root = self.R_square_root[ind]
                else:
                    (U, S, Vh) = sp.linalg.svd(R, full_matrices=False,
                                               compute_uv=True,
                                               overwrite_a=False,
                                               check_finite=True)

                    inv_square_root = U * 1.0/np.sqrt(S)

                    self.R_square_root[ind] = inv_square_root

        return inv_square_root
8.
scot-dev/scot · connectivity.py
Match rating: 46.97% · See similar code snippets
python logo
def Cinv(self):
        """Inverse of the noise covariance.

        Returns ``inv(self.c)``; if ``self.c`` is singular, prints a
        warning and falls back to an identity matrix of the same size.
        """
        try:
            return np.linalg.inv(self.c)
        # BUGFIX: np.linalg.linalg was a private alias removed in
        # NumPy 2.0; np.linalg.LinAlgError is the public, stable name.
        except np.linalg.LinAlgError:
            print('Warning: non-invertible noise covariance matrix c.')
            return np.eye(self.c.shape[0])
9.
Match rating: 46.15% · See similar code snippets
python logo
def features_check_singular(X, tol=1e-8):
    """Checks if a set of features/variables X result in a
    ill-conditioned matrix dot(X.T,T)

    Parameters:
    -----------
    X : ndarray
        An NxM array with N observations (rows)
        and M features/variables (columns).

        Note: Make sure that X variables are all normalized or
        or scaled, e.g.
            X = sklearn.preprocessing.normalize(rawdata, norm='l2')

    tol : float
        Threshold when to consider a Singular Value s_k (U*S*V^T of SVD)
        is considered to small s_k<tol. The Default is tol=1e-8.

    Returns:
    --------
    flag : bool
        True if the X leads to singular matrix dot(X.T,T), or
        False if X does not not lead to singular matrix.

    num : int
        Number of Singular Values that failed the s_k<tol test

    s : ndarray
        The Singular Values computed by numpy.linalg.svd

    Usage:
    ------
    * flag. During Forward-Selection check if an newly added
      variable causes an ill-conditioned matrix.
    * num. Get an indication how many variables still needs to
      be eliminated during Backward-Selection
    """
    import numpy as np
    # singular values of the Gram matrix X'X (no need for U and V here)
    singular_values = np.linalg.svd(np.dot(X.T, X), compute_uv=False)
    below_tol = singular_values < tol
    return bool(np.any(below_tol)), below_tol.sum(), singular_values
10.
cokelaer/spectrum · cholesky.py
Match rating: 45.87% · See similar code snippets
python logo
def _numpy_cholesky(A, B):
    """Solve Ax=B using numpy cholesky solver

    A = LU

    in the case where A is square and Hermitian, A = L.L* where L* is
    transpoed and conjugate matrix

    Ly = b

    where

    Ux=y

    so x = U^{-1} y
    where U = L*
    and y = L^{-1} B
    """
    L = numpy.linalg.cholesky(A)
    # A=L*numpy.transpose(L).conjugate()
    # Ly = b
    y = numpy.linalg.solve(L,B)
    # Ux = y
    x = numpy.linalg.solve(L.transpose().conjugate(),y)
    return x, L