BF/ENH: fixed 'ValueError: buffer source array is read-only' when dict_fact_fast.pyx functions are called with mmapped X
dohmatob committed Jan 13, 2017
1 parent d993dbb commit c69fdf2
Showing 2 changed files with 20 additions and 11 deletions.
26 changes: 17 additions & 9 deletions modl/dict_fact_fast.pyx
@@ -31,7 +31,11 @@ ctypedef void (*AXPY)(int* N, floating* alpha, floating* X, int* incX,
ctypedef floating (*ASUM)(int* N, floating* X, int* incX) nogil

def _enet_regression_multi_gram(floating[:, :, ::1] G, floating[:, ::1] Dx,
-floating[:, ::1] X,
+
+# on mmapped input, floating[:, ::1] X ==> ValueError: buffer
+# source array is read-only
+np.ndarray[floating, ndim=2] X,
+
floating[: , ::1] code,
long[:] indices,
floating l1_ratio, floating alpha,
@@ -65,7 +69,7 @@ def _enet_regression_multi_gram(floating[:, :, ::1] G, floating[:, ::1] Dx,

cdef floating[:] this_code
cdef floating[::1] this_Dx
-cdef floating[:] this_X
+cdef np.ndarray[floating, ndim=1] this_X
cdef floating[:, ::1] this_G

if floating is float:
@@ -113,7 +117,11 @@ def _batch_weight(long count, long batch_size,


def _enet_regression_single_gram(floating[:, ::1] G, floating[:, ::1] Dx,
-floating[:, ::1] X,
+
+# on mmapped input, floating[:, ::1] X ==> ValueError: buffer
+# source array is read-only
+np.ndarray[floating, ndim=2] X,
+
floating[:, ::1] code,
long[:] indices,
floating l1_ratio, floating alpha,
@@ -147,9 +155,8 @@ def _enet_regression_single_gram(floating[:, ::1] G, floating[:, ::1] Dx,
cdef str format
cdef floating[:] this_code
cdef floating[::1] this_Dx
-cdef floating[:] this_X
+cdef np.ndarray[floating, ndim=1] this_X
cdef floating[:, ::1] G_copy
-cdef floating[:, ::1] code_copy

if floating is float:
posv = sposv
@@ -168,9 +175,6 @@ def _enet_regression_single_gram(floating[:, ::1] G, floating[:, ::1] Dx,
for j in range(n_components):
G[j, j] += alpha

-code_copy = view.array((batch_size, n_components),
-                       sizeof(floating),
-                       format=format, mode='c')
posv(&UP, &n_components, &batch_size,
G_ptr,
&n_components,
@@ -251,7 +255,11 @@ cdef floating max(int n, floating* a) nogil:
cdef void enet_coordinate_descent_gram(floating[:] w, floating alpha, floating beta,
floating[:, ::1] Q,
floating[::1] q,
-floating[:] y,
+
+# on mmapped input, floating[:] y ==> ValueError: buffer
+# source array is read-only
+np.ndarray[floating, ndim=1] y,
+
int max_iter, floating tol, bint positive):
"""Cython version of the coordinate descent algorithm
for Elastic-Net regression
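Note on the fix: a Cython typed memoryview argument such as floating[:, ::1] X requests a writable buffer from whatever array is passed in, and a read-only memory map cannot provide one, which is what raises 'ValueError: buffer source array is read-only'; the np.ndarray[floating, ndim=2] buffer syntax used above does not demand write access. The plain-Python sketch below (hypothetical file path, not part of this commit) only illustrates the kind of read-only memmapped input that triggers the error once it reaches the compiled functions:

    import os
    import tempfile

    import numpy as np

    # Write a small array to disk and reopen it memory-mapped read-only,
    # which is how shelved/memmapped data reaches the Cython routines.
    path = os.path.join(tempfile.mkdtemp(), "X.npy")  # hypothetical path
    np.save(path, np.random.rand(5, 3))
    X = np.load(path, mmap_mode="r")

    print(X.flags.writeable)  # False: the buffer is read-only

    # Any consumer that asks the buffer protocol for write access fails;
    # in compiled code with a non-const memoryview signature this surfaces
    # as "ValueError: buffer source array is read-only".
    try:
        memoryview(X)[0, 0] = 0.0
    except TypeError as exc:
        print("cannot write through the buffer:", exc)

A const-qualified memoryview (const floating[:, ::1]) is the more modern way to accept read-only input, but it was not available in the Cython releases of early 2017, which is presumably why the older buffer syntax was chosen here.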
5 changes: 3 additions & 2 deletions modl/fmri.py
@@ -322,8 +322,9 @@ def fit(self, imgs, y=None, confounds=None, raw=False):

self.masker_._shelving = False

-# components = self.dict_fact_.components_
-# self.components_ = _normalize_and_flip(components)
+# normalize learned components
+components = self.dict_fact_.components_
+self.components_ = _normalize_and_flip(components)

return self

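The fmri.py change re-enables post-processing of the learned dictionary: the fitted components are read off the estimator and passed through _normalize_and_flip before being stored on self.components_. The sketch below is not modl's _normalize_and_flip; it is only an illustration of the convention such a step typically follows for dictionary/ICA components (unit l2 norm per component, sign flipped so the largest-magnitude loading is positive):

    import numpy as np

    def normalize_and_flip_sketch(components, eps=1e-12):
        """Illustrative stand-in (not modl's _normalize_and_flip): scale each
        component to unit l2 norm and flip its sign so that its
        largest-magnitude coefficient is positive."""
        components = np.array(components, dtype=float, copy=True)
        norms = np.linalg.norm(components, axis=1, keepdims=True)
        components /= np.maximum(norms, eps)
        idx = np.abs(components).argmax(axis=1)
        signs = np.sign(components[np.arange(components.shape[0]), idx])
        signs[signs == 0] = 1.0
        return components * signs[:, None]

    # Toy usage: the first row gets sign-flipped, the second is only rescaled.
    C = np.array([[0.0, -3.0, 1.0],
                  [2.0, 2.0, 0.0]])
    print(normalize_and_flip_sketch(C))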
