From 51c0cce288eec375c6badefad518aeab5a246876 Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Fri, 30 Aug 2024 15:11:35 +0200 Subject: [PATCH 01/12] field version --- src/Sparse/StructuredGauss.jl | 770 ++++++++++++++++++++++++++++++++++ 1 file changed, 770 insertions(+) create mode 100644 src/Sparse/StructuredGauss.jl diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl new file mode 100644 index 0000000000..6e2a42309a --- /dev/null +++ b/src/Sparse/StructuredGauss.jl @@ -0,0 +1,770 @@ +mutable struct data_StructGauss + A + Y + single_row_limit + base + nlight + nsingle + light_weight + col_list + col_list_perm #perm gives new ordering of original A[i] via their indices + col_list_permi #A[permi[i]]=A[i] before permutation = list of sigma(i) (recent position of former A[i]) + is_light_col + is_single_col + single_col #single_col[i] = c >= 0 if col c has its only entry in row i, i always recent position + heavy_ext + heavy_col_idx + heavy_col_len + heavy_mapi + heavy_map + + function data_StructGauss(A) + Y = sparse_matrix(base_ring(A), 0, ncols(A)) + col_list = _col_list(A) + return new(A, + Y, + 1, + 1, + ncols(A), + 0, + [length(A[i]) for i in 1:nrows(A)], + col_list, + collect(1:nrows(A)), + collect(1:nrows(A)), + fill(true, ncols(A)), + fill(false, ncols(A)), + fill(0, nrows(A)), + 0, + Int[], + Int[], + Int[], + fill(0, ncols(A))) + end +end + +function _col_list(A) + n = nrows(A) + m = ncols(A) + col_list = [Array{Int64}([]) for i = 1:m] + for i in 1:n + for c in A[i].pos + col = col_list[c] + push!(col, i) + end + end + return col_list +end + + +function structured_gauss(A::SMat{T}) where T <: RingElem + SG = part_echolonize!(A) + return compute_kernel(SG) +end + +function structured_gauss_field(A::SMat{T}) where T <: FieldElem + SG = part_echolonize_field!(A) + return compute_kernel_field(SG) +end + +################################################################################ +# +# Partial Echolonization +# +################################################################################ + +#= +function part_echolonize!(A) + A = delete_zero_rows!(A) + n = nrows(A) + m = ncols(A) + SG = data_StructGauss(A) + single_rows_to_top!(SG) + + while SG.nlight > 0 && SG.base <= n + build_part_ref!(SG) + for i = 1:m + SG.is_light_col[i] && @assert length(SG.col_list[i]) != 1 + end + (SG.nlight == 0 || SG.base > n) && break + best_single_row = find_best_single_row(SG) + best_single_row < 0 && @assert(SG.base == SG.single_row_limit) + + if best_single_row < 0 + find_dense_cols(SG) + turn_heavy(SG) + continue #while SG.nlight > 0 && SG.base <= SG.A.r + end + + eliminate_and_update!(best_single_row, SG) + end + return SG +end +=# + +function part_echolonize_field!(A) + A = delete_zero_rows!(A) + n = nrows(A) + m = ncols(A) + SG = data_StructGauss(A) + single_rows_to_top!(SG) + + while SG.nlight > 0 && SG.base <= n + build_part_ref_field!(SG) + for i = 1:m + SG.is_light_col[i] && @assert length(SG.col_list[i]) != 1 + end + (SG.nlight == 0 || SG.base > n) && break + best_single_row = find_best_single_row_field(SG) + best_single_row < 0 && @assert(SG.base == SG.single_row_limit) + + if best_single_row < 0 + find_dense_cols(SG) + turn_heavy(SG) + continue #while SG.nlight > 0 && SG.base <= SG.A.r + end + + eliminate_and_update_field!(best_single_row, SG) + end + return SG +end + +function single_rows_to_top!(SG) + for i = 1:nrows(SG.A) + len = length(SG.A[i]) + @assert !iszero(len) + if len == 1 + @assert SG.single_row_limit 
<=i + if i != SG.single_row_limit + swap_rows_perm(i, SG.single_row_limit, SG) + end + SG.single_row_limit +=1 + end + end + return SG +end + +#= +function build_part_ref!(SG) + queue = collect(ncols(SG.A):-1:1) + while !isempty(queue) + queue_new = Int[] + for j in queue + if length(SG.col_list[j])==1 && SG.is_light_col[j] + singleton_row_origin = only(SG.col_list[j]) + singleton_row_idx = SG.col_list_permi[singleton_row_origin] + for jj in SG.A[singleton_row_idx].pos + if SG.is_light_col[jj] + @assert singleton_row_origin in SG.col_list[jj] + j_idx = findfirst(isequal(singleton_row_origin), SG.col_list[jj]) + deleteat!(SG.col_list[jj],j_idx) + length(SG.col_list[jj]) == 1 && push!(queue_new, jj) + end + end + SG.is_light_col[j] = false + SG.is_single_col[j] = true + SG.single_col[singleton_row_idx] = j + SG.nlight-=1 + #SG.nlight<0 && print("nlight < 0 singleton case") + SG.nsingle+=1 + swap_i_into_base(singleton_row_idx, SG) + SG.base+=1 + end + end + queue = queue_new + end +end +=# + +function build_part_ref_field!(SG) + queue = collect(ncols(SG.A):-1:1) + while !isempty(queue) + queue_new = Int[] + for j in queue + if length(SG.col_list[j])==1 && SG.is_light_col[j] + singleton_row_origin = only(SG.col_list[j]) + singleton_row_idx = SG.col_list_permi[singleton_row_origin] + @assert !iszero(SG.A[singleton_row_idx, j]) + scale_row!(SG.A, singleton_row_idx, inv(SG.A[singleton_row_idx, j])) + for jj in SG.A[singleton_row_idx].pos + if SG.is_light_col[jj] + @assert singleton_row_origin in SG.col_list[jj] + j_idx = findfirst(isequal(singleton_row_origin), SG.col_list[jj]) + deleteat!(SG.col_list[jj],j_idx) + length(SG.col_list[jj]) == 1 && push!(queue_new, jj) + end + end + SG.is_light_col[j] = false + SG.is_single_col[j] = true + SG.single_col[singleton_row_idx] = j + SG.nlight-=1 + #SG.nlight<0 && print("nlight < 0 singleton case") + SG.nsingle+=1 + swap_i_into_base(singleton_row_idx, SG) + SG.base+=1 + end + end + queue = queue_new + end +end + +#= +function find_best_single_row(SG) + best_single_row = -1 + best_col = NaN + best_val = NaN + best_len = -1 + best_is_one = false + for i = SG.base:SG.single_row_limit-1 #here not the case in first loop + single_row = SG.A[i] + single_row_len = length(single_row) + w = SG.light_weight[i] + @assert w == 1 + j_light = find_light_entry(single_row.pos, SG.is_light_col) + single_row_val = SG.A[i, j_light] + @assert length(SG.col_list[j_light]) > 1 + is_one = isone(single_row_val)||isone(-single_row_val) + if best_single_row < 0 + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = is_one + best_val = single_row_val + elseif !best_is_one && is_one + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = true + best_val = single_row_val + elseif (is_one == best_is_one && single_row_len < best_len) + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = is_one + best_val = single_row_val + end + end + return best_single_row +end +=# + +function find_best_single_row_field(SG) + best_single_row = -1 + best_len = -1 + for i = SG.base:SG.single_row_limit-1 + single_row = SG.A[i] + single_row_len = length(single_row) + w = SG.light_weight[i] + @assert w == 1 + if best_single_row < 0 || single_row_len < best_len + best_single_row = i + best_len = single_row_len + end + end + return best_single_row +end + +function find_dense_cols(SG) + m = ncols(SG.A) + nheavy = m - (SG.nlight + SG.nsingle) + nheavy == 0 ? 
SG.heavy_ext = max(div(SG.nlight,20),1) : SG.heavy_ext = max(div(SG.nlight,100),1) + SG.heavy_col_idx = fill(-1, SG.heavy_ext) #indices (descending order for same length) + SG.heavy_col_len = fill(-1, SG.heavy_ext)#length of cols in heavy_idcs (ascending) + light_cols = findall(x->SG.is_light_col[x], 1:m) + for i = m:-1:1 + if SG.is_light_col[i] + col_len = length(SG.col_list[i]) + if col_len > SG.heavy_col_len[1] + if SG.heavy_ext == 1 + SG.heavy_col_idx[1] = i + SG.heavy_col_len[1] = col_len + else + #jk = SG.heavy_ext + for j = SG.heavy_ext:-1:2 + if col_len >= SG.heavy_col_len[j] + for k = 1:j-1 + SG.heavy_col_idx[k] = SG.heavy_col_idx[k + 1]#replace by delete and insert!!! + SG.heavy_col_len[k] = SG.heavy_col_len[k + 1] + end + SG.heavy_col_idx[j] = i + SG.heavy_col_len[j] = col_len + break + end + end + end + end + end + end + @assert light_cols == findall(x->SG.is_light_col[x], 1:m) +end + +function turn_heavy(SG) + for j = 1:SG.heavy_ext + c = SG.heavy_col_idx[j] + if c<0 + continue + end + SG.is_light_col[c] = false + lt2hvy_col = SG.col_list[c] + lt2hvy_len = length(lt2hvy_col) + @assert lt2hvy_len == length(SG.col_list[c]) + for k = 1:lt2hvy_len + i_origin = lt2hvy_col[k] + i_now = SG.col_list_permi[i_origin] + @assert SG.light_weight[i_now] > 0 + SG.light_weight[i_now]-=1 + handle_new_light_weight!(i_now, SG) + #test_Y(SG) + end + end + SG.nlight -= SG.heavy_ext + #SG.nlight<0 && print("nlight < 0 extension case") +end + +function handle_new_light_weight!(i, SG) + w = SG.light_weight[i] + if w == 0 + swap_i_into_base(i, SG) + SG.single_col[SG.base] = 0 + move_into_Y(SG.base, SG) + SG.base+=1 + elseif w == 1 + if i > SG.single_row_limit + swap_rows_perm(i, SG.single_row_limit, SG) + end + SG.single_row_limit += 1 + end + return SG +end + +#= +function eliminate_and_update!(best_single_row, SG) + @assert best_single_row != 0 + best_row = deepcopy(SG.A[best_single_row]) + best_col = find_light_entry(best_row.pos, SG.is_light_col) + @assert length(SG.col_list[best_col]) > 1 + best_val = SG.A[best_single_row, best_col] + @assert !iszero(best_val) + best_col_idces = SG.col_list[best_col] + row_idx = 0 + while length(best_col_idces) > 1 + row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, SG) + @assert best_col_idces == SG.col_list[best_col] + @assert row_idx > 0 + @assert SG.col_list_perm[row_idx] in SG.col_list[best_col] + L_row = SG.col_list_perm[row_idx] + add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) + update_after_addition!(L_row, row_idx, best_col, SG) + handle_new_light_weight!(row_idx, SG) + end + return SG +end +=# + +function eliminate_and_update_field!(best_single_row, SG) + @assert best_single_row != 0 + best_col = find_light_entry(SG.A[best_single_row].pos, SG.is_light_col) + @assert length(SG.col_list[best_col]) > 1 + best_val = SG.A[best_single_row, best_col] + @assert !iszero(best_val) + scale_row!(SG.A, best_single_row, inv(best_val)) + best_val = SG.A[best_single_row, best_col] + @assert isone(best_val) + best_row = deepcopy(SG.A[best_single_row]) + best_col_idces = SG.col_list[best_col] + row_idx = 0 + while length(best_col_idces) > 1 + row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, SG) + @assert best_col_idces == SG.col_list[best_col] + @assert SG.col_list_perm[row_idx] in SG.col_list[best_col] + @assert row_idx > 0 + L_row = SG.col_list_perm[row_idx] + add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, SG) + update_after_addition!(L_row, row_idx, best_col, SG) + 
handle_new_light_weight!(row_idx, SG) + end + return SG +end + +function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG::data_StructGauss) + for L_row in best_col_idces[end:-1:1] #right??? breaking condition missing? + row_idx = SG.col_list_permi[L_row] + SG.A[row_idx] == best_row && continue + row_idx < SG.base && continue + break + end + return row_idx +end + +#= +function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) + @assert L_row in SG.col_list[best_col] + @assert !(0 in SG.A[row_idx].values) + val = SG.A[row_idx, best_col] + @assert !iszero(val) + #case !over_field && over_Z: + #g = gcd(lift(ZZ, val), lift(ZZ, best_val)) + g = gcd(val, best_val) + val_red = divexact(val, g) + best_val_red = divexact(best_val, g) + @assert L_row in SG.col_list[best_col] + for c in SG.A[row_idx].pos + @assert !isempty(SG.col_list[c]) + if SG.is_light_col[c] + jj = findfirst(isequal(L_row), SG.col_list[c]) + deleteat!(SG.col_list[c], jj) + end + end + scale_row!(SG.A, row_idx, best_val_red) + @assert !(0 in SG.A[row_idx].values) + Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) + @assert iszero(SG.A[row_idx, best_col]) + return SG +end +=# + +function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, SG) + @assert L_row in SG.col_list[best_col] + @assert !(0 in SG.A[row_idx].values) + val = SG.A[row_idx, best_col] + @assert !iszero(val) + @assert L_row in SG.col_list[best_col] + for c in SG.A[row_idx].pos + @assert !isempty(SG.col_list[c]) + if SG.is_light_col[c] + jj = findfirst(isequal(L_row), SG.col_list[c]) + deleteat!(SG.col_list[c], jj) + end + end + #scale_row!(SG.A, row_idx, best_val_red) + @assert !(0 in SG.A[row_idx].values) + Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val) + @assert iszero(SG.A[row_idx, best_col]) + return SG +end + +function update_after_addition!(L_row, row_idx::Int, best_col, SG::data_StructGauss) + SG.light_weight[row_idx] = 0 + for c in SG.A[row_idx].pos + @assert c != best_col + SG.is_light_col[c] && sort!(push!(SG.col_list[c], L_row)) + SG.is_light_col[c] && (SG.light_weight[row_idx]+=1) + end + return SG +end + +################################################################################ +# +# Small Auxiliary Functions +# +################################################################################ + +function swap_entries(v, i,j) #swaps entries v[i], v[j] + v[i],v[j] = v[j],v[i] +end + +function swap_rows_perm(i, j, SG) + if i != j + swap_rows!(SG.A, i, j) #swap!(A[i],A[j]) throws error later??? 
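+    # keep the per-row bookkeeping (single_col, col_list_perm, its inverse
+    # col_list_permi and the cached light_weight) in sync with the row swap above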
+ swap_entries(SG.single_col, i, j) + swap_entries(SG.col_list_perm, i, j) + swap_entries(SG.col_list_permi, SG.col_list_perm[i], SG.col_list_perm[j]) + swap_entries(SG.light_weight, i, j) + end +end + +function swap_i_into_base(i, SG::data_StructGauss) + if i < SG.single_row_limit + swap_rows_perm(i, SG.base, SG) + else + if i != SG.single_row_limit + swap_rows_perm(SG.base, SG.single_row_limit, SG) + end + SG.single_row_limit +=1 + swap_rows_perm(SG.base, i, SG) + end +end + +function find_light_entry(position_array::Vector{Int64}, is_light_col::Vector{Bool})::Int64 + for j in position_array[end:-1:1] + if is_light_col[j] + return j + end + end +end + +function move_into_Y(i, SG::data_StructGauss) + @assert i == SG.base + push!(SG.Y, deepcopy(SG.A[SG.base])) + for base_c in SG.A[SG.base].pos + @assert !SG.is_light_col[base_c] + @assert !isempty(SG.col_list[base_c]) + end + SG.A.nnz-=length(SG.A[SG.base]) + empty!(SG.A[SG.base].pos), empty!(SG.A[SG.base].values) +end + +################################################################################ +# +# Kernel Computation +# +################################################################################ + +#= +function compute_kernel(SG, with_light = true) + update_light_cols!(SG) + @assert SG.nlight > -1 + collect_dense_cols!(SG) + _nullity, _dense_kernel = dense_kernel(SG) + l, K = init_kernel(_nullity, _dense_kernel, SG, with_light) + return compose_kernel(l, K, SG) +end +=# + +function compute_kernel_field(SG, with_light = true) + update_light_cols!(SG) + @assert SG.nlight > -1 + collect_dense_cols!(SG) + _nullity, _dense_kernel = dense_kernel(SG) + l, K = init_kernel(_nullity, _dense_kernel, SG, with_light) + return compose_kernel_field(l, K, SG) +end + +function update_light_cols!(SG) + for i = 1:ncols(SG.A) + if SG.is_light_col[i] && !isempty(SG.col_list[i]) + SG.is_light_col[i] = false + SG.nlight -= 1 + end + end + return SG +end + +function collect_dense_cols!(SG) + m = ncols(SG.A) + nheavy = m - SG.nlight - SG.nsingle + j = 1 + for i = 1:m + if !SG.is_light_col[i] && !SG.is_single_col[i] + SG.heavy_map[i] = j + push!(SG.heavy_mapi,i) + j+=1 + end + end + @assert length(SG.heavy_mapi)==nheavy + return +end + +function dense_kernel(SG) + ST = sparse_matrix(base_ring(SG.A), 0, nrows(SG.Y)) + YT = transpose(SG.Y) + for j in SG.heavy_mapi + push!(ST, YT[j]) + end + S = transpose(ST) + d, _dense_kernel = nullspace(matrix(S)) + return size(_dense_kernel)[2], _dense_kernel +end + +#= +function dense_kernel_new(SG) + ST = sparse_matrix(base_ring(SG.A), 0, nrows(SG.Y)) + YT = transpose(SG.Y) + for j in SG.heavy_mapi + push!(ST, YT[j]) + end + S = transpose(ST) + _dense_kernel = kernel(matrix(S), side=:right) + return 1, _dense_kernel + end + =# + +function init_kernel(_nullity, _dense_kernel, SG, with_light=false) + R = base_ring(SG.A) + m = ncols(SG.A) + if with_light + l = _nullity+SG.nlight + else + l = _nullity + end + K = zeros(R, m, l) + #initialisation + ilight = 1 + for i = 1:m + if SG.is_light_col[i] + if with_light + @assert ilight <= SG.nlight + K[i, _nullity+ilight] = one(R) + ilight +=1 + end + else + j = SG.heavy_map[i] + if j>0 + for c = 1:_nullity + K[i,c] = _dense_kernel[j, c] + end + end + end + end + return l, K +end + +#= +function compose_kernel_elem(_kernel, single_part, SG) + nfail = 0 + failure = [] + for i=SG.base-1:-1:1 + c = SG.single_col[i] + if c>0 + y = 0 + x = NaN + j=1 + while j <= length(SG.A[i]) + cc = SG.A[i].pos[j] + xx = SG.A[i].values[j] + if cc==c + x = xx + j+=1 + elseif !(cc in single_part) + 
y += (xx*_kernel[cc])
+          j+=1
+        else
+          break
+        end
+      end
+      if j <= length(SG.A[i])
+        nfail +=1
+        push!(failure, i)
+      else
+        _kernel[c]=-y*inv(x)
+      end
+    end
+  end
+  return _kernel, failure
+end
+=#
+
+#=
+function compose_kernel(l, K, SG)
+  R = base_ring(SG.A)
+  n = nrows(SG.A)
+  for i = n:-1:1
+    c = SG.single_col[i]
+    if c>0
+      x = R(0)
+      y = zeros(R,l)
+      for idx in 1:length(SG.A[i])
+        cc = SG.A[i].pos[idx]
+        xx = SG.A[i].values[idx]
+        if cc == c
+          x = xx
+        else
+          for k = 1:l
+            kern_c = K[cc,k]
+            !iszero(kern_c) && (y[k]-=xx*kern_c)
+          end
+        end
+      end
+      for k = 1:l
+        x_inv = try
+          inv(x)
+        catch
+          R(0)
+        end
+        if iszero(x_inv)
+          K[:,k] *= x
+          K[c, k] = y[k]
+        else
+          K[c, k] = y[k]*x_inv
+        end
+      end
+    end
+  end
+  return l, K
+end
+=#
+
+function compose_kernel_field(l, K, SG)
+  R = base_ring(SG.A)
+  n = nrows(SG.A)
+  for i = n:-1:1
+    c = SG.single_col[i]
+    if c>0
+      y = zeros(R,l)
+      for idx in 1:length(SG.A[i])
+        cc = SG.A[i].pos[idx]
+        xx = SG.A[i].values[idx]
+        if cc == c
+          @assert isone(xx)
+        else
+          for k = 1:l
+            kern_c = K[cc,k]
+            !iszero(kern_c) && (y[k]-=xx*kern_c)
+          end
+        end
+      end
+      for k = 1:l
+        K[c, k] = y[k]
+      end
+    end
+  end
+  return l, K
+end
+
+#=
+function kernel_matrix(_nullity, _dense_kernel, SG)
+  R = base_ring(SG.A)
+  n = nrows(SG.A)
+  m = ncols(SG.A)
+  l = _nullity+SG.nlight
+  K = zeros(R, m, l)
+  #initialisation
+  ilight = 1
+  for i = 1:m
+    if SG.is_light_col[i]
+      @assert ilight <= SG.nlight
+      K[i, _nullity+ilight] = one(R)
+      ilight +=1
+    else
+      j = SG.heavy_map[i]
+      if j>0
+        for c = 1:_nullity
+          K[i,c] = _dense_kernel[j, c]
+        end
+      end
+    end
+  end
+#compose kernel in single cols
+  for i = n:-1:1
+    c = SG.single_col[i]
+    if c>0
+      y = zeros(R,l)
+      for idx in 1:length(SG.A[i])
+        cc = SG.A[i].pos[idx]
+        xx = SG.A[i].values[idx]
+        if cc == c
+          @assert isone(xx)
+        else
+          for k = 1:l
+            kern_c = K[cc,k]
+            !iszero(kern_c) && (y[k]-=xx*kern_c)
+          end
+        end
+      end
+      for k = 1:l
+        K[c, k] = y[k]
+      end
+    end
+  end
+  return(l, K)
+end
+=#
+
+function delete_zero_rows!(A::SMat{T}) where T #where s denotes the first row where we wanna start
+  for i=A.r:-1:1
+    if isempty(A[i].pos)
+      deleteat!(A.rows, i)
+      A.r-=1
+    end
+  end
+  return A
+end

From 4113aa9791186a4a889d7594bec74ebb1421a68d Mon Sep 17 00:00:00 2001
From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com>
Date: Fri, 30 Aug 2024 17:18:27 +0200
Subject: [PATCH 03/12] include

---
 src/Sparse.jl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Sparse.jl b/src/Sparse.jl
index 8543381644..90074db986 100644
--- a/src/Sparse.jl
+++ b/src/Sparse.jl
@@ -28,5 +28,6 @@ include("Sparse/Module.jl")
 include("Sparse/Trafo.jl")
 include("Sparse/HNF.jl")
 include("Sparse/Solve.jl")
+include("Sparse/StructuredGauss.jl")
 include("Sparse/UpperTriangular.jl")
 include("Sparse/Rref.jl")

From 25f6d86b00078b2b0fddd353d9cced544a384d45 Mon Sep 17 00:00:00 2001
From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com>
Date: Sat, 31 Aug 2024 12:43:18 +0200
Subject: [PATCH 04/12] loosen range restriction in sub

---
 src/Sparse/Matrix.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Sparse/Matrix.jl b/src/Sparse/Matrix.jl
index 7d5dc3dedf..b5fe744b06 100644
--- a/src/Sparse/Matrix.jl
+++ b/src/Sparse/Matrix.jl
@@ -846,7 +846,7 @@ end
 Return the submatrix of $A$, where the rows correspond to $r$ and the
 columns correspond to $c$.
""" -function sub(A::SMat{T}, r::AbstractUnitRange, c::AbstractUnitRange) where T +function sub(A::SMat{T}, r::AbstractRange, c::AbstractRange) where T B = sparse_matrix(base_ring(A)) B.nnz = 0 B.c = length(c) @@ -856,7 +856,7 @@ function sub(A::SMat{T}, r::AbstractUnitRange, c::AbstractUnitRange) where T for j=1:length(ra.values) if ra.pos[j] in c push!(rw.values, ra.values[j]) - push!(rw.pos, ra.pos[j]-first(c)+1) + push!(rw.pos, div(ra.pos[j]-first(c),step(c))+1) end end push!(B, rw) From ac7eb91c4f284c9204a3fa482837380ec38f766b Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Sat, 31 Aug 2024 20:19:32 +0200 Subject: [PATCH 05/12] Ring version --- src/Sparse/StructuredGauss.jl | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index 6e2a42309a..2ba3e03f53 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -72,7 +72,7 @@ end # ################################################################################ -#= + function part_echolonize!(A) A = delete_zero_rows!(A) n = nrows(A) @@ -99,7 +99,6 @@ function part_echolonize!(A) end return SG end -=# function part_echolonize_field!(A) A = delete_zero_rows!(A) @@ -143,7 +142,6 @@ function single_rows_to_top!(SG) return SG end -#= function build_part_ref!(SG) queue = collect(ncols(SG.A):-1:1) while !isempty(queue) @@ -173,7 +171,6 @@ function build_part_ref!(SG) queue = queue_new end end -=# function build_part_ref_field!(SG) queue = collect(ncols(SG.A):-1:1) @@ -207,7 +204,6 @@ function build_part_ref_field!(SG) end end -#= function find_best_single_row(SG) best_single_row = -1 best_col = NaN @@ -245,7 +241,6 @@ function find_best_single_row(SG) end return best_single_row end -=# function find_best_single_row_field(SG) best_single_row = -1 @@ -336,7 +331,6 @@ function handle_new_light_weight!(i, SG) return SG end -#= function eliminate_and_update!(best_single_row, SG) @assert best_single_row != 0 best_row = deepcopy(SG.A[best_single_row]) @@ -358,7 +352,6 @@ function eliminate_and_update!(best_single_row, SG) end return SG end -=# function eliminate_and_update_field!(best_single_row, SG) @assert best_single_row != 0 @@ -395,7 +388,6 @@ function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG return row_idx end -#= function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) @@ -414,13 +406,18 @@ function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) deleteat!(SG.col_list[c], jj) end end + @assert !(0 in SG.A[row_idx].values)#test scale_row!(SG.A, row_idx, best_val_red) - @assert !(0 in SG.A[row_idx].values) + @assert !(0 in SG.A[row_idx].values)#test + @assert !(0 in best_row.values) + if row_idx == 171 + @show(best_row,SG.A[row_idx], -val_red) + end Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) @assert iszero(SG.A[row_idx, best_col]) + @assert !(0 in SG.A[row_idx].values) return SG end -=# function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, SG) @assert L_row in SG.col_list[best_col] @@ -509,7 +506,6 @@ end # ################################################################################ -#= function compute_kernel(SG, with_light = true) update_light_cols!(SG) @assert SG.nlight > -1 @@ -518,7 +514,6 @@ function compute_kernel(SG, with_light = true) l, K = init_kernel(_nullity, _dense_kernel, 
SG, with_light) return compose_kernel(l, K, SG) end -=# function compute_kernel_field(SG, with_light = true) update_light_cols!(SG) @@ -643,7 +638,6 @@ function compose_kernel_elem(_kernel, single_part, SG) end =# -#= function compose_kernel(l, K, SG) R = base_ring(SG.A) n = nrows(SG.A) @@ -681,7 +675,6 @@ function compose_kernel(l, K, SG) end return l, K end -=# function compose_kernel_field(l, K, SG) R = base_ring(SG.A) From 03d0667dc739dab11079abd1347cbcc2911c416a Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Sun, 1 Sep 2024 17:33:32 +0200 Subject: [PATCH 06/12] TODO: nullspace for rings --- src/Sparse/Row.jl | 162 ++++++---------------------------- src/Sparse/StructuredGauss.jl | 5 -- 2 files changed, 29 insertions(+), 138 deletions(-) diff --git a/src/Sparse/Row.jl b/src/Sparse/Row.jl index f3716541b8..b8a6a8d2ff 100644 --- a/src/Sparse/Row.jl +++ b/src/Sparse/Row.jl @@ -51,9 +51,9 @@ julia> sparse_row_type(typeof(zero(QQ))) == typeof(x) true ``` """ -sparse_row_type(::T) where {T <: Union{NCRing, NCRingElem}} = sparse_row_type(T) -sparse_row_type(::Type{T}) where {T <: NCRing} = sparse_row_type(elem_type(T)) -sparse_row_type(::Type{T}) where {T <: NCRingElem} = SRow{T, sparse_inner_type(T)} +sparse_row_type(::T) where {T <: Union{Ring, RingElem}} = sparse_row_type(T) +sparse_row_type(::Type{T}) where {T <: Ring} = sparse_row_type(elem_type(T)) +sparse_row_type(::Type{T}) where {T <: RingElem} = SRow{T, sparse_inner_type(T)} ==(x::SRow{T}, y::SRow{T}) where {T} = (x.pos == y.pos) && (x.values == y.values) @@ -69,7 +69,7 @@ sparse_row_type(::Type{T}) where {T <: NCRingElem} = SRow{T, sparse_inner_type(T Constructs an empty row with base ring $R$. """ -function sparse_row(R::NCRing) +function sparse_row(R::Ring) return SRow(R) end @@ -79,7 +79,7 @@ end Constructs the sparse row $(a_i)_i$ with $a_{i_j} = x_j$, where $J = (i_j, x_j)_j$. The elements $x_i$ must belong to the ring $R$. """ -function sparse_row(R::NCRing, A::Vector{Tuple{Int, T}}; sort::Bool = true) where T +function sparse_row(R::Ring, A::Vector{Tuple{Int, T}}; sort::Bool = true) where T if sort && length(A) > 1 A = Base.sort(A, lt=(a,b) -> isless(a[1], b[1])) end @@ -92,7 +92,7 @@ end Constructs the sparse row $(a_i)_i$ over $R$ with $a_{i_j} = x_j$, where $J = (i_j, x_j)_j$. """ -function sparse_row(R::NCRing, A::Vector{Tuple{Int, Int}}; sort::Bool = true) +function sparse_row(R::Ring, A::Vector{Tuple{Int, Int}}; sort::Bool = true) if sort && length(A) > 1 A = Base.sort(A, lt=(a,b) -> isless(a[1], b[1])) end @@ -112,12 +112,12 @@ function swap!(A::SRow, B::SRow) end @doc raw""" - sparse_row(R::NCRing, J::Vector{Int}, V::Vector{T}) -> SRow{T} + sparse_row(R::Ring, J::Vector{Int}, V::Vector{T}) -> SRow{T} Constructs the sparse row $(a_i)_i$ over $R$ with $a_{i_j} = x_j$, where $J = (i_j)_j$ and $V = (x_j)_j$. """ -function sparse_row(R::NCRing, pos::Vector{Int}, val::AbstractVector{T}; sort::Bool = true) where T +function sparse_row(R::Ring, pos::Vector{Int}, val::AbstractVector{T}; sort::Bool = true) where T if sort && length(pos) > 1 p = sortperm(pos) pos = pos[p] @@ -294,7 +294,7 @@ end Create a new sparse row by coercing all elements into the ring $R$. """ -function change_base_ring(R::S, A::SRow{T}) where {T <: NCRingElem, S <: NCRing} +function change_base_ring(R::S, A::SRow{T}) where {T <: RingElem, S <: Ring} z = sparse_row(R) for (i, v) in A nv = R(v) @@ -321,7 +321,7 @@ end Given a sparse row $(a_i)_{i}$ and an index $j$ return $a_j$. 
""" -function Base.getindex(A::SRow{T}, i::Int) where {T <: NCRingElem} +function Base.getindex(A::SRow{T}, i::Int) where {T <: RingElem} i < 1 && error("Index must be positive") p = findfirst(isequal(i), A.pos) if p === nothing @@ -371,7 +371,7 @@ Base.IteratorSize(::SRow{T}) where T = Base.HasLength() @doc raw""" dot(A::SRow, B::SRow) -> RingElem -Returns the dot product of $A$ and $B$. Note the order matters in non-commutative case. +Returns the dot product of $A$ and $B$. """ function dot(A::SRow{T}, B::SRow{T}) where T @assert length(A) != 0 @@ -385,7 +385,7 @@ function dot(A::SRow{T}, B::SRow{T}) where T return v end if B.pos[b] == A.pos[a] - v += A.values[a] * B.values[b] + v += B.values[b] * A.values[a] end end return v @@ -447,39 +447,11 @@ end # Inplace scaling # ################################################################################ -@doc raw""" - scale_row!(a::SRow, b::NCRingElem) -> SRow -Returns the (left) product of $b \times a$ and reassigns the value of $a$ to this product. -For rows, the standard multiplication is from the left. -""" function scale_row!(a::SRow{T}, b::T) where T @assert !iszero(b) if isone(b) - return a - end - i = 1 - while i <= length(a) - a.values[i] = b*a.values[i] - if iszero(a.values[i]) - deleteat!(a.values, i) - deleteat!(a.pos, i) - else - i += 1 - end - end - return a -end - -@doc raw""" - scale_row_right!(a::SRow, b::NCRingElem) -> SRow - -Returns the (right) product of $a \times b$ and modifies $a$ to this product. -""" -function scale_row_right!(a::SRow{T}, b::T) where T - @assert !iszero(b) - if isone(b) - return a + return end i = 1 while i <= length(a) @@ -491,11 +463,6 @@ function scale_row_right!(a::SRow{T}, b::T) where T i += 1 end end - return a -end - -function scale_row_left!(a::SRow{T}, b::T) where T - return scale_row!(a,b) end ################################################################################ @@ -583,7 +550,6 @@ function *(A::SRow, b) return A*base_ring(A)(b) end -#left and right div not implemented function div(A::SRow{T}, b::T) where T B = sparse_row(base_ring(A)) if iszero(b) @@ -606,20 +572,13 @@ function div(A::SRow{T}, b::Integer) where T return div(A, base_ring(A)(b)) end -@doc raw""" - divexact(A::SRow, b::RingElem; check::Bool = true) -> SRow - -Return $C$ such that $a = b \cdot C$. Calls the function `divexact_left(A,b;check)` -""" -divexact(A::SRow{T}, b::T; check::Bool=true) where T <: RingElem = divexact_left(A, b; check) - -function divexact_left(A::SRow{T}, b::T; check::Bool=true) where T <: NCRingElem +function divexact(A::SRow{T}, b::T; check::Bool=true) where T B = sparse_row(base_ring(A)) if iszero(b) return error("Division by zero") end for (p,v) = A - nv = divexact_left(v, b; check=check) + nv = divexact(v, b; check=check) @assert !iszero(nv) push!(B.pos, p) push!(B.values, nv) @@ -631,33 +590,7 @@ function divexact(A::SRow{T}, b::Integer; check::Bool=true) where T if length(A.values) == 0 return deepcopy(A) end - return divexact_left(A, base_ring(A)(b), check=check) -end - -@doc raw""" - divexact_right(A::SRow, b::NCRingElem; check::Bool = true) -> SRow - -Return $C$ such that $A = C \cdot b. 
-""" -function divexact_right(A::SRow{T}, b::T; check::Bool=true) where T - B = sparse_row(base_ring(A)) - if iszero(b) - return error("Division by zero") - end - for (p,v) = A - nv = divexact_right(v, b; check=check) - @assert !iszero(nv) - push!(B.pos, p) - push!(B.values, nv) - end - return B -end - -function divexact_right(A::SRow{T}, b::Integer; check::Bool=true) where T - if length(A.values) == 0 - return deepcopy(A) - end - return divexact_right(A, base_ring(A)(b); check=check) + return divexact(A, base_ring(A)(b); check=check) end ################################################################################ @@ -684,12 +617,11 @@ end Returns the row $c A + B$. """ add_scaled_row(a::SRow{T}, b::SRow{T}, c::T) where {T} = add_scaled_row!(a, deepcopy(b), c) -add_left_scaled_row(a::SRow{T}, b::SRow{T}, c::T) where {T} = add_scaled_row!(a, deepcopy(b), c) @doc raw""" add_scaled_row!(A::SRow{T}, B::SRow{T}, c::T) -> SRow{T} -Adds the left scaled row $c A$ to $B$. +Returns the row $c A + B$ by changing $B$ in place. """ function add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T @assert a !== b @@ -698,58 +630,17 @@ function add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T t = base_ring(a)() while i <= length(a) && j <= length(b) if a.pos[i] < b.pos[j] - insert!(b.pos, j, a.pos[i]) - insert!(b.values, j, c*a.values[i]) - i += 1 - j += 1 - elseif a.pos[i] > b.pos[j] - j += 1 - else t = mul!(t, c, a.values[i]) - b.values[j] = addeq!(b.values[j], t) - - if iszero(b.values[j]) - deleteat!(b.values, j) - deleteat!(b.pos, j) - else + if !iszero(t) + insert!(b.pos, j, a.pos[i]) + insert!(b.values, j, c*a.values[i]) j += 1 end i += 1 - end - end - while i <= length(a) - push!(b.pos, a.pos[i]) - push!(b.values, c*a.values[i]) - i += 1 - end - return b -end - -add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T, tmp::SRow{T}) where T = add_scaled_row!(a, b, c) - -add_right_scaled_row(a::SRow{T}, b::SRow{T}, c::T) where {T} = add_right_scaled_row!(a, deepcopy(b), c) - -@doc raw""" - add_right_scaled_row!(A::SRow{T}, B::SRow{T}, c::T) -> SRow{T} - -Return the right scaled row $c A$ to $B$ by changing $B$ in place. -""" - -function add_right_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T - @assert a !== b - i = 1 - j = 1 - t = base_ring(a)() - while i <= length(a) && j <= length(b) - if a.pos[i] < b.pos[j] - insert!(b.pos, j, a.pos[i]) - insert!(b.values, j, a.values[i]*c) - i += 1 - j += 1 elseif a.pos[i] > b.pos[j] j += 1 else - t = mul!(t, a.values[i], c) + t = mul!(t, c, a.values[i]) b.values[j] = addeq!(b.values[j], t) if iszero(b.values[j]) @@ -762,13 +653,18 @@ function add_right_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T end end while i <= length(a) - push!(b.pos, a.pos[i]) - push!(b.values, a.values[i]*c) + t = mul!(t, c, a.values[i]) + if !iszero(t) + push!(b.pos, a.pos[i]) + push!(b.values, c*a.values[i]) + end i += 1 end return b end +add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T, tmp::SRow{T}) where T = add_scaled_row!(a, b, c) + ################################################################################ # # Lifting @@ -801,7 +697,7 @@ end Returns $A \cdot A^t$. 
""" function norm2(A::SRow{T}) where {T} - return sum(T[x * x for x in A.values]; init = zero(base_ring(A))) + return sum([x * x for x in A.values]) end ################################################################################ diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index 2ba3e03f53..ae5cac435d 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -406,13 +406,8 @@ function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) deleteat!(SG.col_list[c], jj) end end - @assert !(0 in SG.A[row_idx].values)#test scale_row!(SG.A, row_idx, best_val_red) - @assert !(0 in SG.A[row_idx].values)#test @assert !(0 in best_row.values) - if row_idx == 171 - @show(best_row,SG.A[row_idx], -val_red) - end Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) @assert iszero(SG.A[row_idx, best_col]) @assert !(0 in SG.A[row_idx].values) From 1447d6357f7df42b2ac15ac9ec014e2ebf567e26 Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:09:40 +0200 Subject: [PATCH 07/12] initialize output with MatrixSpace --- src/Sparse/Row.jl | 148 +++++++++++++++++++++++++++++----- src/Sparse/StructuredGauss.jl | 3 +- 2 files changed, 131 insertions(+), 20 deletions(-) diff --git a/src/Sparse/Row.jl b/src/Sparse/Row.jl index b8a6a8d2ff..f05a0d4594 100644 --- a/src/Sparse/Row.jl +++ b/src/Sparse/Row.jl @@ -51,9 +51,9 @@ julia> sparse_row_type(typeof(zero(QQ))) == typeof(x) true ``` """ -sparse_row_type(::T) where {T <: Union{Ring, RingElem}} = sparse_row_type(T) -sparse_row_type(::Type{T}) where {T <: Ring} = sparse_row_type(elem_type(T)) -sparse_row_type(::Type{T}) where {T <: RingElem} = SRow{T, sparse_inner_type(T)} +sparse_row_type(::T) where {T <: Union{NCRing, NCRingElem}} = sparse_row_type(T) +sparse_row_type(::Type{T}) where {T <: NCRing} = sparse_row_type(elem_type(T)) +sparse_row_type(::Type{T}) where {T <: NCRingElem} = SRow{T, sparse_inner_type(T)} ==(x::SRow{T}, y::SRow{T}) where {T} = (x.pos == y.pos) && (x.values == y.values) @@ -69,7 +69,7 @@ sparse_row_type(::Type{T}) where {T <: RingElem} = SRow{T, sparse_inner_type(T)} Constructs an empty row with base ring $R$. """ -function sparse_row(R::Ring) +function sparse_row(R::NCRing) return SRow(R) end @@ -79,7 +79,7 @@ end Constructs the sparse row $(a_i)_i$ with $a_{i_j} = x_j$, where $J = (i_j, x_j)_j$. The elements $x_i$ must belong to the ring $R$. """ -function sparse_row(R::Ring, A::Vector{Tuple{Int, T}}; sort::Bool = true) where T +function sparse_row(R::NCRing, A::Vector{Tuple{Int, T}}; sort::Bool = true) where T if sort && length(A) > 1 A = Base.sort(A, lt=(a,b) -> isless(a[1], b[1])) end @@ -92,7 +92,7 @@ end Constructs the sparse row $(a_i)_i$ over $R$ with $a_{i_j} = x_j$, where $J = (i_j, x_j)_j$. """ -function sparse_row(R::Ring, A::Vector{Tuple{Int, Int}}; sort::Bool = true) +function sparse_row(R::NCRing, A::Vector{Tuple{Int, Int}}; sort::Bool = true) if sort && length(A) > 1 A = Base.sort(A, lt=(a,b) -> isless(a[1], b[1])) end @@ -112,12 +112,12 @@ function swap!(A::SRow, B::SRow) end @doc raw""" - sparse_row(R::Ring, J::Vector{Int}, V::Vector{T}) -> SRow{T} + sparse_row(R::NCRing, J::Vector{Int}, V::Vector{T}) -> SRow{T} Constructs the sparse row $(a_i)_i$ over $R$ with $a_{i_j} = x_j$, where $J = (i_j)_j$ and $V = (x_j)_j$. 
""" -function sparse_row(R::Ring, pos::Vector{Int}, val::AbstractVector{T}; sort::Bool = true) where T +function sparse_row(R::NCRing, pos::Vector{Int}, val::AbstractVector{T}; sort::Bool = true) where T if sort && length(pos) > 1 p = sortperm(pos) pos = pos[p] @@ -294,7 +294,7 @@ end Create a new sparse row by coercing all elements into the ring $R$. """ -function change_base_ring(R::S, A::SRow{T}) where {T <: RingElem, S <: Ring} +function change_base_ring(R::S, A::SRow{T}) where {T <: NCRingElem, S <: NCRing} z = sparse_row(R) for (i, v) in A nv = R(v) @@ -321,7 +321,7 @@ end Given a sparse row $(a_i)_{i}$ and an index $j$ return $a_j$. """ -function Base.getindex(A::SRow{T}, i::Int) where {T <: RingElem} +function Base.getindex(A::SRow{T}, i::Int) where {T <: NCRingElem} i < 1 && error("Index must be positive") p = findfirst(isequal(i), A.pos) if p === nothing @@ -371,7 +371,7 @@ Base.IteratorSize(::SRow{T}) where T = Base.HasLength() @doc raw""" dot(A::SRow, B::SRow) -> RingElem -Returns the dot product of $A$ and $B$. +Returns the dot product of $A$ and $B$. Note the order matters in non-commutative case. """ function dot(A::SRow{T}, B::SRow{T}) where T @assert length(A) != 0 @@ -385,7 +385,7 @@ function dot(A::SRow{T}, B::SRow{T}) where T return v end if B.pos[b] == A.pos[a] - v += B.values[b] * A.values[a] + v += A.values[a] * B.values[b] end end return v @@ -447,11 +447,39 @@ end # Inplace scaling # ################################################################################ +@doc raw""" + scale_row!(a::SRow, b::NCRingElem) -> SRow +Returns the (left) product of $b \times a$ and reassigns the value of $a$ to this product. +For rows, the standard multiplication is from the left. +""" function scale_row!(a::SRow{T}, b::T) where T @assert !iszero(b) if isone(b) - return + return a + end + i = 1 + while i <= length(a) + a.values[i] = b*a.values[i] + if iszero(a.values[i]) + deleteat!(a.values, i) + deleteat!(a.pos, i) + else + i += 1 + end + end + return a +end + +@doc raw""" + scale_row_right!(a::SRow, b::NCRingElem) -> SRow + +Returns the (right) product of $a \times b$ and modifies $a$ to this product. +""" +function scale_row_right!(a::SRow{T}, b::T) where T + @assert !iszero(b) + if isone(b) + return a end i = 1 while i <= length(a) @@ -463,6 +491,11 @@ function scale_row!(a::SRow{T}, b::T) where T i += 1 end end + return a +end + +function scale_row_left!(a::SRow{T}, b::T) where T + return scale_row!(a,b) end ################################################################################ @@ -550,6 +583,7 @@ function *(A::SRow, b) return A*base_ring(A)(b) end +#left and right div not implemented function div(A::SRow{T}, b::T) where T B = sparse_row(base_ring(A)) if iszero(b) @@ -572,13 +606,20 @@ function div(A::SRow{T}, b::Integer) where T return div(A, base_ring(A)(b)) end -function divexact(A::SRow{T}, b::T; check::Bool=true) where T +@doc raw""" + divexact(A::SRow, b::RingElem; check::Bool = true) -> SRow + +Return $C$ such that $a = b \cdot C$. 
Calls the function `divexact_left(A,b;check)` +""" +divexact(A::SRow{T}, b::T; check::Bool=true) where T <: RingElem = divexact_left(A, b; check) + +function divexact_left(A::SRow{T}, b::T; check::Bool=true) where T <: NCRingElem B = sparse_row(base_ring(A)) if iszero(b) return error("Division by zero") end for (p,v) = A - nv = divexact(v, b; check=check) + nv = divexact_left(v, b; check=check) @assert !iszero(nv) push!(B.pos, p) push!(B.values, nv) @@ -590,7 +631,33 @@ function divexact(A::SRow{T}, b::Integer; check::Bool=true) where T if length(A.values) == 0 return deepcopy(A) end - return divexact(A, base_ring(A)(b); check=check) + return divexact_left(A, base_ring(A)(b), check=check) +end + +@doc raw""" + divexact_right(A::SRow, b::NCRingElem; check::Bool = true) -> SRow + +Return $C$ such that $A = C \cdot b. +""" +function divexact_right(A::SRow{T}, b::T; check::Bool=true) where T + B = sparse_row(base_ring(A)) + if iszero(b) + return error("Division by zero") + end + for (p,v) = A + nv = divexact_right(v, b; check=check) + @assert !iszero(nv) + push!(B.pos, p) + push!(B.values, nv) + end + return B +end + +function divexact_right(A::SRow{T}, b::Integer; check::Bool=true) where T + if length(A.values) == 0 + return deepcopy(A) + end + return divexact_right(A, base_ring(A)(b); check=check) end ################################################################################ @@ -617,11 +684,12 @@ end Returns the row $c A + B$. """ add_scaled_row(a::SRow{T}, b::SRow{T}, c::T) where {T} = add_scaled_row!(a, deepcopy(b), c) +add_left_scaled_row(a::SRow{T}, b::SRow{T}, c::T) where {T} = add_scaled_row!(a, deepcopy(b), c) @doc raw""" add_scaled_row!(A::SRow{T}, B::SRow{T}, c::T) -> SRow{T} -Returns the row $c A + B$ by changing $B$ in place. +Adds the left scaled row $c A$ to $B$. """ function add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T @assert a !== b @@ -663,7 +731,49 @@ function add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T return b end -add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T, tmp::SRow{T}) where T = add_scaled_row!(a, b, c) +add_scaled_row!(a::SRow{T}, b::SRow{T}, c::T, tmp::SRow{T}) where T = add_scaled_row!(a, b, c) + +add_right_scaled_row(a::SRow{T}, b::SRow{T}, c::T) where {T} = add_right_scaled_row!(a, deepcopy(b), c) + +@doc raw""" + add_right_scaled_row!(A::SRow{T}, B::SRow{T}, c::T) -> SRow{T} + +Return the right scaled row $c A$ to $B$ by changing $B$ in place. +""" + +function add_right_scaled_row!(a::SRow{T}, b::SRow{T}, c::T) where T + @assert a !== b + i = 1 + j = 1 + t = base_ring(a)() + while i <= length(a) && j <= length(b) + if a.pos[i] < b.pos[j] + insert!(b.pos, j, a.pos[i]) + insert!(b.values, j, a.values[i]*c) + i += 1 + j += 1 + elseif a.pos[i] > b.pos[j] + j += 1 + else + t = mul!(t, a.values[i], c) + b.values[j] = addeq!(b.values[j], t) + + if iszero(b.values[j]) + deleteat!(b.values, j) + deleteat!(b.pos, j) + else + j += 1 + end + i += 1 + end + end + while i <= length(a) + push!(b.pos, a.pos[i]) + push!(b.values, a.values[i]*c) + i += 1 + end + return b +end ################################################################################ # @@ -697,7 +807,7 @@ end Returns $A \cdot A^t$. 
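Editor's note to keep the conventions of this hunk straight: `add_scaled_row!(a, b, c)` overwrites `b` with `c*a + b` (the scalar acts on `a` from the left), while the new `add_right_scaled_row!` uses `a*c` instead. A toy sketch with invented values:

```julia
# Editor's sketch: add_scaled_row!(a, b, c) replaces b by c*a + b.
using Hecke

a = sparse_row(ZZ, [1, 2], ZZRingElem[1, 1])
b = sparse_row(ZZ, [2, 3], ZZRingElem[1, 4])
Hecke.add_scaled_row!(a, b, ZZ(2))                       # b becomes (2, 3, 4)
@assert b == sparse_row(ZZ, [1, 2, 3], ZZRingElem[2, 3, 4])
```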
""" function norm2(A::SRow{T}) where {T} - return sum([x * x for x in A.values]) + return sum(T[x * x for x in A.values]; init = zero(base_ring(A))) end ################################################################################ diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index ae5cac435d..9f6e84cac8 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -576,7 +576,8 @@ function init_kernel(_nullity, _dense_kernel, SG, with_light=false) else l = _nullity end - K = zeros(R, m, l) + M = matrix_space(R, m, l) + K = M() #initialisation ilight = 1 for i = 1:m From a9d4c9a94e6cc1acb407a16c61f9826975ac95e5 Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Wed, 4 Sep 2024 21:24:50 +0200 Subject: [PATCH 08/12] minor changes --- src/Sparse/StructuredGauss.jl | 117 +++------------------------------- 1 file changed, 9 insertions(+), 108 deletions(-) diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index 9f6e84cac8..c36cea872b 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -162,7 +162,6 @@ function build_part_ref!(SG) SG.is_single_col[j] = true SG.single_col[singleton_row_idx] = j SG.nlight-=1 - #SG.nlight<0 && print("nlight < 0 singleton case") SG.nsingle+=1 swap_i_into_base(singleton_row_idx, SG) SG.base+=1 @@ -180,7 +179,6 @@ function build_part_ref_field!(SG) if length(SG.col_list[j])==1 && SG.is_light_col[j] singleton_row_origin = only(SG.col_list[j]) singleton_row_idx = SG.col_list_permi[singleton_row_origin] - @assert !iszero(SG.A[singleton_row_idx, j]) scale_row!(SG.A, singleton_row_idx, inv(SG.A[singleton_row_idx, j])) for jj in SG.A[singleton_row_idx].pos if SG.is_light_col[jj] @@ -194,7 +192,6 @@ function build_part_ref_field!(SG) SG.is_single_col[j] = true SG.single_col[singleton_row_idx] = j SG.nlight-=1 - #SG.nlight<0 && print("nlight < 0 singleton case") SG.nsingle+=1 swap_i_into_base(singleton_row_idx, SG) SG.base+=1 @@ -312,7 +309,6 @@ function turn_heavy(SG) end end SG.nlight -= SG.heavy_ext - #SG.nlight<0 && print("nlight < 0 extension case") end function handle_new_light_weight!(i, SG) @@ -333,7 +329,8 @@ end function eliminate_and_update!(best_single_row, SG) @assert best_single_row != 0 - best_row = deepcopy(SG.A[best_single_row]) + @show best_single_row + @show best_row = deepcopy(SG.A[best_single_row]) best_col = find_light_entry(best_row.pos, SG.is_light_col) @assert length(SG.col_list[best_col]) > 1 best_val = SG.A[best_single_row, best_col] @@ -342,6 +339,7 @@ function eliminate_and_update!(best_single_row, SG) row_idx = 0 while length(best_col_idces) > 1 row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, SG) + best_single_row == 115 && @show row_idx @assert best_col_idces == SG.col_list[best_col] @assert row_idx > 0 @assert SG.col_list_perm[row_idx] in SG.col_list[best_col] @@ -379,7 +377,7 @@ function eliminate_and_update_field!(best_single_row, SG) end function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG::data_StructGauss) - for L_row in best_col_idces[end:-1:1] #right??? breaking condition missing? 
+ for L_row in best_col_idces[end:-1:1] row_idx = SG.col_list_permi[L_row] SG.A[row_idx] == best_row && continue row_idx < SG.base && continue @@ -389,12 +387,12 @@ function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG end function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) + @show best_val @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) - val = SG.A[row_idx, best_col] + val = SG.A[row_idx, best_col] @assert !iszero(val) #case !over_field && over_Z: - #g = gcd(lift(ZZ, val), lift(ZZ, best_val)) g = gcd(val, best_val) val_red = divexact(val, g) best_val_red = divexact(best_val, g) @@ -406,6 +404,7 @@ function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) deleteat!(SG.col_list[c], jj) end end + row_idx == 112 && @show val, best_val, g, val_red, best_val_red scale_row!(SG.A, row_idx, best_val_red) @assert !(0 in best_row.values) Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) @@ -427,7 +426,6 @@ function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, S deleteat!(SG.col_list[c], jj) end end - #scale_row!(SG.A, row_idx, best_val_red) @assert !(0 in SG.A[row_idx].values) Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val) @assert iszero(SG.A[row_idx, best_col]) @@ -450,13 +448,13 @@ end # ################################################################################ -function swap_entries(v, i,j) #swaps entries v[i], v[j] +function swap_entries(v, i,j) v[i],v[j] = v[j],v[i] end function swap_rows_perm(i, j, SG) if i != j - swap_rows!(SG.A, i, j) #swap!(A[i],A[j]) throws error later??? + swap_rows!(SG.A, i, j) swap_entries(SG.single_col, i, j) swap_entries(SG.col_list_perm, i, j) swap_entries(SG.col_list_permi, SG.col_list_perm[i], SG.col_list_perm[j]) @@ -555,19 +553,6 @@ function dense_kernel(SG) return size(_dense_kernel)[2], _dense_kernel end -#= -function dense_kernel_new(SG) - ST = sparse_matrix(base_ring(SG.A), 0, nrows(SG.Y)) - YT = transpose(SG.Y) - for j in SG.heavy_mapi - push!(ST, YT[j]) - end - S = transpose(ST) - _dense_kernel = kernel(matrix(S), side=:right) - return 1, _dense_kernel - end - =# - function init_kernel(_nullity, _dense_kernel, SG, with_light=false) R = base_ring(SG.A) m = ncols(SG.A) @@ -599,41 +584,6 @@ function init_kernel(_nullity, _dense_kernel, SG, with_light=false) return l, K end -#= -function compose_kernel_elem(_kernel, single_part, SG) - nfail = 0 - failure = [] - for i=SG.base-1:-1:1 - c = SG.single_col[i] - if c>0 - y = 0 - x = NaN - j=1 - while j <= length(SG.A[i]) - cc = SG.A[i].pos[j] - xx = SG.A[i].values[j] - if cc==c - x = xx - j+=1 - elseif !(cc in single_part) - y += (xx*_kernel[cc]) - j+=1 - else - break - end - end - if j <= length(SG.A[i]) - nfail +=1 - push!(failure, i) - else - _kernel[c]=-y*inv(x) - end - end - end - return _kernel, failure -end -=# - function compose_kernel(l, K, SG) R = base_ring(SG.A) n = nrows(SG.A) @@ -699,55 +649,6 @@ function compose_kernel_field(l, K, SG) return l, K end -#= -function kernel_matrix(_nullity, _dense_kernel, SG) - R = base_ring(SG.A) - n = nrows(SG.A) - m = ncols(SG.A) - l = _nullity+SG.nlight - K = zeros(R, m, l) - #initialisation - ilight = 1 - for i = 1:m - if SG.is_light_col[i] - @assert ilight <= SG.nlight - K[i, _nullity+ilight] = one(R) - ilight +=1 - else - j = SG.heavy_map[i] - if j>0 - for c = 1:_nullity - K[i,c] = _dense_kernel[j, c] - end - end - end - end -#compose kernel in single cols - for i = n:-1:1 - c = SG.single_col[i] - if c>0 - y = zeros(R,l) - 
for idx in 1:length(SG.A[i]) - cc = SG.A[i].pos[idx] - xx = SG.A[i].values[idx] - if cc == c - @assert isone(xx) - else - for k = 1:l - kern_c = K[cc,k] - !iszero(kern_c) && (y[k]-=xx*kern_c) - end - end - end - for k = 1:l - K[c, k] = y[k] - end - end - end - return(l, K) -end -=# - function delete_zero_rows!(A::SMat{T}) where T #where s denotes the first row where we wanna start for i=A.r:-1:1 if isempty(A[i].pos) From 2317f34e62a12c2d0ecee1d7c5985694fa7bf587 Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Mon, 16 Sep 2024 10:26:09 +0200 Subject: [PATCH 09/12] draft pr --- src/Sparse/StructuredGauss.jl | 69 ++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index 3b1ae4305e..bf5123725b 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -55,12 +55,27 @@ function _col_list(A) return col_list end +@doc_raw""" + structured_gauss(A::SMat{T}) where T <: RingElem -function structured_gauss(A::SMat{T}) where T <: RingElem +Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N +(consisting of column vectors) for the right nullspace of A, i.e. such that +AN is the zero matrix. +""" + +function structured_gauss(A::SMat{T}) where T <: RingElem SG = part_echolonize!(A) return compute_kernel(SG) end +@doc_raw""" + structured_gauss(A::SMat{T}) where T <: FieldElem + +Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N +(consisting of column vectors) for the right nullspace of A, i.e. such that +AN is the zero matrix. +""" + function structured_gauss_field(A::SMat{T}) where T <: FieldElem SG = part_echolonize_field!(A) return compute_kernel_field(SG) @@ -72,6 +87,9 @@ end # ################################################################################ +#Build an upper triangular matrix for as many columns as possible compromising +#the loss of sparsity during this process. + function part_echolonize!(A) A = delete_zero_rows!(A) n = nrows(A) @@ -93,7 +111,6 @@ function part_echolonize!(A) turn_heavy(SG) continue #while SG.nlight > 0 && SG.base <= SG.A.r end - eliminate_and_update!(best_single_row, SG) end return SG @@ -206,7 +223,7 @@ function find_best_single_row(SG) best_val = NaN best_len = -1 best_is_one = false - for i = SG.base:SG.single_row_limit-1 #here not the case in first loop + for i = SG.base:SG.single_row_limit-1 single_row = SG.A[i] single_row_len = length(single_row) w = SG.light_weight[i] @@ -266,14 +283,13 @@ function find_dense_cols(SG) col_len = length(SG.col_list[i]) if col_len > SG.heavy_col_len[1] if SG.heavy_ext == 1 - SG.heavy_col_idx[1] = i + SG.heavy_col_idx[1] = i SG.heavy_col_len[1] = col_len else - #jk = SG.heavy_ext for j = SG.heavy_ext:-1:2 - if col_len >= SG.heavy_col_len[j] + if col_len >= SG.heavy_col_len[j] for k = 1:j-1 - SG.heavy_col_idx[k] = SG.heavy_col_idx[k + 1]#replace by delete and insert!!! 
+ SG.heavy_col_idx[k] = SG.heavy_col_idx[k + 1] SG.heavy_col_len[k] = SG.heavy_col_len[k + 1] end SG.heavy_col_idx[j] = i @@ -304,7 +320,6 @@ function turn_heavy(SG) @assert SG.light_weight[i_now] > 0 SG.light_weight[i_now]-=1 handle_new_light_weight!(i_now, SG) - #test_Y(SG) end end SG.nlight -= SG.heavy_ext @@ -328,17 +343,15 @@ end function eliminate_and_update!(best_single_row, SG) @assert best_single_row != 0 - @show best_single_row - @show best_row = deepcopy(SG.A[best_single_row]) + best_row = deepcopy(SG.A[best_single_row]) best_col = find_light_entry(best_row.pos, SG.is_light_col) @assert length(SG.col_list[best_col]) > 1 - best_val = SG.A[best_single_row, best_col] + best_val = deepcopy(SG.A[best_single_row, best_col]) @assert !iszero(best_val) best_col_idces = SG.col_list[best_col] row_idx = 0 while length(best_col_idces) > 1 row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, SG) - best_single_row == 115 && @show row_idx @assert best_col_idces == SG.col_list[best_col] @assert row_idx > 0 @assert SG.col_list_perm[row_idx] in SG.col_list[best_col] @@ -386,12 +399,10 @@ function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG end function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) - @show best_val @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) val = SG.A[row_idx, best_col] @assert !iszero(val) - #case !over_field && over_Z: g = gcd(val, best_val) val_red = divexact(val, g) best_val_red = divexact(best_val, g) @@ -403,7 +414,6 @@ function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) deleteat!(SG.col_list[c], jj) end end - row_idx == 112 && @show val, best_val, g, val_red, best_val_red scale_row!(SG.A, row_idx, best_val_red) @assert !(0 in best_row.values) Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) @@ -415,7 +425,7 @@ end function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, SG) @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) - val = SG.A[row_idx, best_col] + val = SG.A[row_idx, best_col] @assert !iszero(val) @assert L_row in SG.col_list[best_col] for c in SG.A[row_idx].pos @@ -435,7 +445,7 @@ function update_after_addition!(L_row, row_idx::Int, best_col, SG::data_StructGa SG.light_weight[row_idx] = 0 for c in SG.A[row_idx].pos @assert c != best_col - SG.is_light_col[c] && sort!(push!(SG.col_list[c], L_row)) + SG.is_light_col[c] && sort!(push!(SG.col_list[c], L_row)) SG.is_light_col[c] && (SG.light_weight[row_idx]+=1) end return SG @@ -498,15 +508,18 @@ end # ################################################################################ +#Compute the kernel corresponding to the non echolonized part from above and +#insert backwards using the triangular part to get the full kernel. 
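Editor's note: taken together with the `structured_gauss` docstrings earlier in this hunk, the intended call pattern is sketched below. The data is made up, the new functions may not be exported yet, and the particular kernel basis returned depends on the column bookkeeping above.

```julia
# Editor's sketch: intended interface of the structured Gauss kernel routines.
using Hecke

A = sparse_matrix(matrix(GF(7), [1 2 3; 2 4 6]))   # rank 1, so the kernel has dimension 2
nu, N = structured_gauss_field(A)                  # may need the Hecke. prefix
# nu is the nullity and the columns of N span the right nullspace, i.e. A*N = 0.
```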
+ function compute_kernel(SG, with_light = true) - update_light_cols!(SG) + Hecke.update_light_cols!(SG) @assert SG.nlight > -1 - collect_dense_cols!(SG) - _nullity, _dense_kernel = dense_kernel(SG) - l, K = init_kernel(_nullity, _dense_kernel, SG, with_light) + Hecke.collect_dense_cols!(SG) + _nullity, _dense_kernel = Hecke.dense_kernel(SG) + l, K = Hecke.init_kernel(_nullity, _dense_kernel, SG, with_light) return compose_kernel(l, K, SG) end - + function compute_kernel_field(SG, with_light = true) update_light_cols!(SG) @assert SG.nlight > -1 @@ -533,7 +546,7 @@ function collect_dense_cols!(SG) for i = 1:m if !SG.is_light_col[i] && !SG.is_single_col[i] SG.heavy_map[i] = j - push!(SG.heavy_mapi,i) + push!(SG.heavy_mapi,i) j+=1 end end @@ -543,7 +556,7 @@ end function dense_kernel(SG) ST = sparse_matrix(base_ring(SG.A), 0, nrows(SG.Y)) - YT = transpose(SG.Y) + YT = transpose(delete_zero_rows!(SG.Y)) for j in SG.heavy_mapi push!(ST, YT[j]) end @@ -571,7 +584,7 @@ function init_kernel(_nullity, _dense_kernel, SG, with_light=false) K[i, _nullity+ilight] = one(R) ilight +=1 end - else + else j = SG.heavy_map[i] if j>0 for c = 1:_nullity @@ -648,7 +661,7 @@ function compose_kernel_field(l, K, SG) return l, K end -function delete_zero_rows!(A::SMat{T}) where T #where s denotes the first row where we wanna start +function delete_zero_rows!(A::SMat{T}) where T for i=A.r:-1:1 if isempty(A[i].pos) deleteat!(A.rows, i) @@ -657,3 +670,7 @@ function delete_zero_rows!(A::SMat{T}) where T #where s denotes the first row wh end return A end + + +#TODO: combine field and ring version +#TODO: make computation kernel in compute_kernel faster \ No newline at end of file From b265b959f3cbebfce6a4af9e512beaeca210fa2e Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Mon, 16 Sep 2024 10:26:09 +0200 Subject: [PATCH 10/12] draft pr --- src/Sparse/StructuredGauss.jl | 69 ++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index 3b1ae4305e..438cee114e 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -55,12 +55,27 @@ function _col_list(A) return col_list end +@doc raw""" + structured_gauss(A::SMat{T}) where T <: RingElem -function structured_gauss(A::SMat{T}) where T <: RingElem +Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N +(consisting of column vectors) for the right nullspace of A, i.e. such that +AN is the zero matrix. +""" + +function structured_gauss(A::SMat{T}) where T <: RingElem SG = part_echolonize!(A) return compute_kernel(SG) end +@doc raw""" + structured_gauss(A::SMat{T}) where T <: FieldElem + +Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N +(consisting of column vectors) for the right nullspace of A, i.e. such that +AN is the zero matrix. +""" + function structured_gauss_field(A::SMat{T}) where T <: FieldElem SG = part_echolonize_field!(A) return compute_kernel_field(SG) @@ -72,6 +87,9 @@ end # ################################################################################ +#Build an upper triangular matrix for as many columns as possible compromising +#the loss of sparsity during this process. 
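Editor's note: the row operation driving this partial echelonization over a field (cf. `eliminate_and_update_field!` and `add_to_eliminate_field!` further down) is the usual one: normalize the pivot row so the pivot is 1, then subtract the appropriate multiple from the target row. A toy sketch, not part of the patch:

```julia
# Editor's sketch of the field elimination step on two sparse rows over GF(7).
using Hecke

F = GF(7)
piv = sparse_row(F, [1, 2], [F(3), F(1)])   # pivot row, pivot value 3 in column 1
r   = sparse_row(F, [1, 3], [F(5), F(2)])   # row whose column-1 entry is to be cleared
Hecke.scale_row!(piv, inv(F(3)))            # pivot becomes 1
Hecke.add_scaled_row!(piv, r, -F(5))        # r := r - 5*piv, clearing column 1
@assert iszero(r[1])
```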
+ function part_echolonize!(A) A = delete_zero_rows!(A) n = nrows(A) @@ -93,7 +111,6 @@ function part_echolonize!(A) turn_heavy(SG) continue #while SG.nlight > 0 && SG.base <= SG.A.r end - eliminate_and_update!(best_single_row, SG) end return SG @@ -206,7 +223,7 @@ function find_best_single_row(SG) best_val = NaN best_len = -1 best_is_one = false - for i = SG.base:SG.single_row_limit-1 #here not the case in first loop + for i = SG.base:SG.single_row_limit-1 single_row = SG.A[i] single_row_len = length(single_row) w = SG.light_weight[i] @@ -266,14 +283,13 @@ function find_dense_cols(SG) col_len = length(SG.col_list[i]) if col_len > SG.heavy_col_len[1] if SG.heavy_ext == 1 - SG.heavy_col_idx[1] = i + SG.heavy_col_idx[1] = i SG.heavy_col_len[1] = col_len else - #jk = SG.heavy_ext for j = SG.heavy_ext:-1:2 - if col_len >= SG.heavy_col_len[j] + if col_len >= SG.heavy_col_len[j] for k = 1:j-1 - SG.heavy_col_idx[k] = SG.heavy_col_idx[k + 1]#replace by delete and insert!!! + SG.heavy_col_idx[k] = SG.heavy_col_idx[k + 1] SG.heavy_col_len[k] = SG.heavy_col_len[k + 1] end SG.heavy_col_idx[j] = i @@ -304,7 +320,6 @@ function turn_heavy(SG) @assert SG.light_weight[i_now] > 0 SG.light_weight[i_now]-=1 handle_new_light_weight!(i_now, SG) - #test_Y(SG) end end SG.nlight -= SG.heavy_ext @@ -328,17 +343,15 @@ end function eliminate_and_update!(best_single_row, SG) @assert best_single_row != 0 - @show best_single_row - @show best_row = deepcopy(SG.A[best_single_row]) + best_row = deepcopy(SG.A[best_single_row]) best_col = find_light_entry(best_row.pos, SG.is_light_col) @assert length(SG.col_list[best_col]) > 1 - best_val = SG.A[best_single_row, best_col] + best_val = deepcopy(SG.A[best_single_row, best_col]) @assert !iszero(best_val) best_col_idces = SG.col_list[best_col] row_idx = 0 while length(best_col_idces) > 1 row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, SG) - best_single_row == 115 && @show row_idx @assert best_col_idces == SG.col_list[best_col] @assert row_idx > 0 @assert SG.col_list_perm[row_idx] in SG.col_list[best_col] @@ -386,12 +399,10 @@ function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG end function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) - @show best_val @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) val = SG.A[row_idx, best_col] @assert !iszero(val) - #case !over_field && over_Z: g = gcd(val, best_val) val_red = divexact(val, g) best_val_red = divexact(best_val, g) @@ -403,7 +414,6 @@ function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) deleteat!(SG.col_list[c], jj) end end - row_idx == 112 && @show val, best_val, g, val_red, best_val_red scale_row!(SG.A, row_idx, best_val_red) @assert !(0 in best_row.values) Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) @@ -415,7 +425,7 @@ end function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, SG) @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) - val = SG.A[row_idx, best_col] + val = SG.A[row_idx, best_col] @assert !iszero(val) @assert L_row in SG.col_list[best_col] for c in SG.A[row_idx].pos @@ -435,7 +445,7 @@ function update_after_addition!(L_row, row_idx::Int, best_col, SG::data_StructGa SG.light_weight[row_idx] = 0 for c in SG.A[row_idx].pos @assert c != best_col - SG.is_light_col[c] && sort!(push!(SG.col_list[c], L_row)) + SG.is_light_col[c] && sort!(push!(SG.col_list[c], L_row)) SG.is_light_col[c] && (SG.light_weight[row_idx]+=1) end return SG @@ -498,15 
+508,18 @@ end # ################################################################################ +#Compute the kernel corresponding to the non echolonized part from above and +#insert backwards using the triangular part to get the full kernel. + function compute_kernel(SG, with_light = true) - update_light_cols!(SG) + Hecke.update_light_cols!(SG) @assert SG.nlight > -1 - collect_dense_cols!(SG) - _nullity, _dense_kernel = dense_kernel(SG) - l, K = init_kernel(_nullity, _dense_kernel, SG, with_light) + Hecke.collect_dense_cols!(SG) + _nullity, _dense_kernel = Hecke.dense_kernel(SG) + l, K = Hecke.init_kernel(_nullity, _dense_kernel, SG, with_light) return compose_kernel(l, K, SG) end - + function compute_kernel_field(SG, with_light = true) update_light_cols!(SG) @assert SG.nlight > -1 @@ -533,7 +546,7 @@ function collect_dense_cols!(SG) for i = 1:m if !SG.is_light_col[i] && !SG.is_single_col[i] SG.heavy_map[i] = j - push!(SG.heavy_mapi,i) + push!(SG.heavy_mapi,i) j+=1 end end @@ -543,7 +556,7 @@ end function dense_kernel(SG) ST = sparse_matrix(base_ring(SG.A), 0, nrows(SG.Y)) - YT = transpose(SG.Y) + YT = transpose(delete_zero_rows!(SG.Y)) for j in SG.heavy_mapi push!(ST, YT[j]) end @@ -571,7 +584,7 @@ function init_kernel(_nullity, _dense_kernel, SG, with_light=false) K[i, _nullity+ilight] = one(R) ilight +=1 end - else + else j = SG.heavy_map[i] if j>0 for c = 1:_nullity @@ -648,7 +661,7 @@ function compose_kernel_field(l, K, SG) return l, K end -function delete_zero_rows!(A::SMat{T}) where T #where s denotes the first row where we wanna start +function delete_zero_rows!(A::SMat{T}) where T for i=A.r:-1:1 if isempty(A[i].pos) deleteat!(A.rows, i) @@ -657,3 +670,7 @@ function delete_zero_rows!(A::SMat{T}) where T #where s denotes the first row wh end return A end + + +#TODO: combine field and ring version +#TODO: make computation kernel in compute_kernel faster From 445739d30c26c19a0f08601eb2fe5001de521317 Mon Sep 17 00:00:00 2001 From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com> Date: Thu, 28 Nov 2024 17:20:33 +0100 Subject: [PATCH 11/12] type declaration and separation of ZZ --- src/Sparse/StructuredGauss.jl | 109 ++++---- src/Sparse/ZZStructuredGauss.jl | 384 +++++++++++++++++++++++++++ src/Sparse/sparse_row_echelon.jl | 442 +++++++++++++++++++++++++++++++ 3 files changed, 888 insertions(+), 47 deletions(-) create mode 100644 src/Sparse/ZZStructuredGauss.jl create mode 100644 src/Sparse/sparse_row_echelon.jl diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl index 438cee114e..7e5e210e3b 100644 --- a/src/Sparse/StructuredGauss.jl +++ b/src/Sparse/StructuredGauss.jl @@ -1,27 +1,39 @@ -mutable struct data_StructGauss - A - Y - single_row_limit - base - nlight - nsingle - light_weight - col_list - col_list_perm #perm gives new ordering of original A[i] via their indices - col_list_permi #A[permi[i]]=A[i] before permutation = list of sigma(i) (recent position of former A[i]) - is_light_col - is_single_col - single_col #single_col[i] = c >= 0 if col c has its only entry in row i, i always recent position - heavy_ext - heavy_col_idx - heavy_col_len - heavy_mapi - heavy_map - - function data_StructGauss(A) +#TODO: declare return types +#TODO: sizehints +#TODO: pointer pointer pointer +#TODO: sizehint for heavy stuff can be given later, so don't initialize immediately +#TODO: new struct for second part + +#= +PROBLEMS: +- why more nnzs in SG.A+SG.Y than in PR.A? +- maximum of PR.A greater as in SG.Y? 
+=# + +mutable struct data_StructGauss{T} + A::SMat{T} + Y::SMat{T} + single_row_limit::Int64 + base::Int64 + nlight::Int64 + nsingle::Int64 + light_weight::Vector{Int64} + col_list::Vector{Vector{Int64}} + col_list_perm::Vector{Int64} #perm gives new ordering of original A[i] via their indices + col_list_permi::Vector{Int64} #A[permi[i]]=A[i] before permutation = list of sigma(i) (recent position of former A[i]) + is_light_col::Vector{Bool} + is_single_col::Vector{Bool} + single_col::Vector{Int64} #single_col[i] = c >= 0 if col c has its only entry in row i, i always recent position + heavy_ext::Int64 + heavy_col_idx::Vector{Int64} + heavy_col_len::Vector{Int64} + #heavy_mapi::Vector{Int64} + #heavy_map::Vector{Int64} + + function data_StructGauss(A::SMat{T}) where T Y = sparse_matrix(base_ring(A), 0, ncols(A)) col_list = _col_list(A) - return new(A, + return new{T}(A, Y, 1, 1, @@ -37,12 +49,13 @@ mutable struct data_StructGauss 0, Int[], Int[], - Int[], - fill(0, ncols(A))) + #Int[], + #fill(0, ncols(A)) + ) end end -function _col_list(A) +function _col_list(A::SMat{T}) where T n = nrows(A) m = ncols(A) col_list = [Array{Int64}([]) for i = 1:m] @@ -63,13 +76,13 @@ Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N AN is the zero matrix. """ -function structured_gauss(A::SMat{T}) where T <: RingElem +function structured_gauss(A::SMat{T}) where T <: ZZRingElem SG = part_echolonize!(A) return compute_kernel(SG) end @doc raw""" - structured_gauss(A::SMat{T}) where T <: FieldElem + structured_gauss_field(A::SMat{T}) where T <: FieldElem Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N (consisting of column vectors) for the right nullspace of A, i.e. such that @@ -90,7 +103,7 @@ end #Build an upper triangular matrix for as many columns as possible compromising #the loss of sparsity during this process. -function part_echolonize!(A) +function part_echolonize!(A::SMat{T}) where T <: ZZRingElem A = delete_zero_rows!(A) n = nrows(A) m = ncols(A) @@ -143,7 +156,7 @@ function part_echolonize_field!(A) return SG end -function single_rows_to_top!(SG) +function single_rows_to_top!(SG::data_StructGauss) for i = 1:nrows(SG.A) len = length(SG.A[i]) @assert !iszero(len) @@ -158,7 +171,7 @@ function single_rows_to_top!(SG) return SG end -function build_part_ref!(SG) +function build_part_ref!(SG::data_StructGauss) queue = collect(ncols(SG.A):-1:1) while !isempty(queue) queue_new = Int[] @@ -187,7 +200,7 @@ function build_part_ref!(SG) end end -function build_part_ref_field!(SG) +function build_part_ref_field!(SG::data_StructGauss) queue = collect(ncols(SG.A):-1:1) while !isempty(queue) queue_new = Int[] @@ -217,7 +230,7 @@ function build_part_ref_field!(SG) end end -function find_best_single_row(SG) +function find_best_single_row(SG::data_StructGauss) best_single_row = -1 best_col = NaN best_val = NaN @@ -255,7 +268,7 @@ function find_best_single_row(SG) return best_single_row end -function find_best_single_row_field(SG) +function find_best_single_row_field(SG::data_StructGauss) best_single_row = -1 best_len = -1 for i = SG.base:SG.single_row_limit-1 @@ -271,7 +284,7 @@ function find_best_single_row_field(SG) return best_single_row end -function find_dense_cols(SG) +function find_dense_cols(SG::data_StructGauss) m = ncols(SG.A) nheavy = m - (SG.nlight + SG.nsingle) nheavy == 0 ? 
SG.heavy_ext = max(div(SG.nlight,20),1) : SG.heavy_ext = max(div(SG.nlight,100),1) @@ -304,7 +317,7 @@ function find_dense_cols(SG) @assert light_cols == findall(x->SG.is_light_col[x], 1:m) end -function turn_heavy(SG) +function turn_heavy(SG::data_StructGauss) for j = 1:SG.heavy_ext c = SG.heavy_col_idx[j] if c<0 @@ -325,7 +338,7 @@ function turn_heavy(SG) SG.nlight -= SG.heavy_ext end -function handle_new_light_weight!(i, SG) +function handle_new_light_weight!(i::Int64, SG::data_StructGauss) w = SG.light_weight[i] if w == 0 swap_i_into_base(i, SG) @@ -341,7 +354,7 @@ function handle_new_light_weight!(i, SG) return SG end -function eliminate_and_update!(best_single_row, SG) +function eliminate_and_update!(best_single_row::Int64, SG::data_StructGauss) @assert best_single_row != 0 best_row = deepcopy(SG.A[best_single_row]) best_col = find_light_entry(best_row.pos, SG.is_light_col) @@ -363,7 +376,7 @@ function eliminate_and_update!(best_single_row, SG) return SG end -function eliminate_and_update_field!(best_single_row, SG) +function eliminate_and_update_field!(best_single_row::Int64, SG::data_StructGauss) @assert best_single_row != 0 best_col = find_light_entry(SG.A[best_single_row].pos, SG.is_light_col) @assert length(SG.col_list[best_col]) > 1 @@ -371,7 +384,7 @@ function eliminate_and_update_field!(best_single_row, SG) @assert !iszero(best_val) scale_row!(SG.A, best_single_row, inv(best_val)) best_val = SG.A[best_single_row, best_col] - @assert isone(best_val) + @assert isone(best_val)A.nnz best_row = deepcopy(SG.A[best_single_row]) best_col_idces = SG.col_list[best_col] row_idx = 0 @@ -388,7 +401,7 @@ function eliminate_and_update_field!(best_single_row, SG) return SG end -function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG::data_StructGauss) +function find_row_to_add_on(row_idx::Int64, best_row::SRow{T}, best_col_idces::Vector{Int64}, SG::data_StructGauss) where T for L_row in best_col_idces[end:-1:1] row_idx = SG.col_list_permi[L_row] SG.A[row_idx] == best_row && continue @@ -398,7 +411,7 @@ function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, SG return row_idx end -function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) +function add_to_eliminate!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val::ZZRingElem, SG::data_StructGauss) where T <: ZZRingElem @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) val = SG.A[row_idx, best_col] @@ -416,13 +429,15 @@ function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) end scale_row!(SG.A, row_idx, best_val_red) @assert !(0 in best_row.values) + SG.A.nnz -= length(SG.A[row_idx]) Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) + SG.A.nnz += length(SG.A[row_idx]) @assert iszero(SG.A[row_idx, best_col]) @assert !(0 in SG.A[row_idx].values) return SG end -function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, SG) +function add_to_eliminate_field!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val, SG::data_StructGauss) where T @assert L_row in SG.col_list[best_col] @assert !(0 in SG.A[row_idx].values) val = SG.A[row_idx, best_col] @@ -441,7 +456,7 @@ function add_to_eliminate_field!(L_row, row_idx, best_row, best_col, best_val, S return SG end -function update_after_addition!(L_row, row_idx::Int, best_col, SG::data_StructGauss) +function update_after_addition!(L_row::Int64, row_idx::Int64, best_col::Int64, SG::data_StructGauss) 
SG.light_weight[row_idx] = 0 for c in SG.A[row_idx].pos @assert c != best_col @@ -457,11 +472,11 @@ end # ################################################################################ -function swap_entries(v, i,j) +function swap_entries(v::Vector{Int64}, i::Int64, j::Int64) v[i],v[j] = v[j],v[i] end -function swap_rows_perm(i, j, SG) +function swap_rows_perm(i::Int64, j, SG::data_StructGauss) if i != j swap_rows!(SG.A, i, j) swap_entries(SG.single_col, i, j) @@ -471,7 +486,7 @@ function swap_rows_perm(i, j, SG) end end -function swap_i_into_base(i, SG::data_StructGauss) +function swap_i_into_base(i::Int64, SG::data_StructGauss) if i < SG.single_row_limit swap_rows_perm(i, SG.base, SG) else @@ -491,7 +506,7 @@ function find_light_entry(position_array::Vector{Int64}, is_light_col::Vector{Bo end end -function move_into_Y(i, SG::data_StructGauss) +function move_into_Y(i::Int64, SG::data_StructGauss) @assert i == SG.base push!(SG.Y, deepcopy(SG.A[SG.base])) for base_c in SG.A[SG.base].pos diff --git a/src/Sparse/ZZStructuredGauss.jl b/src/Sparse/ZZStructuredGauss.jl new file mode 100644 index 0000000000..91dba17425 --- /dev/null +++ b/src/Sparse/ZZStructuredGauss.jl @@ -0,0 +1,384 @@ +#TODO: sizehints +#TODO: pointer pointer pointer +#TODO: sizehint for heavy stuff can be given later, so don't initialize immediately +#TODO: new struct for second part +#TODO: add sizehint for Y? + +#= +PROBLEMS: +- why more nnzs in SG.A+SG.Y than in PR.A? +- maximum of PR.A greater as in SG.Y? +=# + +mutable struct data_ZZStructGauss{T} + A::SMat{T} + Y::SMat{T} + single_row_limit::Int64 + base::Int64 + nlight::Int64 + nsingle::Int64 + light_weight::Vector{Int64} + col_list::Vector{Vector{Int64}} + col_list_perm::Vector{Int64} #perm gives new ordering of original A[i] via their indices + col_list_permi::Vector{Int64} #A[permi[i]]=A[i] before permutation = list of sigma(i) (recent position of former A[i]) + is_light_col::Vector{Bool} + is_single_col::Vector{Bool} + single_col::Vector{Int64} #single_col[i] = c >= 0 if col c has its only entry in row i, i always recent position + heavy_ext::Int64 + heavy_col_idx::Vector{Int64} + heavy_col_len::Vector{Int64} + + function data_ZZStructGauss(A::SMat{T}) where T <: ZZRingElem + Y = sparse_matrix(base_ring(A), 0, ncols(A)) + col_list = _col_list(A) + return new{T}(A, + Y, + 1, + 1, + ncols(A), + 0, + [length(A[i]) for i in 1:nrows(A)], + col_list, + collect(1:nrows(A)), + collect(1:nrows(A)), + fill(true, ncols(A)), + fill(false, ncols(A)), + fill(0, nrows(A)), + 0, + Int[], + Int[]) + end + end + + function _col_list(A::SMat{T})::Vector{Vector{Int64}} where T <: ZZRingElem + n = nrows(A) + m = ncols(A) + col_list = [Array{Int64}([]) for i = 1:m] + for i in 1:n + for c in A[i].pos + col = col_list[c] + push!(col, i) + end + end + return col_list + end + + @doc raw""" + structured_gauss(A::SMat{T}) where T <: ZZRingElem + + Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N + (consisting of column vectors) for the right nullspace of A, i.e. such that + AN is the zero matrix. + """ + + function structured_gauss(A::SMat{T}) where T <: ZZRingElem + SG = part_echolonize!(A) + return compute_kernel(SG) + end + + ################################################################################ + # + # Partial Echolonization + # + ################################################################################ + + #Build an upper triangular matrix for as many columns as possible compromising + #the loss of sparsity during this process. 
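Editor's note: over the integers the pivot cannot simply be inverted, so `add_to_eliminate!` below uses a gcd-balanced combination: the target row is scaled by `best_val/g` and then `val/g` times the pivot row is subtracted, where `g = gcd(val, best_val)`. A toy sketch of that single step:

```julia
# Editor's sketch of the fraction-free elimination step over ZZ.
using Hecke

piv = sparse_row(ZZ, [1, 2], ZZRingElem[4, 1])      # pivot value 4 in column 1
r   = sparse_row(ZZ, [1, 3], ZZRingElem[6, 2])      # entry 6 in column 1
g   = gcd(ZZ(6), ZZ(4))                             # g = 2
Hecke.scale_row!(r, divexact(ZZ(4), g))             # r := (best_val/g) * r = 2*r
Hecke.add_scaled_row!(piv, r, -divexact(ZZ(6), g))  # r := r - (val/g)*piv = r - 3*piv
@assert iszero(r[1])                                # column 1 is now cleared in r
```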
+ + function part_echolonize!(A::SMat{T})::data_ZZStructGauss where T <: ZZRingElem + A = delete_zero_rows!(A) + n = nrows(A) + m = ncols(A) + SG = data_ZZStructGauss(A) + single_rows_to_top!(SG) + + while SG.nlight > 0 && SG.base <= n + build_part_ref!(SG) + for i = 1:m + SG.is_light_col[i] && @assert length(SG.col_list[i]) != 1 + end + (SG.nlight == 0 || SG.base > n) && break + best_single_row = find_best_single_row(SG) + best_single_row < 0 && @assert(SG.base == SG.single_row_limit) + + if best_single_row < 0 + find_dense_cols(SG) + turn_heavy(SG) + continue #while SG.nlight > 0 && SG.base <= SG.A.r + end + eliminate_and_update!(best_single_row, SG) + end + return SG + end + + + function single_rows_to_top!(SG::data_ZZStructGauss)::data_ZZStructGauss + for i = 1:nrows(SG.A) + len = length(SG.A[i]) + @assert !iszero(len) + if len == 1 + @assert SG.single_row_limit <=i + if i != SG.single_row_limit + swap_rows_perm(i, SG.single_row_limit, SG) + end + SG.single_row_limit +=1 + end + end + return SG + end + + function build_part_ref!(SG::data_ZZStructGauss) + queue = collect(ncols(SG.A):-1:1) + while !isempty(queue) + queue_new = Int[] + for j in queue + if length(SG.col_list[j])==1 && SG.is_light_col[j] + singleton_row_origin = only(SG.col_list[j]) + singleton_row_idx = SG.col_list_permi[singleton_row_origin] + for jj in SG.A[singleton_row_idx].pos + if SG.is_light_col[jj] + @assert singleton_row_origin in SG.col_list[jj] + j_idx = findfirst(isequal(singleton_row_origin), SG.col_list[jj]) + deleteat!(SG.col_list[jj],j_idx) + length(SG.col_list[jj]) == 1 && push!(queue_new, jj) + end + end + SG.is_light_col[j] = false + SG.is_single_col[j] = true + SG.single_col[singleton_row_idx] = j + SG.nlight-=1 + SG.nsingle+=1 + swap_i_into_base(singleton_row_idx, SG) + SG.base+=1 + end + end + queue = queue_new + end + end + + function find_best_single_row(SG::data_ZZStructGauss)::Int64 + best_single_row = -1 + best_col = NaN + best_val = NaN + best_len = -1 + best_is_one = false + for i = SG.base:SG.single_row_limit-1 + single_row = SG.A[i] + single_row_len = length(single_row) + w = SG.light_weight[i] + @assert w == 1 + j_light = find_light_entry(single_row.pos, SG.is_light_col) + single_row_val = SG.A[i, j_light] + @assert length(SG.col_list[j_light]) > 1 + is_one = isone(single_row_val)||isone(-single_row_val) + if best_single_row < 0 + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = is_one + best_val = single_row_val + elseif !best_is_one && is_one + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = true + best_val = single_row_val + elseif (is_one == best_is_one && single_row_len < best_len) + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = is_one + best_val = single_row_val + end + end + return best_single_row + end + + function find_dense_cols(SG::data_ZZStructGauss) + m = ncols(SG.A) + nheavy = m - (SG.nlight + SG.nsingle) + nheavy == 0 ? 
SG.heavy_ext = max(div(SG.nlight,20),1) : SG.heavy_ext = max(div(SG.nlight,100),1) + SG.heavy_col_idx = fill(-1, SG.heavy_ext) #indices (descending order for same length) + SG.heavy_col_len = fill(-1, SG.heavy_ext)#length of cols in heavy_idcs (ascending) + light_cols = findall(x->SG.is_light_col[x], 1:m) + for i = m:-1:1 + if SG.is_light_col[i] + col_len = length(SG.col_list[i]) + if col_len > SG.heavy_col_len[1] + if SG.heavy_ext == 1 + SG.heavy_col_idx[1] = i + SG.heavy_col_len[1] = col_len + else + for j = SG.heavy_ext:-1:2 + if col_len >= SG.heavy_col_len[j] + for k = 1:j-1 + SG.heavy_col_idx[k] = SG.heavy_col_idx[k + 1] + SG.heavy_col_len[k] = SG.heavy_col_len[k + 1] + end + SG.heavy_col_idx[j] = i + SG.heavy_col_len[j] = col_len + break + end + end + end + end + end + end + @assert light_cols == findall(x->SG.is_light_col[x], 1:m) + end + + function turn_heavy(SG::data_ZZStructGauss) + for j = 1:SG.heavy_ext + c = SG.heavy_col_idx[j] + if c<0 + continue + end + SG.is_light_col[c] = false + lt2hvy_col = SG.col_list[c] + lt2hvy_len = length(lt2hvy_col) + @assert lt2hvy_len == length(SG.col_list[c]) + for k = 1:lt2hvy_len + i_origin = lt2hvy_col[k] + i_now = SG.col_list_permi[i_origin] + @assert SG.light_weight[i_now] > 0 + SG.light_weight[i_now]-=1 + handle_new_light_weight!(i_now, SG) + end + end + SG.nlight -= SG.heavy_ext + end + + function handle_new_light_weight!(i::Int64, SG::data_ZZStructGauss)::data_ZZStructGauss + w = SG.light_weight[i] + if w == 0 + swap_i_into_base(i, SG) + SG.single_col[SG.base] = 0 + move_into_Y(SG.base, SG) + SG.base+=1 + elseif w == 1 + if i > SG.single_row_limit + swap_rows_perm(i, SG.single_row_limit, SG) + end + SG.single_row_limit += 1 + end + return SG + end + + function eliminate_and_update!(best_single_row::Int64, SG::data_ZZStructGauss)::data_ZZStructGauss + @assert best_single_row != 0 + best_row = deepcopy(SG.A[best_single_row]) + best_col = find_light_entry(best_row.pos, SG.is_light_col) + @assert length(SG.col_list[best_col]) > 1 + best_val = deepcopy(SG.A[best_single_row, best_col]) + @assert !iszero(best_val) + best_col_idces = SG.col_list[best_col] + row_idx = 0 + while length(best_col_idces) > 1 + row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, SG) + @assert best_col_idces == SG.col_list[best_col] + @assert row_idx > 0 + @assert SG.col_list_perm[row_idx] in SG.col_list[best_col] + L_row = SG.col_list_perm[row_idx] + add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, SG) + update_after_addition!(L_row, row_idx, best_col, SG) + handle_new_light_weight!(row_idx, SG) + end + return SG + end + + function find_row_to_add_on(row_idx::Int64, best_row::SRow{T}, best_col_idces::Vector{Int64}, SG::data_ZZStructGauss)::Int64 where T <: ZZRingElem + for L_row in best_col_idces[end:-1:1] + row_idx = SG.col_list_permi[L_row] + SG.A[row_idx] == best_row && continue + row_idx < SG.base && continue + break + end + return row_idx + end + + function add_to_eliminate!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val::ZZRingElem, SG::data_ZZStructGauss)::data_ZZStructGauss where T <: ZZRingElem + @assert L_row in SG.col_list[best_col] + @assert !(0 in SG.A[row_idx].values) + val = SG.A[row_idx, best_col] + @assert !iszero(val) + g = gcd(val, best_val) + val_red = divexact(val, g) + best_val_red = divexact(best_val, g) + @assert L_row in SG.col_list[best_col] + for c in SG.A[row_idx].pos + @assert !isempty(SG.col_list[c]) + if SG.is_light_col[c] + jj = findfirst(isequal(L_row), SG.col_list[c]) + 
deleteat!(SG.col_list[c], jj) + end + end + scale_row!(SG.A, row_idx, best_val_red) + @assert !(0 in best_row.values) + SG.A.nnz -= length(SG.A[row_idx]) + Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val_red) + SG.A.nnz += length(SG.A[row_idx]) + @assert iszero(SG.A[row_idx, best_col]) + @assert !(0 in SG.A[row_idx].values) + return SG + end + + function update_after_addition!(L_row::Int64, row_idx::Int64, best_col::Int64, SG::data_ZZStructGauss)::data_ZZStructGauss + SG.light_weight[row_idx] = 0 + for c in SG.A[row_idx].pos + @assert c != best_col + SG.is_light_col[c] && sort!(push!(SG.col_list[c], L_row)) + SG.is_light_col[c] && (SG.light_weight[row_idx]+=1) + end + return SG + end + + ################################################################################ + # + # Small Auxiliary Functions + # + ################################################################################ + + function swap_entries(v::Vector{Int64}, i::Int64, j::Int64) + v[i],v[j] = v[j],v[i] + end + + function swap_rows_perm(i::Int64, j, SG::data_ZZStructGauss) + if i != j + swap_rows!(SG.A, i, j) + swap_entries(SG.single_col, i, j) + swap_entries(SG.col_list_perm, i, j) + swap_entries(SG.col_list_permi, SG.col_list_perm[i], SG.col_list_perm[j]) + swap_entries(SG.light_weight, i, j) + end + end + + function swap_i_into_base(i::Int64, SG::data_ZZStructGauss) + if i < SG.single_row_limit + swap_rows_perm(i, SG.base, SG) + else + if i != SG.single_row_limit + swap_rows_perm(SG.base, SG.single_row_limit, SG) + end + SG.single_row_limit +=1 + swap_rows_perm(SG.base, i, SG) + end + end + + function find_light_entry(position_array::Vector{Int64}, is_light_col::Vector{Bool})::Int64 + for j in position_array[end:-1:1] + if is_light_col[j] + return j + end + end + end + + function move_into_Y(i::Int64, SG::data_ZZStructGauss) + @assert i == SG.base + push!(SG.Y, deepcopy(SG.A[SG.base])) + for base_c in SG.A[SG.base].pos + @assert !SG.is_light_col[base_c] + @assert !isempty(SG.col_list[base_c]) + end + SG.A.nnz-=length(SG.A[SG.base]) + empty!(SG.A[SG.base].pos), empty!(SG.A[SG.base].values) + end \ No newline at end of file diff --git a/src/Sparse/sparse_row_echelon.jl b/src/Sparse/sparse_row_echelon.jl new file mode 100644 index 0000000000..689ddd0f5a --- /dev/null +++ b/src/Sparse/sparse_row_echelon.jl @@ -0,0 +1,442 @@ +mutable struct data_PartRef{T} + A::SMat{T} + R + det_factor::T #tracks change in det + single_row_limit::Int64 #idx first row after rows with one light entry + base::Int64 #idx for first row after ref + dense_start::Int64 #last row before dense part + nlight::Int64 #number of light cols + nsingle::Int64 #number of rows in part ref + light_weight::Vector{Int64} #number of entries in light cols for all rows in A (current ordering) + col_list::Vector{Vector{Int64}} #lists entries outside of triangular form column-wise + col_list_perm::Vector{Int64} #perm gives new ordering of original A[i] via their indices + col_list_permi::Vector{Int64} #A[permi[i]]=A[i] before permutation = list of sigma(i) (recent position of former A[i]) + is_light_col::Vector{Bool} + is_single_col::Vector{Bool} + single_col::Vector{Int64} #single_col[i] = c >= 0 if col c has its only entry in row i, i always recent position + heavy_ext::Int64 + heavy_col_idx::Vector{Int64} + heavy_col_len::Vector{Int64} + heavy_mapi::Vector{Int64} + heavy_map::Vector{Int64} + + function data_PartRef(A::SMat{T}) where T + col_list = _col_list(A) + return new{T}(A, + base_ring(A), + one(base_ring(A)), + 1, + 1, + nrows(A), + ncols(A), + 0, + 
[length(A[i]) for i in 1:nrows(A)], + col_list, + collect(1:nrows(A)), + collect(1:nrows(A)), + fill(true, ncols(A)), + fill(false, ncols(A)), + fill(0, nrows(A)), + 0, + Int[], + Int[], + Int[], + fill(0, ncols(A))) + end +end + +function _col_list(A) + n = nrows(A) + m = ncols(A) + col_list = [Array{Int64}([]) for i = 1:m] + for i in 1:n + for c in A[i].pos + col = col_list[c] + push!(col, i) + end + end + return col_list +end + +################################################################################ +# +# Compute determinant - given matrix in part ref +# +################################################################################ + +function PR_det(A) + PR = part_ref!(A) + D = compose_dense_matrix(PR) + d = det(D) + for i = 1:PR.nsingle + d*=PR.A[i, PR.single_col[i]] + end + return div(d, PR.det_factor) +end + +################################################################################ +# +# Partial Echolonization with determinant tracking +# +################################################################################ + +#Build an upper triangular matrix for as many columns as possible compromising +#the loss of sparsity during this process. +#Goal: det computation + +function part_ref!(A) + n = nrows(A) + m = ncols(A) + PR = data_PartRef(A) + #for determinant calculations: + @assert n == m + #look for zero_rows: + for i = 1:n + if iszero(length(A)) + PR.det_factor = zero(PR.R) + println("determinant is zero") + return PR + end + end + + single_rows_to_top!(PR) + + while PR.nlight > 0 && PR.base <= PR.dense_start + build_part_ref!(PR) + for i = 1:m + PR.is_light_col[i] && @assert length(PR.col_list[i]) != 1 + end + (PR.nlight == 0 || PR.base > n) && break + best_single_row = find_best_single_row(PR) + best_single_row < 0 && @assert(PR.base == PR.single_row_limit) + + if best_single_row < 0 + find_dense_cols(PR) + turn_heavy(PR) + continue #while PR.nlight > 0 && PR.base <= PR.dense_start + end + eliminate_and_update!(best_single_row, PR) + end + + #assert that zero rows have been catched before: + for i = 1:PR.A.r + @assert !iszero(length(PR.A[i])) + end + + return PR +end + +function single_rows_to_top!(PR) + for i = 1:nrows(PR.A) + len = length(PR.A[i]) + @assert !iszero(len) + if len == 1 + @assert PR.single_row_limit <=i + if i != PR.single_row_limit + swap_rows_perm(i, PR.single_row_limit, PR) + end + PR.single_row_limit +=1 + end + end + return PR +end + +function build_part_ref!(PR) + queue = collect(ncols(PR.A):-1:1) + while !isempty(queue) + queue_new = Int[] + for j in queue + if length(PR.col_list[j]) == 1 && PR.is_light_col[j] + singleton_row_origin = only(PR.col_list[j]) + singleton_row_idx = PR.col_list_permi[singleton_row_origin] + for jj in PR.A[singleton_row_idx].pos + if PR.is_light_col[jj] + @assert singleton_row_origin in PR.col_list[jj] + j_idx = findfirst(isequal(singleton_row_origin), PR.col_list[jj]) + deleteat!(PR.col_list[jj],j_idx) + length(PR.col_list[jj]) == 1 && push!(queue_new, jj) + end + end + PR.is_light_col[j] = false + PR.is_single_col[j] = true + PR.single_col[singleton_row_idx] = j + PR.nlight-=1 + PR.nsingle+=1 + swap_i_into_base(singleton_row_idx, PR) + PR.base+=1 + end + end + queue = queue_new + end +end + +function find_best_single_row(PR) + best_single_row = -1 + best_col = NaN + best_val = NaN + best_len = -1 + best_is_one = false + for i = PR.base:PR.single_row_limit-1 + single_row = PR.A[i] + single_row_len = length(single_row) + w = PR.light_weight[i] + @assert w == 1 + j_light = find_light_entry(single_row.pos, 
PR.is_light_col) + single_row_val = PR.A[i, j_light] + @assert length(PR.col_list[j_light]) > 1 + is_one = isone(single_row_val)||isone(-single_row_val) + if best_single_row < 0 + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = is_one + best_val = single_row_val + elseif !best_is_one && is_one + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = true + best_val = single_row_val + elseif (is_one == best_is_one && single_row_len < best_len) + best_single_row = i + best_col = j_light + best_len = single_row_len + best_is_one = is_one + best_val = single_row_val + end + end + return best_single_row +end + +function find_dense_cols(PR) + m = ncols(PR.A) + nheavy = m - (PR.nlight + PR.nsingle) + nheavy == 0 ? PR.heavy_ext = max(div(PR.nlight,20),1) : PR.heavy_ext = max(div(PR.nlight,100),1) + PR.heavy_col_idx = fill(-1, PR.heavy_ext) #indices (descending order for same length) + PR.heavy_col_len = fill(-1, PR.heavy_ext)#length of cols in heavy_idcs (ascending) + light_cols = findall(x->PR.is_light_col[x], 1:m) + for i = m:-1:1 + if PR.is_light_col[i] + col_len = length(PR.col_list[i]) + if col_len > PR.heavy_col_len[1] + if PR.heavy_ext == 1 + PR.heavy_col_idx[1] = i + PR.heavy_col_len[1] = col_len + else + for j = PR.heavy_ext:-1:2 + if col_len >= PR.heavy_col_len[j] + for k = 1:j-1 + PR.heavy_col_idx[k] = PR.heavy_col_idx[k + 1] + PR.heavy_col_len[k] = PR.heavy_col_len[k + 1] + end + PR.heavy_col_idx[j] = i + PR.heavy_col_len[j] = col_len + break + end + end + end + end + end + end + @assert light_cols == findall(x->PR.is_light_col[x], 1:m) +end + +function turn_heavy(PR) + for j = 1:PR.heavy_ext + c = PR.heavy_col_idx[j] + if c<0 + continue + end + PR.is_light_col[c] = false + lt2hvy_col = PR.col_list[c] + lt2hvy_len = length(lt2hvy_col) + @assert lt2hvy_len == length(PR.col_list[c]) + for k = 1:lt2hvy_len + i_origin = lt2hvy_col[k] + i_now = PR.col_list_permi[i_origin] + @assert PR.light_weight[i_now] > 0 + PR.light_weight[i_now]-=1 + handle_new_light_weight!(i_now, PR) + end + end + PR.nlight -= PR.heavy_ext +end + +function handle_new_light_weight!(i, PR) + w = PR.light_weight[i] + if w == 0 + if iszero(length(PR.A[i])) + PR.det_factor = zero(PR.R) + println("determinant is zero") + return PR + end + move_to_end(i, PR) + elseif w == 1 + if i > PR.single_row_limit + swap_rows_perm(i, PR.single_row_limit, PR) + end + PR.single_row_limit += 1 + end + return PR +end + +function eliminate_and_update!(best_single_row, PR) + @assert best_single_row != 0 + best_row = deepcopy(PR.A[best_single_row]) + best_col = find_light_entry(best_row.pos, PR.is_light_col) + @assert length(PR.col_list[best_col]) > 1 + best_val = deepcopy(PR.A[best_single_row, best_col]) + @assert !iszero(best_val) + best_col_idces = PR.col_list[best_col] + row_idx = 0 + while length(best_col_idces) > 1 + row_idx = find_row_to_add_on(row_idx, best_row, best_col_idces, PR) + @assert best_col_idces == PR.col_list[best_col] + @assert row_idx > 0 + @assert PR.col_list_perm[row_idx] in PR.col_list[best_col] + L_row = PR.col_list_perm[row_idx] + add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, PR) + update_after_addition!(L_row, row_idx, best_col, PR) + handle_new_light_weight!(row_idx, PR) + end + return PR +end + +function find_row_to_add_on(row_idx, best_row, best_col_idces::Vector{Int64}, PR::data_PartRef) + for L_row in best_col_idces[end:-1:1] + row_idx = PR.col_list_permi[L_row] + PR.A[row_idx] == best_row && continue + row_idx < PR.base && 
continue + break + end + return row_idx +end + +function add_to_eliminate!(L_row, row_idx, best_row, best_col, best_val, PR) + @assert L_row in PR.col_list[best_col] + @assert !(0 in PR.A[row_idx].values) + val = PR.A[row_idx, best_col] + @assert !iszero(val) + g = gcd(val, best_val) + val_red = divexact(val, g) + best_val_red = divexact(best_val, g) + @assert L_row in PR.col_list[best_col] + for c in PR.A[row_idx].pos + @assert !isempty(PR.col_list[c]) + if PR.is_light_col[c] + jj = findfirst(isequal(L_row), PR.col_list[c]) + deleteat!(PR.col_list[c], jj) + end + end + scale_row!(PR.A, row_idx, best_val_red) + PR.det_factor *= best_val_red + @assert !iszero(PR.det_factor) + @assert !(0 in PR.A[row_idx].values) + Hecke.add_scaled_row!(best_row, PR.A[row_idx], -val_red) + @assert iszero(PR.A[row_idx, best_col]) + return PR +end + +function update_after_addition!(L_row, row_idx::Int, best_col, PR::data_PartRef) + PR.light_weight[row_idx] = 0 + for c in PR.A[row_idx].pos + @assert c != best_col + if PR.is_light_col[c] + sort!(push!(PR.col_list[c], L_row)) + PR.is_light_col[c] && (PR.light_weight[row_idx]+=1) + end + end + return PR +end + +################################################################################ +# +# Small Auxiliary Functions +# +################################################################################ + +function swap_entries(v, i,j) + v[i],v[j] = v[j],v[i] +end + +function swap_rows_perm(i::Int64, j::Int64, PR::data_PartRef) + if i != j + swap_rows!(PR.A, i, j) + swap_entries(PR.single_col, i, j) + swap_entries(PR.col_list_perm, i, j) + swap_entries(PR.col_list_permi, PR.col_list_perm[i], PR.col_list_perm[j]) + swap_entries(PR.light_weight, i, j) + PR.det_factor = - PR.det_factor + end +end + +function swap_i_into_base(i::Int64, PR::data_PartRef) + if i < PR.single_row_limit + swap_rows_perm(i, PR.base, PR) + else + if i != PR.single_row_limit + swap_rows_perm(PR.base, PR.single_row_limit, PR) + end + PR.single_row_limit +=1 + swap_rows_perm(PR.base, i, PR) + end +end + +function find_light_entry(position_array::Vector{Int64}, is_light_col::Vector{Bool})::Int64 + for j in position_array[end:-1:1] + if is_light_col[j] + return j + end + end +end + +function move_to_end(i::Int64, PR::data_PartRef) + @assert !iszero(length(PR.A[i])) + @assert i >= PR.base + swap_rows_perm(i, PR.dense_start, PR) + if i < PR.single_row_limit + swap_rows_perm(i, PR.single_row_limit-1, PR) + PR.single_row_limit -=1 + end + ds_origin = PR.col_list_perm[PR.dense_start] + for c in PR.A[PR.dense_start].pos + if PR.is_light_col[c] + jj = findfirst(isequal(ds_origin), PR.col_list[c]) + deleteat!(PR.col_list[c], jj) + end + end + PR.dense_start -= 1 +end + +################################################################################ +# +# Isolate non-triangular part +# +################################################################################ + +function compose_dense_matrix(PR::data_PartRef) + @assert PR.nsingle == PR.dense_start + n = nrows(PR.A) + m = ncols(PR.A) + j=1 + for i = 1:m + if !PR.is_single_col[i] + PR.heavy_map[i] = j + push!(PR.heavy_mapi,i) + j+=1 + end + end + + D = sparse_matrix(PR.R, 0, n-PR.nsingle) + for i = PR.dense_start+1:n + p = Int[] + v = ZZRingElem_Array() + sizehint!(v, length(PR.A[i])) + for j = 1:length(PR.A[i].pos) + push!(v, PR.A[i].values[j]) + push!(p, PR.heavy_map[PR.A[i].pos[j]]) + end + push!(D, sparse_row(ZZ, p, v)) + end + return D +end \ No newline at end of file From e25a8504346ade03b70a559aace11435cf458463 Mon Sep 17 00:00:00 2001 
From: Berenike Dieterle <94169337+BD-00@users.noreply.github.com>
Date: Thu, 28 Nov 2024 22:44:44 +0100
Subject: [PATCH 12/12] some improvement with pointers

---
 src/Sparse/StructuredGauss.jl   |  32 +++---
 src/Sparse/ZZStructuredGauss.jl | 166 +++++++++++++++++++++++++++++++-
 2 files changed, 182 insertions(+), 16 deletions(-)

diff --git a/src/Sparse/StructuredGauss.jl b/src/Sparse/StructuredGauss.jl
index 7e5e210e3b..9bb3cbd1b5 100644
--- a/src/Sparse/StructuredGauss.jl
+++ b/src/Sparse/StructuredGauss.jl
@@ -55,7 +55,7 @@ mutable struct data_StructGauss{T}
   end
 end
 
-function _col_list(A::SMat{T}) where T
+function _col_list(A::SMat{T})::Vector{Vector{Int64}} where T
   n = nrows(A)
   m = ncols(A)
   col_list = [Array{Int64}([]) for i = 1:m]
@@ -76,7 +76,7 @@
 Return a tuple (\nu, N) consisting of the nullity \nu of A and a basis N
 AN is the zero matrix.
 """
-function structured_gauss(A::SMat{T}) where T <: ZZRingElem
+function structured_gauss(A::SMat{T}) where T <: RingElem
   SG = part_echolonize!(A)
   return compute_kernel(SG)
 end
@@ -103,7 +103,7 @@
 #Build an upper triangular matrix for as many columns as possible compromising
 #the loss of sparsity during this process.
 
-function part_echolonize!(A::SMat{T}) where T <: ZZRingElem
+function part_echolonize!(A::SMat{T})::data_StructGauss where T <: RingElem
   A = delete_zero_rows!(A)
   n = nrows(A)
   m = ncols(A)
@@ -129,7 +129,7 @@ function part_echolonize!(A::SMat{T}) where T <: ZZRingElem
   return SG
 end
 
-function part_echolonize_field!(A)
+function part_echolonize_field!(A::SMat{T})::data_StructGauss where T <: FieldElem
   A = delete_zero_rows!(A)
   n = nrows(A)
   m = ncols(A)
@@ -156,7 +156,7 @@ function part_echolonize_field!(A)
   return SG
 end
 
-function single_rows_to_top!(SG::data_StructGauss)
+function single_rows_to_top!(SG::data_StructGauss)::data_StructGauss
   for i = 1:nrows(SG.A)
     len = length(SG.A[i])
     @assert !iszero(len)
@@ -230,7 +230,7 @@ function build_part_ref_field!(SG::data_StructGauss)
   end
 end
 
-function find_best_single_row(SG::data_StructGauss)
+function find_best_single_row(SG::data_StructGauss)::Int64
   best_single_row = -1
   best_col = NaN
   best_val = NaN
@@ -268,7 +268,7 @@ function find_best_single_row(SG::data_StructGauss)
   return best_single_row
 end
 
-function find_best_single_row_field(SG::data_StructGauss)
+function find_best_single_row_field(SG::data_StructGauss)::Int64
   best_single_row = -1
   best_len = -1
   for i = SG.base:SG.single_row_limit-1
@@ -338,7 +338,7 @@ function turn_heavy(SG::data_StructGauss)
   SG.nlight -= SG.heavy_ext
 end
 
-function handle_new_light_weight!(i::Int64, SG::data_StructGauss)
+function handle_new_light_weight!(i::Int64, SG::data_StructGauss)::data_StructGauss
   w = SG.light_weight[i]
   if w == 0
     swap_i_into_base(i, SG)
@@ -354,7 +354,7 @@ function handle_new_light_weight!(i::Int64, SG::data_StructGauss)
   return SG
 end
 
-function eliminate_and_update!(best_single_row::Int64, SG::data_StructGauss)
+function eliminate_and_update!(best_single_row::Int64, SG::data_StructGauss)::data_StructGauss
   @assert best_single_row != 0
   best_row = deepcopy(SG.A[best_single_row])
   best_col = find_light_entry(best_row.pos, SG.is_light_col)
@@ -376,7 +376,7 @@ function eliminate_and_update!(best_single_row::Int64, SG::data_StructGauss)
   return SG
 end
 
-function eliminate_and_update_field!(best_single_row::Int64, SG::data_StructGauss)
+function eliminate_and_update_field!(best_single_row::Int64, SG::data_StructGauss)::data_StructGauss
   @assert best_single_row != 0
   best_col = find_light_entry(SG.A[best_single_row].pos, SG.is_light_col)
   @assert length(SG.col_list[best_col]) > 1
@@ -384,7 +384,7 @@ function eliminate_and_update_field!(best_single_row::Int64, SG::data_StructGaus
   @assert !iszero(best_val)
   scale_row!(SG.A, best_single_row, inv(best_val))
   best_val = SG.A[best_single_row, best_col]
-  @assert isone(best_val)A.nnz
+  @assert isone(best_val)
   best_row = deepcopy(SG.A[best_single_row])
   best_col_idces = SG.col_list[best_col]
   row_idx = 0
@@ -401,7 +401,7 @@ function eliminate_and_update_field!(best_single_row::Int64, SG::data_StructGaus
   return SG
 end
 
-function find_row_to_add_on(row_idx::Int64, best_row::SRow{T}, best_col_idces::Vector{Int64}, SG::data_StructGauss) where T
+function find_row_to_add_on(row_idx::Int64, best_row::SRow{T}, best_col_idces::Vector{Int64}, SG::data_StructGauss)::Int64 where T <: FieldElem
   for L_row in best_col_idces[end:-1:1]
     row_idx = SG.col_list_permi[L_row]
     SG.A[row_idx] == best_row && continue
@@ -411,7 +411,7 @@ function find_row_to_add_on(row_idx::Int64, best_row::SRow{T}, best_col_idces::V
   return row_idx
 end
 
-function add_to_eliminate!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val::ZZRingElem, SG::data_StructGauss) where T <: ZZRingElem
+function add_to_eliminate!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val::RingElem, SG::data_StructGauss)::data_StructGauss where T <: RingElem
   @assert L_row in SG.col_list[best_col]
   @assert !(0 in SG.A[row_idx].values)
   val = SG.A[row_idx, best_col]
@@ -437,7 +437,7 @@ function add_to_eliminate!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best
   return SG
 end
 
-function add_to_eliminate_field!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val, SG::data_StructGauss) where T
+function add_to_eliminate_field!(L_row::Int64, row_idx::Int64, best_row::SRow{T}, best_col::Int64, best_val::FieldElem, SG::data_StructGauss)::data_StructGauss where T <: FieldElem
   @assert L_row in SG.col_list[best_col]
   @assert !(0 in SG.A[row_idx].values)
   val = SG.A[row_idx, best_col]
@@ -451,12 +451,14 @@ function add_to_eliminate_field!(L_row::Int64, row_idx::Int64, best_row::SRow{T}
     end
   end
   @assert !(0 in SG.A[row_idx].values)
+  SG.A.nnz -= length(SG.A[row_idx])
   Hecke.add_scaled_row!(best_row, SG.A[row_idx], -val)
+  SG.A.nnz += length(SG.A[row_idx])
   @assert iszero(SG.A[row_idx, best_col])
   return SG
 end
 
-function update_after_addition!(L_row::Int64, row_idx::Int64, best_col::Int64, SG::data_StructGauss)
+function update_after_addition!(L_row::Int64, row_idx::Int64, best_col::Int64, SG::data_StructGauss)::data_StructGauss
   SG.light_weight[row_idx] = 0
   for c in SG.A[row_idx].pos
     @assert c != best_col
diff --git a/src/Sparse/ZZStructuredGauss.jl b/src/Sparse/ZZStructuredGauss.jl
index 91dba17425..4724b20b02 100644
--- a/src/Sparse/ZZStructuredGauss.jl
+++ b/src/Sparse/ZZStructuredGauss.jl
@@ -3,6 +3,7 @@
 #TODO: sizehint for heavy stuff can be given later, so don't initialize immediately
 #TODO: new struct for second part
 #TODO: add sizehint for Y?
+#TODO: remove whitespaces at beginning of lines
 
 #=
 PROBLEMS:
@@ -381,4 +382,167 @@ mutable struct data_ZZStructGauss{T}
     end
     SG.A.nnz-=length(SG.A[SG.base])
     empty!(SG.A[SG.base].pos), empty!(SG.A[SG.base].values)
-  end
\ No newline at end of file
+  end
+
+function delete_zero_rows!(A::SMat{T}) where T <: ZZRingElem
+  for i = A.r:-1:1
+    if isempty(A[i].pos)
+      deleteat!(A.rows, i)
+      A.r-=1
+    end
+  end
+  return A
+end
+
+################################################################################
+#
+#  Kernel Computation
+#
+################################################################################
+
+#Compute the kernel corresponding to the non echolonized part from above and
+#insert backwards using the triangular part to get the full kernel.
+
+mutable struct data_ZZKernel
+  heavy_mapi::Vector{Int64}
+  heavy_map::Vector{Int64}
+
+  function data_ZZKernel(SG::data_ZZStructGauss, nheavy::Int64)
+    return new(
+      sizehint!(Int[], nheavy),
+      fill(0, ncols(SG.A))
+      )
+  end
+end
+
+function compute_kernel(SG, with_light = true)
+  Hecke.update_light_cols!(SG)
+  @assert SG.nlight >= 0
+  KER = Hecke.collect_dense_cols!(SG)
+  D = dense_matrix(SG, KER)
+  _nullity, _dense_kernel = nullspace(D)
+  l, K = Hecke.init_kernel(_nullity, _dense_kernel, SG, KER, with_light)
+  return compose_kernel(l, K, SG)
+end
+
+function update_light_cols!(SG)
+  for i = 1:ncols(SG.A)
+    if SG.is_light_col[i] && !isempty(SG.col_list[i])
+      SG.is_light_col[i] = false
+      SG.nlight -= 1
+    end
+  end
+  return SG
+end
+
+function collect_dense_cols!(SG)
+  m = ncols(SG.A)
+  nheavy = m - SG.nlight - SG.nsingle
+  KER = data_ZZKernel(SG, nheavy)
+  j = 1
+  for i = 1:m
+    if !SG.is_light_col[i] && !SG.is_single_col[i]
+      KER.heavy_map[i] = j
+      push!(KER.heavy_mapi, i)
+      j+=1
+    end
+  end
+  @assert length(KER.heavy_mapi) == nheavy
+  return KER
+end
+
+function dense_matrix(SG, KER)
+  D = ZZMatrix(nrows(SG.A) - SG.nsingle, length(KER.heavy_mapi))
+  for i in 1:length(SG.Y)
+    for j in 1:length(KER.heavy_mapi)
+      setindex!(D, SG.Y[i], i, j, KER.heavy_mapi[j])
+    end
+  end
+  return D
+end
+
+function dense_kernel(SG, KER)
+  ST = sparse_matrix(base_ring(SG.A), 0, nrows(SG.Y))
+  YT = transpose(delete_zero_rows!(SG.Y))
+  for j in KER.heavy_mapi
+    push!(ST, YT[j])
+  end
+  S = transpose(ST)
+  d, _dense_kernel = nullspace(matrix(S))
+  return size(_dense_kernel)[2], _dense_kernel
+end
+
+function init_kernel(_nullity, _dense_kernel, SG, KER, with_light=false)
+  R = base_ring(SG.A)
+  m = ncols(SG.A)
+  if with_light
+    l = _nullity+SG.nlight
+  else
+    l = _nullity
+  end
+  K = ZZMatrix(m, l)
+  #initialisation
+  ilight = 1
+  for i = 1:m
+    if SG.is_light_col[i]
+      if with_light
+        @assert ilight <= SG.nlight
+        K[i, _nullity+ilight] = one(R)
+        ilight +=1
+      end
+    else
+      j = KER.heavy_map[i]
+      if j>0
+        for c = 1:_nullity
+          ccall((:fmpz_set, Nemo.libflint), Cvoid, (Ptr{ZZRingElem}, Ptr{ZZRingElem}), Nemo.mat_entry_ptr(K, i, c), Nemo.mat_entry_ptr(_dense_kernel, j, c))
+          #K[i,c] = _dense_kernel[j, c]
+        end
+      end
+    end
+  end
+  return l, K
+end
+
+function compose_kernel(l, K, SG)
+  R = base_ring(SG.A)
+  n = nrows(SG.A)
+  for i = n:-1:1
+    c = SG.single_col[i]
+    if c>0
+      x = R(0)
+      y = zeros(R,l)
+      for idx in 1:length(SG.A[i])
+        cc = SG.A[i].pos[idx]
+        xx = SG.A[i].values[idx]
+        if cc == c
+          x = xx
+        else
+          for k = 1:l
+            kern_c = K[cc,k]
+            !iszero(kern_c) && (y[k]-=xx*kern_c)
+          end
+        end
+      end
+      for k = 1:l
+        x_inv = try
+          inv(x)
+        catch
+          R(0)
+        end
+        if iszero(x_inv)
+          K[:,k] *= x
+          K[c, k] = y[k]
+        else
+          K[c, k] = y[k]*x_inv
+        end
+      end
+    end
+  end
+  return l, K
+end
+
+#Warning: skips zero entries in sparse matrix
+function Base.setindex!(A::ZZMatrix, B::SRow{ZZRingElem}, ar::Int64, ac::Int64, bj::Int64)
+  isnothing(findfirst(isequal(bj), B.pos)) && return
+  ccall((:fmpz_set, Nemo.libflint), Cvoid, (Ptr{ZZRingElem}, Ptr{Int}), Nemo.mat_entry_ptr(A, ar, ac), Hecke.get_ptr(B.values, findfirst(isequal(bj), B.pos)))
+end
\ No newline at end of file
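
A minimal usage sketch (an editor's illustration, not part of the patches above): it assumes the patched structured_gauss from src/Sparse/StructuredGauss.jl is available after loading Hecke and that it returns the pair (nullity, kernel matrix N) described in its docstring, with AN the zero matrix. The concrete matrix below is made up purely for illustration.

  using Hecke

  # 3 x 4 sparse matrix over ZZ; the third row is the sum of the first two,
  # so the kernel has dimension at least 2
  A = sparse_matrix(ZZ, 0, 4)
  push!(A, sparse_row(ZZ, [1, 3], ZZRingElem[2, 5]))
  push!(A, sparse_row(ZZ, [2, 4], ZZRingElem[1, 7]))
  push!(A, sparse_row(ZZ, [1, 2, 3, 4], ZZRingElem[2, 1, 5, 7]))

  Ad = matrix(A)            # dense copy, since A is modified in place
  nu, N = structured_gauss(A)
  @assert iszero(Ad * N)    # every returned column should lie in the kernel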