# AdjHE_reg_s1.py
import os
import logging
import timeit
import argparse
from argparse import RawTextHelpFormatter
from struct import unpack, calcsize

import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(prog='AdjHE_reg_s1.py',
                                 description='Runs adjusted HE regression for large sample sizes in parallel (stage 1).\nIMPORTANT: Run this for heritability estimation before HE_reg_s2.py',
                                 epilog='After processing all GRMs, run HE_reg_s2.py to get the estimate',
                                 formatter_class=RawTextHelpFormatter)
parser.add_argument('--prefix', type=str, required=True,
                    help='Prefix for GCTA-format part-GRM files [required]\n e.g. /path/to/partgrm/ukbiobank.part_200_')
parser.add_argument('--Npart', type=int, required=True,
                    help='Total number of part-GRM files [required]\n e.g. 200')
parser.add_argument('--job', type=int, required=True,
                    help='Which part-GRM to process in this run; submit the jobs in parallel on your system [required]\n e.g. ${PBS_ARRAYID}')
parser.add_argument('--id', type=str, required=True,
                    help='GCTA-format .grm.id file containing all individuals [required]')
parser.add_argument('--pheno', type=str, required=True,
                    help='PLINK-format phenotype file [required]\nIf --mpheno is not specified, the 3rd column (the 1st phenotype) is used.')
parser.add_argument('--mpheno', type=int, default=1,
                    help='Which phenotype to use from the phenotype file (one phenotype only)')
parser.add_argument('--out', type=str, required=True,
                    help='Output directory; intermediate results for each part-GRM are saved there. The directory must already exist! [required]')
parser.add_argument('--covar', type=str, default='NULL',
                    help='PLINK-format covariate file containing covariates (besides PCs) to adjust for')
parser.add_argument('--PC', type=str, default='NULL',
                    help='PLINK-format covariate file containing the PCs\nPCs should be computed from the same individuals as the GRM files, with IDs matching --id.\nIf --npc is not specified, all PCs in the file are used.')
parser.add_argument('--npc', type=int, default=-9,
                    help='Number of PCs to adjust for')
parser.add_argument('--std', action='store_true', default=False,
                    help='Run SAdj-HE regression (i.e., with standardization)')
args = parser.parse_args()
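# A hypothetical invocation (file names below are illustrative; only the
# --prefix example comes from the help text above):
#   python AdjHE_reg_s1.py \
#       --prefix /path/to/partgrm/ukbiobank.part_200_ --Npart 200 --job 1 \
#       --id /path/to/all.grm.id --pheno /path/to/pheno.txt --mpheno 1 \
#       --PC /path/to/pcs.txt --npc 10 --out /path/to/existing/outdir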
start_time0 = timeit.default_timer()
def multirange(counts):
    """Vectorized equivalent of concatenating range(c) for each c in counts."""
    counts = np.asarray(counts)
    # Remove the following line if counts is always strictly positive.
    counts = counts[counts != 0]
    counts1 = counts[:-1]
    reset_index = np.cumsum(counts1)
    incr = np.ones(counts.sum(), dtype=int)
    incr[0] = 0
    incr[reset_index] = 1 - counts1
    # Reuse the incr array for the final result.
    incr.cumsum(out=incr)
    # 'a' holds the concatenated ranges; 'b' repeats each count alongside its range.
    out = {'a': incr, 'b': np.repeat(counts, counts)}
    return out
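# For example, multirange([1, 3, 2]) gives
#   {'a': array([0, 0, 1, 2, 0, 1]), 'b': array([1, 3, 3, 3, 2, 2])}
# i.e. 'a' concatenates range(1), range(3), range(2).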
def sum_n_vec(m, n):
    # Indices of the n diagonal entries inside a packed lower-triangular
    # part-GRM whose first row is global row m: row m+i holds m+i+1 entries,
    # so its diagonal sits at (i+1)*m + (i+1)*(i+2)/2 - 1.
    out = [0] * n
    for i in range(n):
        out[i] = int(((i + 1) * (i + 2) / 2) - 1 + m * (i + 1))
    return out
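# For example, sum_n_vec(m=2, n=2) returns [2, 6]: global row 2 occupies packed
# entries 0-2 (diagonal at index 2) and row 3 occupies entries 3-6 (diagonal at 6).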
def ReadGRMBin(prefix, m=0, AllN=False):
    """Read a GCTA binary part-GRM; return its diagonal, packed off-diagonal, and IDs."""
    BinFileName = prefix + ".grm.bin"
    NFileName = prefix + ".grm.N.bin"
    IDFileName = prefix + ".grm.id"
    dt = np.dtype('f4')  # Relatedness is stored as a 4-byte float in the binary file
    entry_format = 'f'  # N is stored as a float in the binary file
    entry_size = calcsize(entry_format)
    ## Read IDs
    ids = pd.read_csv(IDFileName, sep='\t', header=None)
    ids_vec = ids.iloc[:, 1]
    n = len(ids.index)
    ids_diag = ['NA' for x in range(n)]
    n_off = int(n * (n - 1) / 2)
    ## Read relatedness values
    grm = np.fromfile(BinFileName, dtype=dt)
    ## Read number-of-markers values
    # if AllN:
    #     N = np.fromfile(NFileName, dtype=dt)
    # else:
    #     with open(NFileName, mode='rb') as f:
    #         record = f.read(entry_size)
    #         N = unpack(entry_format, record)[0]
    #         N = int(N)
    i = sum_n_vec(m, n)
    out = {'diag': grm[i], 'off': np.delete(grm, i), 'id': ids}
    return out
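# A .grm.bin part file holds consecutive rows of the full GRM's lower triangle
# (diagonal included) as 4-byte floats, which is why sum_n_vec can locate the
# diagonal entries by index alone.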
def smartway(x1, x2):
    # Sum of x12[i] * x12[j] over all pairs i < j, in O(n) (not called below).
    x12 = x1 * x2
    temp_sum = np.sum(x12)
    lower_sum = 0
    for i in range(len(x12)):
        temp_sum = temp_sum - x12[i]
        lower_sum = lower_sum + x12[i] * temp_sum
    return lower_sum
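# For example, smartway(np.array([1, 2]), np.array([3, 4])) returns 24:
# x12 = [3, 8] and the only i < j pair contributes 3 * 8.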
def outindex(counter, temp_n):
    # Positions, within the packed off-diagonal array, of the entries pairing
    # two individuals that both belong to this part
    # (global rows counter..counter+temp_n-1).
    temp_count = int(temp_n * (temp_n - 1) / 2)
    temp_array1 = np.array(range(temp_count))
    temp_array2 = np.array([counter * (i + 2) for i in range(temp_n - 1)])
    temp_array2 = np.repeat(temp_array2, np.array(range(temp_n - 1)) + 1)
    out_index = temp_array1 + temp_array2
    return out_index
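# For example, outindex(counter=2, temp_n=3) returns [4, 7, 8]: with global rows
# 2-4 stored in this part, those are the off-diagonal slots for the within-part
# pairs (3,2), (4,2) and (4,3).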
def fun1(current_sum, counter, n, pc, lower_diag):
    # Accumulate the off-diagonal sum of A_ij * pc_i * pc_j contributed by the
    # rows stored in this part (global rows counter..counter+n-1).
    temp_out = outindex(counter, n)
    temp_pc = pc[counter:counter + n]
    pre_pc = pc[:counter]
    temp_outer1 = np.outer(temp_pc, temp_pc)
    temp_outer2 = np.outer(temp_pc, pre_pc)
    cor = multirange(range(n))
    # Within-part pairs use the strict lower triangle of temp_outer1 ...
    temp_sum1 = np.dot(lower_diag[temp_out], temp_outer1[cor['b'], cor['a']])
    # ... and pairs with earlier individuals use all of temp_outer2.
    temp_sum2 = np.dot(np.delete(lower_diag, temp_out), temp_outer2.ravel())
    temp_sum = temp_sum1 + temp_sum2
    current_sum = current_sum + temp_sum
    return current_sum
def regout(y):
    # OLS residuals of y on the global design matrix cov_selected.
    X = cov_selected
    if len(X.shape) == 1:
        XTX_inv = 1 / np.dot(X.T, X)
    else:
        XTX_inv = np.linalg.inv(np.dot(X.T, X))
    XTY = np.dot(X.T, y)
    beta = np.dot(XTX_inv, XTY)
    res = y - np.dot(X, beta)
    return res
npc = args.npc
outprefix = args.out
#if not os.path.exists(outprefix):
# os.mkdir(outprefix)
os.chdir(outprefix)
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG, filename='pheno' + str(args.mpheno) + '.log', filemode='a')
#ids = np.loadtxt(args.id)
ids = pd.read_csv(args.id, sep=r'\s+', header=None)
n_phen_nona = ids.shape[0]
phenotypes = pd.read_csv(args.pheno, sep=r'\s+', header=None)
# Keep only individuals present in both the GRM id file and the phenotype file,
# matching on the FID/IID columns (columns 0 and 1).
final_phen = pd.merge(ids, phenotypes, how='inner', on=[0, 1])
#phenotypes.index = phenotypes.iloc[:,0].astype("int32")
#intersection_indiv = np.intersect1d(ids[:,0].astype("int32"), phenotypes.iloc[:,0].astype("int32"))
#final_phen = phenotypes.loc[intersection_indiv]
# Intercept column; use the merged sample size so dimensions match final_phen.
cov_selected = np.ones(final_phen.shape[0])
if args.covar != "NULL":
    covariates = pd.read_csv(args.covar, sep=r'\s+', header=None)
    # Merge on the ID columns only, so that columns 2: of the result are the
    # covariates themselves rather than the phenotypes carried in final_phen.
    final_covar = pd.merge(final_phen[[0, 1]], covariates, how='inner', on=[0, 1])
    final_covar = final_covar.values[:, 2:]
    cov_selected = np.column_stack((cov_selected, final_covar))
if args.PC != "NULL":
    PCs = pd.read_csv(args.PC, sep=r'\s+', header=None)
    final_PC = pd.merge(final_phen[[0, 1]], PCs, how='inner', on=[0, 1])
    if args.npc == -9:
        npc = PCs.shape[1] - 2
    if npc != 0:
        final_PC = final_PC.values[:, 2:(2 + npc)]
        cov_selected = np.column_stack((cov_selected, final_PC))
else:
    # No PC file supplied: force npc to 0 so np.zeros(npc) and the PC loops
    # below are no-ops instead of failing on the -9 sentinel.
    npc = 0
# FID and IID occupy the first two columns, so phenotype k is column k+1.
y = final_phen.values[:, args.mpheno + 1]
res_y = regout(y)
std_y = (res_y - np.mean(res_y)) / np.std(res_y)
current_sum_y = 0
current_sum_grm = 0
current_sum_grm_e = 0
current_sum_pc = np.zeros(npc)
#prefix='/home/saonli/shared/bb/dosage_data3/grmblock/316k_unrelated_566k.part_200_'
prefix = args.prefix
igrm = args.job
# Zero-pad the part index to the width of Npart (e.g. part 7 of 200 -> "007").
iformat = len(str(args.Npart))
prefix1 = prefix + str(igrm).zfill(iformat)
#tempid = np.loadtxt(prefix1+'.grm.id')[:,0].astype("int32")
tempid = pd.read_csv(prefix1 + '.grm.id', sep=r'\s+', header=None)
tempid_1 = tempid.iloc[0]
# Row offset of this part's first individual within the full id list.
counter = int(np.where((ids[0] == tempid_1[0]) & (ids[1] == tempid_1[1]))[0])
G = ReadGRMBin(prefix1, m=counter)
n_ind = len(G['id'])
lower_diag = G['off'].astype('float64')
if not args.std:
    diag = G['diag'].astype('float64')
    current_sum_grm = np.dot(lower_diag, lower_diag) + np.dot(diag, diag)  # A cross A
    current_sum_grm_e = np.sum(diag)  # A cross I
    current_sum_y = (fun1(current_sum_y, counter, n_ind, res_y, lower_diag)
                     + np.dot(diag, res_y[counter:counter + n_ind]**2))  # A cross yy^T
    for j in range(npc):
        current_sum_pc[j] = (fun1(current_sum_pc[j], counter, n_ind, final_PC[:, j], lower_diag)
                             + np.dot(diag, final_PC[counter:counter + n_ind, j]**2))  # A cross PCj
    current_out = np.append(current_sum_pc, [current_sum_y, current_sum_grm_e, current_sum_grm])
else:
    current_sum_grm = np.dot(lower_diag, lower_diag)
    current_sum_y = fun1(current_sum_y, counter, n_ind, std_y, lower_diag)
    for j in range(npc):
        current_sum_pc[j] = fun1(current_sum_pc[j], counter, n_ind, final_PC[:, j], lower_diag)
    current_out = np.append(current_sum_pc, [current_sum_y, current_sum_grm])
logging.info(str(igrm) + '-th GRM loaded and processed... took ' + str(timeit.default_timer() - start_time0) + ' seconds.')
with open('pheno' + str(args.mpheno) + '_' + str(igrm), 'w') as f:
    np.savetxt(f, current_out, newline=" ")
    f.write('\n')
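# Each part writes a single row: one entry per adjusted PC, then the y
# cross-product sum, the trace term (when --std is off), and the A-cross-A sum.
# HE_reg_s2.py combines these rows over all Npart parts to form the estimate.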