From d3f004d17412176217f2cca5b3f12d4cc258b698 Mon Sep 17 00:00:00 2001
From: Haider Ali
Date: Sun, 12 Nov 2023 05:19:47 +0000
Subject: [PATCH 1/2] Print statements and other errors solved

---
 __pycache__/functions.cpython-310.pyc      | Bin 0 -> 1044 bytes
 agent/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 131 bytes
 agent/__pycache__/agent.cpython-310.pyc    | Bin 0 -> 2156 bytes
 agent/agent.py                             |  2 +-
 evaluate.py                                | 14 +++++++-------
 functions.py                               |  2 +-
 train.py                                   | 18 +++++++++---------
 7 files changed, 18 insertions(+), 18 deletions(-)
 create mode 100644 __pycache__/functions.cpython-310.pyc
 create mode 100644 agent/__pycache__/__init__.cpython-310.pyc
 create mode 100644 agent/__pycache__/agent.cpython-310.pyc

diff --git a/__pycache__/functions.cpython-310.pyc b/__pycache__/functions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..196fa8e1ea198d71b2dd08edc75bbcd9446059b6
GIT binary patch
literal 1044
[base85-encoded .pyc bytecode payload omitted]

diff --git a/agent/__pycache__/__init__.cpython-310.pyc b/agent/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62b50cee73197ce4d52cda1740612b8826b2902c
GIT binary patch
literal 131
[base85-encoded .pyc bytecode payload omitted]

diff --git a/agent/__pycache__/agent.cpython-310.pyc b/agent/__pycache__/agent.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28bcca502dc8ffb63c899c3bf5bd2ffadfc93c02
GIT binary patch
literal 2156
[base85-encoded .pyc bytecode payload omitted]

diff --git a/agent/agent.py b/agent/agent.py
index 07e163b..b8c6364 100644
--- a/agent/agent.py
+++ b/agent/agent.py
@@ -44,7 +44,7 @@ def act(self, state):
 	def expReplay(self, batch_size):
 		mini_batch = []
 		l = len(self.memory)
-		for i in xrange(l - batch_size + 1, l):
+		for i in range(l - batch_size + 1, l):
 			mini_batch.append(self.memory[i])

 		for state, action, reward, next_state, done in mini_batch:
diff --git a/evaluate.py b/evaluate.py
index 0055e9d..dc980ae 100644
--- a/evaluate.py
+++ b/evaluate.py
@@ -6,7 +6,7 @@
 import sys

 if len(sys.argv) != 3:
-	print "Usage: python evaluate.py [stock] [model]"
+	print ("Usage: python evaluate.py [stock] [model]")
 	exit()

 stock_name, model_name = sys.argv[1], sys.argv[2]
@@ -22,7 +22,7 @@
 total_profit = 0
 agent.inventory = []

-for t in xrange(l):
+for t in range(l):
 	action = agent.act(state)

 	# sit
@@ -31,19 +31,19 @@

 	if action == 1: # buy
 		agent.inventory.append(data[t])
-		print "Buy: " + formatPrice(data[t])
+		print ("Buy: " + formatPrice(data[t]))

 	elif action == 2 and len(agent.inventory) > 0: # sell
 		bought_price = agent.inventory.pop(0)
 		reward = max(data[t] - bought_price, 0)
 		total_profit += data[t] - bought_price
-		print "Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price)
+		print ("Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price))

 	done = True if t == l - 1 else False
 	agent.memory.append((state, action, reward, next_state, done))
 	state = next_state

 	if done:
-		print "--------------------------------"
-		print stock_name + " Total Profit: " + formatPrice(total_profit)
-		print "--------------------------------"
+		print ("--------------------------------")
+		print (stock_name + " Total Profit: " + formatPrice(total_profit))
+		print ("--------------------------------")
diff --git a/functions.py b/functions.py
index 5129e0c..f8d216e 100644
--- a/functions.py
+++ b/functions.py
@@ -24,7 +24,7 @@ def getState(data, t, n):
 	d = t - n + 1
 	block = data[d:t + 1] if d >= 0 else -d * [data[0]] + data[0:t + 1] # pad with t0
 	res = []
-	for i in xrange(n - 1):
+	for i in range(n - 1):
 		res.append(sigmoid(block[i + 1] - block[i]))

 	return np.array([res])
diff --git a/train.py b/train.py
index c4ba8ed..21398f9 100644
--- a/train.py
+++ b/train.py
@@ -3,7 +3,7 @@
 import sys

 if len(sys.argv) != 4:
-	print "Usage: python train.py [stock] [window] [episodes]"
+	print ("Usage: python train.py [stock] [window] [episodes]")
 	exit()

 stock_name, window_size, episode_count = sys.argv[1], int(sys.argv[2]), int(sys.argv[3])
@@ -13,14 +13,14 @@
 l = len(data) - 1
 batch_size = 32

-for e in xrange(episode_count + 1):
-	print "Episode " + str(e) + "/" + str(episode_count)
+for e in range(episode_count + 1):
+	print ("Episode " + str(e) + "/" + str(episode_count))
 	state = getState(data, 0, window_size + 1)

 	total_profit = 0
 	agent.inventory = []

-	for t in xrange(l):
+	for t in range(l):
 		action = agent.act(state)

 		# sit
@@ -29,22 +29,22 @@

 		if action == 1: # buy
 			agent.inventory.append(data[t])
-			print "Buy: " + formatPrice(data[t])
+			print ("Buy: " + formatPrice(data[t]))

 		elif action == 2 and len(agent.inventory) > 0: # sell
 			bought_price = agent.inventory.pop(0)
 			reward = max(data[t] - bought_price, 0)
 			total_profit += data[t] - bought_price
-			print "Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price)
+			print ("Sell: " + formatPrice(data[t] - bought_price))

 		done = True if t == l - 1 else False
 		agent.memory.append((state, action, reward, next_state, done))
 		state = next_state

 		if done:
-			print "--------------------------------"
-			print "Total Profit: " + formatPrice(total_profit)
-			print "--------------------------------"
+			print ("--------------------------------")
+			print ("Total Profit: " + formatPrice(total_profit))
+			print ("--------------------------------")

 		if len(agent.memory) > batch_size:
 			agent.expReplay(batch_size)

From 9b8042d615fc8c3ce7cd14e5576639cc7150669b Mon Sep 17 00:00:00 2001
From: Haider Ali
Date: Sun, 12 Nov 2023 13:45:54 +0000
Subject: [PATCH 2/2] Updated

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 21398f9..bd5775f 100644
--- a/train.py
+++ b/train.py
@@ -9,7 +9,7 @@
 stock_name, window_size, episode_count = sys.argv[1], int(sys.argv[2]), int(sys.argv[3])

 agent = Agent(window_size)
-data = getStockDataVec(stock_name)
+data = cha(stock_name)
 l = len(data) - 1
 batch_size = 32