Skip to content

Commit

Permalink
Merge branch 'master' into refs123_decentralise
Browse files Browse the repository at this point in the history
  • Loading branch information
TiagoV-PDMFC committed Nov 9, 2022
2 parents 7110919 + aef89d3 commit f06757f
Show file tree
Hide file tree
Showing 7 changed files with 1,332 additions and 79 deletions.
8 changes: 0 additions & 8 deletions bin/environment/batches/batchesTests.js
Original file line number Diff line number Diff line change
Expand Up @@ -418,14 +418,6 @@ const DFM_BATCHES1 = {
'ZJ1459SC1'
]
},
{
batchNumber: "WB5208",
quantity: 1,
expiry: "2030/12/31",
serialNumbers: [
'VQ8911LQ7'
]
},
{
batchNumber: "WO6983",
quantity: 1,
Expand Down
148 changes: 78 additions & 70 deletions bin/environment/wsCreateStuff.js
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
/**
* Experimental tool to create some products and batches using api services.
* Uses some files common to the setup.js utility,
* #66
* #66, #119
*
 * On node <= v14 we recommend that you run this script with option
* --unhandled-rejections=strict
Expand All @@ -12,10 +12,10 @@
* See --help for options.
*
 * Please add a --sleep=NNNNN (default is 2000 milliseconds = 2 seconds)
* between shipment operations to allow for messages betweem participants to be processed.
* At least 10 seconds seems to be required.
* between shipment operations to allow for messages between participants to be processed.
* At least 10 seconds seems to be required for simulated blockchain.
* For BC --istanbul.blockperiod 1 (1 second per new block) this timing needs to
* be 80 seconds (80000 ms) or more.
* be 120 seconds (120000 ms) or more.
*
* Example for the top README.md scenario - running single MAH (defaults to ROCHE credentials) API on localhost:8081 :
* fgt-workspace/bin/environment$ node --unhandled-rejections=strict wsCreateStuff.js --env=single --scenario=single --sleep=80000 2>&1 | tee -a wsCreateStuffSingle.log
Expand Down Expand Up @@ -49,7 +49,7 @@ const WSH1 = require("../../docker/api/env/whs-1.json");
const WSH2 = require("../../docker/api/env/whs-2.json");
const PHA1 = require("../../docker/api/env/pha-1.json");
const PHA2 = require("../../docker/api/env/pha-2.json");
const SHIPMENTS_ON_PHA = [];
const SHIPMENTS_ON_PHA = []; // records all shipments sent to a PHA, so that they can be sold.

const NUM_SALES = 2; // number of sales to perform - for now consume serialNumbers from 1st MSD batch
const MY_SALES = []; // array of data returned by /sale/create
Expand Down Expand Up @@ -607,40 +607,43 @@ const shipmentsCreateTestDfm = async function (conf, sender) {

const whs = WSH1;
const pha = PHA1;

const gtin1 = sender.products[0].gtin;
const batch1 = sender.batches[gtin1][0];
const batchNumber1 = batch1.batchNumber;
const quantity1 = batch1.quantity;

const shipment1MahToWhs = {
"orderId": whs.id.secret + "-" + (new Date()).toISOString(),
"requesterId": whs.id.secret,
"shipmentLines": [
{
"gtin": gtin1,
"batch": batchNumber1,
"quantity": quantity1
}
]
};

await shipmentCreateAndDeliver(conf, sender, whs, shipment1MahToWhs);

const shipment2WhsToPha = {
"orderId": pha.id.secret + "-" + (new Date()).toISOString(),
"requesterId": pha.id.secret,
"shipmentLines": [
{
"gtin": gtin1,
"batch": batchNumber1,
"quantity": quantity1
}
]
};
for (const product of sender.products) { // ship all DFM products
const gtin = product.gtin;
for (const batch of sender.batches[gtin]) { // ship all DFM batches
const batchNumber = batch.batchNumber;
const quantity = batch.quantity;

const shipment1MahToWhs = {
"orderId": whs.id.secret + "-" + (new Date()).toISOString(),
"requesterId": whs.id.secret,
"shipmentLines": [
{
"gtin": gtin,
"batch": batchNumber,
"quantity": quantity
}
]
};

await shipmentCreateAndDeliver(conf, sender, whs, shipment1MahToWhs);

const shipment2WhsToPha = {
"orderId": pha.id.secret + "-" + (new Date()).toISOString(),
"requesterId": pha.id.secret,
"shipmentLines": [
{
"gtin": gtin,
"batch": batchNumber,
"quantity": quantity
}
]
};

const resShipToPha = await shipmentCreateAndDeliver(conf, whs, pha, shipment2WhsToPha);
SHIPMENTS_ON_PHA.push(resShipToPha);
const resShipToPha = await shipmentCreateAndDeliver(conf, whs, pha, shipment2WhsToPha);
SHIPMENTS_ON_PHA.push(resShipToPha);
}
}
}

const shipmentsCreateTestDefault = async function (conf, sender) {
Expand Down Expand Up @@ -794,42 +797,47 @@ const salesCreateTestDfm = async function (conf, manufActor, sellerActor, mySale
if (!SHIPMENTS_ON_PHA.length) {
throw new Error("No shipments on pharmacies on this run!");
}
if (!SHIPMENTS_ON_PHA[0].shipmentLines.length) {
throw new Error("No shipmentLines on pharmacies on this run!");
}
const shipmentLine0 = SHIPMENTS_ON_PHA[0].shipmentLines[0];
const gtin = shipmentLine0.gtin;
const batchNumber = shipmentLine0.batch;
const batch = findMahOriginalBatch(gtin, batchNumber);
//console.log("batch", batch);

let i=0;
while (i<batch.quantity) { // sell all the batch
const saleSerialNumber = batch.serialNumbers[i];
const saleData = { // see body of http://swagger-pha1.localhost:8080/#/sale/post_sale_create
"id": sellerActor.id.secret + "-" + (new Date()).toISOString(),
"productList": [
{
"gtin": gtin,
"batchNumber": batch.batchNumber,
"serialNumber": saleSerialNumber
}
]
};

const resSale = await jsonPost(conf, sellerActor, {
path: `/traceability/sale/create`,
body: saleData
});
//console.log("Sale", resSale);
if (!resSale || !resSale.productList) {
throw new Error("sale/create "+batch.batchNumber+" at "+sellerActor.id.secret+" reply has no productList: "+JSON.stringify(resSale));
for (const shipment of SHIPMENTS_ON_PHA) {
if (!shipment.shipmentLines.length) {
throw new Error("No shipmentLines on pharmacies on this run!");
}
mySales.push(resSale);
if (shipment.shipmentLines.length != 1) {
throw new Error("DFM only supports shipments with 1 line each!");
}
const shipmentLine0 = shipment.shipmentLines[0];
const gtin = shipmentLine0.gtin;
const batchNumber = shipmentLine0.batch;
const batch = findMahOriginalBatch(gtin, batchNumber);
//console.log("batch", batch);

let i = 0;
while (i < batch.quantity) { // sell all the batch
const saleSerialNumber = batch.serialNumbers[i];
const saleData = { // see body of http://swagger-pha1.localhost:8080/#/sale/post_sale_create
"id": sellerActor.id.secret + "-" + (new Date()).toISOString(),
"productList": [
{
"gtin": gtin,
"batchNumber": batch.batchNumber,
"serialNumber": saleSerialNumber
}
]
};

await sleep(SLEEP_MS);
const resSale = await jsonPost(conf, sellerActor, {
path: `/traceability/sale/create`,
body: saleData
});
//console.log("Sale", resSale);
if (!resSale || !resSale.productList) {
throw new Error("sale/create " + batch.batchNumber + " at " + sellerActor.id.secret + " reply has no productList: " + JSON.stringify(resSale));
}
mySales.push(resSale);

i++;
await sleep(SLEEP_MS);

i++;
}
}
}

Expand Down
2 changes: 1 addition & 1 deletion fgt-api/config/fgt-mah-wallet/credentials.json
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,4 @@
"required": true,
"secret": "This1sSuchAS3curePassw0rd"
}
}
}
10 changes: 10 additions & 0 deletions tests/proxy/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# The Base Image used to create this Image
FROM httpd:2.4.54

# to Copy a file named httpd_proxypass.conf from present working directory to the /usr/local/apache2/conf inside the container
# This is taken from the original httpd.conf inside the base image, and modified to allow ProxyPass
COPY httpd_proxypass.conf /usr/local/apache2/conf/httpd.conf

EXPOSE 8808

CMD ["httpd-foreground"]
115 changes: 115 additions & 0 deletions tests/proxy/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
# FGT participant going through 2 proxies

A short example of configuring an FGT participant using a chain of 2 proxies.
- a static proxy (running at http://localhost:8808 in this example) called httpd-proxy-static that always forwards all requests to https://fgt-dev.pharmaledger.pdmfc.com through a generic proxy (also configured statically - see below).
- a generic proxy (a squid proxy running on http://pdm-00781:3128 for this example)

Port numbers provided are just examples for completeness. They should be changed to whatever you need.

## DISCLAIMER

This configuration is not security-validated. A lot of apache's default configuration should be removed/inhibited for production purposes.

But this example should be enough to see the Proxy configuration directives required.

(This example was not validated in an environment where Apache was prevented from connecting to the internet — in that case it would also lose access to the squid proxy.)


## Configure an apache httpd 2.4.54 as a static proxy inside a docker

### Edit the static proxy config

Edit the tests/proxy/httpd_proxypass.conf to suit your configuration.
The most relevant directives are at the bottom:

```xml
# Global config

SSLProxyEngine on
# all external accesses should go through this proxy (this is my local squid)
# PLACE YOUR OPENSHIFT PROXY HERE - the one that is defined by the https_proxy variable
ProxyRemote * http://pdm-00781:3128

NameVirtualHost *
<VirtualHost *>
ServerName owncloud.mydomain.com

ProxyRequests Off
<Proxy *>
Order deny,allow
Allow from all
</Proxy>

# pass everything to https://fgt-dev.pharmaledger.pdmfc.com
# use https://fgt.pharmaledger.pdmfc.com when confident

ProxyPass / https://fgt-dev.pharmaledger.pdmfc.com/
ProxyPassReverse / https://fgt-dev.pharmaledger.pdmfc.com/
<Location />
Order allow,deny
Allow from all
</Location>
</VirtualHost>
```

Compare (diff) with httpd_original.conf
(note that it is listening on 8808, and needs to load proxy_module, proxy_http_module and ssl_module)

### Build the static proxy

```sh
docker build -t httpd-proxy-static .
```

### Run the static proxy as a standalone docker at port 8808

```sh
docker run --network="host" -p 8808:8808 httpd-proxy-static
```

The `--network="host"` was needed just to connect to the squid proxy at http://pdm-00781:3128
but, your docker container should have connection to your generic proxy without further configs.

### Edit the apihub-root/external-volume/config/bdns.hosts so that the domain traceability goes through the proxy

The domain traceability should go through the static proxy.
Note that it is an http (not https) proxy.

In this example, as the static proxy is running at http://localhost:8808, that is the address we use.
( Do not confuse with the squid running at http://pdm-00781:3128 ).

```json
"traceability": {
"replicas": [],
"brickStorages": [
"http://localhost:8808"
],
"mqEndpoints": [
"http://localhost:8808"
],
"anchoringServices": [
"http://localhost:8808"
]
},
```


## Notes on how to install and run a squid proxy on Ubuntu 22

```sh
sudo apt install squid
```

Squid will be left running on port 3128.

To test, configure a browser using a proxy at localhost:3128 (or pdm-00781:3128),
test access to a web page, and check /var/log/squid/access.log that all
this browser's accesses go through this proxy.

systemctl enable squid
systemctl start squid

systemctl status squid

systemctl stop squid
systemctl disable squid
Loading

0 comments on commit f06757f

Please sign in to comment.