raft 4 nodes added without Tessera
melihbirim committed Jan 9, 2021
1 parent 66389c5 commit 3a0d3b2
Showing 10 changed files with 1,651 additions and 0 deletions.
64 changes: 64 additions & 0 deletions 7nodes/raft-4nodes/k8s-yaml-pvc/00-quorum-persistent-volumes.yaml
@@ -0,0 +1,64 @@


---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: quorum-node1-pvc
  annotations:
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Mi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: quorum-node2-pvc
  annotations:
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Mi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: quorum-node3-pvc
  annotations:
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Mi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: quorum-node4-pvc
  annotations:
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Mi
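
Once this manifest is applied, each of the four nodes claims its own small volume. A quick sanity check (a minimal sketch; namespace and storage class are whatever the cluster defaults to):

    kubectl apply -f 7nodes/raft-4nodes/k8s-yaml-pvc/00-quorum-persistent-volumes.yaml
    kubectl get pvc
    # Expect quorum-node1-pvc .. quorum-node4-pvc, each requesting 200Mi.
    # Claims may stay "Pending" until a pod mounts them if the default
    # StorageClass uses WaitForFirstConsumer volume binding.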
67 changes: 67 additions & 0 deletions 7nodes/raft-4nodes/k8s-yaml-pvc/01-quorum-genesis.yaml
@@ -0,0 +1,67 @@

apiVersion: v1
kind: ConfigMap
metadata:
  name: genesis-config
  labels:
    app: qubernetes
    name: genesis-config
data:
  genesis-geth.json: |-
    {
      "alloc": {
        "0xed9d02e382b34818e88B88a309c7fe71E65f419d": {
          "balance": "1000000000000000000000000000"
        },
        "0xcA843569e3427144cEad5e4d5999a3D0cCF92B8e": {
          "balance": "1000000000000000000000000000"
        },
        "0x0fBDc686b912d7722dc86510934589E0AAf3b55A": {
          "balance": "1000000000000000000000000000"
        },
        "0x9186eb3d20Cbd1F5f992a950d808C4495153ABd5": {
          "balance": "1000000000000000000000000000"
        },
        "0x0638E1574728b6D862dd5d3A3E0942c3be47D996": {
          "balance": "1000000000000000000000000000"
        },
        "0xAE9bc6cD5145e67FbD1887A5145271fd182F0eE7": {
          "balance": "1000000000000000000000000000"
        },
        "0xCC71C7546429a13796cf1BF9228bFf213e7Ae9cc": {
          "balance": "1000000000000000000000000000"
        }
      },
      "coinbase": "0x0000000000000000000000000000000000000000",
      "config": {
        "homesteadBlock": 0,
        "byzantiumBlock": 0,
        "constantinopleBlock": 0,
        "petersburgBlock": 0,
        "istanbulBlock": 0,
        "eip150Block": 0,
        "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "eip155Block": 0,
        "eip158Block": 0,
        "maxCodeSizeConfig": [
          {
            "block": 0,
            "size": 32
          }
        ],
        "chainId": 10,
        "isQuorum": true
      },
      "difficulty": "0x0",
      "extradata": "0x0000000000000000000000000000000000000000000000000000000000000000",
      "gaslimit": "0xE0000000",
      "mixhash": "0x00000000000000000000000000000000000000647572616c65787365646c6578",
      "nonce": "0x0",
      "parenthash": "0x0000000000000000000000000000000000000000000000000000000000000000",
      "timestamp": "0x00"
    }
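
Every node initializes its data directory from this genesis before starting geth. A minimal sketch of that step, assuming the ConfigMap is mounted at /etc/quorum/genesis and $QUORUM_DATA_DIR points at the node's datadir (both paths are assumptions; the flags themselves are standard Quorum/geth):

    # One-time init of a fresh datadir from the mounted genesis file.
    PRIVATE_CONFIG=ignore geth --datadir $QUORUM_DATA_DIR init /etc/quorum/genesis/genesis-geth.json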
161 changes: 161 additions & 0 deletions 7nodes/raft-4nodes/k8s-yaml-pvc/02-quorum-shared-config.yaml
@@ -0,0 +1,161 @@
# kubectl create configmap game-config --from-file=configure-pod-container/dd1/key
apiVersion: v1
kind: ConfigMap
metadata:
  name: quorum-permissioned-config
  labels:
    app: qubernetes
    name: quorum-permissioned-config
data:
  permissioned-nodes.json: |
    [
      "enode://ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef@quorum-node1:30303?discport=0&raftport=50401",
      "enode://0ba6b9f606a43a95edc6247cdb1c1e105145817be7bcafd6b2c0ba15d58145f0dc1a194f70ba73cd6f4cdd6864edc7687f311254c7555cc32e4d45aeb1b80416@quorum-node2:30303?discport=0&raftport=50401",
      "enode://579f786d4e2830bbcc02815a27e8a9bacccc9605df4dc6f20bcc1a6eb391e7225fff7cb83e5b4ecd1f3a94d8b733803f2f66b7e871961e7b029e22c155c3a778@quorum-node3:30303?discport=0&raftport=50401",
      "enode://3d9ca5956b38557aba991e31cf510d4df641dce9cc26bfeb7de082f0c07abb6ede3a58410c8f249dabeecee4ad3979929ac4c7c496ad20b8cfdd061b7401b4f5@quorum-node4:30303?discport=0&raftport=50401"
    ]
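## Note: in each enode URL, discport=0 disables the separate discovery port and
## raftport=50401 is the raft transport; the quorum-nodeN hostnames are expected
## to resolve in-cluster. Quick check from inside a pod (a sketch -- the
## $QHOME/dd path follows the node-management script below):
##   jq length $QHOME/dd/permissioned-nodes.json   # expect 4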
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: contracts-config
  labels:
    app: qubernetes
    name: contracts-config
data:
  runscript.sh: |-
    #!/bin/ash
    PRIVATE_CONFIG=ignore geth --exec "loadScript(\"$1\")" attach --datadir $QUORUM_DATA_DIR ipc:$QUORUM_DATA_DIR/geth.ipc
    # set the tm.pub for node1 in the privateFor field.
    # supports single node deployment.

  public_contract.js: |-
    a = eth.accounts[0]
    web3.eth.defaultAccount = a;
    // abi and bytecode generated from simplestorage.sol:
    // > solcjs --bin --abi simplestorage.sol
    var abi = [{"constant":true,"inputs":[],"name":"storedData","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"x","type":"uint256"}],"name":"set","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"},{"inputs":[{"name":"initVal","type":"uint256"}],"payable":false,"type":"constructor"}];
    var bytecode = "0x6060604052341561000f57600080fd5b604051602080610149833981016040528080519060200190919050505b806000819055505b505b610104806100456000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680632a1afcd914605157806360fe47b11460775780636d4ce63c146097575b600080fd5b3415605b57600080fd5b606160bd565b6040518082815260200191505060405180910390f35b3415608157600080fd5b6095600480803590602001909190505060c3565b005b341560a157600080fd5b60a760ce565b6040518082815260200191505060405180910390f35b60005481565b806000819055505b50565b6000805490505b905600a165627a7a72305820d5851baab720bba574474de3d09dbeaabc674a15f4dd93b974908476542c23f00029";
    var simpleContract = web3.eth.contract(abi);
    var simple = simpleContract.new(42, {from:web3.eth.accounts[0], data: bytecode, gas: 0x47b760}, function(e, contract) {
      if (e) {
        console.log("err creating contract", e);
      } else {
        if (!contract.address) {
          console.log("Contract transaction send: TransactionHash: " + contract.transactionHash + " waiting to be mined...");
        } else {
          console.log("Contract mined! Address: " + contract.address);
          console.log(contract);
        }
      }
    });
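## Example usage of the script above (a sketch; the /contracts mount path is an
## assumption for this deployment):
##   kubectl exec -it $POD -c quorum -- /contracts/runscript.sh /contracts/public_contract.js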
## Additional config used to manage the node lifecycle, e.g. adding/removing nodes.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: node-management
  labels:
    app: qubernetes
    name: node-management
data:
  ## Raft management helpers: adding nodes.
  raft_add_all_permissioned.sh: |-
    #!/bin/sh
    set -x
    # Read permissioned-nodes.json (redeployed by k8s when the ConfigMap changes)
    # and add any new entry to the permissioned set.
    PERMISSION_FILE=$QHOME/dd/permissioned-nodes.json
    ENODE_URLS=$(cat $PERMISSION_FILE | jq '.[]')
    RAFT_ADD_FILE=$QHOME/node-management/raft_add_$(date +%m-%d-%Y)
    RAFT_ADD_LOG=$RAFT_ADD_FILE.log
    RAFT_ADD_ERR=$RAFT_ADD_FILE.err
    RAFT_ADD_FILE=$QHOME/node-management/raft_added.csv
    touch $RAFT_ADD_LOG
    touch $RAFT_ADD_ERR
    date +%m-%d-%Y-%T >> $RAFT_ADD_ERR
    date +%m-%d-%Y-%T >> $RAFT_ADD_LOG
    echo " Going through ENODE_URLS"
    echo " $ENODE_URLS"
    echo
    for URL in $ENODE_URLS; do
      # Skip this node's own enode URL from permissioned-nodes.json;
      # adding self would cause an error.
      if echo $URL | grep -Eq $THIS_ENODE; then
        echo "skip adding self enodeID [$THIS_ENODE]"
        continue;
      fi
      RAFTID=$(PRIVATE_CONFIG=ignore geth --exec "raft.addPeer($URL)" attach ipc:$QHOME/dd/geth.ipc)
      # If the addPeer command isn't successful, log the returned error
      # and go on to the next enode URL.
      if echo $RAFTID | grep -Eiq ERR; then
        echo "RaftID Err: [$RAFTID]" >> $RAFT_ADD_ERR
        echo $RAFTID%%$URL >> $RAFT_ADD_ERR;
        continue;
      fi
      if echo $RAFTID | grep -Eq '[0-9][0-9]*'; then
        echo $RAFTID - $URL
        echo --raftjoinexisting $RAFTID
        echo "$RAFTID%%$URL" >> $RAFT_ADD_LOG;
        # Holds all raft nodes added so far on this node.
        echo "$RAFTID,$URL" >> $RAFT_ADD_FILE;
      fi
      # Sleep after adding a node: there is a race condition where nodes
      # added too quickly can be assigned the same raft id.
      sleep 2
    done
    echo | tee -a $RAFT_ADD_ERR $RAFT_ADD_LOG
    echo ========================================= | tee -a $RAFT_ADD_ERR $RAFT_ADD_LOG
    echo | tee -a $RAFT_ADD_ERR $RAFT_ADD_LOG
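## After the script runs, cluster membership can be verified on any node, e.g.:
##   PRIVATE_CONFIG=ignore geth --exec "raft.cluster" attach ipc:$QHOME/dd/geth.ipc
## raft.cluster is the standard Quorum raft management API; expect 4 entries here.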
## Include ibft helpers as well, since we don't know which consensus each node will run.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: geth-helpers
  labels:
    app: qubernetes
    name: geth-helpers
data:
  geth-attach.sh: |-
    #!/bin/sh
    # Helper for connecting to geth from outside the container:
    # kubectl exec -it $POD -c quorum -- /geth-helpers/geth-attach.sh
    echo "connecting to geth $QHOME"
    geth attach --datadir $QUORUM_DATA_DIR $QUORUM_DATA_DIR/geth.ipc
  geth-exec.sh: |-
    #!/bin/sh
    # Helper for running a one-off geth command from outside the container:
    # kubectl exec -it $POD -c quorum -- /geth-helpers/geth-exec.sh "admin.peers.length"
    GETH_CMD="eth.blockNumber"
    if [ "$#" -gt 0 ]; then
      GETH_CMD=$1
    fi
    # --cache=16 avoids the warning
    # "WARN [02-20|00:21:04.382] Sanitizing cache to Go's GC limits provided=1024 updated=663"
    # see: https://github.com/ethereum/go-ethereum/pull/17281
    #      https://github.com/ethereum/go-ethereum/issues/16905
    geth --exec $GETH_CMD --cache=16 attach --datadir $QUORUM_DATA_DIR $QUORUM_DATA_DIR/geth.ipc
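
With all ten files applied, the four raft nodes should find each other and form a cluster. One way to sanity-check the deployment end to end (a sketch; the app=quorum-nodeN pod label is an assumption, adjust it to however the node pods are actually labeled):

    kubectl apply -f 7nodes/raft-4nodes/k8s-yaml-pvc/
    kubectl get pods -w   # wait for the quorum pods to reach Running

    # Every node should report 3 peers and a 4-member raft cluster.
    for i in 1 2 3 4; do
      POD=$(kubectl get pods -l app=quorum-node$i -o jsonpath='{.items[0].metadata.name}')
      kubectl exec -it $POD -c quorum -- /geth-helpers/geth-exec.sh "admin.peers.length"
      kubectl exec -it $POD -c quorum -- /geth-helpers/geth-exec.sh "raft.cluster.length"
    done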