
Merge pull request #2 from FederatedAI/develop-1.7.0
Develop 1.7.0
dylan-fan authored Dec 1, 2021
2 parents e7e3ba2 + f4980d9 commit 2043e2f
Showing 39 changed files with 693 additions and 311 deletions.
97 changes: 58 additions & 39 deletions deploy/deploy-fate.sh
@@ -2,7 +2,7 @@


def_check_setup() {
local cnames=( "modules" "ssl_roles" "host_ips" "host_special_routes" "guest_ips" "guest_special_routes" )
local cnames=( "modules" "host_ips" "host_special_routes" "guest_ips" "guest_special_routes" )
for cname in ${cnames[*]};
do
tvar=( $( ${workdir}/bin/yq eval '.'"${cname}"'[]' ${workdir}/conf/setup.conf ) )
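Aside: the yq form '.name[]' used above flattens a YAML sequence into one word per line, which bash then captures as an array. A minimal standalone sketch (paths and config contents hypothetical):

# setup.conf (hypothetical):
#   modules:
#     - mysql
#     - fateflow
tvar=( $( ./bin/yq eval '.modules[]' conf/setup.conf ) )
echo "found ${#tvar[*]} entries: ${tvar[*]}"   # -> found 2 entries: mysql fateflow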
@@ -62,15 +62,14 @@ def_get_base_data() {
for role in "host" "guest"; do
eval ${role}_compute_engine\=$( ${workdir}/bin/yq eval ".${role}_compute_engine" ${workdir}/conf/setup.conf )
eval ${role}_spark_home\=$( ${workdir}/bin/yq eval ".${role}_spark_home" ${workdir}/conf/setup.conf )
eval ${role}_linkis_Ips\=$( ${workdir}/bin/yq eval ".${role}_linkis_Ips" ${workdir}/conf/setup.conf )
eval ${role}_hadoop_home\=$( ${workdir}/bin/yq eval ".${role}_hadoop_home" ${workdir}/conf/setup.conf )
eval ${role}_storage_engine\=$( ${workdir}/bin/yq eval ".${role}_storage_engine" ${workdir}/conf/setup.conf )
eval ${role}_hive_ips\=$( ${workdir}/bin/yq eval ".${role}_hive_ips" ${workdir}/conf/setup.conf )
eval ${role}_hdfs_addr\=$( ${workdir}/bin/yq eval ".${role}_hdfs_addr" ${workdir}/conf/setup.conf )
eval ${role}_mq_engine\=$( ${workdir}/bin/yq eval ".${role}_mq_engine" ${workdir}/conf/setup.conf )
eval ${role}_rabbitmq_ips\=$( ${workdir}/bin/yq eval ".${role}_rabbitmq_ips" ${workdir}/conf/setup.conf )
eval ${role}_pulsar_ips\=$( ${workdir}/bin/yq eval ".${role}_pulsar_ips" ${workdir}/conf/setup.conf )
eval ${role}_nginx_ips\=$( ${workdir}/bin/yq eval ".${role}_nginx_ips" ${workdir}/conf/setup.conf )
eval ${role}_route_table\=\( $( ${workdir}/bin/yq eval ".${role}_route_table[]" ${workdir}/conf/setup.conf ) \)
done
fi

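Aside: the eval lines above synthesize per-role variable names (host_compute_engine, guest_compute_engine, ...) at runtime. A minimal sketch of the idiom with hypothetical values:

for role in host guest; do
  # defines host_compute_engine and guest_compute_engine dynamically
  eval ${role}_compute_engine\=spark
done
echo "${host_compute_engine} ${guest_compute_engine}"   # -> spark spark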
@@ -497,13 +496,14 @@ def_render_fate_init() {
local pip="pypi/${pversion}/pypi"
local version=${pversion%-*}
echo "hello-------------${fversion} ${eversion}"
myvars="deploy_mode=${deploy_mode} deploy_modules=${deploy_modules} pip=${pip} version=${version} fversion=${fversion} bversion=${bversion} eversion=${eversion} roles=${deploy_roles} ssl_roles=${ssl_roles} pname=${pname}"
myvars="deploy_mode=${deploy_mode} deploy_modules=${deploy_modules} pip=${pip} version=${version} fversion=${fversion} bversion=${bversion} eversion=${eversion} roles=${deploy_roles} ssl_roles=${ssl_roles} pname=${pname} default_engines=${default_engines}"
eval eval ${myvars} "${workdir}/bin/yq e \' "\
" .pname \|\=env\(pname\) \| "\
" .deploy_mode \|\=env\(deploy_mode\) \| "\
" .deploy_modules \|\=env\(deploy_modules\) \| "\
" .deploy_roles \|\=env\(roles\) \| "\
" .ssl_roles \|\=env\(ssl_roles\) \| "\
" .default_engines\|\=env\(default_engines\) \| "\
" .python.pip \|\=env\(pip\) \| "\
" .version \|\=env\(version\) \| "\
" .versions.eggroll \|\=env\(eversion\) \| "\
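Aside: the doubled 'eval eval' plus escaped pipes above first expands myvars into environment assignments, then runs a single yq invocation whose env() calls read those variables back. Stripped of the escaping layer, it is roughly equivalent to this sketch (values hypothetical):

pname=fate version=1.7.0 ./bin/yq e '
  .pname |= env(pname) |
  .version |= env(version)
' files/fate_init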
Expand All @@ -519,11 +519,18 @@ def_render_playbook() {
case "${deploy_mode}" in

"install"|"uninstall"|"deploy"|"config")
if [ "${deploy_mode}" == "uninstall" ]
then
cp ${workdir}/files/project-uninstall.yaml $dfile
if [ "${deploy_mode}" == "uninstall" ]; then
if [ "${default_engines}" != "spark" ]; then
cp ${workdir}/files/project-uninstall.yaml $dfile
else
cp ${workdir}/files/spark-project-uninstall.yaml $dfile
fi
else
cp ${workdir}/files/project-install.yaml $dfile
if [ "${default_engines}" != "spark" ]; then
cp ${workdir}/files/project-install.yaml $dfile
else
cp ${workdir}/files/spark-project-install.yaml $dfile
fi
fi
sed -i 's#ENV#'"${deploy_env}"'#g;s#PNAME#'"${pname}"'#g' $dfile
for role in "host" "guest" "exchange";
Expand All @@ -536,10 +543,9 @@ def_render_playbook() {
done

local i=0
all_modules=( "mysql" "eggroll" "fateflow" "fateboard" )
if [ "${deploy_mode}" == "uninstall" ]
then
all_modules=( "mysql_uninstall" "eggroll_uninstall" "fateflow_uninstall" "fateboard_uninstall" )
[ "${default_engines}" != "spark" ] && all_modules=( "mysql" "eggroll" "fateflow" "fateboard" ) || all_modules=( "mysql" "fateflow" "fateboard" )
if [ "${deploy_mode}" == "uninstall" ]; then
[ "${default_engines}" != "spark" ] && all_modules=( "mysql_uninstall" "eggroll_uninstall" "fateflow_uninstall" "fateboard_uninstall" ) || all_modules=( "mysql_uninstall" "fateflow_uninstall" "fateboard_uninstall" )
fi
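Aside: the '[ test ] && A || B' lines above are bash's inline conditional; because an array assignment cannot fail, the || branch runs exactly when the test is false, so the new module-list selection expands to:

if [ "${default_engines}" != "spark" ]; then
  all_modules=( "mysql" "eggroll" "fateflow" "fateboard" )
else
  all_modules=( "mysql" "fateflow" "fateboard" )   # spark mode drops eggroll
fi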

tmodules=()
Expand All @@ -565,14 +571,15 @@ def_render_playbook() {
fi
done
if [ ${deploy_mode} != "uninstall" ]; then
if [ "${default_engines}" != "spark" ]; then
sed -i '/role: "rabbitmq",/d' $dfile
fi
if [ $i -eq 2 -o "${deploy_mode}" != "deploy" ]
then
sed -i '/role: "python",/d' $dfile
sed -i '/role: "rabbitmq",/d' $dfile
fi
if [ "${default_engines}" == "spark" -a $i -eq 1 ]; then
sed -i '/role: "python",/d' $dfile
sed -i '/role: "rabbitmq",/d' $dfile
fi
fi
;;

@@ -607,6 +614,10 @@ def_render_setup() {
echo "error: spark no exchange"
exit 1
fi
if [ ${#ssl_roles[*]} -gt 0 ]; then
echo "error: Spark does not support ssl mode"
exit 1
fi
eval eval ${myvars} "${workdir}/bin/yq e \' "\
" .deploy_mode \|\=env\(deploy_mode\) \| "\
" .roles \|\=env\(roles\) \| "\
@@ -703,80 +714,88 @@ def_render_roles_core() {
fi

if [ "${default_engines}" == "spark" ]; then
eval local pid=\${${role}_pid}
eval local compute_engine="\${${role}_compute_engine}"
eval local mq_engine="\${${role}_mq_engine}"
eval local storage_engine="\${${role}_storage_engine}"
eval local rabbitmq_ips="\${${role}_rabbitmq_ips}"
eval local pulsar_ips="\${${role}_pulsar_ips}"
eval local spark_home="\${${role}_spark_home}"
eval local hadoop_home="\${${role}_hadoop_home}"
eval local hive_ips="\${${role}_hive_ips}"
eval local hdfs_addr="\${${role}_hdfs_addr}"
eval local nginx_ips="\${${role}_nginx_ips}"
for mq in "rabbitmq" "pulsar"; do
eval local temp=\${${role}_mq_engine}
if [ "${temp}" == "$mq" ]; then
if [ "${mq_engine}" == "$mq" ]; then
eval local ${mq}_enable=true
else
eval local ${mq}_enable=false
fi
done
for storage in "hive" "hdfs"; do
eval local temp=\${${role}_storage_engine}
if [ "${temp}" == "${storage}" ]; then
for storage in "hive" "hdfs" "localfs"; do
if [ "${storage_engine}" == "${storage}" ]; then
eval local ${storage}_enable=true
else
eval local ${storage}_enable=false
fi
done
eval local compute_engine="\${${role}_compute_engine}"
eval local rabbitmq_ips="\${${role}_rabbitmq_ips}"
eval local pulsar_ips="\${${role}_pulsar_ips}"
eval local spark_home="\${${role}_spark_home}"
eval local linkis_ips="\${${role}_linkis_Ips}"
eval local hive_ips="\${${role}_hive_ips}"
eval local hdfs_addr="\${${role}_hdfs_addr}"
eval local nginx_ips="\${${role}_nginx_ips}"
[ "${compute_engine}" == "spark" -o "${compute_engine}" == "linkis" ] && local spark_enable=true || local spark_enable=false
[ -n "${linkis_ips}" ] && local linkis_spark_enable=true || local linkis_spark_enable=false
[ "${compute_engine}" == "spark" ] && local spark_enable=true
[ -n "${nginx_ips}" ] && local nginx_enable=true || local nginx_enable=false
eval local pid=\${${role}_pid}
local ${role}_rabbitmq_route\="[{\"id\":${pid},\"routes\":[{\"ip\":\"${rabbitmq_ips}\",\"port\":5672}]}]"
if [ ${#base_roles[*]} -eq 1 ]; then
[ "x" != "x${rabbitmq_ips}" ] && local ${role}_rabbitmq_route\="[{\"id\":${pid},\"routes\":[{\"ip\":\"${rabbitmq_ips}\",\"port\":5672}]}]" || local ${role}_rabbitmq_route\="[]"
[ "x" != "x${pulsar_ips}" ] && local ${role}_pulsar_route\="[{\"id\":${pid},\"routes\":[{\"ip\":\"${pulsar_ips}\",\"port\":6650,\"sslPort\":6651,\"proxy\":\"\"}]}]" || local ${role}_pulsar_route\="[]"
else
[ "x" != "x${rabbitmq_ips}" ] && local ${role}_rabbitmq_route\="[{\"id\":${host_pid},\"routes\":[{\"ip\":\"${host_rabbitmq_ips}\",\"port\":5672}]},{\"id\":${guest_pid},\"routes\":[{\"ip\":\"${guest_rabbitmq_ips}\",\"port\":5672}]}]" || local ${role}_rabbitmq_route\="[]"
[ "x" != "x${pulsar_ips}" ] && local ${role}_pulsar_route\="[{\"id\":${host_pid},\"routes\":[{\"ip\":\"${host_pulsar_ips}\",\"port\":6650,\"sslPort\":6651,\"proxy\":\"\"}]},{\"id\":${guest_pid},\"routes\":[{\"ip\":\"${guest_pulsar_ips}\",\"port\":6650,\"sslPort\":6651,\"proxy\":\"\"}]}]" || local ${role}_pulsar_route\="[]"
fi
eval local rabbitmq_routes=\'$( echo \${${role}_rabbitmq_route} | tr -s '"' '\"' | tr -s '[' '\[' | tr -s ']' '\]' )\'
local ${role}_pulsar_route\="[{\"id\":${pid},\"routes\":[{\"ip\":\"${pulsar_ips}\",\"port\":6650,\"sslPort\":6651,\"proxy\":\"\"}]}]"
eval local pulsar_routes=\'$( echo \${${role}_pulsar_route} | tr -s '"' '\"' | tr -s '[' '\[' | tr -s ']' '\]' )\'
eval echo "${role}_rabbitmq_route: \${${role}_rabbitmq_route}"
eval echo "${role}_pulsar_route: \${${role}_pulsar_route}"
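Aside: the escaped strings above build one-element JSON route tables per party. With hypothetical values, the single-party case renders as:

pid=9999; rabbitmq_ips=192.0.2.10   # hypothetical party id and broker IP
echo "[{\"id\":${pid},\"routes\":[{\"ip\":\"${rabbitmq_ips}\",\"port\":5672}]}]"
# -> [{"id":9999,"routes":[{"ip":"192.0.2.10","port":5672}]}]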
myvars="${myvars} \
pid\=${pid} \
mysql_enable\=\${mysql_enable} \
mysql_ips\=${mysql_ips} \
fateboard_enable\=\${fateboard_enable} \
fateboard_ips\=${fateboard_ips} \
fate_flow_enable\=\${fate_flow_enable} \
fate_flow_ips\=${fate_flow_ips} \
mq_engine\=${mq_engine} \
storage_engine\=${storage_engine} \
spark_enable\=${spark_enable} \
linkis_spark_enable\=${linkis_spark_enable} \
hadoop_home\=${hadoop_home} \
hive_enable\=${hive_enable} \
hdfs_enable\=${hdfs_enable} \
nginx_enable\=${nginx_enable} \
rabbitmq_enable\=${rabbitmq_enable} \
pulsar_enable\=${pulsar_enable} \
rabbitmq_ips\=${rabbitmq_ips:-127.0.0.1} \
pulsar_ips\=${pulsar_ips:-127.0.0.1} \
linkis_ips\=${linkis_ips:-127.0.0.1} \
hive_ips\=${hive_ips:-127.0.0.1} \
hdfs_addr\=${hdfs_addr} \
nginx_ips\=${nginx_ips:-127.0.0.1} \
rabbitmq_routes\=\${rabbitmq_routes} \
pulsar_routes\=\${pulsar_routes} \
spark_home\=${spark_home} "
eval eval ${myvars} "${workdir}/bin/yq e \' "\
" .${role}.partyid\=env\(pid\) \| "\
" .${role}.mysql.enable\=env\(mysql_enable\) \| "\
" .${role}.mysql.ips\|\=env\(mysql_ips\) \| "\
" .${role}.fateboard.enable\=env\(fateboard_enable\) \| "\
" .${role}.fateboard.ips\|\=env\(fateboard_ips\) \| "\
" .${role}.fate_flow.enable\=env\(fate_flow_enable\) \| "\
" .${role}.fate_flow.ips\|\=env\(fate_flow_ips\) \| "\
" .${role}.fate_flow.federation\|\=env\(mq_engine\) \|" \
" .${role}.fate_flow.storage\|\=env\(storage_engine\) \|" \
" .${role}.spark.enable\|\=env\(spark_enable\) \|" \
" .${role}.linkis_spark.enable\|\=env\(linkis_spark_enable\) \|" \
" .${role}.spark.hadoop_home\|\=strenv\(hadoop_home\) \|" \
" .${role}.hive.enable\|\=env\(hive_enable\) \|" \
" .${role}.hdfs.enable\|\=env\(hdfs_enable\) \|" \
" .${role}.nginx.enable\|\=env\(nginx_enable\) \|" \
" .${role}.rabbitmq.enable\|\=env\(rabbitmq_enable\) \|" \
" .${role}.pulsar.enable\|\=env\(pulsar_enable\) \|" \
" .${role}.rabbitmq.host\|\=env\(rabbitmq_ips\) \|" \
" .${role}.pulsar.host\|\=env\(pulsar_ips\) \|" \
" .${role}.linkis_spark.host\|\=env\(linkis_ips\) \|" \
" .${role}.hive.host\|\=env\(hive_ips\) \|" \
" .${role}.hdfs.name_node\|\=strenv\(hdfs_addr\) \|" \
" .${role}.nginx.host\|\=env\(nginx_ips\) \|" \
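Aside: in yq v4, env(VAR) parses the variable's value as YAML (so "true" becomes a boolean), while strenv(VAR) always injects a plain string; that is presumably why paths such as hadoop_home and hdfs_addr go through strenv above. A one-line sketch with a hypothetical value:

hadoop_home=/opt/hadoop ./bin/yq -n '.spark.hadoop_home = strenv(hadoop_home)'
# -> spark:
#      hadoop_home: /opt/hadoop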
Expand All @@ -803,7 +822,7 @@ def_render_roles_core() {
special_routes\=\${special_routes} \
default_routes\=\${default_routes} \
self_routes\=\${self_routes} \
rollsite_id\=\${${role}_pid} \
pid\=\${${role}_pid} \
rollsite_ips\=${rollsite_ips} "
eval eval ${myvars} "${workdir}/bin/yq e \' "\
" .${role}.rollsite.server_secure\|\=env\(server_secure\) \| "\
Expand All @@ -823,7 +842,7 @@ def_render_roles_core() {
" .${role}.rollsite.route_tables\|\=env\(special_routes\) \| "\
" .${role}.rollsite.route_tables \+\=env\(default_routes\) \| "\
" .${role}.rollsite.route_tables \+\=env\(self_routes\) \| "\
" .${role}.rollsite.partyid\|\=env\(rollsite_id\) \| " \
" .${role}.partyid\|\=env\(pid\) \| " \
" .${role}.rollsite.ips\|\=env\(rollsite_ips\) " \
" \' ${workdir}/files/fate_${role} -I 2 -P " > ${base}/${pname:-fate}_${role}
fi
2 changes: 1 addition & 1 deletion deploy/deploy.sh
@@ -42,7 +42,7 @@ case $pname in
;;

*)
echo "Usage: $0 [fate|fate-serving]"
echo "Error: not have deploy/deploy-${pname}.sh file"
;;

esac
2 changes: 1 addition & 1 deletion deploy/files/fate_guest
@@ -1,7 +1,7 @@
guest:
partyid: 9999
rollsite:
enable: false
partyid: 9999
coordinator: fate
ips: []
port: 9370
2 changes: 1 addition & 1 deletion deploy/files/fate_host
@@ -1,7 +1,7 @@
host:
partyid: 10000
rollsite:
enable: false
partyid: 10000
coordinator: fate
ips: []
port: 9370
1 change: 1 addition & 0 deletions deploy/files/fate_init
@@ -3,6 +3,7 @@ pname: fate
deploy_mode: deploy
deploy_modules: []
deploy_roles: []
default_engines: eggroll

version: 1.7.0
versions: {}
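Aside: with default_engines defaulting to eggroll here, a spark deployment has to override the key before rendering; a sketch using the bundled yq (paths hypothetical, run from the repo root):

deploy/bin/yq e '.default_engines = "spark"' -i deploy/files/fate_init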
14 changes: 5 additions & 9 deletions deploy/files/fate_spark_guest
@@ -1,15 +1,18 @@
guest:
partyid: 9999
fate_flow:
enable: false
ips: []
grpcPort: 9360
httpPort: 9380
dbname: "fate_flow"
proxy: rollsite
proxy: fateflow
http_app_key:
http_secret_key:
use_deserialize_safe_module: false
default_engines: spark
federation: rabbitmq
storage: hdfs
fateboard:
enable: false
ips: []
@@ -37,16 +40,9 @@ guest:
spark:
enable: False
home:
hadoop_home:
cores_per_node: 20
nodes: 2
linkis_spark:
enable: False
cores_per_node: 20
nodes: 2
host: 127.0.0.1
port: 9001
token_code: MLSS
python_path: /data/projects/fate/python
hive:
enable: False
host: 127.0.0.1
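Aside: the new federation and storage keys under fate_flow choose the MQ and storage backends that deploy-fate.sh later rewrites through .guest.fate_flow.federation and .guest.fate_flow.storage. A sketch of switching both (a hypothetical pulsar-plus-hive choice, paths hypothetical):

deploy/bin/yq e '.guest.fate_flow.federation = "pulsar" | .guest.fate_flow.storage = "hive"' -i deploy/files/fate_spark_guest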
14 changes: 5 additions & 9 deletions deploy/files/fate_spark_host
@@ -1,15 +1,18 @@
host:
partyid: 10000
fate_flow:
enable: false
ips: []
grpcPort: 9360
httpPort: 9380
dbname: "fate_flow"
proxy: rollsite
proxy: fateflow
http_app_key:
http_secret_key:
use_deserialize_safe_module: false
default_engines: spark
federation: rabbitmq
storage: hdfs
fateboard:
enable: false
ips: []
@@ -37,16 +40,9 @@ host:
spark:
enable: False
home:
hadoop_home:
cores_per_node: 20
nodes: 2
linkis_spark:
enable: False
cores_per_node: 20
nodes: 2
host: 127.0.0.1
port: 9001
token_code: MLSS
python_path: /data/projects/fate/python
hive:
enable: False
host: 127.0.0.1
1 change: 0 additions & 1 deletion deploy/files/project-install.yaml
@@ -16,7 +16,6 @@
- { role: "mysql", when: "( 'host' in deploy_roles and ansible_ssh_host in host['mysql']['ips'] and host['mysql']['enable'] == True and host['mysql']['type'] == 'inside' and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['mysql']['ips'] and guest['mysql']['enable'] == True and guest['mysql']['type'] == 'inside' and deploy_mode in [ 'deploy', 'install', 'config' ] )" }
- { role: "python", when: "( 'host' in deploy_roles and ansible_ssh_host in host['fate_flow']['ips'] and host['fate_flow']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'host' in deploy_roles and ansible_ssh_host in host['nodemanager']['ips'] and host['nodemanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['fate_flow']['ips'] and guest['fate_flow']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['nodemanager']['ips'] and guest['nodemanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] )" }
- { role: "eggroll", when: "( ( 'exchange' in deploy_roles and ansible_ssh_host in exchange['rollsite']['ips'] and exchange['rollsite']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'host' in deploy_roles and ansible_ssh_host in host['rollsite']['ips'] and host['rollsite']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'host' in deploy_roles and ansible_ssh_host in host['clustermanager']['ips'] and host['clustermanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'host' in deploy_roles and ansible_ssh_host in host['nodemanager']['ips'] and host['nodemanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'host' in deploy_roles and ansible_ssh_host in host['fate_flow']['ips'] and host['fate_flow']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['rollsite']['ips'] and guest['rollsite']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['clustermanager']['ips'] and guest['clustermanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['nodemanager']['ips'] and guest['nodemanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['fate_flow']['ips'] and guest['fate_flow']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) )" }
- { role: "rabbitmq", when: "( 'host' in deploy_roles and ansible_ssh_host == host['rabbitmq']['host'] and host['rabbitmq']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host == guest['rabbitmq']['host'] and guest['rabbitmq']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] )" }
- { role: "fateflow", when: "( 'host' in deploy_roles and ansible_ssh_host in host['fate_flow']['ips'] and host['fate_flow']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'host' in deploy_roles and ansible_ssh_host in host['nodemanager']['ips'] and host['nodemanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['fate_flow']['ips'] and guest['fate_flow']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['nodemanager']['ips'] and guest['nodemanager']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] )" }
- { role: "fateboard", when: "( 'host' in deploy_roles and ansible_ssh_host in host['fateboard']['ips'] and host['fateboard']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] ) or ( 'guest' in deploy_roles and ansible_ssh_host in guest['fateboard']['ips'] and guest['fateboard']['enable'] == True and deploy_mode in [ 'deploy', 'install', 'config' ] )" }

