comm shell command

时间:2023-03-10 07:57:23
comm shell command

常用Shell命令

# Resolve the directory containing this script (original lost the "0" in $0)
# and cd into it so relative paths work regardless of invocation directory.
BASEDIR=$(cd "$(dirname "$0")" && pwd)
cd "$BASEDIR" >/dev/null || exit 1

选取两个文件中相同的部分:(Must sort first)

comm - <(sort /tmp/gcs.calix.com-wifi-pm-per-radio_50|uniq) <(sort /tmp/gcs.calix.com-wifi-pm-per-radio_dupcheck_50|uniq)

CURL:

curl -H "Accept: application/json" -H "Content-type: application/json" -X POST -d '{"id":100}' http://localhost/test/adduser

变量处理:

# Indirect variable lookup: resolve the variables named "psql_<env>_pass"
# and "psql_<env>" at runtime.  ${!name} replaces the original eval-based
# expansion — same result, but no code injection risk if $env ever contains
# shell metacharacters.
pass_var="psql_${env}_pass"
pass=${!pass_var}
cmd_var="psql_${env}"
cmd=${!cmd_var}

数值处理:

# Treat $org as numeric only when it is non-empty and [ -eq ] accepts it.
# The comparison's error message for non-numeric values goes to STDERR, so
# the redirect must be 2>/dev/null (the original redirected stdout only).
if [ -n "$org" ] && [ "$org" -eq "$org" ] 2>/dev/null; then
  echo "$org need process"
  grep -- "$org" result.txt > "_result_${org}"
fi
# Numeric increment: arithmetic expansion instead of the external expr.
step=$((step + 1))

输入参数:

# Command-line parsing.  NOTE(review): the paste stripped the digits after
# "$" and "="; "$1" / flag values / exit codes restored below — confirm the
# flag values against env.sh.  Because the loop shifts once per iteration,
# a bare "-f -p" (or "-p -f") would swallow the second flag, so each branch
# honors the adjacent flag explicitly before it is consumed.
usage="Usage: $0 -o/--org orgId[Must] -p/--prepare t[Option] -f/--force t[Option] -d/--dry t[Option] -k/--kafka t[Option]"
while [ "$1" != "" ]
do
  case $1 in
    -o|--org) shift
      orgId=$1
      ;;
    -f|--force) shift
      if [ "$1" = "-p" ];then
        prepare=1
      fi
      force=1
      ;;
    -p|--prepare) shift
      if [ "$1" = "-f" ];then
        force=1
      fi
      prepare=1
      ;;
    -d|--dry) shift
      dry=1
      ;;
    -k|--kafka) shift
      kafka_flg=$1
      ;;
    *) echo "$usage"
      exit 1
      ;;
  esac
  shift
done
if [ -z "$orgId" ];then
  echo -e "$RED[Error] Missing orgId!$NC\r\n$usage"
  exit 1
fi

确认函数:

#######################################
# Run one migration step. $1 names a function that populates the global $cmd;
# the command is echoed (dry run) or executed, output tee'd to the logs.
# NOTE(review): stripped "$1" / "-eq" operands reconstructed; the original
# also logged to both $logs and $log — unified on $log here. Confirm.
# Globals: step (incremented), force (1 = skip confirmation prompt),
#          dry (1 = print only), log / logsdir (log destinations).
# Arguments: $1 - name of the step function to run.
#######################################
check_and_commit()
{
  cmd=""
  step=$((step + 1))
  echo "" | tee -a "$log"
  echo -e "$BLUE[$(date +'%Y-%m-%d %H:%M:%S')][Step $step] exec $1 $NC" | tee -a "$log"
  if [ "$force" -eq 0 ];then
    while true; do
      read -p "Do you confirm to execute step $1? [y/n]" yn
      case $yn in
        [Yy]* )
          # Invoke the step function; it sets $cmd as a side effect.
          $1
          if [ "$dry" -eq 1 ];then
            echo -e "$GREEN [Dry Run]$NC $cmd" | tee -a "$log"
          else
            echo "$cmd" | tee -a "$log"
            $cmd | tee "$logsdir/$1.log"
          fi
          break;;
        [Nn]* ) echo "ignore step $1" | tee -a "$log" ;break;;
      esac
    done
  else
    $1
    if [ "$dry" -eq 1 ];then
      echo -e "$GREEN [Dry Run]$NC $cmd" | tee -a "$log"
    else
      echo "$cmd" | tee -a "$log"
      $cmd | tee -a "$logsdir/$1.log"
    fi
  fi
}
#######################################
# Show the pre-flight checklist and, unless forced, ask for confirmation.
# NOTE(review): stripped "-eq" operand / exit code reconstructed ("-eq 0",
# "exit 1"); "subscrbier" typo in item 5 fixed — confirm.
# Globals: force (1 = skip the prompt), orgId (echoed on confirmation).
#######################################
prepare_message_confirm()
{
  echo "Please make sure next items be done"
  echo -e "${RED} 1.env.sh use correct environment information ${NC}"
  echo -e "${RED} 2.all gcs vm had added the onecloud replay URL and restarted${NC}"
  echo -e "${RED} 3.make sure this vm can connect to brown field mongo/redshift/CMC gateway ${NC}"
  echo -e "${RED} 4.had startup cloud-subscriber with correct version and expose port 3424 ${NC}"
  echo -e "${RED} 5.brown field subscriber-sync pod had patched ${NC}"
  if [ "$force" -eq 0 ];then
    while true; do
      read -p "Do you confirm ? [y/n]" yn
      case $yn in
        [Yy]* ) echo "will continue to execute for org :$orgId";break;;
        [Nn]* ) exit 1 ;break;;
      esac
    done
  fi
}

时间函数:

# Show the current time in a specific timezone:
TZ=:Asia/Hong_Kong date +'%Y-%m-%d %H:%M:%S'
# Convert an epoch-seconds (long) value to a date string:
date_str=$(date -d @$time_long +'%Y-%m-%d')

# Compute an epoch-second [start, end] window; start defaults to 7 days ago.
# (Expansions quoted — the original [ -z $start_date ] breaks on spaces.)

start_date=$1
if [ -z "$start_date" ];then
  start_date=$(date --date="-7 day" +%Y-%m-%d)
fi
#end_date=$(date -d "$start_date 1 day" +%Y-%m-%d)
#end_date=$(date --date="-1 day" +%Y-%m-%d)
end_date=$(date +%Y-%m-%d)
start_time=$(date -d "$start_date" +%s)
end_time=$(date -d "$end_date" +%s)

字符处理:

# Strip the ".csv" suffix from $f.
fnocsv=${f%%.csv}

# First underscore-separated field (device id).
cdevice=`echo $fnocsv|awk -F '_' '{print $1}'`

# Second underscore-separated field (epoch timestamp).
time_long=`echo $fnocsv|awk -F '_' '{print $2}'`

date_str=$(date -d @$time_long +'%Y-%m-%d')

# Drop everything in $orgs up to and including the first match of $orgId.
# NOTE(review): the Chinese comment said "truncate the trailing orgId", but
# "#*" strips a leading prefix — confirm the intended direction.

left="${orgs#*$orgId}"

统计网络连接状态:

#netstat -n | awk '/^tcp/ {++state[$NF]} END {for(key in state) print key,"\t",state[key]}'

  ESTABLISHED 21
  FIN_WAIT1 3
  SYN_SENT 1

#ss -s

  Total: 181 (kernel 0)
  TCP: 19 (estab 13, closed 0, orphaned 1, synrecv 0, timewait 0/0), ports 0

  Transport Total IP IPv6
  * 0 - -
  RAW 0 0 0
  UDP 6 6 0
  TCP 19 17 2
  INET 25 23 2
  FRAG 0 0 0

数组:

IFS=',' read -ra org_array <<< "$orgs"
for orgId in "${org_array[@]}"
do

While 循环

# For each pending _result_* file: wait until no mover script is running,
# then rename it to run_* and launch the mover in the background.
# NOTE(review): the stripped "-gt"/"-eq" operands restored as 0 (run one
# mover at a time) — confirm against the original throttle limit.
for s in _result_*
do
  # Glob instead of `ls` (handles no-match and odd filenames safely).
  [ -e "$s" ] || continue
  while [ "$(ps -ef | grep pm_missing_data_move.py | grep -v grep | wc -l)" -gt 0 ]
  do
    sleep 1s
  done
  if [ -f "$s" ];then
    if [ "$(ps -ef | grep -v grep | grep "$s" | wc -l)" -eq 0 ];then
      mv "$s" "run_$s"
      # NOTE(review): plaintext password on the command line is visible in
      # `ps` output — prefer an env var or a --password-stdin mechanism.
      nohup python pm_missing_data_move.py -e tony.ben@calix.com --password FA200417# --filename "run_$s" && mv "run_$s" "done_${s}" &
    fi
  fi
done

sed函数

# Plain in-place substitution.
sed -i "s/ENV/$env/g" $sql
# Print a line range (lines 1 through 3 only).
# (The original used a C-style "//" comment on the command line, which the
# shell would have passed to sed as a filename.)
sed -n '1,3p' xxx.txt
# Delete lines matching a pattern, in place.
sed -i '/xx/d' xxx.txt

# Build a mongo query for one org.  (Renamed $orgid -> $orgId to match the
# variable used everywhere else in this file.)
query="'{\"orgId\" : \"$orgId\"}'"
echo "$query"
mongoexport -h 199.71.143.62 -d sxa -c sxa-subscribers -f customId,_id -q '{"orgId" : "145079"}' --csv -o customId.csv

# Turn the exported CSV into UPDATE statements (see source/result samples
# below): drop the header, then rewrite the quoted fields into SQL.
sed -i '/customId/d' customId.csv
sed -i "s/^\"/update cloud_subscribers set subscriber_location_id='/" customId.csv
sed -i "s/\",\"/' where subscriber_id ='/" customId.csv
sed -i "s/\"$/' and org_id = '145079';/" customId.csv

source:

customId,_id
"005960","e14f6837-a66b-46a1-84c8-82b1c7e53fa9"
"006280","ce3f714b-c335-46ed-8481-5b5c15eaf5a3"

result:

update cloud_subscribers set subscriber_location_id='005960' where subscriber_id ='e14f6837-a66b-46a1-84c8-82b1c7e53fa9' and org_id = '145079';
update cloud_subscribers set subscriber_location_id='006280' where subscriber_id ='ce3f714b-c335-46ed-8481-5b5c15eaf5a3' and org_id = '145079';

# Strip ANSI color escape sequences from the log file (in place).

sed -i 's/\x1b\[[0-9;]*m//g' $log_file

postgres command:

#!/bin/bash
# Schema-only dump (-s) of the "cloud" database, skipping backup/scratch
# tables.
# NOTE(review): hardcoded password — prefer ~/.pgpass or PGPASSFILE.
# NOTE(review): cloud_subscriber_devices_0227 is listed twice in the
# exclusion list — harmless but probably unintended.
export PGPASSWORD='xxx'
pg_dump -h localhost -d cloud -s --exclude-table=cloud_subscriber_devices_0227,calixcalldisposition_backup,cloud_subscriber_devices_0227,cloud_subscribers_0227,david_billing,dv2,sxacc_devices_backup,sxaimsubscriber_next_endpoint_id_bak,sxaimsubscriber_next_endpoint_id_old,tblsizestats,csc_site_scan_results_* -U calixcloud -f schma.sql
#pg_dump -h localhost -d cloud -s -F c \
#--exclude-table cloud_subscriber_devices_0227 calixcalldisposition_backup cloud_subscriber_devices_0227 cloud_subscribers_0227 david_billing dv2 sxacc_devices_backup sxaimsubscriber_next_endpoint_id_bak sxaimsubscriber_next_endpoint_id_old tblsizestats csc_site_scan_results_* \
#-U calixcloud -f schma.sql

打印颜色

# ANSI color escapes.  SGR code 32 is green and 34 is blue — the original
# had the BLUE/GREEN values swapped.  The example echo that was fused onto
# the NC line is split out below.
RED='\033[0;31m'
BLUE='\033[1;34m'
GREEN='\033[1;32m'
NC='\033[0m' # No Color
# Example: echo -e "$RED[Error] Missing orgId!$NC\r\n$usage"

mongo

#######################################
# Compare per-org document counts between the mongo collections and their
# postgres tables; exits non-zero on any mismatch.
# NOTE(review): the paste stripped the grep pattern, the flg values and the
# -eq operand; reconstructed as grep ':' / flg 0-1 / exit 1 — confirm.
# Globals: source_sxa_mongo, orgId, onecloud_postgres_* (read),
#          flg (written), PGPASSWORD (exported).
#######################################
compare_mongo_postgres()
{
  # Resolve the replica-set primary ("host:port") and keep the host part.
  source_sxa_mongo=$(mongo $source_sxa_mongo --eval "db.isMaster()['primary']" | grep ':' | awk -F ':' '{print $1}')
  export PGPASSWORD=$onecloud_postgres_password
  flg=0
  for c in sxacc-devices sxacc-provisioning-records
  do
    # awk 'END {print}' keeps only the last line of the mongo shell output.
    mnum=$(mongo $source_sxa_mongo/sxa --eval "db.getCollection('$c').find({'orgId':'$orgId'}).count()" | awk 'END {print}')
    # Regenerate the count query from its template for this collection/org.
    rm -rf check_collection_num.sql
    cp check_collection_num.sql.tmp check_collection_num.sql
    sed -i "s/TABLE/$c/g" check_collection_num.sql
    sed -i "s/ORGID/$orgId/g" check_collection_num.sql
    # Line 3 of psql's default output holds the count value.
    pnum=$(psql -h $onecloud_postgres_host -d $onecloud_postgres_db -U $onecloud_postgres_username -f check_collection_num.sql | sed -n '3p')
    echo "$c mongo: $mnum postgres: $pnum"
    if [ "$mnum" -ne "$pnum" ];then
      flg=1
      echo -e "$RED[Error] c: $c org: $orgId not match, Mongo: $mnum Post: $pnum $NC\r\n"
    fi
  done
  if [ "$flg" -eq 1 ];then
    exit 1
  fi
}
# Pick the replica-set master: resolve the primary ("host:port"), keep host.
# NOTE(review): the original had an empty grep pattern; ':' restored to match
# the sibling compare_mongo_postgres() helper — confirm.
target_sxa_mongo=$(mongo $target_sxa_mongo --eval "db.isMaster()['primary']" | grep ':' | awk -F ':' '{print $1}')
# Export from the source replica set ...
mongoexport --host $source_sxa_mongo --port $source_sxa_mongo_port -d $source_sxa_mongo_db
# ... and import into the target.
mongoimport --host $target_sxa_mongo --port $target_sxa_mongo_port -d $target_sxa_mongo_db

定制登录欢迎消息

# Install a custom login banner under /etc/update-motd.d/.
# Fixes vs original: the output filename now matches the chmod target below
# (the original wrote to "-custom" but chmod'ed 100-custom), and the heredoc
# delimiter is quoted so the awk fields $1/$2/$7 are NOT expanded by the
# outer shell while writing the file.
cd /etc/update-motd.d/
cat <<'EOF' > 100-custom
#!/bin/sh
echo ""
echo "\033[1;32m----------------------------------------------------\033[0m"
echo "\033[0;31mTony.Ben's ACS Develop Server.\nAny questions please connect with tony.ben@calix.com\033[0m"
echo "\033[1;32m----------------------------------------------------\033[0m"
echo "\033[1;32m+++++++++++++++++++++++++++++++++++++++++++++++++++++\033[0m"
echo "\033[0;31mLast Login Info\033[0m"
last
echo "\033[1;32m+++++++++++++++++++++++++++++++++++++++++++++++++++++\033[0m"
echo ""
echo "\033[1;32m-----------------------------------------------------\033[0m"
ifconfig -a | awk 'BEGIN {FS="\n"; RS=""} {print $1,$2}' | grep -v 'lo' | awk '{print "\t\t"$1,$7}'
echo "\033[1;32m------------------------------------------------------\033[0m"
EOF
chmod 751 100-custom

EOF

EOF是END Of File的缩写,表示自定义终止符.既然自定义,那么EOF就不是固定的,可以随意设置别名,在linux按ctrl-d就代表EOF.

EOF一般会配合cat能够多行文本输出.

通过cat配合重定向能够生成文件并追加操作,在它之前先熟悉几个特殊符号:
< :输入重定向
> :输出重定向
>> :输出重定向,进行追加,不会覆盖之前内容
<< :标准输入来自命令行的一对分隔号的中间内容. 其用法如下:
<<EOF //开始
....
EOF //结束 还可以自定义,比如自定义:
<<BBB //开始
....
BBB //结束 还可以如下向文件test.sh里输入内容:
[root@slave-server opt]# cat << EOF >test.sh
>
>
> asdfasdfs
> EOF [root@slave-server opt]# cat test.sh asdfasdfs 追加内容
[root@slave-server opt]# cat << EOF >>test.sh
>
>
> EOF
[root@slave-server opt]# cat test.sh asdfasdfs 覆盖
[root@slave-server opt]# cat << EOF >test.sh
>
> EOF
[root@slave-server opt]# cat test.sh ————————————————
版权声明:本文为****博主「jaryle」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.****.net/jaryle/java/article/details/77880500

Rancher/K8S 相关

全量清除Rancher/K8S 节点

# Stop and remove every container, then prune volumes and images.
docker stop $(docker ps -aq)
docker system prune -f
docker volume rm $(docker volume ls -q)
docker image rm $(docker image ls -q)
# Wipe the Rancher/Kubernetes state directories left behind by RKE.
rm -rf /etc/ceph \
/etc/cni \
/etc/kubernetes \
/opt/cni \
/opt/rke \
/run/secrets/kubernetes.io \
/run/calico \
/run/flannel \
/var/lib/calico \
/var/lib/etcd \
/var/lib/cni \
/var/lib/kubelet \
/var/lib/rancher/rke/log \
/var/log/containers \
/var/log/pods \
/var/run/calico

获取Rancher dockerid

docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }'

启动Rancher

sudo docker run -d --name rancher --restart=unless-stopped -p : -p : rancher/rancher:latest

获取dockerId

docker ps -aq

1.awk command

1.1 Purpose 1: want to distinct and then count and sort by num

1.1.1 Command: awk '{print $1}' result.txt | sort | uniq -c | sort -k 1n   (note: uniq -c only collapses adjacent duplicate lines, so the input must be sorted first)

Sort parameters:

 -k: sort by key (in this case column, pairs with -t)

  -n: sort as a number

  -r: reverse order

  (optional) -t: in case you want to change the key separator (default: space)

Uniq parameter:

  -w: choose the first N characters

Explanation:

  In your problem, we need to first sort the first column and then the second one. So there is a -k 1,1 followed by -k 2,2. But, the second key (ONLY) must be sorted as a number and in the reverse order. Thus, it should be -k 2nr,2.

Note that if the -n or -r sort parameters are outside -k parameter, they are applied to the whole input instead of specific keys.

  Lastly, we must find the unique lines, matching only the first 4 chars. Thus, uniq -w 4

2. Ubuntu 安装ping curl

# Install ping and curl on a minimal Ubuntu image (refresh indexes first).
apt-get update
apt-get install iputils-ping -y
apt-get install curl -y