# makecache downloads the repositories' package metadata and caches it locally; later installs and searches hit the cache, which speeds things up.
yum makecache
# search package info without going online
yum -C search git
# clean the cache
yum clean all
# add the Extra Packages for Enterprise Linux repo; after installing you can see the epel repo info in /etc/yum.repos.d/
yum install -y epel-release
# the following takes ansible as an example
yum install ansible      # install
yum reinstall ansible    # reinstall
yum upgrade ansible      # upgrade
yum info ansible         # show package info
yum remove ansible       # remove
yum update               # upgrade all packages, including software and the kernel (use with caution)
yum upgrade              # upgrade all packages, but not software and the kernel
yum list ansible         # check whether it is installed
yum list all             # list all packages
yum list installed       # list installed packages
yum list available       # list installable packages
yum search ansible       # search package info
yum whatprovides rm      # find which package in the yum repos provides rm
yum check-update         # list available updates
rpm -ql ansible | more   # show where ansible is installed

# Switch mirrors
## back up the old config
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
## download the new config file
### CentOS 6
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-6.repo
### CentOS 7
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
### CentOS 8
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-8.repo
## rebuild the cache
yum makecache
# edit the inventory of machines to manage
vim /etc/ansible/hosts

[webservers]
192.168.1.100
192.168.1.101
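A quick way to check the inventory works (assuming SSH access to the hosts is already set up) is an ad-hoc ping:

# ping every host in the webservers group
ansible webservers -m ping
# run an arbitrary command on the group
ansible webservers -m command -a 'uptime'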
# put this in .bashrc to colorize man pages
function man() {
    env \
        LESS_TERMCAP_mb=$(printf "\e[1;31m") \
        LESS_TERMCAP_md=$(printf "\e[1;31m") \
        LESS_TERMCAP_me=$(printf "\e[0m") \
        LESS_TERMCAP_se=$(printf "\e[0m") \
        LESS_TERMCAP_so=$(printf "\e[1;44;33m") \
        LESS_TERMCAP_ue=$(printf "\e[0m") \
        LESS_TERMCAP_us=$(printf "\e[1;32m") \
        man "$@"
}
# install zsh and git
yum install -y zsh git
# set zsh as the default shell
chsh -s /bin/zsh
# install oh-my-zsh
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# copy the config template
cp ~/.oh-my-zsh/templates/zshrc.zsh-template ~/.zshrc

# manual install of zsh: http://zsh.sourceforge.net/Arc/source.html
yum -y install gcc perl-ExtUtils-MakeMaker ncurses-devel
# build and install
tar xvf zsh-5.8.tar.xz
cd zsh-5.8
./configure
make && make install
# add zsh to /etc/shells
vim /etc/shells  # append: /usr/local/bin/zsh
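After registering the shell you can switch to it (assuming the build installed to /usr/local/bin, the default prefix):

chsh -s /usr/local/bin/zsh
zsh --version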
sudo yum install -y https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo.x86_64.rpm
sudo yum install -y git
# download the source code
git clone https://github.com/neovim/neovim.git
cd neovim
# install cmake and dependencies
sudo yum install -y cmake gcc-c++ libtool unzip
# compile with cmake
make CMAKE_BUILD_TYPE=Release
# install
sudo make install
# fix error: Failed to load python3 host
pip3 install --upgrade --force-reinstall neovim
dnf copr enable -y konimex/neofetch
dnf install -y neofetch
# download the source code
git clone https://github.com/creaktive/rainbarf.git
cd rainbarf
# install dependencies
yum install -y perl-Module-Build perl-Test-Simple
# build and install
perl Build.PL
./Build test
./Build install
Manage node using nvm
# installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.37.2/install.sh | bash
# set the path: put the following into ~/.bashrc
export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
# install the latest node
nvm install node
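Once nvm is loaded, day-to-day version management looks like this (the version numbers here are just examples):

nvm ls                # list installed versions
nvm install 14        # install a specific major version
nvm use 14            # switch the current shell to it
nvm alias default 14  # make it the default for new shells
node -v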
[[docker#安装卸载]]
[[docker#docker-compose]]
docker run -d --restart=unless-stopped \
    -p 80:80 -p 443:443 \
    --privileged \
    --name=lemes-rancher-2.5 \
    10.188.132.44:5000/rancher/rancher:v2.5.12

docker restart lemes-rancher-2.5
docker stop lemes-rancher-2.5
docker start lemes-rancher-2.5

docker run -d --restart=unless-stopped \
    -p 9080:80 -p 8443:443 \
    --privileged \
    --name=lemes-rancher-2.5-prod \
    10.188.132.123:5000/rancher/rancher:v2.5.12

# production
docker run -d --restart=unless-stopped \
    -p 80:80 -p 443:443 \
    --privileged \
    --name=lemes-rancher-2.5-prod \
    10.188.132.44:5000/rancher/rancher:v2.5.12

docker run -d --restart=unless-stopped \
    -p 9080:80 -p 8443:443 \
    --privileged \
    --name=lemes-rancher-2.5-prod \
    10.188.132.44:5000/rancher/rancher:v2.5.12

docker run -d --restart=unless-stopped \
    -p 9080:80 -p 9443:443 \
    --privileged \
    --name=lemes-rancher-2.5-prod \
    rancher/rancher:v2.5.12

docker run -d --restart=unless-stopped \
    -p 8080:80 -p 8083:443 \
    --privileged \
    --name=lemes-rancher-2.5-prod \
    10.176.2.207:5000/rancher/rancher:v2.5.12
Prerequisite: docker & docker-compose must be installed first
Copy the link of the latest release package: https://github.com/goharbor/harbor/releases
wget https://github.com/goharbor/harbor/releases/download/v2.3.1/harbor-offline-installer-v2.3.1.tgz
sudo mkdir -p /data/docker/harbor
tar -zxf harbor-offline-installer-v2.3.1.tgz -C /data/docker/harbor
sudo chown -R lemes:lemes /data/docker/harbor
cd /data/docker/harbor/harbor
cp harbor.yml.tmpl harbor.yml
vi harbor.yml
sudo su root
export PATH=$PATH:/usr/local/bin
./install.sh
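After install.sh finishes, a quick sanity check (run from the harbor install directory, assuming docker-compose is on PATH; the URL is the hostname:port configured in harbor.yml below):

docker-compose ps                   # all harbor containers should be Up (healthy)
curl -I http://10.176.2.207:5000    # the UI/registry endpoint from harbor.yml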
# Configuration file of Harbor

# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: 10.176.2.207

# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 5000

# https related config
#https:
#  # https port for harbor, default is 443
#  port: 443
#  # The path of cert and key files for nginx
#  certificate: /your/certificate/path
#  private_key: /your/private/key/path

# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
#   # set enabled to true means internal tls is enabled
#   enabled: true
#   # put your cert and key files on dir
#   dir: /etc/harbor/tls/internal

# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
# external_url: https://reg.mydomain.com:8433

# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: Lenovo2021

# Harbor DB configuration
database:
  # The password for the root user of Harbor DB. Change this before any production use.
  password: root123
  # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
  max_idle_conns: 100
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  max_open_conns: 900

# The default data volume
data_volume: /data/docker/harbor

# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
# storage_service:
#   # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
#   # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
#   ca_bundle:
#   # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
#   # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
#   filesystem:
#     maxthreads: 100
#   # set disable to true when you want to disable registry redirect
#   redirect:
#     disabled: false

# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
  # ignoreUnfixed The flag to display only fixed vulnerabilities
  ignore_unfixed: false
  # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
  #
  # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
  # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
  skip_update: false
  #
  # The offline_scan option prevents Trivy from sending API requests to identify dependencies.
  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
  # It would work if all the dependencies are in local.
  # This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
  offline_scan: false
  #
  # insecure The flag to skip verifying registry certificate
  insecure: false
  # github_token The GitHub access token to download Trivy DB
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  #
  # github_token: xxx

jobservice:
  # Maximum number of job workers in job service
  max_job_workers: 10

notification:
  # Maximum retry count for webhook job
  webhook_job_max_retry: 10

chart:
  # Change the value of absolute_url to enabled can enable absolute url in chart
  absolute_url: disabled

# Log configurations
log:
  # options are debug, info, warning, error, fatal
  level: info
  # configs for logs in local storage
  local:
    # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
    rotate_count: 50
    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
    # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
    # are all valid.
    rotate_size: 200M
    # The directory on your host that store log
    location: /data/docker/harbor/log

  # Uncomment following lines to enable external syslog endpoint.
  # external_endpoint:
  #   # protocol used to transmit log to external endpoint, options is tcp or udp
  #   protocol: tcp
  #   # The host of external endpoint
  #   host: localhost
  #   # Port of external endpoint
  #   port: 5140

#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.6.0

# Uncomment external_database if using external database.
# external_database:
#   harbor:
#     host: harbor_db_host
#     port: harbor_db_port
#     db_name: harbor_db_name
#     username: harbor_db_username
#     password: harbor_db_password
#     ssl_mode: disable
#     max_idle_conns: 2
#     max_open_conns: 0
#   notary_signer:
#     host: notary_signer_db_host
#     port: notary_signer_db_port
#     db_name: notary_signer_db_name
#     username: notary_signer_db_username
#     password: notary_signer_db_password
#     ssl_mode: disable
#   notary_server:
#     host: notary_server_db_host
#     port: notary_server_db_port
#     db_name: notary_server_db_name
#     username: notary_server_db_username
#     password: notary_server_db_password
#     ssl_mode: disable

# Uncomment external_redis if using external Redis server
# external_redis:
#   # support redis, redis+sentinel
#   # host for redis: <host_redis>:<port_redis>
#   # host for redis+sentinel:
#   #  <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
#   host: redis:6379
#   password:
#   # sentinel_master_set must be set to support redis+sentinel
#   #sentinel_master_set:
#   # db_index 0 is for core, it's unchangeable
#   registry_db_index: 1
#   jobservice_db_index: 2
#   chartmuseum_db_index: 3
#   trivy_db_index: 5
#   idle_timeout_seconds: 30

# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
#   ca_file: /path/to/ca

# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components doesn't need to connect to each others via http proxy.
# Remove component from `components` array if want disable proxy
# for it. If you want use proxy for replication, MUST enable proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add domain to the `no_proxy` field, when you want disable proxy
# for some special registry.
proxy:
  http_proxy:
  https_proxy:
  no_proxy:
  components:
    - core
    - jobservice
    - trivy

# metric:
#   enabled: false
#   port: 9090
#   path: /metrics

# Trace related config
# only can enable one trace provider(jaeger or otel) at the same time,
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
# if using jaeger agent mode uncomment agent_host and agent_port
# trace:
#   enabled: true
#   # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
#   sample_rate: 1
#   # # namespace used to differenciate different harbor services
#   # namespace:
#   # # attributes is a key value dict contains user defined attributes used to initialize trace provider
#   # attributes:
#   #   application: harbor
#   # # jaeger should be 1.26 or newer.
#   # jaeger:
#   #   endpoint: http://hostname:14268/api/traces
#   #   username:
#   #   password:
#   #   agent_host: hostname
#   #   # export trace data by jaeger.thrift in compact mode
#   #   agent_port: 6831
#   # otel:
#   #   endpoint: hostname:4318
#   #   url_path: /v1/traces
#   #   compression: false
#   #   insecure: true
#   #   timeout: 10s

# enable purge _upload directories
upload_purging:
  enabled: true
  # remove files in _upload directories which exist for a period of time, default is one week.
  age: 168h
  # the interval of the purge operations
  interval: 24h
  dryrun: false

# cache layer configurations
# If this feature enabled, harbor will cache the resource
# `project/project_metadata/repository/artifact/manifest` in the redis
# which can especially help to improve the performance of high concurrent
# manifest pulling.
# NOTICE
# If you are deploying Harbor in HA mode, make sure that all the harbor
# instances have the same behaviour, all with caching enabled or disabled,
# otherwise it can lead to potential data inconsistency.
cache:
  # not enabled by default
  enabled: false
  # keep cache for one day by default
  expire_hours: 24
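If harbor.yml is changed after installation, the config has to be re-rendered before it takes effect; the usual sequence (run from the harbor install directory) is:

docker-compose down -v   # stop and remove the running containers
./prepare                # re-render harbor.yml into the component configs
docker-compose up -d     # start harbor again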
# Prevent vim styles from bleeding through
# create the nexus data dir
sudo mkdir /data/nexus-data && sudo chown -R 200 /data/nexus-data

docker run -d -p 8081:8081 --name nexus -v /data/nexus-data:/nexus-data 10.188.132.123:5000/library/sonatype/nexus3:3.63.0
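The first login uses a generated admin password, which can be read out of the data volume:

# initial password for the admin user (the file is removed after the first password change)
docker exec nexus cat /nexus-data/admin.password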
Change the user Jenkins runs as; the default user has no permission to create files from Jenkins scripts
sudo vi /etc/sysconfig/jenkins
# find the following entry and change the user to one with sufficient permissions
JENKINS_USER="lemes"
# restart jenkins
service jenkins restart
docker run -d --name jenkins -p 9080:8080 10.188.132.44:5000/library/jenkins/jenkins:2.426.2-lts-jdk17

sudo mkdir -p /data/jenkins_home
sudo chown -R 1000:1000 /data/jenkins_home

docker run -d --name jenkins -p 9080:8080 \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /data/jenkins_home:/var/jenkins_home \
    10.188.132.44:5000/library/jenkins/jenkins:2.426.3-lts-jdk17-dind

docker run -d --name jenkins -p 8080:8080 \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /data/jenkins_home:/var/jenkins_home \
    10.188.132.44:5000/library/jenkins/jenkins:2.426.3-lts-jdk17-dind

docker run -d --name jenkins -p 9080:8080 \
    -v /var/run/docker.sock:/var/run/docker.sock \
    10.188.132.44:5000/library/jenkins/jenkins:2.426.3-lts-jdk17-dind

docker run -d --name jenkins -p 8080:8080 \
    -v /var/run/docker.sock:/var/run/docker.sock \
    10.188.132.44:5000/library/jenkins/jenkins:2.426.3-lts-jdk17-dind-plugin

docker run -d --name jenkins -p 8080:8080 \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /data/jenkins_home:/var/jenkins_home \
    jenkins:2.426.3-lts-jdk17-dind

docker run \
    --rm \
    -u root \
    -p 8080:8080 \
    -v jenkins-data:/var/jenkins_home \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v "$HOME":/home \
    jenkinsci/blueocean
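Whichever variant is used, the initial admin password for the setup wizard can be pulled from the container:

docker logs jenkins   # the password is also printed in the startup log
docker exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword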
sudo yum install -y epel-release
sudo yum -y install nginx   # install nginx
sudo yum remove nginx       # remove nginx
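After installing, start and enable the service, then check it responds (a minimal sanity check):

sudo systemctl start nginx
sudo systemctl enable nginx
curl -I http://localhost   # expect HTTP/1.1 200 OK from the default page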
Download the latest source package from https://www.python.org/downloads/source/
# install dependencies & build tools
yum -y groupinstall "Development tools"
yum -y install zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel
yum install -y libffi-devel zlib-devel  # zlib1g-dev is the Debian package name; zlib-devel is the yum one
# download the source package
wget https://www.python.org/ftp/python/3.9.1/Python-3.9.1.tgz
# extract
tar -xvf Python-3.9.1.tgz
cd Python-3.9.1
# create the install prefix
mkdir /usr/local/python3
# build and install
./configure --prefix=/usr/local/python3 --enable-optimizations
make && make install
# create symlinks
ln -s /usr/local/python3/bin/python3 /usr/local/bin/python3
ln -s /usr/local/python3/bin/pip3 /usr/local/bin/pip3
# verify
python3 -V
pip3 -V
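To confirm the interpreter was built with SSL support (pip needs it to reach PyPI), a quick check:

python3 -c "import ssl; print(ssl.OPENSSL_VERSION)"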
sudo yum install centos-release-scl devtoolset-8-gcc* -y
# activate (temporary, current shell only)
scl enable devtoolset-8 bash
gcc -v
# create the jdk directory
mkdir -p /data/software/jdk
cd /data/software/jdk
# download the jdk package
wget https://corretto.aws/downloads/latest/amazon-corretto-8-x64-linux-jdk.tar.gz
# extract
tar -zxvf amazon-corretto-8-x64-linux-jdk.tar.gz
# set JAVA_HOME and PATH (the extracted directory name depends on the downloaded version)
vi /etc/profile
export JAVA_HOME=/data/software/jdk/amazon-corretto-8.322.06.2-linux-x64
export PATH=${JAVA_HOME}/bin:${PATH}
# apply
source /etc/profile
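Verify the environment picked up the new JDK:

echo $JAVA_HOME
java -version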
sudo rpm --import https://yum.corretto.aws/corretto.key
sudo curl -L -o /etc/yum.repos.d/corretto.repo https://yum.corretto.aws/corretto.repo
sudo yum install -y java-11-amazon-corretto-devel
# download the maven package: https://dlcdn.apache.org/
mkdir -p /data/software/maven
cd /data/software/maven
wget https://dlcdn.apache.org/maven/maven-3/3.9.1/binaries/apache-maven-3.9.1-bin.tar.gz --no-check-certificate
# extract
tar -zxvf apache-maven-3.9.1-bin.tar.gz
# set environment variables
sudo vi /etc/profile
MAVEN_HOME=/data/software/maven/apache-maven-3.9.1
export PATH=${MAVEN_HOME}/bin:${PATH}
# apply
source /etc/profile
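Verify:

mvn -v   # should print Maven 3.9.1 and the JDK it runs on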
# https://redis.io/download/
cd /usr/local/
curl -L -o redis-7.0.5.tar.gz https://codeload.github.com/redis/redis/tar.gz/refs/tags/7.0.5
tar -zxvf redis-7.0.5.tar.gz
cd redis-7.0.5/
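From here the usual build-and-run steps are (a sketch; redis only needs make):

make && make install         # binaries land in /usr/local/bin by default
redis-server --daemonize yes
redis-cli ping               # expect PONG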
yum install ntp ntpdate
systemctl start ntpd
systemctl enable ntpd
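Check that the daemon is actually syncing:

ntpq -p   # lists upstream servers; a * marks the peer currently synced to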
# client side
## generate a key pair
ssh-keygen -t rsa -C "yelog@mail.com"
## print the public key and copy it
cat ~/.ssh/id_rsa.pub
# upload the public key to the server
ssh-copy-id -i ~/.ssh/id_rsa.pub root@xx.xx.xx.xx

# upload the key to the server manually
## create authorized_keys (skip if it exists)
touch ~/.ssh/authorized_keys
## set permissions
chmod 700 -R ~/.ssh
## append the public key to the file
echo "public key" >> ~/.ssh/authorized_keys
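Afterwards, logging in should no longer prompt for a password:

ssh root@xx.xx.xx.xx   # should log straight in using the key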
# show the current open-file limit
ulimit -n
# soft limit
ulimit -Sn
# hard limit
ulimit -Hn
# set both temporarily (current session only)
ulimit -SHn 10000
# set permanently
sudo vim /etc/security/limits.conf
* soft nofile 9000000
* hard nofile 9000000
# count open handles per process
lsof -n | awk '{print $2}' | sort | uniq -c | sort -nr | more

sudo vi /etc/sysctl.conf
# add
fs.file-max = 9000000
fs.inotify.max_user_instances = 1000000
fs.inotify.max_user_watches = 1000000
# apply
sudo sysctl -p
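The limits.conf change only applies to new sessions; after logging in again, verify both layers:

ulimit -n                   # per-process limit from limits.conf
cat /proc/sys/fs/file-max   # system-wide limit from sysctl.conf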
# start firewalld
sudo systemctl start firewalld
# check firewalld status
sudo systemctl status firewalld
# stop firewalld
sudo systemctl stop firewalld
# reload the configuration
sudo firewall-cmd --reload
# allow access to a tcp port range
sudo firewall-cmd --zone=public --add-rich-rule='rule family="ipv4" source address="0.0.0.0/0" port port="1-9329" protocol="tcp" accept' --permanent
# allow access to a udp port range
sudo firewall-cmd --zone=public --add-rich-rule='rule family="ipv4" source address="0.0.0.0/0" port port="1-9329" protocol="udp" accept' --permanent
# open a single port, permanently
sudo firewall-cmd --zone=public --add-port=9332/tcp --permanent
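Rules added with --permanent only take effect after a reload; to apply and inspect:

sudo firewall-cmd --reload
sudo firewall-cmd --zone=public --list-all   # shows open ports and rich rules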
# mount /dev/sda3 at /data; does not survive a reboot
# the /data directory must be created in advance
mount /dev/sda3 /data
To make the mount permanent, edit /etc/fstab (vi /etc/fstab) and add the following line:
/dev/sda3 /data ext4 defaults 0 0
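Before rebooting, it is worth validating the fstab entry, since a bad line can block boot:

umount /data   # detach the manual mount first (skip if not mounted)
mount -a       # mounts everything in /etc/fstab; errors here mean a bad entry
df -h /data    # confirm the filesystem is mounted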
# number of physical CPUs (sockets)
grep 'physical id' /proc/cpuinfo | sort -u | wc -l
# number of physical CPU cores
grep 'core id' /proc/cpuinfo | sort -u | wc -l
# number of logical cores (specs like "4C8G" refer to logical cores)
grep 'processor' /proc/cpuinfo | sort -u | wc -l
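lscpu summarizes the same numbers in one shot:

lscpu   # Socket(s), Core(s) per socket, and CPU(s) = logical cores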
# watch upload/download traffic on the eth1 interface
watch -d ifstat eth1
# find and delete all files ending in .bak under the current directory (including subdirectories)
find . -name '*.bak' -type f -exec rm -rf {} \;
# find and delete all .settings directories under the current directory (including subdirectories)
find . -name '.settings' -type d -exec rm -rf {} \;
Add the following lines to your .bashrc:
stty werase undef
bind '\C-w:unix-filename-rubout'
set -o vi