#Contents of the working directory
root@harbor:/data/k8s-data/dockerfile/web/tomcat-app1# ll
drwxr-xr-x 2 root root 4096 Jul 7 10:01 ./
drwxr-xr-x 10 root root 4096 Jul 3 15:53 ../
-rw-r--r-- 1 root root 424 Jul 7 09:47 Dockerfile
-rw-r--r-- 1 root root 160 Jul 6 16:41 app1.tar.gz
-rw-r--r-- 1 root root 153 Jul 3 14:27 build-command.sh
-rw-r--r-- 1 root root 23611 Jun 22 2021 catalina.sh
-rw-r--r-- 1 root root 35153210 Jul 7 09:29 filebeat-7.17.9-x86_64.rpm
-rw-r--r-- 1 root root 398 Jul 7 09:46 filebeat.yml
-rwxr-xr-x 1 root root 307 Jul 7 10:00 run_tomcat.sh*
#Contents of filebeat.yml. Here I forward the logs to a Redis server, which acts as a buffer to speed up the pipeline (a quick way to verify that data lands in Redis is shown right after the config).
cat filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/catalina.out
  fields:
    type: tomcat-catalina
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
output.redis:
  hosts: ["10.0.0.130:6379"] #Redis address
  key: "k8s-zhang-tomcat-app1" #key name
  db: 1 #database number
  timeout: 5 #timeout in seconds
  password: "123456" #password
#Contents of the Dockerfile
cat Dockerfile
#tomcat web1
#Based on the tomcat-base:v8.5.43 image
FROM harbor.zhang.org/pub-images/tomcat-base:v8.5.43
#Copy the rpm file into /tmp inside the image
ADD filebeat-7.17.9-x86_64.rpm /tmp
#Install the rpm package, then delete the source rpm to keep the image small
RUN yum install -y /tmp/filebeat-7.17.9-x86_64.rpm && rm -rf /tmp/filebeat-7.17.9-x86_64.rpm
#Copy the filebeat configuration file into /etc/filebeat
ADD filebeat.yml /etc/filebeat/filebeat.yml
#Add the prepared startup script
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
#Add a page archive (unpacked into the app1 webapp directory)
ADD app1.tar.gz /apps/tomcat/webapps/app1/
#Change owner and group
RUN chown tomcat.tomcat /apps/ -R
#Declare the listening ports
EXPOSE 8080 8009
#Run the startup script on container start
CMD ["/apps/tomcat/bin/run_tomcat.sh"]
#Contents of the run_tomcat.sh script
cat run_tomcat.sh
#!/bin/bash
#The container has no systemctl, so filebeat must be started directly with its full command line. You can install filebeat on a host first and run ps -ef | grep filebeat to see that command (a host-side check is sketched after this script).
/usr/share/filebeat/bin/filebeat --environment systemd -c /etc/filebeat/filebeat.yml --path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat &
#Start tomcat
su - tomcat -c "/apps/tomcat/bin/catalina.sh start"
#Keep a foreground process running so the container does not exit
su - tomcat -c "tail -f /etc/hosts"
#Contents of the build script
cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.zhang.org/pub-images/tomcat-app1:${TAG} .
sleep 3
docker push harbor.zhang.org/pub-images/tomcat-app1:${TAG}
#Build and push the image
bash build-command.sh 20230707
Sending build context to Docker daemon 35.2MB
Step 1/9 : FROM harbor.zhang.org/pub-images/tomcat-base:v8.5.43
---> 6f1aaf76c4d6
Step 2/9 : ADD filebeat-7.17.9-x86_64.rpm /tmp
---> Using cache
---> 76096edf5aa6
Step 3/9 : ADD filebeat.yml /etc/filebeat/filebeat.yml
---> Using cache
---> 7441a51fc600
Step 4/9 : RUN yum install -y /tmp/filebeat-7.17.9-x86_64.rpm && rm -rf /tmp/filebeat-7.17.9-x86_64.rpm
---> Using cache
---> b698f4b43553
Step 5/9 : ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
---> Using cache
---> ee795e8cb1ee
Step 6/9 : ADD app1.tar.gz /apps/tomcat/webapps/app1/
---> Using cache
---> 935d6bd74a05
Step 7/9 : RUN chown tomcat.tomcat /apps/ -R
---> Using cache
---> b784b3d397fa
Step 8/9 : EXPOSE 8080 8009
---> Using cache
---> cea4b897c86d
Step 9/9 : CMD ["/apps/tomcat/bin/run_tomcat.sh"]
---> Using cache
---> 837b4727b500
Successfully built 837b4727b500
Successfully tagged harbor.zhang.org/pub-images/tomcat-app1:20230707
The push refers to repository [harbor.zhang.org/pub-images/tomcat-app1]
6d4fd3bfe3e0: Layer already exists
c17bcf0fdae7: Layer already exists
884f4fb9ee91: Layer already exists
137e5fd32805: Layer already exists
172e7d2569ca: Layer already exists
86499d97a13b: Layer already exists
96d001b364a5: Layer already exists
239a5284fc41: Layer already exists
35ceed564f97: Layer already exists
f98c04a9cea2: Layer already exists
da8ea075ae87: Layer already exists
11a9a4c8474b: Layer already exists
fb82b029bea0: Layer already exists
20230707: digest: sha256:fb8a85b2caaff466ca636adda2f66ba88bdf8aff28193cea2c1b15ebcc3b8be2 size: 3042
#The image has already been pushed once, so every layer shows "Layer already exists"
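#Optional local smoke test before deploying (my own addition; the container name and host port are arbitrary):
docker run -d --name tomcat-app1-test -p 18080:8080 harbor.zhang.org/pub-images/tomcat-app1:20230707
docker exec tomcat-app1-test ps -ef | grep -E 'filebeat|tomcat'   #both processes should be running
curl -I http://127.0.0.1:18080/app1/   #should return 200 if app1.tar.gz contains an index page
docker rm -f tomcat-app1-test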
#Create the namespace
cat zhang.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: zhang
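#Apply and verify the namespace first; the Deployment and Service below assume it already exists:
kubectl apply -f zhang.yaml
kubectl get ns zhang   #STATUS should be Active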
#Create the pod. No nodePort is specified here; if it is omitted, a random port is assigned. In production the tomcat service is usually ClusterIP so tomcat is not exposed outside the cluster.
cat tomcat.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: zhang-tomcat-app1-deployment-label
  name: zhang-tomcat-app1-deployment
  namespace: zhang
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zhang-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: zhang-tomcat-app1-selector
    spec:
      containers:
      - name: zhang-tomcat-app1-container
        image: harbor.zhang.org/pub-images/tomcat-app1:20230707
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
        volumeMounts:
        - name: zhang-images
          mountPath: /data/tomcat/images
          readOnly: false
        - name: zhang-static
          mountPath: /data/tomcat/static
          readOnly: false
      volumes:
      - name: zhang-images
        nfs:
          server: 10.0.0.206
          path: /images/k8sdata/zhang/images
      - name: zhang-static
        nfs:
          server: 10.0.0.206
          path: /images/k8sdata/zhang/static
      #nodeSelector:
      #  project: zhang
      #  app: tomcat
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: zhang-tomcat-app1-service-label
  name: zhang-tomcat-app1-service
  namespace: zhang
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    #nodePort: 40003
  selector:
    app: zhang-tomcat-app1-selector
#Apply the manifest
kubectl apply -f tomcat.yaml
#Check whether the pods are up. Because an HPA for automatic scale-out/scale-in is configured on this deployment (shown below), there are two pods.
root@k8s-master1:/data/k8s-data/tomcat# kubectl get pod -n zhang
NAME READY STATUS RESTARTS AGE
zhang-nginx-deployment-d65497795-wg9dt 1/1 Running 12 (162m ago) 2d20h
zhang-tomcat-app1-deployment-5dd9895c99-9dh4b 1/1 Running 0 161m
zhang-tomcat-app1-deployment-5dd9895c99-m5v4h 1/1 Running 0 161m
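#The randomly assigned NodePort can be read from the Service (my addition; substitute a real node IP for <node-ip>):
kubectl get svc -n zhang zhang-tomcat-app1-service   #the PORT(S) column shows 80:<nodePort>/TCP
curl -I http://<node-ip>:<nodePort>/app1/            #should return 200 from tomcat through the NodePort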
#Contents of the HPA manifest
cat hpa-app1.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: zhang-tomcat-app1-podautoscaler
  namespace: zhang
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: zhang-tomcat-app1-deployment #name of the tomcat-app1 Deployment
  minReplicas: 2 #minimum number of pods
  maxReplicas: 10 #maximum number of pods
  metrics:
  - type: Resource
    resource: #note: tomcat-app1 must have resource limits set, otherwise Resource metrics cannot be used here
      name: cpu #monitor the pods' CPU usage
      target:
        type: Utilization
        averageUtilization: 50 #scale out once average utilization reaches 50%
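#Apply the HPA and confirm it can read metrics (this assumes metrics-server is installed in the cluster, otherwise TARGETS stays at <unknown>):
kubectl apply -f hpa-app1.yaml
kubectl get hpa -n zhang   #TARGETS should show the current CPU utilization against the 50% target
kubectl describe hpa zhang-tomcat-app1-podautoscaler -n zhang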
#Exec into the pod to check (the dashboard works too); filebeat and tomcat are both running normally
kubectl exec -it -n zhang zhang-tomcat-app1-deployment-5dd9895c99-9dh4b bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@zhang-tomcat-app1-deployment-5dd9895c99-9dh4b /]# ps -ef | grep filebeat
root 7 1 0 02:18 ? 00:00:03 /usr/share/filebeat/bin/filebeat --environment systemd -c /etc/filebeat/filebeat.yml --path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat
root 106 91 0 05:05 pts/0 00:00:00 grep --color=auto filebeat
[root@zhang-tomcat-app1-deployment-5dd9895c99-9dh4b /]# ps -ef | grep tomcat
root 1 0 0 02:18 ? 00:00:00 /bin/bash /apps/tomcat/bin/run_tomcat.sh
tomcat 34 1 0 02:18 ? 00:00:18 /usr/local/jdk/bin/java -Djava.util.logging.config.file=/apps/tomcat/conf/logging.properties -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Djdk.tls.ephemeralDHKeySize=2048 -Djava.protocol.handler.pkgs=org.apache.catalina.webresources -Dorg.apache.catalina.security.SecurityListener.UMASK=0027 -Dignore.endorsed.dirs= -classpath /apps/tomcat/bin/bootstrap.jar:/apps/tomcat/bin/tomcat-juli.jar -Dcatalina.base=/apps/tomcat -Dcatalina.home=/apps/tomcat -Djava.io.tmpdir=/apps/tomcat/temp org.apache.catalina.startup.Bootstrap start
root 35 1 0 02:18 ? 00:00:00 su - tomcat -c tail -f /etc/hosts
tomcat 36 35 0 02:18 ? 00:00:00 tail -f /etc/hosts
root 108 91 0 05:06 pts/0 00:00:00 grep --color=auto tomcat
Note: logstash is written in Java, so a JDK must be installed on the server before it can run.
#Install the JDK
yum -y install java-1.8.0-openjdk
#Install logstash
yum -y install logstash-7.17.9-x86_64.rpm
#Create the configuration file
cat /etc/logstash/conf.d/log-to-es.conf
input {
  redis {
    data_type => "list" #type
    key => "k8s-zhang-tomcat-app1" #key name, must match the one set in filebeat.yml
    host => "10.0.0.130" #Redis address
    port => "6379" #port
    db => "1" #database number
    password => "123456" #password
  }
}
output {
  if [fields][type] == "tomcat-catalina" { #must match the type set under fields in filebeat.yml
    elasticsearch {
      hosts => ["10.0.0.128:9200"] #forwarding address
      index => "k8s-zhang-filebeat-tomcat-accesslog-%{+YYYY.MM.dd}" #index name
    }
  }
}
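#The config can be syntax-checked before restarting the service (the binary path below is the default for the rpm install):
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/log-to-es.conf --config.test_and_exit
#it should report that the configuration is OK and then exit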
#Start logstash and check its log for errors
[root@rocky4 conf.d]#systemctl restart logstash.service
[root@rocky4 conf.d]#tail -f /var/log/logstash/logstash-plain.log
[2023-07-07T21:25:25,665][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch version determined (7.17.9) {:es_version=>7}
[2023-07-07T21:25:25,667][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2023-07-07T21:25:25,738][INFO ][logstash.outputs.elasticsearch][main] Config is not compliant with data streams. `data_stream => auto` resolved to `false`
[2023-07-07T21:25:25,753][INFO ][logstash.outputs.elasticsearch][main] Config is not compliant with data streams. `data_stream => auto` resolved to `false`
[2023-07-07T21:25:25,852][INFO ][logstash.outputs.elasticsearch][main] Using a default mapping template {:es_version=>7, :ecs_compatibility=>:disabled}
[2023-07-07T21:25:25,883][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>250, "pipeline.sources"=>["/etc/logstash/conf.d/log-to-es.conf"], :thread=>"#<Thread:0x176ea837 run>"}
[2023-07-07T21:25:26,569][INFO ][logstash.javapipeline ][main] Pipeline Java execution initialization time {"seconds"=>0.68}
[2023-07-07T21:25:26,591][INFO ][logstash.inputs.redis ][main] Registering Redis {:identity=>"redis://<password>@10.0.0.130:6379/1 list:k8s-zhang-tomcat-app1"}
[2023-07-07T21:25:26,614][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2023-07-07T21:25:26,791][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
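#With the pipeline running, the same Redis key can be checked again (my addition); logstash should keep draining the list, so its length stays at or near 0 while new entries flow into elasticsearch:
redis-cli -h 10.0.0.130 -a 123456 -n 1 LLEN k8s-zhang-tomcat-app1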
#Check the elasticsearch configuration file
[root@elk ~]$grep '^[a-zA-Z]' /etc/elasticsearch/elasticsearch.yml
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
discovery.seed_hosts: ["10.0.0.142"]
cluster.initial_master_nodes: ["node-1"]
#Check the kibana configuration file
[root@elk ~]$grep '^[a-zA-Z]' /etc/kibana/kibana.yml
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://localhost:9200"]
i18n.locale: "zh-CN"
#Start the services and check whether elasticsearch at 10.0.0.142:9200 now has data in the k8s-zhang-filebeat-tomcat-accesslog index
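#A quick check without opening kibana (adjust the host to whichever elasticsearch node you query):
curl -s 'http://10.0.0.142:9200/_cat/indices?v' | grep k8s-zhang
#the k8s-zhang-filebeat-tomcat-accesslog-<date> index should be listed with a growing docs.count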