docker高级应用之资源监控
最近忙着开发docker平台,所以挺久没有更新博客了,今天给大家分享一下,我开发docker平台里如何监控资源与进行图像展示的。
默认docker 1.5版本有stats命令查看容器的cpu使用率、内存使用量与网络流量,但此功能有2个必须满足的条件:
1、必须是docker 1.5版本
2、必须使用默认docker0的网桥(如果你使用ovs这样非原生的网桥无法获取数据的)
我开发的监控里docker是1.5版本,并且通过使用ifconfig来获取容器rx或tx量来获取容器流量,解决了必须使用docker默认网桥才可以获取流量数据。
下面是容器资源监控效果图
1、平台里资源监控界面
2、查看容器yangjing-test的cpu使用率资源监控
3、查看内存使用量资源监控
4、查看容器网络流量信息
下面是监控数据收集脚本信息
使用python写的,由于需要往mysql里写入数据,所以需要安装MySQLdb模块以及服务端mysql开启账号
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
|
[[email protected] code] # cat collect_docker_monitor_data_multi.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Deng Lei
# email: [email protected]
import json
import multiprocessing
import os
import re
import socket
import struct
import fcntl
import subprocess
import time

import etcd
import MySQLdb
from docker import Client
def get_local_ip(iface='em1'):
    """Return the IPv4 address bound to *iface*, or None if unavailable.

    Uses the SIOCGIFADDR ioctl directly, so no external command is needed.
    Returns None when the interface does not exist or has no address.
    """
    SIOCGIFADDR = 0x8915
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # struct ifreq layout: 16s iface name, H address family, 14s padding.
        # Encode the name so this works on both Python 2 and 3.
        name = iface.encode() if isinstance(iface, str) else iface
        ifreq = struct.pack('16sH14s', name, socket.AF_INET, b'\x00' * 14)
        res = fcntl.ioctl(sock.fileno(), SIOCGIFADDR, ifreq)
    except IOError:
        # Narrowed from the original bare `except:`: only ioctl/OS failures
        # (unknown interface, no address) should map to None.
        return None
    finally:
        sock.close()  # the original leaked this socket on every call
    ip = struct.unpack('16sH2x4s8x', res)[2]
    return socket.inet_ntoa(ip)
def docker_container_all():
    """Return the names of every container on this host, running or not.

    The docker API prefixes each name with '/'; that prefix is stripped.
    """
    names = []
    for container in docker_client.containers(all=True):
        for raw_name in container['Names']:
            names.append(raw_name[1:])
    return names
def docker_container_run():
    """Return the names of containers whose status begins with 'Up'.

    Names are returned without the leading '/' the docker API adds.
    """
    running = []
    for container in docker_client.containers(all=True):
        if not re.match('Up', container['Status']):
            continue
        for raw_name in container['Names']:
            running.append(raw_name[1:])
    return running
def check_container_stats(name):
    """Collect one monitoring sample for container *name*.

    CPU percent is derived from two consecutive snapshots of the docker
    stats stream; memory usage/limit come from the newest snapshot.
    Network traffic is measured by diffing `ifconfig eth1` byte counters
    inside the container one second apart, which works even when the
    container is NOT on the default docker0 bridge (where the stats API
    reports no network data).

    Returns a dict of metrics in the shape expected by write_mysql().
    """
    stats_stream = docker_client.stats(name)
    # json.loads replaces the original eval(): the stream yields JSON, and
    # eval on external data is unsafe and chokes on true/false/null tokens.
    old_result = json.loads(stats_stream.next())
    new_result = json.loads(stats_stream.next())
    stats_stream.close()
    cpu_total_usage = new_result['cpu_stats']['cpu_usage']['total_usage'] - old_result['cpu_stats']['cpu_usage']['total_usage']
    cpu_system_usage = new_result['cpu_stats']['system_cpu_usage'] - old_result['cpu_stats']['system_cpu_usage']
    cpu_num = len(old_result['cpu_stats']['cpu_usage']['percpu_usage'])
    cpu_percent = round(float(cpu_total_usage) / float(cpu_system_usage) * cpu_num * 100.0, 2)
    mem_usage = new_result['memory_stats']['usage']
    mem_limit = new_result['memory_stats']['limit']
    mem_percent = round(float(mem_usage) / float(mem_limit) * 100.0, 2)
    # Shell pipeline prints {"rx":<bytes>,"tx":<bytes>} from ifconfig output.
    network_check_command = """docker exec %s ifconfig eth1|grep bytes|awk -F ':' '{print $2,$3}'|awk -F '(' '{print $1,$2}'|awk -F ')' '{print $1}'|awk '{print "{\\"rx\\":"$1",\\"tx\\":"$2"}"}'""" % name
    network_old_result = json.loads(subprocess.Popen(network_check_command, shell=True, stdout=subprocess.PIPE).stdout.readlines()[0].strip('\n'))
    time.sleep(1)
    network_new_result = json.loads(subprocess.Popen(network_check_command, shell=True, stdout=subprocess.PIPE).stdout.readlines()[0].strip('\n'))
    # Byte deltas over the 1-second window, reported in KB.
    network_rx_packets = (int(network_new_result['rx']) - int(network_old_result['rx'])) / 1024
    network_tx_packets = (int(network_new_result['tx']) - int(network_old_result['tx'])) / 1024
    # Timestamp like '2015-04-01T12:00:00.123...' -> '2015-04-01 12:00:00'.
    collect_time = str(new_result['read'].split('.')[0].split('T')[0]) + ' ' + str(new_result['read'].split('.')[0].split('T')[1])
    msg = {'Container_name': name,
           'Cpu_percent': cpu_percent,
           'Memory_usage': mem_usage,
           'Memory_limit': mem_limit,
           'Memory_percent': mem_percent,
           'Network_rx_packets': network_rx_packets,
           'Network_tx_packets': network_tx_packets,
           'Collect_time': collect_time}
    return msg
def write_mysql(msg):
    """Persist one monitoring sample (dict from check_container_stats) to MySQL.

    Looks up the container's primary key by name on this host, then inserts
    the metrics row into docker_monitor. Uses parameterized queries — the
    original interpolated values into the SQL string, which is vulnerable
    to SQL injection via container names.
    """
    container_name = msg['Container_name']
    search_sql = ("select dc.id from docker_containers dc,docker_physics dp "
                  "where dc.container_name=%s and dp.physics_internal_ip=%s;")
    mysql_cur.execute(search_sql, (container_name, local_ip))
    # Original kept only the first matching id; preserve that behavior.
    container_id = int(mysql_cur.fetchall()[0][0])
    insert_sql = ("insert into docker_monitor(container_id,cpu_percent,memory_usage,"
                  "memory_limit,memory_percent,network_rx_packets,network_tx_packets,"
                  "collect_time) values(%s,%s,%s,%s,%s,%s,%s,%s);")
    mysql_cur.execute(insert_sql, (container_id,
                                   msg['Cpu_percent'],
                                   msg['Memory_usage'],
                                   msg['Memory_limit'],
                                   msg['Memory_percent'],
                                   msg['Network_rx_packets'],
                                   msg['Network_tx_packets'],
                                   msg['Collect_time']))
if __name__ == "__main__" :
local_ip=get_local_ip( 'ovs1' )
if local_ip is None:
local_ip=get_local_ip( 'em1' )
etcd_client=etcd.Client(host= '127.0.0.1' , port=4001)
docker_client = Client(base_url= 'unix://var/run/docker.sock' , version= '1.17' )
mysql_conn=MySQLdb.connect(host= '10.10.27.10' ,user= 'ops' , passwd = '[email protected]!#@NVE' ,port=3306,charset= "utf8" )
mysql_cur=mysql_conn.cursor()
mysql_conn.select_db( 'devops' )
#docker_container_all_name=docker_container_all()
docker_container_run_name=docker_container_run()
if len(docker_container_run_name) == 1:
num=1
elif len(docker_container_run_name) >= 4 and len(docker_container_run_name) <=8:
num=4
elif len(docker_container_run_name) >8 and len(docker_container_run_name) <=15:
num=8
elif len(docker_container_run_name) >15 and len(docker_container_run_name) <=30:
num=20
else :
num=40
pool = multiprocessing.Pool(processes=num)
scan_result=[]
#collect container monitor data
for i in docker_container_run_name:
pool.apply_async(check_container_stats, (i,))
scan_result.append(pool.apply_async(check_container_stats, (i,)))
pool.close()
pool. join ()
result=[]
for res in scan_result:
if res.get() is not None:
write_mysql(res.get())
else :
print 'fail is %s' %res.get()
mysql_conn.commit()
mysql_cur.close()
mysql_conn.close()
|
下面是把此脚本放入crontab里每分钟收集一下
1
|
*/1 * * * * python /root/collect_docker_monitor_data_multi.py >> /root/docker_log/docker_monitor.log 2>&1
|
另外说明一下,上面的监控数据图形化使用highstock,使用ajax进行动态加载数据,每次获取容器所有时间监控数据。有问题请留言。
本文转自 reinxu 51CTO博客,原文链接:http://blog.51cto.com/dl528888/1635951,如需转载请自行联系原作者