# coding=utf-8
#
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT
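"""Plugins that save LVE usage, limits and dbtop statistics to files under /var/lve/."""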
from __future__ import absolute_import
from __future__ import division
import errno
import grp
import json
import logging
import os
import pwd
import time
from clcommon.utils import run_command, ExternalProgramFailed
from lvestats.lib.commons.func import atomic_write_csv, atomic_write_str, deserialize_lve_id
from lvestats.core.plugin import LveStatsPlugin
from lvestats.lib.commons.sizeutil import mempages_to_bytes
__author__ = 'iseletsk'
class FileSaver(LveStatsPlugin):
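    """Dumps per-LVE usage and limits as CSV rows to fname (/var/lve/info by default)."""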
def __init__(self, fname='/var/lve/info'):
""":type fname: str"""
self.fname = fname
self.log = logging.getLogger('plugin.file_saver')
def write(self, output):
"""
:type output: list
:rtype: None
"""
try:
atomic_write_csv(self.fname, output)
except OSError as e:
self.log.error(f'Error during saving the "{self.fname}" file: {e}')
@staticmethod
def compare_limits(lve_stat, lve_usage, LVE_VERSION):
"""
:type LVE_VERSION: int
:type lve_usage: lvestats.plugins.generic.aggregators.AggregatedLveUsage
:type lve_stat: lvestat.LVEStat
"""
res1 = (
lve_stat.lep != lve_usage.lep or
lve_stat.cpu != lve_usage.lcpu or
lve_stat.lmem != lve_usage.lmem or
lve_stat.lmemphy != lve_usage.lmemphy or
lve_stat.lnproc != lve_usage.lnproc or
lve_stat.io * 1024 != lve_usage.io)
if LVE_VERSION > 6:
res2 = lve_stat.liops != lve_usage.liops
else:
res2 = False
return res1 or res2
def execute(self, lve_data):
""":type lve_data: dict"""
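        # 5-second aggregated usage (AggregatedLveUsage) and the latest raw
        # LVEStat objects, both taken from the shared lve_data dict.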
LVE_VERSION = lve_data['LVE_VERSION']
usages = lve_data.get('lve_usage_5s', {})
new = lve_data['stats']
output_rows = []
for lve_id, v in usages.items():
data = []
data.extend([
lve_id, # 0 - id
int(v.mep), # 1 - mep
int(v.lep), # 2 - lep
int(v.cpu_usage), # 3 - cpu_usage
int(v.lcpu), # 4 - lcpu
int(v.mem_usage), # 5 - mem_usage
int(v.lmem), # 6 - lmem
int(v.mem_fault), # 7 - mem_fault
int(v.mep_fault), # 8 - mep_fault
int(v.lmemphy), # 9 - lmemphy
int(v.memphy), # 10 - memphy
int(v.memphy_fault), # 11 - memphy_fault
int(v.lnproc), # 12 - lnproc
int(v.nproc), # 13 - nproc
int(v.nproc_fault), # 14 - nproc_fault
0, # 15 - lcpuw (deprecated not used)
int(v.io_usage)//1024, # 16 - io_usage
int(v.io)//1024]) # 17 - io_limit
if LVE_VERSION >= 8:
data.extend([
int(v.liops), # 18 - liops
int(v.iops)]) # 19 - iops
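            # pad to 20 columns so the fault fields below sit at fixed positions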
data.extend(['']*(20-len(data)))
# Mandatory fields for any version
data.append(v.cpu_fault) # 20 - cpu_fault
data.append(v.iops_fault) # 21 - iops_fault
data.append(v.io_fault) # 22 - io_fault
output_rows.append(data)
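        # Also report LVEs that had no usage in this interval but whose limits
        # differ from the default LVE (id 0).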
for lve_id, new_lvestat in new.items():
lve_id, is_reseller = deserialize_lve_id(lve_id)
if is_reseller:
# TODO: need to create some other file format; lvp_id is not supported for now;
continue
if usages and lve_id not in usages:
# limits are NOT equal to defaults ?
if 0 not in usages:
# no defaults?
self.log.warning("No defaults collected to compare with, skipping")
continue
# noinspection PyTypeChecker
if self.compare_limits(lve_stat=new_lvestat, lve_usage=usages[0], LVE_VERSION=LVE_VERSION):
data = []
data.extend([
lve_id, # 0 - id
0, # 1 - mep
int(new_lvestat.lep), # 2 - lep
0, # 3 - cpu_usage
int(new_lvestat.cpu), # 4 - lcpu
0, # 5 - mem_usage
int(new_lvestat.lmem), # 6 - lmem
0, # 7 - mem_fault
0, # 8 - mep_fault
int(new_lvestat.lmemphy), # 9 - lmemphy
0, # 10 - memphy
0, # 11 - memphy_fault
int(new_lvestat.lnproc), # 12 - lnproc
0, # 13 - nproc
0, # 14 - nproc_fault
0, # 15 - lcpuw (deprecated not used)
0, # 16 - io_usage
int(new_lvestat.io)]) # 17 - io_limit
if LVE_VERSION >= 8:
data.extend([
int(new_lvestat.liops), # 18 - liops
0]) # 19 - iops
data.extend(['']*(20-len(data)))
# Mandatory fields for any version
data.append(0) # 20 - cpu_fault
data.append(0) # 21 - iops_fault
data.append(0) # 22 - io_fault
output_rows.append(data)
self.write(output_rows)
class MySQLTopFileSaver(LveStatsPlugin):
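    """
    Dumps `dbtop -c` output (MySQL Governor statistics) as JSON to fname while
    the governor touch file is kept fresh (touched within the last 60 seconds).
    """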
dbtop = "/usr/sbin/dbtop"
def __init__(self, fname='/var/lve/cloudlinux_dbtop.json'):
self.fname = fname
self.touch_fname = '/var/lve/governor.ts'
self.log = logging.getLogger('plugin.cloudlinux_dbtop_file_saver')
self._dbtop_exist = self._check_dbtop_exist()
if self._dbtop_exist: # create touch file if dbtop exist
self._create_touch_file()
def __del__(self):
"""
        Try to delete the created files
"""
self._try_unlink_file(self.touch_fname)
self._try_unlink_file(self.fname)
def _try_unlink_file(self, fname):
"""
        Try to delete a file; log a warning on failure
:param fname: path to file
"""
self.log.debug('Delete %s file' % fname)
try:
os.unlink(fname)
        except OSError as e:
            if e.errno != errno.ENOENT:  # ignore "No such file or directory"
                self.log.warning("Can't delete %s file; %s", fname, str(e))
        except SystemError as e:
            self.log.warning("Can't delete %s file; %s", fname, str(e))
@staticmethod
    def _convert_to_bytes(value):
        """
        Convert a dbtop size string such as "10", "10k" or "10Mb" to bytes
        :type value: str
        :rtype: int
        """
        try:
            return int(value)
        except ValueError:
            number, suffix = value.rstrip('bB'), ''
            if number and number[-1] in 'kKmMgG':
                number, suffix = number[:-1], number[-1].lower()
            multiplier = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}.get(suffix, 1)
            return int(number) * multiplier
def _create_touch_file(self):
"""
        Create the touch file; it is owned by the 'nobody' user so that an
        unprivileged process can update it
:return:
"""
self.log.debug('Create %s file' % self.touch_fname)
try:
uid = pwd.getpwnam("nobody").pw_uid
try:
gid = grp.getgrnam('nobody').gr_gid
except KeyError:
# Ubuntu doesn't have "nobody" group by default so let's try "nogroup"
# But "nobody" on Ubuntu can be added by 3rd party software e.g. cPanel
gid = grp.getgrnam('nogroup').gr_gid
with open(self.touch_fname, 'w'):
pass
os.chown(self.touch_fname, uid, gid)
os.chmod(self.touch_fname, 0o644)
except (IOError, OSError) as e:
self.log.error("Can't create %s file; %s", self.touch_fname, str(e))
def _parse_dbtop_cause(self, value):
"""
:rtype: (str, int)
:type value: str
"""
cause_of_restrict = "-"
time_of_restrict = 0
if value in ("-", ""):
return cause_of_restrict, time_of_restrict
if "cpu" in value:
cause_of_restrict = "cpu"
elif "read" in value:
cause_of_restrict = "read"
elif "write" in value:
cause_of_restrict = "write"
else:
cause_of_restrict = "unknown"
try:
time_of_restrict = int(value.split("/")[-1])
        except ValueError:
self.log.error("could not parse cause string: %s", value)
return cause_of_restrict, time_of_restrict
def _check_dbtop_exist(self):
"""
Check if dbtop util exist
:return bool: True if exist
"""
self.log.debug('Check exist %s file' % self.dbtop)
return os.path.exists(self.dbtop)
def get_dbtop_info(self):
"""
        Try to obtain and parse data from `dbtop -c` output
:rtype: dict
"""
try:
dbtop_output = self._get_dbtop_output()
except ExternalProgramFailed as e:
if "Can't connect to socket" not in str(e):
self.log.error("dbtop execution is failed: %s", str(e))
return {}
result = {}
for output in dbtop_output[1:]:
try:
user, cpu, read, write, cause = output.split()
except ValueError as e:
self.log.error("Can't unpack output: %s\n%s", str(output), str(e))
continue
cause_of_restrict, time_of_restrict = self._parse_dbtop_cause(cause)
result[user] = {
"cpu": int(cpu.split("/")[0]),
"io": self._convert_to_bytes(read.split("/")[0]) + self._convert_to_bytes(write.split("/")[0]),
"cause_of_restrict": cause_of_restrict,
"time_of_restrict": time_of_restrict}
return result
def _get_dbtop_output(self):
"""
Run dbtop -c and split output
:return list[str]:
"""
cmd = [self.dbtop, "-c"]
self.log.debug('Run "%s" command' % ' '.join(cmd))
dbtop_output = run_command(cmd)
return dbtop_output.strip().split("\n")
def _output(self, result):
"""
        Dump data to file as a JSON string
        :param result dict: data to dump
"""
self.log.debug('Dump data to %s file' % self.fname)
json_result = json.dumps(result)
try:
atomic_write_str(self.fname, json_result)
except (IOError, OSError) as e:
self.log.error(f'Error during saving the "{self.fname}" file: {e}')
def _need_dump(self):
"""
        Check whether data should be dumped. A dump is needed if the touch file
        exists and was touched within the last 60 seconds
:return bool:
"""
return os.path.exists(self.touch_fname) \
and time.time() - os.stat(self.touch_fname).st_mtime <= 60
def execute(self, lve_data):
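        """Dump dbtop data as JSON if dbtop exists and the touch file is fresh."""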
if self._dbtop_exist and self._need_dump():
self._output({"dbgov_data": self.get_dbtop_info()})
class CloudLinuxTopFileSaver(LveStatsPlugin):
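    """Dumps aggregated per-LVE usage and limits for users and resellers as JSON to fname."""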
def __init__(self, fname='/var/lve/cloudlinux_top.json'):
self.fname = fname
self.log = logging.getLogger('plugin.cloudlinux_top_file_saver')
def _output(self, result):
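        """Dump data to fname as a JSON string."""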
json_result = json.dumps(result)
try:
atomic_write_str(self.fname, json_result)
except (IOError, OSError) as e:
self.log.error(f'Error during saving the "{self.fname}" file: {e}')
def execute(self, lve_data):
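        """Build usage and limit dictionaries for every LVE and dump them as JSON."""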
lve_version = lve_data['LVE_VERSION']
usages = lve_data.get('lve_usage_5s', {})
users, resellers = [], []
for uid, usage in list(usages.items()):
lve_id, is_reseller = deserialize_lve_id(uid)
if lve_id > 0:
user = {
"id": uid,
"usage": {
"ep": usage.mep,
# Set CPU load measurement unit as % of one CPU core
"cpu": {"all": usage.cpu_usage / 100.0},
"io": {"all": usage.io_usage},
"mem": mempages_to_bytes(usage.memphy),
"pno": usage.nproc},
"limit": {
"ep": usage.lep,
# Set CPU limit measurement unit as % of one CPU core
"cpu": {"all": usage.lcpu / 100.0},
"io": {"all": usage.io},
"mem": mempages_to_bytes(usage.lmemphy),
"pno": usage.lnproc}}
if lve_version >= 8:
user["usage"]["iops"] = usage.iops
user["limit"]["iops"] = usage.liops
if is_reseller:
resellers.append(user)
else:
users.append(user)
self._output({"users": users, "resellers": resellers})