Current Path : /proc/self/root/bin/
Linux gator3171.hostgator.com 4.19.286-203.ELK.el7.x86_64 #1 SMP Wed Jun 14 04:33:55 CDT 2023 x86_64
Current File : //proc/self/root/bin/kcarectl
#!/usr/bin/env python2
# Copyright (c) Cloud Linux GmbH & Cloud Linux Software, Inc
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENCE.TXT

from __future__ import print_function

import ast
import base64
import errno
import hashlib
import json
import logging
import logging.handlers
import os
import platform
import random
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import warnings
import fnmatch

from argparse import ArgumentParser
from datetime import datetime
from contextlib import contextmanager

# The scanner interface should skip us
os.environ['KCARE_SCANNER_INTERFACE_DO_NOTHING'] = '1'

if os.path.isdir('/usr/libexec/kcare/python'):  # pragma: no cover
    sys.path.insert(0, '/usr/libexec/kcare/python')

warnings.filterwarnings('ignore', category=DeprecationWarning)

PY2 = sys.version_info[0] == 2

if PY2:  # pragma: no py3 cover
    from ConfigParser import ConfigParser
    import httplib
    from urllib import urlencode
    from urllib import quote as urlquote
    from urllib2 import HTTPError, URLError, Request, urlopen as std_urlopen
else:  # pragma: no py2 cover
    from urllib.parse import quote as urlquote
    from configparser import ConfigParser
    from http import client as httplib
    from urllib.error import HTTPError, URLError
    from urllib.parse import urlencode
    from urllib.request import Request, urlopen as std_urlopen

kcarelog = logging.getLogger('kcare')  # mocked: py/kcarectl_tests
kcarelog.setLevel(logging.INFO)

FLAGS = ['keep-registration']
BLACKLIST_FILE = 'kpatch.blacklist'
CACHE_ENTRIES = 3
CONFIG = '/etc/sysconfig/kcare/kcare.conf'
LOG_FILE = '/var/log/kcarectl.log'
CPANEL_GID = 99
EFFECTIVE_LATEST = 'v2'
EXPECTED_PREFIX = ('12h', '24h', '48h', 'test')
FIXUPS_FILE = 'kpatch.fixups'
FREEZER_BLACKLIST = '/etc/sysconfig/kcare/freezer.modules.blacklist'
GPG_KEY_DIR = '/etc/pki/kcare-gpg/'
KCDOCTOR = '/usr/libexec/kcare/kcdoctor.sh'
KERNEL_VERSION_FILE = '/proc/version'
KMOD_BIN = 'kcare.ko'
KPATCH_CTL = '/usr/libexec/kcare/kpatch_ctl'
LEVEL = None  # a level to 'stick on' (if 0 then use latest level)
LIBCARE_CLIENT = '/usr/libexec/kcare/libcare-client'
LIBCARE_DISABLED = True
LIBCARE_PATCHES = '/etc/sysconfig/kcare/libcare_patches'
PATCH_BIN = 'kpatch.bin'
PATCH_CACHE = '/var/cache/kcare'
PATCH_DONE = '.done'
PATCH_INFO = 'kpatch.info'
PATCH_LATEST = ('latest.v2',)
PATCH_METHOD = ''
PATCH_SERVER = 'https://patches.kernelcare.com'
REGISTRATION_API_URL = 'https://cln.cloudlinux.com/api/kcare'
SYSCTL_CONFIG = '/etc/sysconfig/kcare/sysctl.conf'
TEST_PREFIX = ''
VERSION = '2.60-3.el7'
VIRTWHAT = '/usr/libexec/kcare/virt-what'
USERSPACE_PATCHES = None
IM360_LICENSE_FILE = '/var/imunify360/license.json'
SYSTEMID = '/etc/sysconfig/kcare/systemid'

# urlopen retry options
RETRY_DELAY = 3
RETRY_MAX_DELAY = 30
RETRY_BACKOFF = 2
RETRY_COUNT = 4

UPDATE_MODE_MANUAL = 'manual'  # update is launched manually by `kcarectl -u`
UPDATE_MODE_AUTO = 'auto'      # update is launched by cron
UPDATE_MODE_SMART = 'smart'    # update is launched by kcare daemon

CHECK_CLN_LICENSE_STATUS = True

VERSION_RE = re.compile(r'^(\d+[.]\d+[-]\d+)')
BLACKLIST_RE = re.compile('==BLACKLIST==\n(.*)==END BLACKLIST==\n', re.DOTALL)
CONFLICTING_MODULES_RE = re.compile('(kpatch.*|ksplice.*|kpatch_livepatch.*)')

KCARE_UNAME_FILE = '/proc/kcare/effective_version'

POLICY_REMOTE = 'REMOTE'
POLICY_LOCAL = 'LOCAL'
POLICY_LOCAL_FIRST = 'LOCAL_FIRST'

UPDATE_POLICY = POLICY_REMOTE
AUTO_UPDATE = True
LIB_AUTO_UPDATE = True
UPDATE_FROM_LOCAL = False
USE_SIGNATURE = True
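
# Illustrative sketch (not part of the original kcarectl source): the retry()
# helper defined later in this file sleeps `delay` seconds after a failed
# attempt and multiplies the delay by RETRY_BACKOFF (capped at RETRY_MAX_DELAY)
# before the next one, so the defaults above yield waits of 3s, 6s, 12s and 24s
# between the five download attempts.
def _example_retry_schedule(count=RETRY_COUNT, delay=RETRY_DELAY,
                            backoff=RETRY_BACKOFF, max_delay=RETRY_MAX_DELAY):
    waits = []
    for _ in range(count):
        waits.append(delay)
        delay = min(delay * backoff, max_delay)
    return waits  # with the defaults: [3, 6, 12, 24]
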
IGNORE_UNKNOWN_KERNEL = False
LOAD_KCARE_SYSCTL = True
KPATCH_DEBUG = False
CHECK_SSL_CERTS = True
PATCH_TYPE = ''
PREV_PATCH_TYPE = 'default'
BEFORE_UPDATE_COMMAND = None
AFTER_UPDATE_COMMAND = None

PRINT_INFO = 1
PRINT_WARN = 2
PRINT_ERROR = 3
PRINT_CRITICAL = 4
PRINT_LEVEL = PRINT_INFO

SILENCE_ERRORS = True
SUCCESS_TIMEOUT = 5 * 60
REPORT_FQDN = False
FORCE_GID = None

ntype = type('')
btype = type(b'')
utype = type(u'')


def bstr(data, encoding='latin1'):  # pragma: no py2 cover
    if type(data) is utype:
        data = data.encode(encoding)
    return data


def ustr(data, encoding='latin1'):  # pragma: no py2 cover
    if type(data) is btype:
        data = data.decode(encoding)
    return data


def nstr(data, encoding='utf-8'):  # pragma: no py2 cover
    if type(data) is ntype:
        return data
    elif type(data) is btype:
        return data.decode(encoding)
    else:
        return data.encode(encoding)  # pragma: no py3 cover


GPG_BIN = '/usr/bin/gpg'
GPG_OWNER_TRUST = bstr('034327E8206469CB296AC14ECCE80D2B8B53D14B:6:\n')


def get_freezer_blacklist():
    result = set()
    try:
        f = open(FREEZER_BLACKLIST, 'r')
        for line in f:
            result.add(line.rstrip())
        f.close()
    except Exception:
        pass
    return result


def _apply_ptype(ptype, filename):
    name_parts = filename.split('.')
    if ptype:
        filename = '.'.join([name_parts[0], ptype, name_parts[-1]])
    else:
        filename = '.'.join([name_parts[0], name_parts[-1]])
    return filename


def apply_ptype(ptype):
    global PATCH_BIN, PATCH_INFO, BLACKLIST_FILE, FIXUPS_FILE, PATCH_DONE
    PATCH_BIN = _apply_ptype(ptype, PATCH_BIN)
    PATCH_INFO = _apply_ptype(ptype, PATCH_INFO)
    BLACKLIST_FILE = _apply_ptype(ptype, BLACKLIST_FILE)
    FIXUPS_FILE = _apply_ptype(ptype, FIXUPS_FILE)
    PATCH_DONE = _apply_ptype(ptype, PATCH_DONE)


def _printlvl(message, level, file=None):
    if level >= PRINT_LEVEL:
        print(message, file=file)


def loginfo(message):
    _printlvl(message, PRINT_INFO)
    kcarelog.info(message)


def logerror(message, print_msg=True):
    if print_msg:
        _printlvl(message, PRINT_ERROR, file=sys.stderr)
    kcarelog.error(message)


def logexc(message):
    if PRINT_ERROR >= PRINT_LEVEL:
        import traceback
        traceback.print_exc()
    kcarelog.exception(message)


def _timestmap_str():
    return str(int(time.time()))


def nohup_fork(func, sleep=None):  # pragma: no cover
    """
    Run func in a fork in its own process group (stays alive after the kcarectl process dies).

    :param func: function to execute
    :return:
    """
    pid = os.fork()
    if pid != 0:
        os.waitpid(pid, 0)
        return
    os.setsid()
    pid = os.fork()
    if pid != 0:
        os._exit(0)
    # close standard files to release TTY
    os.close(0)
    # redirect stdout/stderr into log file
    with open(LOG_FILE, 'a') as fd:
        os.dup2(fd.fileno(), 1)
        os.dup2(fd.fileno(), 2)
    if sleep:
        time.sleep(sleep)
    try:
        func()
    except Exception:
        kcarelog.exception('Wait exception')
        os._exit(1)
    os._exit(0)


def atomic_write(fname, content, ensure_dir=False):
    tmp_fname = fname + '.tmp'
    dname = os.path.dirname(tmp_fname)
    if ensure_dir and not os.path.exists(dname):
        os.makedirs(dname)
    with open(tmp_fname, 'w') as f:
        f.write(content)
    os.rename(tmp_fname, fname)


def touch_anchor():
    """
    Check whether there was a failed patching attempt.

    If the anchor file does not exist, create one with the current timestamp
    and schedule its deletion after $timeout. If the anchor exists and was
    created less than $timeout ago, raise an error.
    """
    anchor_filepath = os.path.join(PATCH_CACHE, '.kcareprev.lock')
    if os.path.isfile(anchor_filepath):
        with open(anchor_filepath, 'r') as afile:
            try:
                timestamp = int(afile.read())
                # anchor was created quite recently,
                # which means that something went wrong
                if timestamp + SUCCESS_TIMEOUT > time.time():
                    raise PreviousPatchFailedException(timestamp, anchor_filepath)
            except ValueError:
                pass
    atomic_write(anchor_filepath, _timestmap_str())  # write a new timestamp


def commit_update(state_data):
    """
    See touch_anchor() for a detailed explanation of the anchor mechanics.
    See KPT-730 for details about action registration.

    :param state_data: dict with current level, kernel_id etc.
    """
    try:
        os.remove(os.path.join(PATCH_CACHE, '.kcareprev.lock'))
    except OSError:
        pass
    register_action('done', state_data)
    # reset module cache, to allow server_info to get fresh data
    get_loaded_modules.modules = None
    try:
        get_latest_patch_level(reason='done')
    except Exception:
        kcarelog.exception('Cannot send update info!')


def save_to_file(response, dst):
    parent_dir = os.path.dirname(dst)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    with open(dst, 'wb') as f:
        shutil.copyfileobj(response, f)


def clean_directory(directory, exclude_path=None, keep_n=CACHE_ENTRIES, pattern=None):
    if not os.path.exists(directory):
        return
    data = []
    items = os.listdir(directory)
    if pattern is not None:
        items = fnmatch.filter(items, pattern)
    for item in items:
        full_path = os.path.join(directory, item)
        if full_path != exclude_path:
            data.append((os.stat(full_path).st_mtime, full_path))
    data.sort(reverse=True)
    for _, entry in data[keep_n:]:
        if os.path.isfile(entry) or os.path.islink(entry):
            os.remove(entry)
        else:
            shutil.rmtree(entry)


def clear_cache(khash, plevel):
    clean_directory(os.path.join(PATCH_CACHE, 'patches'),
                    exclude_path=get_cache_path(khash, plevel, ''))


def get_cache_path(khash, plevel, fname):
    prefix = TEST_PREFIX or 'none'
    ptype = PATCH_TYPE or 'default'
    patch_dir = '-'.join([prefix, khash, plevel, ptype])
    result = (PATCH_CACHE, 'patches', patch_dir)
    if fname:
        result += (fname,)
    return os.path.join(*result)


def get_current_level_path(khash, fname):
    prefix = TEST_PREFIX or 'none'
    module_dir = '-'.join([prefix, khash])
    result = (PATCH_CACHE, 'modules', module_dir)
    if fname:
        result += (fname,)
    return os.path.join(*result)


def save_cache_latest(khash, patch_level):
    atomic_write(get_current_level_path(khash, 'latest'), patch_level, ensure_dir=True)


def get_cache_latest(khash):
    path_with_latest = get_current_level_path(khash, 'latest')
    if os.path.isfile(path_with_latest):
        return open(path_with_latest, 'r').read()


def check_gpg_signature(file_path, signature):  # mocked: py/kcarectl_tests
    """
    Check a file signature using the gpg tool.
    If the signature is wrong, BadSignatureException is raised.

    :param file_path: path to the file whose signature will be checked
    :param signature: a file with the signature
    :return: True in case of a valid signature
    :raises: BadSignatureException
    """
    if gpg_exec(['--verify', signature, file_path]) != 0:
        raise BadSignatureException('Bad Signature: {0}'.format(file_path))
    return True


class CertificateError(ValueError):
    pass


class KcareError(Exception):
    """
    Base kernelcare exception which will be considered as an expected error,
    so the full traceback will not be shown.
""" pass class NotFound(HTTPError): pass class UnknownKernelException(KcareError): def __init__(self): Exception.__init__(self, 'Unknown Kernel ({0} {1} {2})'.format( get_distro()[0], platform.release(), get_kernel_hash())) class UnableToGetLicenseException(KcareError): def __init__(self, code): Exception.__init__(self, 'Unknown Issue when getting trial license. Error code: ' + str(code)) class ApplyPatchError(KcareError): def __init__(self, code, freezer_style, level, patch_file, *args, **kwargs): super(ApplyPatchError, self).__init__(*args, **kwargs) self.code = code self.freezer_style = freezer_style self.level = level self.patch_file = patch_file self.distro = get_distro()[0] self.release = platform.release() def __str__(self): return 'Unable to apply patch ({0} {1} {2} {3} {4}, {5})'.format( self.patch_file, self.level, self.code, self.distro, self.release, ', '.join([str(i) for i in self.freezer_style]) ) class AlreadyTrialedException(KcareError): def __init__(self, ip, created, *args, **kwargs): super(AlreadyTrialedException, self).__init__(*args, **kwargs) self.created = created[0:created.index('T')] self.ip = ip def __str__(self): return 'The IP {0} was already used for a trial license on {1}'.format(self.ip, self.created) class BadSignatureException(KcareError): pass # KCARE-509 class PreviousPatchFailedException(KcareError): def __init__(self, timestamp, anchor, *args, **kwargs): super(PreviousPatchFailedException, self).__init__(*args, **kwargs) self.timestamp = timestamp self.anchor = anchor def __str__(self): message = 'It seems, the latest patch, applying at {0}, crashed, ' \ 'and further attempts will be suspended. ' \ 'To force patch applying, remove `{1}` file' return message.format(self.timestamp, self.anchor) class NoLibcareLicenseException(KcareError): pass def http_request(url, auth_string): request = Request(url) if not UPDATE_FROM_LOCAL and auth_string: request.add_header('Authorization', 'Basic {0}'.format(auth_string)) return request def print_cln_http_error(ex, url=None, stdout=True): url = url or '<route cannot be logged>' logerror('Unable to fetch {0}. 
Please try again later (error: {1})'.format(url, str(ex)), stdout) def parse_response_date(str_raw): # Try to split it by T str_date, sep, _ = str_raw.partition('T') # No success - split by space if not sep: str_date, _, _ = str_raw.partition(' ') return datetime.strptime(str_date, '%Y-%m-%d') def set_monitoring_key_for_ip_license(key): url = REGISTRATION_API_URL + '/nagios/register_key.plain?key={0}'.format(key) try: response = urlopen(url) res = data_as_dict(nstr(response.read())) code = int(res['code']) if code == 0: print('Key successfully registered') elif code == 1: print('Wrong key format or size') elif code == 2: print('No KernelCare license for that IP') else: print('Unknown error {0}'.format(code)) return code except HTTPError as e: print_cln_http_error(e, url) return -1 @contextmanager def execute_hooks(): if BEFORE_UPDATE_COMMAND: run_command(BEFORE_UPDATE_COMMAND, shell=True) try: yield finally: if AFTER_UPDATE_COMMAND: run_command(AFTER_UPDATE_COMMAND, shell=True) def update_config(prop, value): cf = open(CONFIG) lines = cf.readlines() cf.close() updated = False prop_eq = prop + '=' prop_sp = prop + ' ' for i in range(len(lines)): if lines[i].startswith(prop_eq) or lines[i].startswith(prop_sp): lines[i] = prop + ' = ' + str(value) + '\n' updated = True break if not updated: lines.append(prop + ' = ' + str(value) + '\n') atomic_write(CONFIG, ''.join(lines)) def plugin_info(fmt=None): """ The output will consist of: Ignore output up to the line with "--START--" Line 1: show if update is needed: 0 - updated to latest, 1 - update available, 2 - unknown kernel 3 - kernel doesn't need patches 4 - no license, cannot determine Line 2: licensing message (can be skipped, can be more then one line) Line 3: LICENSE: CODE: 1: license present, 2: trial license present, 0: no license Line 4: Update mode (True - auto-update, False, no auto update) Line 5: Effective kernel version Line 6: Real kernel version Line 7: Patchset Installed # --> If None, no patchset installed Line 8: Uptime (in seconds) If *format* is 'json' return the results in JSON format. 
Any other output means error retrieving info :return: """ pli = _patch_level_info() update_code = pli.code loaded_pl = pli.applied_lvl license_info_result = license_info() if fmt == 'json': results = { 'updateCode': str(update_code), 'autoUpdate': AUTO_UPDATE, 'effectiveKernel': kcare_uname(), 'realKernel': platform.release(), 'loadedPatchLevel': loaded_pl, 'uptime': int(get_uptime()), 'license': license_info_result, } print('--START--') print(json.dumps(results)) else: print('--START--') print(str(update_code)) print('LICENSE: ' + str(license_info_result)) print(AUTO_UPDATE) print(kcare_uname()) print(platform.release()) print(loaded_pl) print(get_uptime()) def license_info(): server_id = server_id_store.get_serverid() if server_id: url = REGISTRATION_API_URL + '/check.plain?server_id={0}'.format(server_id) try: response = urlopen(url) content = nstr(response.read()) res = data_as_dict(content) if not res or not res.get('code'): print('Unexpected CLN response: {0}'.format(content)) return 1 code = int(res['code']) if code == 0: print('Key-based valid license found') return 1 else: license_type = _get_license_info_by_ip(key_checked=1) if license_type == 0: print('No valid key-based license found') return license_type except HTTPError as e: print_cln_http_error(e, url) return 0 else: return _get_license_info_by_ip() def _get_license_info_by_ip(key_checked=0): url = REGISTRATION_API_URL + '/check.plain' try: response = urlopen(url) content = nstr(response.read()) res = data_as_dict(content) if res['success'].lower() == 'true': code = int(res['code']) if code == 0: print('Valid license found for IP {0}'.format(res['ip'])) return 1 # valid license if code == 1: ip = res['ip'] expires_str = parse_response_date(res['expire_date']).strftime('%Y-%m-%d') print('You have a trial license for the IP {0} that will expire on {1}'.format(ip, expires_str)) return 2 # trial license if code == 2 and key_checked == 0: ip = res['ip'] expires_str = parse_response_date(res['expire_date']).strftime('%Y-%m-%d') print('Your trial license for the IP {0} expired on {1}'.format(ip, expires_str)) if code == 3 and key_checked == 0: if 'ip' in res: print("The IP {0} hasn't been licensed".format(res['ip'])) else: print("This server hasn't been licensed") else: message = res.get('message', '') print('Error retrieving license info: {0}'.format(message)) except HTTPError as e: print_cln_http_error(e, url) except KeyError as key: print('Unexpected CLN response, cannot find {0} key:\n{1}'.format(key, content.strip())) return 0 # no valid license def register_trial(): trial_mark = os.path.join(PATCH_CACHE, 'trial-requested') if os.path.exists(trial_mark): return try: response = urlopen(REGISTRATION_API_URL + '/trial.plain') res = data_as_dict(nstr(response.read())) try: if res['success'].lower() == 'true': atomic_write(trial_mark, '', ensure_dir=True) if res['expired'] == 'true': raise AlreadyTrialedException(res['ip'], res['created']) loginfo('Requesting trial license for IP {0}. Please wait...'.format(res['ip'])) return None elif res['success'] == 'na': atomic_write(trial_mark, '', ensure_dir=True) raise KcareError('Invalid License') else: # TODO: make sane exception messages raise UnableToGetLicenseException(-1) # Invalid response? 
except KeyError as ke: raise UnableToGetLicenseException(ke) except HTTPError as e: raise UnableToGetLicenseException(e.code) def get_uptime(): f = None try: f = open('/proc/uptime', 'r') line = f.readline() result = str(int(float(line.split()[0]))) f.close() return result except Exception: if f is not None: f.close() return '-1' def get_last_stop(): """ Returns timestamp from PATCH_CACHE/stoped.at if its exsits """ stopped_at_filename = os.path.join(PATCH_CACHE, 'stopped.at') if os.path.exists(stopped_at_filename): with open(stopped_at_filename, 'r') as fh: return fh.read().rstrip() return '-1' def get_distro(): if sys.version_info[:2] < (3, 8): # pragma: no py3 cover return platform.linux_distribution() else: # pragma: no distro cover import distro return distro.linux_distribution(full_distribution_name=False) def edf_fallback_ptype(): distro, version = get_distro()[:2] # From talk with @kolshanov if distro == 'CloudLinux' and version.startswith('7.'): return 'extra' else: return '' def strip_version_timestamp(version): match = VERSION_RE.match(version) return match and match.group(1) or version def server_info(reason, now=None): data = dict() data['ts'] = int(now or time.time()) data['reason'] = reason data['machine'] = platform.machine() data['processor'] = platform.processor() data['release'] = platform.release() data['system'] = platform.system() data['version'] = platform.version() distro = get_distro() data['distro'] = distro[0] data['distro_version'] = distro[1] data['euname'] = kcare_uname() data['kcare_version'] = strip_version_timestamp(VERSION) data['last_stop'] = get_last_stop() data['node'] = get_hostname() data['uptime'] = get_uptime() data['virt'] = check_output([VIRTWHAT]).strip() description = parse_patch_description(loaded_patch_description()) data['ltimestamp'] = description['last-update'] data['patch_level'] = description['patch-level'] data['patch_type'] = description['patch-type'] data['kmod'] = get_current_kmod_version() or '' server_id = server_id_store.get_serverid() if server_id: data['server_id'] = server_id state_file = os.path.join(PATCH_CACHE, 'kcare.state') if os.path.exists(state_file): with open(state_file, 'r') as f: state = f.read() try: data['state'] = ast.literal_eval(state) except (SyntaxError, ValueError): pass return data def based_server_info(reason): return nstr(base64.b16encode(bstr(str(server_info(reason))))) def get_http_auth_string(): server_id = server_id_store.get_serverid() if server_id: return nstr(base64.b64encode(bstr('{0}:{1}'.format(server_id, 'kernelcare')))) return None # addr -> resolved_peer_addr map CONNECTION_STICKY_MAP = {} def sticky_connect(self): """Function remembers IP address of host connected to and uses it for later connections. 
Replaces stdlib version of httplib.HTTPConnection.connect """ addr = self.host, self.port resolved_addr = CONNECTION_STICKY_MAP.get(addr, addr) self.sock = socket.create_connection(resolved_addr, self.timeout) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if addr not in CONNECTION_STICKY_MAP: CONNECTION_STICKY_MAP[addr] = self.sock.getpeername() if self._tunnel_host: self._tunnel() httplib.HTTPConnection.connect = sticky_connect # python >= 2.7.9 stdlib (with ssl.HAS_SNI) is able to process https request on its own, # for earlier versions manual checks should be done if not getattr(ssl, 'HAS_SNI', None): # pragma: no cover unit try: import distutils.version import OpenSSL.SSL if distutils.version.StrictVersion(OpenSSL.__version__) < distutils.version.StrictVersion('0.13'): raise ImportError('No pyOpenSSL module with SNI ability.') except ImportError: pass else: def dummy_verify_callback(*args): # OpenSSL.SSL.Context.set_verify() requires callback # where additional checks could be done; # here is a dummy callback and a hostname check is made externally # to provide original exception from match_hostname() return True PureHTTPSConnection = httplib.HTTPSConnection class SSLSock(object): def __init__(self, sock): self._ssl_conn = sock self._makefile_refs = 0 def makefile(self, *args): self._makefile_refs += 1 return socket._fileobject(self._ssl_conn, *args, close=True) def close(self): if not self._makefile_refs and self._ssl_conn: self._ssl_conn.close() self._ssl_conn = None def sendall(self, *args): return self._ssl_conn.sendall(*args) class PyOpenSSLHTTPSConnection(httplib.HTTPSConnection): def connect(self): httplib.HTTPConnection.connect(self) # workaround to force pyopenssl to use TLSv1.2 ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2 | OpenSSL.SSL.OP_NO_SSLv3) if CHECK_SSL_CERTS: ctx.set_verify(OpenSSL.SSL.VERIFY_PEER, dummy_verify_callback) else: ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, dummy_verify_callback) ctx.set_default_verify_paths() conn = OpenSSL.SSL.Connection(ctx, self.sock) conn.set_connect_state() # self._tunnel_host is an original hostname in case of proxy use server_host = self._tunnel_host or self.host conn.set_tlsext_host_name(server_host) conn.do_handshake() if CHECK_SSL_CERTS: match_hostname(conn.get_peer_certificate(), server_host) self.sock = SSLSock(conn) httplib.HTTPSConnection = PyOpenSSLHTTPSConnection def _urlopen(url, *args, **kwargs): # mocked: py/kcarectl_tests if hasattr(url, 'get_full_url'): request_url = url.get_full_url() else: request_url = url url = Request(url) url.add_header('KC-Version', VERSION) try: if not CHECK_SSL_CERTS and getattr(ssl, 'HAS_SNI', None): # pragma: no cover unit ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE return std_urlopen(url, *args, context=ctx, **kwargs) return std_urlopen(url, *args, **kwargs) except HTTPError as ex: if ex.code == 404: raise NotFound(ex.url, ex.code, ex.msg, ex.hdrs, ex.fp) # HTTPError is a URLError descendant and contains URL, raise it as is raise except URLError as ex: # Local patches OSError(No such file) should be interpreted as Not found(404) # It was done as a chain because when it implemented with "duck-typing" it will mess # with error context if ex.args and hasattr(ex.args[0], 'errno') and ex.args[0].errno == errno.ENOENT: raise NotFound(url, 404, str(ex), None, None) # there is no information about URL in the base URLError class, add it and raise ex.reason = 'Request for `{0}` failed: 
{1}'.format(request_url, ex) ex.url = request_url raise def check_exc(*exc_list): def inner(e, state): return isinstance(e, exc_list) return inner def retry(check_retry, count=None, delay=None, backoff=None): if delay is None: delay = RETRY_DELAY if count is None: count = RETRY_COUNT if backoff is None: backoff = RETRY_BACKOFF state = {} def decorator(fn): def inner(*args, **kwargs): ldelay = delay for _ in range(count): try: return fn(*args, **kwargs) except Exception as e: if not check_retry(e, state): raise time.sleep(ldelay) ldelay = min(ldelay * backoff, RETRY_MAX_DELAY) # last try return fn(*args, **kwargs) return inner return decorator def check_urlopen_retry(e, state): if isinstance(e, HTTPError): return e.code >= 500 elif isinstance(e, URLError): return True def check_auth_retry(e, state): if isinstance(e, HTTPError): if e.code in (403, 401): return handle_forbidden(state) return e.code >= 500 elif isinstance(e, URLError): return True def is_local_url(url): if hasattr(url, 'get_full_url'): url = url.get_full_url() return url.startswith('file:') def urlopen(url, *args, **kwargs): if is_local_url(url): return _urlopen(url, *args, **kwargs) return retry(check_urlopen_retry)(_urlopen)(url, *args, **kwargs) def urlopen_auth(url, *args, **kwargs): if is_local_url(url): return _urlopen(url, *args, **kwargs) request = http_request(url, get_http_auth_string()) if kwargs.pop('check_license', True): check = check_auth_retry else: check = check_urlopen_retry return retry(check)(_urlopen)(request, *args, **kwargs) def handle_forbidden(state): """ In case of 403 error we should check what's happen. Case #1. We are trying to register unlicensed machine and should try to register trial. Case #2. We have a valid license but access restrictions on server are not consistent yet and we had to try later. """ if 'license' in state: # license has already been checked and is valid, no need to ask CLN again return True if CHECK_CLN_LICENSE_STATUS: server_id = server_id_store.get_serverid() if server_id: url = REGISTRATION_API_URL + '/check.plain' + '?server_id={0}'.format(server_id) else: url = REGISTRATION_API_URL + '/check.plain' try: # do not retry in case of 500 from CLN! # otherwise, CLN will die in pain because of too many requests content = nstr(_urlopen(url).read()) info = data_as_dict(content) except URLError as ex: print_cln_http_error(ex, url, stdout=False) return if not info or not info.get('code'): kcarelog.error('Unexpected CLN response: {0}'.format(content)) return if info['code'] in ['0', '1']: # license is fine: 0 - valid license, 1 - valid trial license; # looks like htpasswd not updated yet; # mark state as licensed to avoid repeated requests to CLN state['license'] = True logerror('Unable to access server. Retrying...') return True else: register_trial() def fetch_patch_level(reason): if LEVEL is not None: return LEVEL for latest in PATCH_LATEST: if UPDATE_FROM_LOCAL: url = get_kernel_url(KHASH, latest) else: url = get_kernel_url(KHASH, stickyfy(latest)) + '?' 
+ based_server_info(reason) try: response = urlopen_auth(url, check_license=False) return nstr(response.read()).rstrip('\n') except NotFound: pass except HTTPError as ex: # No license - no access if ex.code in (403, 401): raise KcareError('KC licence is required') raise raise UnknownKernelException() def fetch_signature(url, dst, auth=False): urlopen_local = urlopen if auth: urlopen_local = urlopen_auth sig_exts = ['.sig2', '.sig'] for sig_ext in sig_exts: try: signature = urlopen_local(url + sig_ext) break except NotFound as e: if sig_ext == sig_exts[-1]: raise e # pragma: no cover sig_dst = dst + sig_ext save_to_file(signature, sig_dst) return sig_dst # BadSignatureException is the only side effect of interrupted connection, # should retry file extraction in this case # TODO: what about clients with USE_SIGNATURE == False? use content-len? @retry(check_exc(BadSignatureException), count=3, delay=0) def fetch_url(url, dst, check_signature=False): response = urlopen_auth(url) save_to_file(response, dst) if check_signature: try: signature = fetch_signature(url, dst, auth=True) return check_gpg_signature(dst, signature) except BadSignatureException: if os.path.exists(dst): os.rename(dst, dst + '.tmp') raise else: return True def probe_patch(khash, level, ptype): sig_exts = ['.sig2', '.sig'] for sig_ext in sig_exts: url = get_kernel_url(khash, level, _apply_ptype(ptype, PATCH_BIN) + sig_ext) kcarelog.info('Probing patch URL: {0}'.format(url)) try: urlopen_auth(url, check_license=False) except NotFound: kcarelog.info('{0} is not available: 404'.format(url)) if sig_ext != sig_exts[-1]: continue return False except URLError as ex: kcarelog.info('{0} is not available: {1}'.format(url, str(ex))) return True class PatchFetcher(object): def __init__(self, _hash, patch_level=None): self.hash = _hash self.patch_level = patch_level def fetch_patch_file_level(self, level, name, check_signature=False): url = get_kernel_url(self.hash, level, name) dst = get_cache_path(self.hash, level, name) return fetch_url(url, dst, check_signature) def fetch_patch_file(self, name, check_signature=False): assert self.patch_level is not None return self.fetch_patch_file_level(self.patch_level, name, check_signature) def fetch_kmod(self): if 'patches.kernelcare.com' in PATCH_SERVER: url = get_kernel_url(self.hash, self.patch_level, KMOD_BIN) else: # ePortal workaround ): url = get_kernel_url(self.hash, KMOD_BIN) dst = get_cache_path(self.hash, self.patch_level, KMOD_BIN) return fetch_url(url, dst, USE_SIGNATURE) def is_patch_fetched(self): assert self.patch_level is not None patch_files = ( get_cache_path(self.hash, self.patch_level, PATCH_DONE), get_cache_path(self.hash, self.patch_level, PATCH_BIN), get_cache_path(self.hash, self.patch_level, PATCH_INFO), get_cache_path(self.hash, self.patch_level, KMOD_BIN), ) for fname in patch_files: if not os.path.isfile(fname): return False return True def fetch_patch(self): assert self.patch_level is not None if self.patch_level == '0': return self.patch_level if self.is_patch_fetched(): loginfo('Updates already downloaded') return self.patch_level loginfo('Downloading updates') try: self.fetch_patch_file(PATCH_BIN, check_signature=USE_SIGNATURE) except NotFound: raise KcareError('The `{0}` patch level is not found for `{1}` patch type. 
' 'Please select valid patch type or patch level' .format(self.patch_level, PATCH_TYPE or 'default')) self.fetch_patch_file(PATCH_INFO, check_signature=USE_SIGNATURE) self.fetch_kmod() self.extract_blacklist() open(get_cache_path(self.hash, self.patch_level, PATCH_DONE), 'wb').close() return self.patch_level def extract_blacklist(self): assert self.patch_level is not None buf = open(get_cache_path(self.hash, self.patch_level, PATCH_INFO), 'r').read() if buf: mo = BLACKLIST_RE.search(buf) if mo: open(get_cache_path(self.hash, self.patch_level, BLACKLIST_FILE), 'w').write(mo.group(1)) def fetch_fixups(self, level): """ Download fixup files for defined patch level :param level: download fixups for this patch level (usually it's a level of loaded patch) :return: None """ if level is None: return try: # never use cache for fixup files, must be downloaded from scratch self.fetch_patch_file_level(level, FIXUPS_FILE, check_signature=USE_SIGNATURE) except NotFound: return fixups = get_cache_path(self.hash, level, FIXUPS_FILE) with open(fixups, 'r') as f: fixups = set([fixup.strip() for fixup in f.readlines()]) for fixup in fixups: self.fetch_patch_file_level(level, fixup, check_signature=USE_SIGNATURE) def get_kernel_hash(): f = open(KERNEL_VERSION_FILE, 'rb') try: return hashlib.sha1(f.read()).hexdigest() finally: f.close() def kcare_check(): pli = _patch_level_info() print(pli.msg) if pli.code == PLI.PATCH_NEED_UPDATE: sys.exit(0) else: sys.exit(1) def kcare_latest_patch_info(is_json=False): """ Retrieve and output to STDOUT latest patch info, so it is easy to get list of CVEs in use. More info at https://cloudlinux.atlassian.net/browse/KCARE-952 :return: None """ try: latest = get_latest_patch_level(reason='info', policy=POLICY_REMOTE) if not latest or latest == '0': raise UnknownKernelException url = get_kernel_url(KHASH, latest, PATCH_INFO) patch_info = nstr(urlopen_auth(url).read()) if is_json: patches, result = [], {} for chunk in patch_info.split('\n\n'): data = data_as_dict(chunk) if data and 'kpatch-name' in data: patches.append(data) else: result.update(data) result['patches'] = patches patch_info = json.dumps(result) print(patch_info) except HTTPError as e: print_cln_http_error(e, e.url) return 1 except UnknownKernelException: print('No patches available') return 0 def _kcare_patch_info_json(pli): result = {'message': pli.msg} if pli.applied_lvl is not None: patch_info = _kcare_patch_info(pli) patches = [] for chunk in patch_info.split('\n\n'): data = data_as_dict(chunk) if data and 'kpatch-name' in data: patches.append(data) else: result.update(data) result['patches'] = patches return json.dumps(result, sort_keys=True) def _kcare_patch_info(pli): khash = get_kernel_hash() cache_path = get_cache_path(khash, pli.applied_lvl, PATCH_INFO) if not os.path.isfile(cache_path): raise KcareError("Can't find information due to the absent patch information file." 
" Please, run /usr/bin/kcarectl --update and try again.") info = open(cache_path, 'r').read() if info: info = BLACKLIST_RE.sub('', info) return info def patch_info(is_json=False): pli = _patch_level_info() if not is_json: if pli.code != 0: print(pli.msg) if pli.applied_lvl is None: return print(_kcare_patch_info(pli)) else: print(_kcare_patch_info_json(pli)) UNAME_LABEL = 'uname: ' def is_uname_char(c): return str.isalnum(c) or c in '.-_+' def parse_uname(patch_level): khash = get_kernel_hash() f = open(get_cache_path(khash, patch_level, PATCH_INFO), 'r') try: for line in f.readlines(): if line.startswith(UNAME_LABEL): return ''.join(filter(is_uname_char, line[len(UNAME_LABEL):].strip())) finally: f.close() return '' def kcare_uname_su(): patch_level = loaded_patch_level() if patch_level is None or patch_level == '0': return platform.release() return parse_uname(patch_level) def is_same_patch(new_patch_file): # mocked: py/kcarectl_tests args = [KPATCH_CTL, 'file-info', new_patch_file] new_patch_info = check_output(args) current_patch_info = _patch_info() build_time_label = 'kpatch-build-time' return get_patch_value(new_patch_info, build_time_label) == get_patch_value(current_patch_info, build_time_label) def kcare_update_effective_version(new_version): if os.path.exists(KCARE_UNAME_FILE): try: f = open(KCARE_UNAME_FILE, 'w') f.write(new_version) f.close() return True except Exception: pass return False def kcare_uname(): if os.path.exists(KCARE_UNAME_FILE): return open(KCARE_UNAME_FILE, 'r').read().strip() else: # TODO: talk to @kolshanov about runtime results from KPATCH_CTL info # (euname from kpatch-description -- not from kpatch.info file) return kcare_uname_su() def kcare_need_update(applied_level, new_level): if new_level == '0': return False try: cur_int = int(applied_level) new_int = int(new_level) if new_int < cur_int: # ignore down-patching return False except (TypeError, ValueError): pass if applied_level != new_level: return True new_patch_file = get_cache_path(KHASH, new_level, PATCH_BIN) if not is_same_patch(new_patch_file): return True return False class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[asection]\n' def readline(self): # pragma: no py3 cover if self.sechead: try: return self.sechead finally: self.sechead = None else: return self.fp.readline() def __iter__(self): # pragma: no py2 cover if self.sechead: yield self.sechead self.sechead = None for line in self.fp: yield line def get_proxy_from_env(scheme): if scheme == 'http': return os.getenv('http_proxy') or os.getenv('HTTP_PROXY') elif scheme == 'https': return os.getenv('https_proxy') or os.getenv('HTTPS_PROXY') def get_config_settings(): result = {} cp = ConfigParser(defaults={'HTTP_PROXY': '', 'HTTPS_PROXY': ''}) try: config = FakeSecHead(open(CONFIG)) if PY2: # pragma: no py3 cover cp.readfp(config) else: # pragma: no py2 cover cp.read_file(config) except Exception: return {} def bool_converter(value): return value.upper() in ('1', 'TRUE', 'YES', 'Y') def read_var(name, default=None, convert=None, dst=None): try: value = cp.get('asection', name) except Exception: value = default if value is not None: if convert: value = convert(value) result[dst or name] = value for scheme, variable in [('http', 'HTTP_PROXY'), ('https', 'HTTPS_PROXY')]: # environment settings take precedence over kcare.config ones if not get_proxy_from_env(scheme): proxy = cp.get('asection', variable) if proxy: os.environ[variable] = proxy read_var('UPDATE_POLICY', convert=str.upper) read_var('PATCH_METHOD', 
convert=str.upper) read_var('PATCH_SERVER', convert=lambda v: v.rstrip('/')) read_var('AUTO_UPDATE', convert=bool_converter) read_var('LIB_AUTO_UPDATE', convert=bool_converter) read_var('REGISTRATION_URL', convert=lambda v: v.rstrip('/'), dst='REGISTRATION_API_URL') read_var('PREFIX', dst='TEST_PREFIX') read_var('IGNORE_UNKNOWN_KERNEL', convert=bool_converter) read_var('UPDATE_SYSCTL_CONFIG', convert=bool_converter, dst='LOAD_KCARE_SYSCTL') read_var('CHECK_SSL_CERTS', convert=bool_converter) read_var('PATCH_TYPE', convert=str.lower) read_var('PREV_PATCH_TYPE', convert=str.lower) read_var('PATCH_LEVEL', convert=lambda v: v or None, dst='LEVEL') read_var('STICKY_PATCH', convert=str.upper, dst='STICKY') read_var('REPORT_FQDN', convert=bool_converter) read_var('SILENCE_ERRORS', convert=bool_converter) read_var('FORCE_GID') read_var('LIBCARE_DISABLED', convert=bool_converter) read_var('BEFORE_UPDATE_COMMAND', convert=lambda v: v.strip()) read_var('AFTER_UPDATE_COMMAND', convert=lambda v: v.strip()) read_var('KMSG_OUTPUT', convert=bool_converter) read_var('KCORE_OUTPUT_SIZE', convert=int) read_var('USERSPACE_PATCHES', convert=lambda v: [ptch.strip().lower() for ptch in v.split(',')]) return result def update_sysctl(): if LOAD_KCARE_SYSCTL: if not (os.path.isfile(SYSCTL_CONFIG) and os.access(SYSCTL_CONFIG, os.R_OK)): kcarelog.warning('File {0} does not exist or has no read access'.format(SYSCTL_CONFIG)) return code, _, _ = run_command(['/sbin/sysctl', '-q', '-p', SYSCTL_CONFIG], catch_stdout=True) if code != 0: kcarelog.warning('Unable to load kcare sysctl.conf: {0}'.format(code)) def is_cpanel(): return os.path.isfile('/usr/local/cpanel/cpanel') def is_secure_boot(): # mocked: py/kcarectl_tests/test_load_kmod.py efivars_location = "/sys/firmware/efi/efivars/" if not os.path.exists(efivars_location): return False for filename in os.listdir(efivars_location): if filename.startswith('SecureBoot'): varfile = os.path.join(efivars_location, filename) # Get last byte with open(varfile, 'rb') as vfd: return vfd.read()[-1:] == b'\x01' return False def inside_vz_container(): # mocked: py/kcarectl_tests/test_load_kmod.py return os.path.exists('/proc/vz/veinfo') and not os.path.exists('/proc/vz/version') def inside_lxc_container(): # mocked: py/kcarectl_tests/test_load_kmod.py return '/lxc/' in open('/proc/1/cgroup').read() def inside_docker_container(): # mocked: py/kcarectl_tests/test_load_kmod.py return os.path.isfile('/.dockerenv') def edit_sysctl_conf(remove, append): """ Update SYSCTL_CONFIG accordingly the edits """ # Create if it does not exist if not os.path.isfile(SYSCTL_CONFIG): open(SYSCTL_CONFIG, 'a').close() # Check kcare sysctl path and read access if not os.access(SYSCTL_CONFIG, os.R_OK): kcarelog.warning('File {0} has no read access'.format(SYSCTL_CONFIG)) return with open(SYSCTL_CONFIG, 'r+') as sysctl: lines = sysctl.readlines() sysctl.seek(0) for line in lines: # Do not rewrite lines that should be deleted if not any(line.startswith(r) for r in remove): sysctl.write(line) # Write additional lines for a in append: sysctl.write(a + '\n') sysctl.truncate() def run_command(command, catch_stdout=False, catch_stderr=False, shell=False): # mocked: py/kcarectl_tests/conftest.py stdout = subprocess.PIPE if catch_stdout else None stderr = subprocess.PIPE if catch_stderr else None p = subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=shell) stdout, stderr = p.communicate() code = p.returncode if stdout is not None: stdout = nstr(stdout) if stderr is not None: stderr = nstr(stderr) 
return code, stdout, stderr def check_output(args): _, stdout, _ = run_command(args, catch_stdout=True) return stdout def get_loaded_modules_uncached(): return [line.split()[0] for line in open('/proc/modules')] def get_loaded_modules(): modules = getattr(get_loaded_modules, 'modules', None) if modules: return modules get_loaded_modules.modules = get_loaded_modules_uncached() return get_loaded_modules.modules def detect_conflicting_modules(modules): for module in modules: if CONFLICTING_MODULES_RE.match(module): raise KcareError("Detected '{0}' kernel module loaded. Please unload that module first".format(module)) def get_current_kmod_version(): kmod_version_file = '/sys/module/kcare/version' if not os.path.exists(kmod_version_file): return with open(kmod_version_file, 'r') as f: version = f.read().strip() return version def is_kmod_version_changed(khash, plevel): old_version = get_current_kmod_version() if not old_version: return True new_version = check_output( ['/sbin/modinfo', '-F', 'version', get_cache_path(khash, plevel, KMOD_BIN)] ).strip() return old_version != new_version def get_kcare_kmod_link(): return '/lib/modules/{0}/extra/kcare.ko'.format(platform.uname()[2]) def kmod_is_signed(): level = get_latest_patch_level(reason='info') kmod_file = get_cache_path(KHASH, level, KMOD_BIN) if not os.path.isfile(kmod_file): return with open(kmod_file, 'rb') as vfd: return vfd.read()[-28:] == b'~Module signature appended~\n' def load_kmod(kmod, **kwargs): cmd = ['/sbin/insmod', kmod] for key, value in kwargs.items(): cmd.append('{0}={1}'.format(key, value)) code, _, _ = run_command(cmd, catch_stdout=True) if code != 0: raise KcareError('Unable to load kmod ({0} {1}). Try to run with `--check-compatibility` flag.'.format(kmod, code)) def check_compatibility(): if is_secure_boot() and not kmod_is_signed(): raise KcareError('Secure boot is enabled. Not supported by KernelCare.') if inside_vz_container() or inside_lxc_container() or inside_docker_container(): raise KcareError('You are running inside a container. Kernelcare should be executed on host side instead.') def load_kcare_kmod(khash, level): # To make `kdump` service work. 
We need to copy # `kcare.ko` into `/lib/modules/$(uname -r)/extra/kcare.ko` # and call `/sbin/depmod` kcare_link = get_kcare_kmod_link() kcare_file = get_cache_path(khash, level, KMOD_BIN) try: shutil.copy(kcare_file, kcare_link) except Exception: kcare_link = kcare_file kmod_args = {} if KPATCH_DEBUG: kmod_args['kpatch_debug'] = 1 load_kmod(kcare_link, **kmod_args) run_command(['/sbin/depmod'], catch_stdout=True, catch_stderr=True) def unload_kmod(modname): code, _, _ = run_command(['/sbin/rmmod', modname], catch_stdout=True) if code != 0: raise KcareError('Unable to unload {0} kmod {1}'.format(modname, code)) def apply_fixups(khash, current_level, modules): loaded = [] for mod in ['vmlinux'] + modules: modpath = get_cache_path(khash, current_level, 'fixup_{0}.ko'.format(mod)) if os.path.exists(modpath): load_kmod(modpath) loaded.append('fixup_{0}'.format(mod)) return loaded def remove_fixups(fixups): for mod in fixups: try: unload_kmod(mod) except Exception: # pragma: no cover pass def get_freezer_style(freezer, modules): if freezer: method = freezer elif PATCH_METHOD: method = PATCH_METHOD elif get_freezer_blacklist().intersection(modules): # blacklist module found, use smart freezer # xxx: this branch could be safely removed when smart would work by default return 'freeze_conflict', freezer, PATCH_METHOD, True else: # user doesn't provide patch method and no conflicting modules loaded return 'default', freezer, PATCH_METHOD, False # non default patch method, translate it into form accepted by kpatch_ctl patch_method_map = { 'NONE': 'freeze_none', 'NOFREEZE': 'freeze_none', 'FULL': 'freeze_all', 'FREEZE': 'freeze_all', 'SMART': 'freeze_conflict', } method = method.upper() if method in patch_method_map: method = patch_method_map[method] else: raise KcareError('Unable to detect freezer style ({0}, {1}, {2}, {3})' .format(method, freezer, PATCH_METHOD, False)) return method, freezer, PATCH_METHOD, False def kcare_load(khash, level, mode, freezer='', use_anchor=False): state_data = {'khash': khash, 'future': level, 'mode': mode} register_action('start', state_data) current_level = loaded_patch_level() modules = get_loaded_modules() detect_conflicting_modules(modules) # get freezer in the beginning to prevent any further job in case of exception freezer_style = get_freezer_style(freezer, modules) patch_file = get_cache_path(khash, level, PATCH_BIN) save_cache_latest(khash, level) description = '{0}-{1}:{2};{3}'.format(level, PATCH_TYPE, _timestmap_str(), # future server_info['ltimestamp'] parse_uname(level)) kmod_loaded = 'kcare' in modules kmod_changed = kmod_loaded and is_kmod_version_changed(khash, level) patch_loaded = current_level is not None same_patch = patch_loaded and is_same_patch(patch_file) and kcare_update_effective_version(description) state_data.update({'current': current_level, 'kmod_changed': kmod_changed}) if same_patch: register_action('done', state_data) return if patch_loaded: register_action('fxp', state_data) fixups = apply_fixups(khash, current_level, modules) register_action('unpatch', state_data) kpatch_ctl_unpatch(freezer_style) register_action('unfxp', state_data) remove_fixups(fixups) if kmod_changed: register_action('unload', state_data) unload_kmod('kcare') kmod_loaded = False if not kmod_loaded: register_action('load', state_data) load_kcare_kmod(khash, level) if use_anchor: # KCARE-509 touch_anchor() register_action('patch', state_data) kpatch_ctl_patch(patch_file, khash, level, description, freezer_style) update_sysctl() loginfo('Patch level {0} applied. 
Effective kernel version {1}'.format(level, kcare_uname())) # do final actions when update is considered as successful register_action('wait', state_data) nohup_fork(lambda: commit_update(state_data), sleep=SUCCESS_TIMEOUT) def kpatch_ctl_patch(patch_file, khash, level, description, freezer_style): args = [KPATCH_CTL] blacklist_file = get_cache_path(khash, level, BLACKLIST_FILE) if os.path.exists(blacklist_file): args.extend(['-b', blacklist_file]) args.extend(['patch', '-d', description]) args.extend(['-m', freezer_style[0]]) args.append(patch_file) code, _, _ = run_command(args, catch_stdout=True) if code != 0: raise ApplyPatchError(code, freezer_style, level, patch_file) def kpatch_ctl_unpatch(freezer_style): code, _, _ = run_command([KPATCH_CTL, 'unpatch', '-m', freezer_style[0]], catch_stdout=True) if code != 0: raise KcareError('Error unpatching [{0}] {1}'.format(code, str(freezer_style))) def register_action(action, state_data): state_data['action'] = action state_data['ts'] = int(time.time()) atomic_write(os.path.join(PATCH_CACHE, 'kcare.state'), str(state_data)) def kcare_unload(freezer=''): current_level = loaded_patch_level() pf = PatchFetcher(KHASH) pf.fetch_fixups(current_level) modules = get_loaded_modules() freezer_style = get_freezer_style(freezer, modules) with execute_hooks(): if 'kcare' in modules: need_unpatch = current_level is not None if need_unpatch: fixups = apply_fixups(KHASH, current_level, modules) code, _, _ = run_command([KPATCH_CTL, 'unpatch', '-m', freezer_style[0]], catch_stdout=True) remove_fixups(fixups) if code != 0: raise KcareError('Error unpatching [{0}] {1}'.format(code, str(freezer_style))) # Unload kcare module and retry once after 10 seconds if failed retry(check_exc(KcareError), count=1, delay=10)(unload_kmod)('kcare') try: os.unlink(get_kcare_kmod_link()) except Exception: pass def kcare_info(is_json): pli = _patch_level_info() if is_json: return _kcare_info_json(pli) else: if pli.code != 0: return pli.msg if pli.applied_lvl is not None: return _patch_info() def _kcare_info_json(pli): result = {'message': pli.msg} if pli.applied_lvl is not None: result.update(data_as_dict(_patch_info())) result.update(parse_patch_description(result.get('kpatch-description'))) result['kpatch-state'] = pli.state return json.dumps(result) def _patch_info(): return check_output([KPATCH_CTL, 'info']) def data_as_dict(data): result = {} data = data.splitlines() for line in data: if line: key, delimiter, value = line.partition(':') if delimiter: result[key] = value.strip() return result def get_patch_value(info, label): return data_as_dict(info).get(label) def loaded_patch_description(): if 'kcare' not in get_loaded_modules(): return None # example: 28-:1532349972;4.4.0-128.154 # (patch level: number)-(patch type: free/extra/empty):(timestamp);(effective kernel version from kpatch.info) return get_patch_value(_patch_info(), 'kpatch-description') def parse_patch_description(desc): result = { 'patch-level': None, 'patch-type': 'default', 'last-update': '', 'kernel-version': '' } if not desc: return result level_type_timestamp, _, kernel = desc.partition(';') level_type, _, timestamp = level_type_timestamp.partition(':') patch_level, _, patch_type = level_type.partition('-') # need to return patch_level=None not to break old code # TODO: refactor all loaded_patch_level() usages to work with empty string instead of None result['patch-level'] = patch_level or None result['patch-type'] = patch_type or 'default' result['last-update'] = timestamp result['kernel-version'] = 
kernel return result def loaded_patch_level(): # mocked: py/kcarectl_tests return parse_patch_description(loaded_patch_description())['patch-level'] class PLI: PATCH_LATEST = 0 PATCH_NEED_UPDATE = 1 PATCH_UNAVALIABLE = 2 PATCH_NOT_NEEDED = 3 def __init__(self, code, msg, remote_lvl, applied_lvl, state): self.code = code self.msg = msg self.remote_lvl = remote_lvl self.applied_lvl = applied_lvl self.state = state def _patch_level_info(): current_patch_level = loaded_patch_level() try: # this line raises UnknownKernel from the bottom of this try new_patch_level = get_latest_patch_level(reason='info') if current_patch_level: if kcare_need_update(current_patch_level, new_patch_level): code, msg, state = ( PLI.PATCH_NEED_UPDATE, "Update available, run 'kcarectl --update'.", 'applied', ) else: code, msg, state = ( PLI.PATCH_LATEST, 'The latest patch is applied.', 'applied', ) else: # no patch applied if new_patch_level == '0': code, msg, state = ( PLI.PATCH_NOT_NEEDED, "This kernel doesn't require any patches.", 'unset', ) else: code, msg, state = ( PLI.PATCH_NEED_UPDATE, "No patches applied, but some are available, run 'kcarectl --update'.", 'unset', ) info = PLI(code, msg, new_patch_level, current_patch_level, state) except UnknownKernelException: code = PLI.PATCH_UNAVALIABLE if STICKY: msg = 'Invalid sticky patch tag {0} for kernel ({1} {2}). ' \ 'Please check /etc/sysconfig/kcare/kcare.conf ' \ 'STICKY_PATCH settings'.format(STICKY, get_distro()[0], platform.release()) else: msg = 'Unknown kernel ({0} {1} {2}), no patches available'.format( get_distro()[0], platform.release(), get_kernel_hash()) info = PLI(code, msg, None, None, 'unavailable') return info def check_gpg_bin(): if not os.path.isfile(GPG_BIN): raise KcareError('No {0} present. Please install gnupg'.format(GPG_BIN)) def gpg_exec(args, input=None): """ Simple wrapper that doesn't supress stderr. 
Just runs GPG_BIN with args and if it's not succeed prints stderr """ check_gpg_bin() p = subprocess.Popen([GPG_BIN, ] + args, env={'GNUPGHOME': GPG_KEY_DIR}, stderr=subprocess.PIPE, stdin=subprocess.PIPE) _, stderrdata = p.communicate(input=input) if p.returncode: logerror('Error executing command [{0} {1}]'.format(GPG_BIN, ' '.join(args))) logerror(stderrdata) return p.returncode def import_gpg_key(import_key): if not os.path.exists(GPG_KEY_DIR): os.makedirs(GPG_KEY_DIR) gpg_exec(['--import', import_key]) gpg_exec(['--import-ownertrust'], input=GPG_OWNER_TRUST) def rm_serverid(): os.unlink(SYSTEMID) def set_server_id(server_id): with open(SYSTEMID, 'w') as f: f.write('server_id={0}\n'.format(server_id)) def unregister(silent=False): url = None try: server_id = server_id_store.get_serverid() if server_id is None: if not silent: logerror('Error unregistering server: cannot find server id') return url = REGISTRATION_API_URL + '/unregister_server.plain?server_id={0}'.format(server_id) response = urlopen(url) content = nstr(response.read()) res = data_as_dict(content) if res['success'] == 'true': rm_serverid() if not silent: loginfo('Server was unregistered') elif not silent: logerror(content) logerror('Error unregistering server: ' + res['message']) except HTTPError as e: if not silent: print_cln_http_error(e, url) def register_retry(url): # pragma: no cover unit print('Register auto-retry has been enabled, the system can be registered later') pid = os.fork() if pid > 0: return os.setsid() pid = os.fork() import sys if pid > 0: sys.exit(0) sys.stdout.flush() si = open('/dev/null', 'r') so = open('/dev/null', 'a+') os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(so.fileno(), sys.stderr.fileno()) while True: time.sleep(60 * 60 * 2) code, server_id = try_register(url) if code == 0 and server_id: set_server_id(server_id) sys.exit(0) def tag_server(tag): """ Request to tag server from ePortal. See KCARE-947 for more info :param tag: String used to tag the server :return: 0 on success, -1 on wrong server id, other values otherwise """ url = None try: # TODO: is it ok to send request in case when no server_id found? (machine is not registered in ePortal) server_id = server_id_store.get_serverid() query = urlencode([('server_id', server_id), ('tag', tag)]) url = REGISTRATION_API_URL + '/tag_server.plain?{0}'.format(query) response = urlopen(url) res = data_as_dict(nstr(response.read())) return int(res['code']) except HTTPError as e: print_cln_http_error(e, url) return -3 except URLError as ue: print_cln_http_error(ue, url) return -4 except Exception as ee: logerror('Internal Error {0}'.format(ee)) return -5 def try_register(url): try: response = urlopen(url) res = data_as_dict(nstr(response.read())) return int(res['code']), res.get('server_id') except (HTTPError, URLError) as e: print_cln_http_error(e, url) return None, None except Exception: return None, None def get_hostname(): # KCARE-1165 If fqdn gathering is forced if REPORT_FQDN: # getaddrinfo() -> [(family, socktype, proto, canonname, sockaddr), ...] 
hostname = socket.getaddrinfo(socket.gethostname(), 0, 0, 0, 0, socket.AI_CANONNAME)[0][3] else: hostname = platform.node() return hostname def register(key, retry=False): # TODO: save key try: unregister(True) except Exception: pass hostname = get_hostname() query = urlencode([('hostname', hostname), ('key', key)]) url = '{0}/register_server.plain?{1}'.format(REGISTRATION_API_URL, query) code, server_id = try_register(url) if code == 0: set_server_id(server_id) loginfo('Server Registered') return 0 elif code == 1: logerror('Account Locked') elif code == 2: logerror('Invalid Key') elif code == 3: logerror('You have reached maximum registered servers for this key. ' 'Please go to your CLN account, remove unused servers and try again.') elif code == 4: logerror('IP is not allowed. Please change allowed IP ranges for the key in KernelCare Key tab in CLN') elif code == 5: logerror('This IP was already used for trial, you cannot use it for trial again') elif code == 6: logerror( 'This IP was banned. Please contact support for more information at https://www.kernelcare.com/support/') else: logerror('Unknown Error {0}'.format(code)) if retry: # pragma: no cover # TODO: save key to use it in case of problems and remove this register_retry(url) return 0 return code or -1 def kcdoctor(): doctor_url = 'https://www.cloudlinux.com/clinfo/kcdoctor.sh' doctor_filename = KCDOCTOR with tempfile.NamedTemporaryFile() as doctor_dst: try: signature = fetch_signature(doctor_url, doctor_dst.name) save_to_file(urlopen(doctor_url), doctor_dst.name) check_gpg_signature(doctor_dst.name, signature) doctor_filename = doctor_dst.name except (socket.error, HTTPError, URLError) as err: logerror('Kcare doctor download error: {0}. Fallback to the local one.'.format(err)) code, _, stderr = run_command(['bash', doctor_filename], catch_stderr=True) if code: raise KcareError("Script failed with '{0}' {1}".format(stderr, code)) class ServerIdStore: def __init__(self): self._server_id = None def _systemid(self): if not os.path.exists(SYSTEMID): return None cp = ConfigParser() config = FakeSecHead(open(SYSTEMID)) if PY2: # pragma: no py3 cover cp.readfp(config) else: # pragma: no py2 cover cp.read_file(config) return cp.get('asection', 'server_id') def _im360(self): if not os.path.exists(IM360_LICENSE_FILE): return None data = {} with open(IM360_LICENSE_FILE) as f: content = f.read() if content: try: data = json.loads(content) except Exception: pass # we are not interested why lic file can't be parsed return data.get('id') def get_serverid(self): """Get server_id or None if not present. Lookup order: SYSTEMID then IM360_LICENSE_FILE """ if not self._server_id: self._server_id = self._systemid() or self._im360() return self._server_id server_id_store = ServerIdStore() KHASH = get_kernel_hash() def get_kernel_url(khash, *parts): return get_patch_server_url(TEST_PREFIX, khash, *parts) def get_patch_server_url(*parts): return '/'.join(it.strip('/') for it in filter(None, (PATCH_SERVER,) + parts)) def check_new_kc_version(): url = get_patch_server_url('{0}-new-version'.format(EFFECTIVE_LATEST)) try: urlopen(url) except URLError: return False loginfo('A new version of the KernelCare package is available. ' 'To continue to get kernel updates, please install the new version') return True def get_latest_patch_level(reason, policy=POLICY_REMOTE): # mocked: py/kcarectl_tests/test_patch_level_info.py """ Get patch level to apply. :param reason: what was the source of request (update, info etc.) 

def get_latest_patch_level(reason, policy=POLICY_REMOTE):  # mocked: py/kcarectl_tests/test_patch_level_info.py
    """ Get patch level to apply.

    :param reason: what was the source of the request (update, info etc.)
    :param policy: REMOTE -- get latest patch_level from patchserver,
        LOCAL -- use cached latest,
        LOCAL_FIRST -- if cached level is None get latest from patchserver, use cache otherwise
    :return: patch_level string
    """
    cached_level = get_cache_latest(KHASH)
    consider_remote_ex = policy == POLICY_REMOTE or (policy == POLICY_LOCAL_FIRST and cached_level is None)
    try:
        remote_level = fetch_patch_level(reason)
    except Exception as ex:
        if consider_remote_ex:
            raise
        else:
            kcarelog.warning('Unable to send data: {0}'.format(ex))
    if policy == POLICY_REMOTE:
        level = remote_level
    else:
        level = cached_level
        if cached_level is None:
            if policy == POLICY_LOCAL:
                level = '0'
            elif policy == POLICY_LOCAL_FIRST:
                level = remote_level
            else:
                raise KcareError('Unknown policy, choose one of: REMOTE, LOCAL, LOCAL_FIRST')
    return level


def update_patch_type(ptype):
    global PATCH_TYPE
    if ptype == 'edf':
        # The only way a user can get here is by calling kcarectl --set-patch-type;
        # we don't support this anyway and can silently ignore it
        return
    if ptype != 'default':
        PATCH_TYPE = ptype
    else:
        PATCH_TYPE = ''
    if probe_patch(KHASH, fetch_patch_level(reason='probe'), PATCH_TYPE):
        update_config('PATCH_TYPE', PATCH_TYPE)
        if PATCH_TYPE == 'free' and is_cpanel():
            gid = FORCE_GID or CPANEL_GID
            edit_sysctl_conf(
                ('fs.enforce_symlinksifowner', 'fs.symlinkown_gid',),
                ('fs.enforce_symlinksifowner=1', 'fs.symlinkown_gid={0}'.format(gid),)
            )
        loginfo("'{0}' patch type selected".format(ptype))
    else:
        raise KcareError("'{0}' patch type is unavailable for your kernel".format(ptype))


def do_update(freezer, mode, policy=POLICY_REMOTE):
    """
    :param mode: UPDATE_MODE_MANUAL, UPDATE_MODE_AUTO or UPDATE_MODE_SMART
    :param policy: REMOTE -- download latest and patches from patchserver,
        LOCAL -- use cached files,
        LOCAL_FIRST -- download latest and patches if cached level is None, use cache in other cases
    :param freezer: freezer mode
    """
    if policy == POLICY_REMOTE:
        check_new_kc_version()
    try:
        level = get_latest_patch_level(reason='update', policy=policy)
    except UnknownKernelException as e:
        if mode == UPDATE_MODE_AUTO and IGNORE_UNKNOWN_KERNEL:
            msg = str(e)
            kcarelog.warning(msg)
            return
        raise
    current_level = loaded_patch_level()
    pf = PatchFetcher(KHASH, level)
    pf.fetch_patch()
    if not kcare_need_update(applied_level=current_level, new_level=level):
        loginfo('No updates are needed for this kernel')
        return
    # take into account AUTO_UPDATE config setting in case of `--auto-update` cli option
    if mode != UPDATE_MODE_AUTO or AUTO_UPDATE:
        with execute_hooks():
            pf.fetch_fixups(current_level)
            try:
                kcare_load(KHASH, level, mode, freezer, use_anchor=mode == UPDATE_MODE_SMART)
            finally:
                # Rotate crash report dumps
                clean_directory('/var/cache/kcare/dumps/', keep_n=3, pattern="kcore*.dump")
                clean_directory('/var/cache/kcare/dumps/', keep_n=3, pattern="kmsg*.log")
            clear_cache(KHASH, level)


""" This is needed to support sticky keys as per https://cloudlinux.atlassian.net/browse/KCARE-953 """

STICKY = False


def _stickyfy(prefix, fname):
    return prefix + '.' + fname


def stickyfy(file):
    """ Used to add sticky prefix to satisfy KCARE-953

    :param file: name of the file to stickify
    :return: stickified file.
    """
    if STICKY:
        if STICKY != 'KEY':
            return _stickyfy(STICKY, file)
        try:
            server_id = server_id_store.get_serverid()
            if server_id:
                response = urlopen(REGISTRATION_API_URL + '/sticky_patch.plain?server_id={0}'.format(server_id))
                res = data_as_dict(nstr(response.read()))
                code = int(res['code'])
                if code == 0:
                    return _stickyfy(res['prefix'], file)
                if code == 1:
                    return file
                if code == 2:
                    kcarelog.info('Server ID is not recognized. Please check if the server is registered')
                    sys.exit(-1)
                kcarelog.info('Error: ' + res['message'])
                sys.exit(-3)
            else:
                kcarelog.info('Patch set to STICKY=KEY, but server is not registered with the key')
                sys.exit(-4)
        except HTTPError as e:
            print_cln_http_error(e, e.url)
            sys.exit(-5)
    return file

def initialize_logging(level):  # pragma: no cover
    syslog_initialized = False
    if os.path.exists('/dev/log'):
        formatter = logging.Formatter('kcare %(levelname)s: %(message)s')
        try:
            handler = logging.handlers.SysLogHandler(address='/dev/log',
                                                     facility=logging.handlers.SysLogHandler.LOG_USER)
            handler.setLevel(logging.INFO)
            handler.setFormatter(formatter)
            syslog_initialized = True
        except Exception:
            pass
    if not syslog_initialized:
        formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
        handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 ** 2, backupCount=2)
        handler.setLevel(level)
        handler.setFormatter(formatter)
    kcarelog.addHandler(handler)


#################################
# from python 2.7.17 ssl stdlib #
#################################

def _dnsname_match(dn, hostname, max_wildcards=1):  # pragma: no cover
    """Matching according to RFC 6125, section 6.4.3

    http://tools.ietf.org/html/rfc6125#section-6.4.3
    """
    pats = []
    if not dn:
        return False

    pieces = dn.split(r'.')
    leftmost = pieces[0]
    remainder = pieces[1:]

    wildcards = leftmost.count('*')
    if wildcards > max_wildcards:
        # Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment.  A survey of established
        # policy among SSL implementations showed it to be a
        # reasonable choice.
        raise CertificateError('too many wildcards in certificate DNS name: ' + repr(dn))

    # speed up common case w/o wildcards
    if not wildcards:
        return dn.lower() == hostname.lower()

    # RFC 6125, section 6.4.3, subitem 1.
    # The client SHOULD NOT attempt to match a presented identifier in which
    # the wildcard character comprises a label other than the left-most label.
    if leftmost == '*':
        # When '*' is a fragment by itself, it matches a non-empty dotless
        # fragment.
        pats.append('[^.]+')
    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
        # RFC 6125, section 6.4.3, subitem 3.
        # The client SHOULD NOT attempt to match a presented identifier
        # where the wildcard character is embedded within an A-label or
        # U-label of an internationalized domain name.
        pats.append(re.escape(leftmost))
    else:
        # Otherwise, '*' matches any dotless string, e.g. www*
        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

    # add the remaining fragments, ignore any wildcards
    for frag in remainder:
        pats.append(re.escape(frag))

    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    return pat.match(hostname)


# match_hostname tweaked to get dns names from pyopenssl x509 cert object
def match_hostname(cert, hostname):  # pragma: no cover
    san = []
    for i in range(cert.get_extension_count()):
        e = cert.get_extension(i)
        if e.get_short_name() == 'subjectAltName':
            san = [it.strip().split(':', 1) for it in str(e).split(',')]
    if not cert:
        raise ValueError('empty or no certificate, match_hostname needs a '
                         'SSL socket or SSL context with either '
                         'CERT_OPTIONAL or CERT_REQUIRED')
    dnsnames = []
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        cn = cert.get_subject().commonName
        if _dnsname_match(cn, hostname):
            return
        dnsnames.append(cn)
    if len(dnsnames) > 1:
        raise CertificateError("hostname {0} doesn't match either of {1}".format(hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname {0} doesn't match {1}".format(hostname, dnsnames[0]))
    else:
        raise CertificateError('no appropriate commonName or '
                               'subjectAltName fields were found')

#####################
# end of ssl stdlib #
#####################

# --- {{{ libcare ---


def get_userspace_cache_path(libname, *parts):
    return os.path.join(PATCH_CACHE, 'userspace', libname, *parts)


LIBNAME_MAP = {
    'mysqld': 'db',
    'mariadbd': 'db',
    'qemu-kvm': 'qemu',
    'qemu-system-x86_64': 'qemu'
}

USERSPACE_MAP = {
    'db': ['mysqld', 'mariadbd'],
    'qemu': ['qemu-kvm', 'qemu-system-x86_64'],
    'libs': ['libc', 'libssl'],
}
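
# --- editor's note: illustrative sketch, not part of the original kcarectl code ---
# USERSPACE_MAP maps the group names accepted by --userspace-update to process
# names, while LIBNAME_MAP maps a process name back to its directory on the
# patch server (anything unknown falls back to 'u'):
#
#     USERSPACE_MAP['qemu']           ->  ['qemu-kvm', 'qemu-system-x86_64']
#     LIBNAME_MAP.get('mysqld', 'u')  ->  'db'
#     LIBNAME_MAP.get('nginx', 'u')   ->  'u'
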

def fetch_userspace_patch(libname, build_id):
    prefix = TEST_PREFIX or 'main'
    libname = urlquote(libname)
    url = get_patch_server_url(LIBNAME_MAP.get(libname, 'u'), prefix, libname, build_id, 'latest.v1')
    try:
        response = urlopen_auth(url, check_license=False)
    except NotFound:
        # There is no latest info, so we need to clear the cache for the corresponding
        # build_id to prevent updates by the "-ctl" utility.
        shutil.rmtree(get_userspace_cache_path('all', build_id), ignore_errors=True)
        raise
    meta = json.loads(nstr(response.read()))
    plevel = str(meta['level'])
    patch_path = get_userspace_cache_path('all', build_id, plevel, 'patch.tar.gz')
    if not os.path.exists(patch_path):
        url = get_patch_server_url(meta['patch_url'])
        try:
            fetch_url(url, patch_path, check_signature=USE_SIGNATURE)
        except HTTPError as ex:
            # No license - no access
            if ex.code in (403, 401):
                raise NoLibcareLicenseException('KC+ licence is required')
            raise
        dst = get_userspace_cache_path('all', build_id, plevel)
        cmd = ['tar', 'xf', patch_path, '-C', dst, '--no-same-owner']
        code, stdout, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
        if code:
            raise KcareError("Patches unpacking error: '{0}' '{1}' {2}".format(stderr, stdout, code))
    link_name = get_userspace_cache_path('all', build_id, 'latest')
    if not os.path.islink(link_name) and os.path.isdir(link_name):
        shutil.rmtree(link_name)
    os.symlink(plevel, link_name + '.tmp')
    os.rename(link_name + '.tmp', link_name)


def unsupported_if_not_found(clbl):
    def wrapper(*args, **kwargs):
        try:
            return clbl(*args, **kwargs)
        except OSError as err:
            if err.errno == errno.ENOENT:
                raise KcareError('Unsupported Linux distribution')
            raise  # pragma: no cover unit
    return wrapper


@unsupported_if_not_found
def _libcare_info():
    code, info, stderr = run_command([LIBCARE_CLIENT, 'info', '-j'], catch_stdout=True, catch_stderr=True)
    if code:
        raise KcareError("Gathering userspace libraries info error: '{0}' {1}".format(stderr, code))
    result = []
    for line in info.split('\n'):
        if line:
            try:
                result.append(json.loads(line))
            except ValueError:
                # We have to do this because the socket's output isn't separated into stderr
                # and stdout, so there is a chance some lines will not be JSON
                pass
    # FIXME: remove this once library names are separated to a lower
    # level from the process name and pid
    result = [{'comm': line.pop('comm'), 'pid': line.pop('pid'), 'libs': line} for line in result]
    # Filter out libs without patchlvl
    for line in result:
        line['libs'] = dict((k, v) for k, v in line['libs'].items() if 'patchlvl' in v)
    return result


def _libcare_patch_info():
    info = _libcare_info()
    patches = set()
    for rec in info:
        for _, data in rec['libs'].items():
            patches.add((data['buildid'], data['patchlvl']))
    result = []
    for build_id, patchlvl in patches:
        patch_info_filename = get_userspace_cache_path('all', build_id, str(patchlvl), 'info.json')
        if os.path.isfile(patch_info_filename):
            with open(patch_info_filename, 'r') as fd:
                result.append(json.load(fd))
    return result


def libcare_patch_info():
    result = _libcare_patch_info()
    if not result:
        logerror("No patched processes.")
    return json.dumps({'result': result})


def libcare_info():
    result = _libcare_info()
    if not result:
        logerror("No patched processes.")
    return json.dumps({'result': result})


def _libcare_version():
    result = {}
    for rec in _libcare_patch_info():
        result[rec.get('package')] = rec.get('latest-version', '')
    return result


def libcare_version(libname):
    for package, version in _libcare_version().items():
        if libname.startswith(package):
            return version
    return ''


@unsupported_if_not_found
def libcare_patch_all():
    cmd = [LIBCARE_CLIENT, 'update']
    code, stdout, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
    if code:
        raise KcareError("Userspace patch applying error: '{0}' '{1}' {2}".format(stdout, stderr, code))
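
# --- editor's note: illustrative sketch, not part of the original kcarectl code ---
# _libcare_info() reshapes the `libcare-client info -j` stream into one record
# per process and keeps only libraries that report a 'patchlvl'; a single
# record would look roughly like this (all values hypothetical):
#
#     {'comm': 'mysqld', 'pid': 1234,
#      'libs': {'libc-2.17.so': {'buildid': 'f0f1...', 'patchlvl': 3}}}
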

def refresh_applied_patches_list(clbl):
    def save_current_state():
        ''' KPT-1543 Save info about applied patches '''
        content = ''
        try:
            content = '\n'.join([' '.join(rec) for rec in _libcare_version().items()])
        finally:
            atomic_write(LIBCARE_PATCHES, content)

    def wrapper(*args, **kwargs):
        try:
            return clbl(*args, **kwargs)
        finally:
            save_current_state()
    return wrapper


@refresh_applied_patches_list
@unsupported_if_not_found
def libcare_unload():
    cmd = [LIBCARE_CLIENT, 'unload']
    code, stdout, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
    if code:
        raise KcareError("Userspace patch unloading error: '{0}' '{1}' {2}".format(stdout, stderr, code))


@unsupported_if_not_found
def get_userspace_processes(patched=False, limit=None):
    """ Gather build snapshots for current userspace packages """
    regexp = '|'.join("({0})".format(proc) for proc in limit or [])
    cmd = [LIBCARE_CLIENT, 'info', '-j']
    if not patched:
        cmd += ['-l', '-r', regexp]
    code, info, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
    if code:
        raise KcareError("Gathering userspace libraries info error: '{0}' {1}".format(stderr, code))
    data = {}
    for line in info.split('\n'):
        if line:
            try:
                line_data = json.loads(line)
            except ValueError:
                # We have to do this because the socket's output isn't separated into stderr
                # and stdout, so there is a chance some lines will not be JSON
                continue
            pid = line_data.pop('pid')
            comm = line_data.pop('comm')
            process = tuple([pid, comm])
            for libname, buildid in line_data.items():
                lib = tuple([libname, buildid['buildid']])
                if lib not in data:
                    data[lib] = []
                data[lib].append(process)
    return data


def is_selinux_enabled():
    try:
        code, _, _ = run_command(['/usr/sbin/selinuxenabled'])
    except OSError as err:
        if err.errno == errno.ENOENT:
            return False
        raise  # pragma: no cover unit
    return code == 0


def is_selinux_module_present(semodule_name):
    code, out, err = run_command(['/usr/sbin/semodule', '-l'], catch_stdout=True)
    if code:
        raise KcareError("SELinux modules list gathering error: '{0}' {1}".format(err, code))
    for line in out.split('\n'):
        if semodule_name in line:
            return True
    return False


def skip_if_no_selinux_module(clbl):
    def wrapper(*args, **kwargs):
        if is_selinux_enabled() and not is_selinux_module_present('libcare'):
            raise KcareError('SELinux is enabled and kernelcare-selinux is not installed.')
        return clbl(*args, **kwargs)
    return wrapper
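
# --- editor's note: illustrative sketch, not part of the original kcarectl code ---
# get_userspace_processes() keys its result by (libname, buildid) and collects
# the (pid, comm) pairs that currently map that library, e.g. (hypothetical values):
#
#     {('libc-2.17.so', 'f0f1...'): [(1234, 'mysqld'), (5678, 'qemu-kvm')]}
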

@refresh_applied_patches_list
@skip_if_no_selinux_module
def do_userspace_update(mode=UPDATE_MODE_MANUAL, limit=None):
    """ Patch userspace processes to the latest version. """
    # Auto-update means a cron-initiated run; if the LIB_AUTO_UPDATE flag
    # is not set in the config, nothing will happen.
    if mode == UPDATE_MODE_AUTO and not LIB_AUTO_UPDATE:
        return
    if limit is None:
        limit = list(USERSPACE_MAP.keys())
    process_filter = []
    for userspace_patch in limit:
        process_filter.extend(USERSPACE_MAP.get(userspace_patch, []))
    if not process_filter:
        loginfo('No such userspace patches: {0}'.format(limit))
        return False
    data = get_userspace_processes(limit=process_filter)
    failed = something_found = False
    for lib in data:
        # Download and unpack patches
        libname, build_id = lib
        try:
            fetch_userspace_patch(libname, build_id)
            something_found = True
        except NotFound:
            # There is no patch for that lib
            pass
        except NoLibcareLicenseException:
            pass
        except AlreadyTrialedException:
            raise
        except KcareError as ex:
            failed = True
            logerror(str(ex))
    if failed:
        raise KcareError('There were errors while downloading (unpacking) patches.')
    if not something_found:
        loginfo('No patches were found.')
        return False
    try:
        # Batch apply for all collected patches
        libcare_patch_all()
        # TODO: clear userspace cache. We need the same logic as for the kernel;
        # let's do it later to reduce this patch size.
    except KcareError as ex:
        logerror(str(ex))
        raise KcareError('There were errors while applying patches.')
    # KPT-1369 Show more detailed information about what was done
    result = {}
    for rec in _libcare_info():
        for lib, libdata in rec.get('libs', {}).items():
            result[lib] = result.get(lib, 0) + 1
    if not result:
        # No patches were applied
        return False
    for k, v in result.items():
        loginfo("Shared library `{0}` was patched for {1} processes.".format(k, v))
    return True


def libcare_server_started():
    """Assume that whenever the service is not running, we did not patch anything."""
    if os.path.exists('/sbin/service'):  # pragma: no cover
        cmd = '/sbin/service'
    elif os.path.exists('/usr/sbin/service'):  # pragma: no cover
        cmd = '/usr/sbin/service'
    else:  # pragma: no cover
        return False
    code, _, _ = run_command([cmd, 'libcare', 'status'], catch_stdout=True, catch_stderr=True)
    return code == 0

# --- end libcare }}} ---


def main():
    parser = ArgumentParser(description='Manage KernelCare patches for your kernel')
    parser.add_argument('-i', '--info', help='Display information about KernelCare. Use with --json parameter to get result in JSON format.', action='store_true')
    parser.add_argument('-u', '--update', help='Download latest patches and apply them to the current kernel', action='store_true')
    parser.add_argument('--unload', help='Unload patches', action='store_true')
    parser.add_argument('--smart-update', help='Patch kernel based on UPDATE POLICY settings', action='store_true')
    parser.add_argument('--auto-update', help='Check if update is available, if so -- update', action='store_true')
    parser.add_argument('--local', help='Update from a server local directory; accepts a path where patches are located', metavar='PATH')
    parser.add_argument('--patch-info', help='Return the list of applied patches', action='store_true')
    parser.add_argument('--freezer', help='Freezer type: full (default), smart, none', metavar='freezer')
    parser.add_argument('--nofreeze', help="[deprecated] Don't freeze tasks before patching", action='store_true')
    parser.add_argument('--force', help='[deprecated] When used with update, '
                        'forces applying the patch even if unable to freeze some threads', action='store_true')
    parser.add_argument('--uname', help='Return safe kernel version', action='store_true')
    parser.add_argument('--license-info', help='Return current license info', action='store_true')
    parser.add_argument('--import-key', help='Import gpg key', metavar='PATH')
    parser.add_argument('--register', help='Register using KernelCare Key', metavar='KEY')
    parser.add_argument('--register-autoretry', help='Retry registering indefinitely if failed on the first attempt', action='store_true')
    parser.add_argument('--unregister', help='Unregister from KernelCare (for key-based servers only)', action='store_true')
    parser.add_argument('--check', help='Check if new update available', action='store_true')
    parser.add_argument('--latest-patch-info', help='Return patch info for the latest available patch. '
                        'Use with --json parameter to get result in JSON format.', action='store_true')
    parser.add_argument('--test', help='[deprecated] Use --prefix=test instead', action='store_true')
    parser.add_argument('--tag', help='Tag server with custom metadata, for ePortal users only', metavar='TAG')
    parser.add_argument('--prefix', help='Patch source prefix used to test different builds '
                        'by downloading builds from different locations based on prefix', metavar='PREFIX')
    parser.add_argument('--nosignature', help='Do not check signature', action='store_true')
    parser.add_argument('--set-monitoring-key', help='Set monitoring key for IP based licenses. 16 to 32 characters, alphanumeric only', metavar='KEY')
    parser.add_argument('--doctor', help='Submits a vitals report to CloudLinux for analysis and bug-fixes', action='store_true')
    parser.add_argument('--enable-auto-update', help='Enable auto updates', action='store_true')
    parser.add_argument('--disable-auto-update', help='Disable auto updates', action='store_true')
    parser.add_argument('--plugin-info', help='Provides the information shown in control panel plugins for KernelCare. '
                        'Use with --json parameter to get result in JSON format.', action='store_true')
    parser.add_argument('--json', help="Return '--plugin-info', '--latest-patch-info', "
                        "'--patch-info' and '--info' results in JSON format", action='store_true')
    parser.add_argument('--version', help='Return the current version of KernelCare', action='store_true')
    parser.add_argument('--kpatch-debug', help='Enable the debug mode', action='store_true')
    parser.add_argument('--no-check-cert', help='Disable the patch server SSL certificates checking', action='store_true')
    parser.add_argument('--set-patch-level', help='Set patch level to be applied. To select latest patch level set -1',
                        action='store', type=int, default=None, required=False)
    parser.add_argument('--check-compatibility', help='Check compatibility.', action='store_true')
    exclusive_group = parser.add_mutually_exclusive_group()
    exclusive_group.add_argument('--set-patch-type', help="Set patch type feed. To select default feed use 'default' option", action='store')
    exclusive_group.add_argument('--edf-enabled', help='Enable exploit detection framework', action='store_true')
    exclusive_group.add_argument('--edf-disabled', help='Disable exploit detection framework', action='store_true')
    parser.add_argument('--set-sticky-patch', help='Set patch to stick to date in DDMMYY format, '
                        'or retrieve it from KEY if set to KEY. '
                        'Leave empty to unstick', action='store', default=None, required=False)
    parser.add_argument('-q', '--quiet', help='Suppress messages, provide only errors and warnings to stderr',
                        action='store_true', required=False)
    parser.add_argument('--has-flags', help='Check agent features')
    if not LIBCARE_DISABLED:
        parser.add_argument('--lib-update', help='Download latest patches and apply them to the current userspace libraries', action='store_true')
        parser.add_argument('--lib-unload', '--userspace-unload', help='Unload userspace patches', action='store_true')
        parser.add_argument('--lib-auto-update', help='Check if update is available, if so -- update', action='store_true')
        parser.add_argument('--lib-info', '--userspace-info', help='Display information about KernelCare+.', action='store_true')
        parser.add_argument('--lib-patch-info', '--userspace-patch-info', help='Return the list of applied userspace patches', action='store_true')
        parser.add_argument('--lib-version', '--userspace-version', help='Return safe package version', metavar='PACKAGENAME')
        parser.add_argument('--userspace-update', metavar='USERSPACE_PATCHES', nargs='?', const="",
                            help='Download latest patches and apply them to the corresponding userspace processes')
        parser.add_argument('--userspace-auto-update', help='Download latest patches and apply them to the corresponding userspace processes', action='store_true')
    args = parser.parse_args()

    if args.has_flags is not None:
        if set(filter(None, args.has_flags.split(','))).issubset(FLAGS):
            return 0
        else:
            return 1

    globals().update(get_config_settings())
    global PATCH_TYPE

    # do not remove args.auto_update!
    # once added to a machine, kcare-cron is never changed by a package update;
    # old clients have no -q option in their cron,
    # so the auto_update default silent mode must be preserved forever
    if args.quiet or args.auto_update:
        global PRINT_LEVEL
        if SILENCE_ERRORS:
            PRINT_LEVEL = PRINT_CRITICAL
        else:
            PRINT_LEVEL = PRINT_ERROR

    if not args.uname:
        if os.getuid() != 0:
            print('Please run as root', file=sys.stderr)
            return 1
        # should be after the root role check to create a log file with correct rights
        initialize_logging(logging.WARNING if args.quiet else logging.INFO)

    if args.set_patch_level:
        global LEVEL
        if args.set_patch_level >= 0:
            LEVEL = str(args.set_patch_level)
            update_config('PATCH_LEVEL', LEVEL)
        else:
            LEVEL = None
            update_config('PATCH_LEVEL', '')

    if args.set_sticky_patch is not None:
        update_config('STICKY_PATCH', args.set_sticky_patch)
        global STICKY
        STICKY = args.set_sticky_patch

    if args.nosignature:
        global USE_SIGNATURE
        USE_SIGNATURE = False

    if args.no_check_cert:
        global CHECK_SSL_CERTS
        CHECK_SSL_CERTS = False

    if args.kpatch_debug:
        global KPATCH_DEBUG
        KPATCH_DEBUG = True

    if args.check_compatibility:
        check_compatibility()

    # EDF do nothing
    if args.edf_enabled:
        warnings.warn('Flag --edf-enabled has been deprecated and will not be '
                      'available in future releases.', DeprecationWarning)
    elif args.edf_disabled:
        if PATCH_TYPE == 'edf':
            args.set_patch_type = ('' if PREV_PATCH_TYPE == 'edf' else PREV_PATCH_TYPE) or 'default'
            args.update = True

    global TEST_PREFIX
    if args.prefix:
        TEST_PREFIX = args.prefix
    if args.test:
        warnings.warn('Flag --test has been deprecated and will not be '
                      'available in future releases.', DeprecationWarning)
        TEST_PREFIX = 'test'
    TEST_PREFIX = TEST_PREFIX.strip('/')
    if TEST_PREFIX and TEST_PREFIX not in EXPECTED_PREFIX:
        kcarelog.warning('Prefix `{0}` is not one of the expected prefixes: {1}.'.format(
            TEST_PREFIX, ' '.join(EXPECTED_PREFIX)))
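
    # --- editor's note: illustrative sketch, not part of the original kcarectl code ---
    # The --local branch below swaps the patch server for a file: URL so the
    # same fetch code reads patches from a directory, e.g. (hypothetical path):
    #
    #     kcarectl --update --local /var/cache/kcare-mirror
    #
    # which effectively sets PATCH_SERVER = 'file:/var/cache/kcare-mirror'.
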
    if args.local:
        global UPDATE_FROM_LOCAL
        UPDATE_FROM_LOCAL = True
        global PATCH_SERVER
        PATCH_SERVER = 'file:' + args.local

    if args.set_patch_type:
        update_patch_type(args.set_patch_type)

    if PATCH_TYPE == 'edf':
        PATCH_TYPE = edf_fallback_ptype()
        warnings.warn('edf patches are deprecated. Fallback to {0}'.format(PATCH_TYPE or 'default'),
                      DeprecationWarning)
    apply_ptype(PATCH_TYPE)

    if args.doctor:
        kcdoctor()
        return

    if args.plugin_info:
        if args.json:
            plugin_info(fmt='json')
        else:
            plugin_info()
        return

    if args.enable_auto_update:
        update_config('AUTO_UPDATE', 'YES')
        return

    if args.disable_auto_update:
        update_config('AUTO_UPDATE', 'NO')
        return

    if args.set_monitoring_key:
        return set_monitoring_key_for_ip_license(args.set_monitoring_key)

    if args.unregister:
        unregister()

    if args.register:
        if PATCH_TYPE == 'free':
            update_config('PATCH_TYPE', 'extra')
        return register(args.register, args.register_autoretry)

    if args.license_info:
        # license_info returns zero if no valid license found and non-zero otherwise
        if license_info() != 0:
            return 0
        else:
            return 1

    if args.tag is not None:
        return tag_server(args.tag)

    if args.version:
        print(VERSION)

    if not LIBCARE_DISABLED:
        if args.lib_update:
            if do_userspace_update():
                loginfo('Userspace patches are applied.')
        if args.lib_auto_update:
            do_userspace_update(mode=UPDATE_MODE_AUTO)
        elif args.lib_unload:
            libcare_unload()
            loginfo('Userspace patches are unloaded.')
        if args.lib_info:
            print(libcare_info())
        if args.lib_patch_info:
            print(libcare_patch_info())
        if args.lib_version and libcare_server_started():
            print(libcare_version(args.lib_version))
        if args.userspace_update is not None:
            if args.userspace_update == '':
                # Get from config or defaults
                limit = USERSPACE_PATCHES or list(USERSPACE_MAP.keys())
            else:
                limit = [ptch.strip().lower() for ptch in args.userspace_update.split(',')]
            if do_userspace_update(limit=sorted(limit)):
                loginfo('Userspace patches are applied.')
        if args.userspace_auto_update:
            do_userspace_update(mode=UPDATE_MODE_AUTO, limit=None)

    if args.info:
        print(kcare_info(is_json=args.json))

    freezer = ''
    if args.force or args.nofreeze:
        warnings.warn('Flags --force and --nofreeze have been deprecated and will not be '
                      'available in future releases.', DeprecationWarning)
        freezer = 'none'
    if args.freezer:
        freezer = args.freezer

    if args.smart_update:
        do_update(freezer, mode=UPDATE_MODE_SMART, policy=UPDATE_POLICY)

    if args.update:
        do_update(freezer, mode=UPDATE_MODE_MANUAL)
        loginfo('Kernel is safe')

    if args.uname:
        print(kcare_uname())

    if args.unload:
        kcare_unload(freezer)
        loginfo('KernelCare protection disabled. Your kernel might not be safe')

    if args.auto_update:
        global CHECK_CLN_LICENSE_STATUS
        CHECK_CLN_LICENSE_STATUS = False
        # wait to prevent spikes at the beginning of each minute KPT-1874
        time.sleep(random.randint(0, 30))
        do_update(freezer, mode=UPDATE_MODE_AUTO)

    if args.patch_info:
        patch_info(is_json=args.json)

    if args.latest_patch_info:
        kcare_latest_patch_info(is_json=args.json)

    if args.import_key:
        import_gpg_key(args.import_key)

    if args.check:
        kcare_check()


if __name__ == '__main__':  # pragma: no cover unit
    try:
        sys.exit(main())
    except URLError as err:
        logerror('{0}: {1}'.format(err, getattr(err, 'url', 'unknown')))
    except KcareError as err:
        logerror(str(err))
        sys.exit(1)
    except Exception as err:
        logexc(err)
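
# --- editor's note: illustrative usage sketch, not part of the original kcarectl code ---
# Common invocations of this tool, as wired up in main() above (KEY stands for
# a real KernelCare license key):
#
#     kcarectl --register KEY      # register this server against CLN
#     kcarectl --update            # fetch and load the latest kernel patch
#     kcarectl --info --json       # machine-readable patch status
#     kcarectl --unload            # unload the applied patches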