New version of salt-formula from Saltstack
# -*- coding: utf-8 -*-
'''
Work with virtual machines managed by libvirt

:depends: libvirt Python module
'''
# Special Thanks to Michael Dehann, many of the concepts, and a few structures
# of his in the virt func module have been used

# Import python libs
from __future__ import absolute_import
import os
import re
import sys
import shutil
import subprocess
import string  # pylint: disable=deprecated-module
import logging

# Import third party libs
import yaml
import jinja2
import jinja2.exceptions
import salt.ext.six as six
from salt.ext.six.moves import StringIO as _StringIO  # pylint: disable=import-error
from xml.dom import minidom
try:
    import libvirt  # pylint: disable=import-error
    HAS_ALL_IMPORTS = True
except ImportError:
    HAS_ALL_IMPORTS = False

# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.templates
import salt.utils.validate.net
from salt.exceptions import CommandExecutionError, SaltInvocationError

log = logging.getLogger(__name__)

# Set up template environment
JINJA = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'virt')
    )
)

VIRT_STATE_NAME_MAP = {0: 'running',
                       1: 'running',
                       2: 'running',
                       3: 'paused',
                       4: 'shutdown',
                       5: 'shutdown',
                       6: 'crashed'}

VIRT_DEFAULT_HYPER = 'kvm'


def __virtual__():
    if not HAS_ALL_IMPORTS:
        return False
    return 'virt'


def __get_conn():
    '''
    Detects what type of dom this node is and attempts to connect to the
    correct hypervisor via libvirt.
    '''
    # This has only been tested on kvm and xen, it needs to be expanded to
    # support all vm layers supported by libvirt
    def __esxi_uri():
        '''
        Connect to an ESXi host with a configuration like so:

        .. code-block:: yaml

            libvirt:
              hypervisor: esxi
              connection: esx01

        The connection setting can either be an explicit libvirt URI,
        or a libvirt URI alias as in this example. No, it cannot be
        just a hostname.

        Example libvirt `/etc/libvirt/libvirt.conf`:

        .. code-block::

            uri_aliases = [
              "esx01=esx://10.1.1.101/?no_verify=1&auto_answer=1",
              "esx02=esx://10.1.1.102/?no_verify=1&auto_answer=1",
            ]

        Reference:

         - http://libvirt.org/drvesx.html#uriformat
         - http://libvirt.org/uri.html#URI_config
        '''
        connection = __salt__['config.get']('libvirt:connection', 'esx')
        return connection

    def __esxi_auth():
        '''
        We rely on the credentials being provided to libvirt through
        its built-in mechanisms.

        Example libvirt `/etc/libvirt/auth.conf`:

        .. code-block::

            [credentials-myvirt]
            username=user
            password=secret

            [auth-esx-10.1.1.101]
            credentials=myvirt

            [auth-esx-10.1.1.102]
            credentials=myvirt

        Reference:

          - http://libvirt.org/auth.html#Auth_client_config
        '''
        return [[libvirt.VIR_CRED_EXTERNAL], lambda: 0, None]

    if 'virt.connect' in __opts__:
        conn_str = __opts__['virt.connect']
    else:
        conn_str = 'qemu:///system'

    conn_func = {
        'esxi': [libvirt.openAuth, [__esxi_uri(),
                                    __esxi_auth(),
                                    0]],
        'qemu': [libvirt.open, [conn_str]],
    }

    hypervisor = __salt__['config.get']('libvirt:hypervisor', 'qemu')

    try:
        conn = conn_func[hypervisor][0](*conn_func[hypervisor][1])
    except Exception:
        raise CommandExecutionError(
            'Sorry, {0} failed to open a connection to the hypervisor '
            'software at {1}'.format(
                __grains__['fqdn'],
                conn_func[hypervisor][1][0]
            )
        )
    return conn
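
# Example minion configuration consumed by __get_conn() above (a sketch; adjust
# the URI and hypervisor for your environment):
#
#     virt.connect: qemu:///system
#
#     libvirt:
#       hypervisor: qemu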


def _get_dom(vm_):
    '''
    Return a domain object for the named vm
    '''
    conn = __get_conn()
    if vm_ not in list_vms():
        raise CommandExecutionError('The specified vm is not present')
    return conn.lookupByName(vm_)


def _libvirt_creds():
    '''
    Returns the user and group that the disk images should be owned by
    '''
    g_cmd = 'grep ^\\s*group /etc/libvirt/qemu.conf'
    u_cmd = 'grep ^\\s*user /etc/libvirt/qemu.conf'
    try:
        group = subprocess.Popen(g_cmd,
                                 shell=True,
                                 stdout=subprocess.PIPE).communicate()[0].split('"')[1]
    except IndexError:
        group = 'root'
    try:
        user = subprocess.Popen(u_cmd,
                                shell=True,
                                stdout=subprocess.PIPE).communicate()[0].split('"')[1]
    except IndexError:
        user = 'root'
    return {'user': user, 'group': group}
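
# _libvirt_creds() above parses lines such as the following from
# /etc/libvirt/qemu.conf (illustrative values):
#
#     user = "root"
#     group = "root"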


def _get_migrate_command():
    '''
    Returns the command shared by the different migration types
    '''
    if __salt__['config.option']('virt.tunnel'):
        return ('virsh migrate --p2p --tunnelled --live --persistent '
                '--undefinesource ')
    return 'virsh migrate --live --persistent --undefinesource '


def _get_target(target, ssh):
    proto = 'qemu'
    if ssh:
        proto += '+ssh'
    return ' {0}://{1}/{2}'.format(proto, target, 'system')
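
# For example, _get_target('hyper02', ssh=True) returns ' qemu+ssh://hyper02/system',
# which is appended to the virsh command produced by _get_migrate_command().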


def _gen_xml(name,
             cpu,
             mem,
             diskp,
             nicp,
             hypervisor,
             **kwargs):
    '''
    Generate the XML string to define a libvirt vm
    '''
    hypervisor = 'vmware' if hypervisor == 'esxi' else hypervisor
    mem = mem * 1024  # MB
    context = {
        'hypervisor': hypervisor,
        'name': name,
        'cpu': str(cpu),
        'mem': str(mem),
    }
    if hypervisor in ['qemu', 'kvm']:
        context['controller_model'] = False
    elif hypervisor in ['esxi', 'vmware']:
        # TODO: make bus and model parameterized, this works for 64-bit Linux
        context['controller_model'] = 'lsilogic'

    if 'boot_dev' in kwargs:
        context['boot_dev'] = []
        for dev in kwargs['boot_dev'].split():
            context['boot_dev'].append(dev)
    else:
        context['boot_dev'] = ['hd']

    if 'serial_type' in kwargs:
        context['serial_type'] = kwargs['serial_type']
    if 'serial_type' in context and context['serial_type'] == 'tcp':
        if 'telnet_port' in kwargs:
            context['telnet_port'] = kwargs['telnet_port']
        else:
            context['telnet_port'] = 23023  # FIXME: use random unused port
    if 'serial_type' in context:
        if 'console' in kwargs:
            context['console'] = kwargs['console']
        else:
            context['console'] = True

    context['disks'] = {}
    for i, disk in enumerate(diskp):
        for disk_name, args in disk.items():
            context['disks'][disk_name] = {}
            fn_ = '{0}.{1}'.format(disk_name, args['format'])
            context['disks'][disk_name]['file_name'] = fn_
            context['disks'][disk_name]['source_file'] = os.path.join(args['pool'],
                                                                      name,
                                                                      fn_)
            if hypervisor in ['qemu', 'kvm']:
                context['disks'][disk_name]['target_dev'] = 'vd{0}'.format(string.ascii_lowercase[i])
                context['disks'][disk_name]['address'] = False
                context['disks'][disk_name]['driver'] = True
            elif hypervisor in ['esxi', 'vmware']:
                context['disks'][disk_name]['target_dev'] = 'sd{0}'.format(string.ascii_lowercase[i])
                context['disks'][disk_name]['address'] = True
                context['disks'][disk_name]['driver'] = False
            context['disks'][disk_name]['disk_bus'] = args['model']
            context['disks'][disk_name]['type'] = args['format']
            context['disks'][disk_name]['index'] = str(i)

    context['nics'] = nicp

    fn_ = 'libvirt_domain.jinja'
    try:
        template = JINJA.get_template(fn_)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template {0}'.format(fn_))
        return ''

    return template.render(**context)
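
# A rough sketch of how _gen_xml() is typically driven (the profile helpers are
# defined further below; the values here are illustrative):
#
#     diskp = _disk_profile('default', 'kvm')
#     nicp = _nic_profile('default', 'kvm')
#     xml = _gen_xml('vm01', 2, 2048, diskp, nicp, 'kvm', boot_dev='cdrom hd')
#
# The rendered XML can then be passed to define_xml_str() to define the domain.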


def _gen_vol_xml(vmname,
                 diskname,
                 size,
                 hypervisor,
                 **kwargs):
    '''
    Generate the XML string to define a libvirt storage volume
    '''
    size = int(size) * 1024  # MB
    disk_info = _get_image_info(hypervisor, vmname, **kwargs)
    context = {
        'name': vmname,
        'filename': '{0}.{1}'.format(diskname, disk_info['disktype']),
        'volname': diskname,
        'disktype': disk_info['disktype'],
        'size': str(size),
        'pool': disk_info['pool'],
    }
    fn_ = 'libvirt_volume.jinja'
    try:
        template = JINJA.get_template(fn_)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template {0}'.format(fn_))
        return ''
    return template.render(**context)


def _qemu_image_info(path):
    '''
    Detect information for the image at path
    '''
    ret = {}
    out = __salt__['cmd.run']('qemu-img info {0}'.format(path))

    match_map = {'size': r'virtual size: \w+ \((\d+) byte[s]?\)',
                 'format': r'file format: (\w+)'}

    for info, search in match_map.items():
        try:
            ret[info] = re.search(search, out).group(1)
        except AttributeError:
            continue

    return ret


# TODO: this function is deprecated, should be replaced with
# _qemu_image_info()
def _image_type(vda):
    '''
    Detect what driver needs to be used for the given image
    '''
    out = __salt__['cmd.run']('qemu-img info {0}'.format(vda))

    if 'file format: qcow2' in out:
        return 'qcow2'
    else:
        return 'raw'


# TODO: this function is deprecated, should be merged and replaced
# with _disk_profile()
def _get_image_info(hypervisor, name, **kwargs):
    '''
    Determine disk image info, such as filename, image format and
    storage pool, based on which hypervisor is used
    '''
    ret = {}
    if hypervisor in ['esxi', 'vmware']:
        ret['disktype'] = 'vmdk'
        ret['filename'] = '{0}{1}'.format(name, '.vmdk')
        ret['pool'] = '[{0}] '.format(kwargs.get('pool', '0'))
    elif hypervisor in ['kvm', 'qemu']:
        ret['disktype'] = 'qcow2'
        ret['filename'] = '{0}{1}'.format(name, '.qcow2')
        ret['pool'] = __salt__['config.option']('virt.images')
    return ret


def _disk_profile(profile, hypervisor, **kwargs):
    '''
    Gather the disk profile from the config or apply the default based
    on the active hypervisor

    This is the ``default`` profile for KVM/QEMU, which can be
    overridden in the configuration:

    .. code-block:: yaml

        virt:
          disk:
            default:
              - system:
                  size: 8192
                  format: qcow2
                  model: virtio

    The ``format`` and ``model`` parameters are optional, and will
    default to whatever is best suited to the active hypervisor.
    '''
    default = [
        {'system':
            {'size': '8192'}
         }
    ]
    if hypervisor in ['esxi', 'vmware']:
        overlay = {'format': 'vmdk',
                   'model': 'scsi',
                   'pool': '[{0}] '.format(kwargs.get('pool', '0'))
                   }
    elif hypervisor in ['qemu', 'kvm']:
        overlay = {'format': 'qcow2',
                   'model': 'virtio',
                   'pool': __salt__['config.option']('virt.images')
                   }
    else:
        overlay = {}

    disklist = __salt__['config.get']('virt:disk', {}).get(profile, default)
    for key, val in overlay.items():
        for i, disks in enumerate(disklist):
            for disk in disks:
                if key not in disks[disk]:
                    disklist[i][disk][key] = val
    return disklist
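
# With the built-in default profile on qemu/kvm, the merged result of
# _disk_profile() looks roughly like this (a sketch; the pool value comes from
# the virt.images config option):
#
#     [{'system': {'size': '8192', 'format': 'qcow2',
#                  'model': 'virtio', 'pool': '<virt.images path>'}}]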


def _nic_profile(profile_name, hypervisor, **kwargs):
    default = [{'eth0': {}}]
    vmware_overlay = {'type': 'bridge', 'source': 'DEFAULT', 'model': 'e1000'}
    kvm_overlay = {'type': 'bridge', 'source': 'br0', 'model': 'virtio'}
    overlays = {
        'kvm': kvm_overlay,
        'qemu': kvm_overlay,
        'esxi': vmware_overlay,
        'vmware': vmware_overlay,
    }

    # support old location
    config_data = __salt__['config.option']('virt.nic', {}).get(
        profile_name, None
    )

    if config_data is None:
        config_data = __salt__['config.get']('virt:nic', {}).get(
            profile_name, default
        )

    interfaces = []

    def append_dict_profile_to_interface_list(profile_dict):
        for interface_name, attributes in profile_dict.items():
            attributes['name'] = interface_name
            interfaces.append(attributes)

    # old style dicts (top-level dicts)
    #
    # virt:
    #   nic:
    #     eth0:
    #       bridge: br0
    #     eth1:
    #       network: test_net
    if isinstance(config_data, dict):
        append_dict_profile_to_interface_list(config_data)

    # new style lists (may contain dicts)
    #
    # virt:
    #   nic:
    #     - eth0:
    #         bridge: br0
    #     - eth1:
    #         network: test_net
    #
    # virt:
    #   nic:
    #     - name: eth0
    #       bridge: br0
    #     - name: eth1
    #       network: test_net
    elif isinstance(config_data, list):
        for interface in config_data:
            if isinstance(interface, dict):
                if len(interface) == 1:
                    append_dict_profile_to_interface_list(interface)
                else:
                    interfaces.append(interface)

    def _normalize_net_types(attributes):
        '''
        Guess which style of definition:

            bridge: br0
        or
            network: net0
        or
            type: network
            source: net0
        '''
        for type_ in ['bridge', 'network']:
            if type_ in attributes:
                attributes['type'] = type_
                # we want to discard the original key
                attributes['source'] = attributes.pop(type_)

        attributes['type'] = attributes.get('type', None)
        attributes['source'] = attributes.get('source', None)

    def _apply_default_overlay(attributes):
        for key, value in overlays[hypervisor].items():
            if key not in attributes or not attributes[key]:
                attributes[key] = value

    def _assign_mac(attributes):
        dmac = '{0}_mac'.format(attributes['name'])
        if dmac in kwargs:
            dmac = kwargs[dmac]
            if salt.utils.validate.net.mac(dmac):
                attributes['mac'] = dmac
            else:
                msg = 'Malformed MAC address: {0}'.format(dmac)
                raise CommandExecutionError(msg)
        else:
            attributes['mac'] = salt.utils.gen_mac()

    for interface in interfaces:
        _normalize_net_types(interface)
        _assign_mac(interface)
        if hypervisor in overlays:
            _apply_default_overlay(interface)

    return interfaces
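
# With the kvm overlay and the built-in default profile, _nic_profile() above
# normalizes to roughly the following (a sketch; the MAC is generated unless an
# eth0_mac kwarg is supplied):
#
#     [{'name': 'eth0', 'type': 'bridge', 'source': 'br0',
#       'model': 'virtio', 'mac': '<generated>'}]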


def init(name,
         cpu,
         mem,
         image=None,
         nic='default',
         hypervisor=VIRT_DEFAULT_HYPER,
         start=True,  # pylint: disable=redefined-outer-name
         disk='default',
         saltenv='base',
         **kwargs):
    '''
    Initialize a new vm

    CLI Example:

    .. code-block:: bash

        salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
        salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
    '''
    hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)

    nicp = _nic_profile(nic, hypervisor, **kwargs)

    diskp = None
    seedable = False
    if image:  # with disk template image
        # if image was used, assume only one disk, i.e. the
        # 'default' disk profile
        # TODO: make it possible to use disk profiles and use the
        # template image as the system disk
        #diskp = _disk_profile('default', hypervisor, **kwargs)
        #new diskp TCP cloud
        diskp = _disk_profile(disk, hypervisor, **kwargs)
        # When using a disk profile extract the sole dict key of the first
        # array element as the filename for disk
        disk_name = next(six.iterkeys(diskp[0]))
        disk_type = diskp[0][disk_name]['format']
        disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
        # disk size TCP cloud
        disk_size = diskp[0][disk_name]['size']

        if hypervisor in ['esxi', 'vmware']:
            # TODO: we should be copying the image file onto the ESX host
            raise SaltInvocationError('virt.init does not support image '
                                      'templates in conjunction with the '
                                      'esxi hypervisor')
        elif hypervisor in ['qemu', 'kvm']:
            img_dir = __salt__['config.option']('virt.images')
            img_dest = os.path.join(
                img_dir,
                name,
                disk_file_name
            )
            img_dir = os.path.dirname(img_dest)
            sfn = __salt__['cp.cache_file'](image, saltenv)
            if not os.path.isdir(img_dir):
                os.makedirs(img_dir)

            try:
                salt.utils.files.copyfile(sfn, img_dest)
                mask = os.umask(0)
                os.umask(mask)

                # Resizing image TCP cloud
                cmd = 'qemu-img resize ' + img_dest + ' ' + str(disk_size) + 'M'
                subprocess.call(cmd, shell=True)

                # Apply umask and remove exec bit
                mode = (0o0777 ^ mask) & 0o0666
                os.chmod(img_dest, mode)
            except (IOError, OSError) as e:
                raise CommandExecutionError('problem copying image. {0} - {1}'.format(image, e))

            seedable = True
        else:
            log.error('unsupported hypervisor when handling disk image')
    else:
        # no disk template image specified, create disks based on disk profile
        diskp = _disk_profile(disk, hypervisor, **kwargs)
        if hypervisor in ['qemu', 'kvm']:
            # TODO: we should be creating disks in the local filesystem with
            # qemu-img
            raise SaltInvocationError('virt.init does not support disk '
                                      'profiles in conjunction with '
                                      'qemu/kvm at this time, use image '
                                      'template instead')
        else:
            # assume libvirt manages disks for us
            for disk in diskp:
                for disk_name, args in disk.items():
                    xml = _gen_vol_xml(name,
                                       disk_name,
                                       args['size'],
                                       hypervisor)
                    define_vol_xml_str(xml)

    xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
    define_xml_str(xml)

    if kwargs.get('seed') and seedable:
        install = kwargs.get('install', True)
        seed_cmd = kwargs.get('seed_cmd', 'seed.apply')

        __salt__[seed_cmd](img_dest,
                           id_=name,
                           config=kwargs.get('config'),
                           install=install)
    if start:
        create(name)

    return True
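
# A fuller CLI sketch combining the keyword arguments handled above (the image
# path and profile names are illustrative):
#
#     salt 'hyper01' virt.init vm01 2 2048 image=salt://images/ubuntu.qcow2 \
#         disk=default nic=default seed=True start=True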


def list_vms():
    '''
    Return a list of virtual machine names on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_vms
    '''
    vms = []
    vms.extend(list_active_vms())
    vms.extend(list_inactive_vms())
    return vms


def list_active_vms():
    '''
    Return a list of names for the active virtual machines on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_active_vms
    '''
    conn = __get_conn()
    vms = []
    for id_ in conn.listDomainsID():
        vms.append(conn.lookupByID(id_).name())
    return vms


def list_inactive_vms():
    '''
    Return a list of names for the inactive virtual machines on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_inactive_vms
    '''
    conn = __get_conn()
    vms = []
    for id_ in conn.listDefinedDomains():
        vms.append(id_)
    return vms


def vm_info(vm_=None):
    '''
    Return detailed information about the vms on this hyper in a
    list of dicts:

    .. code-block:: python

        [
            'your-vm': {
                'cpu': <int>,
                'maxMem': <int>,
                'mem': <int>,
                'state': '<state>',
                'cputime' <int>
                },
            ...
            ]

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_info
    '''
    def _info(vm_):
        dom = _get_dom(vm_)
        raw = dom.info()
        return {'cpu': raw[3],
                'cputime': int(raw[4]),
                'disks': get_disks(vm_),
                'graphics': get_graphics(vm_),
                'nics': get_nics(vm_),
                'maxMem': int(raw[1]),
                'mem': int(raw[2]),
                'state': VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')}
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def vm_state(vm_=None):
    '''
    Return list of all the vms and their state.

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_state <vm name>
    '''
    def _info(vm_):
        state = ''
        dom = _get_dom(vm_)
        raw = dom.info()
        state = VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')
        return state
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def node_info():
    '''
    Return a dict with information about this node

    CLI Example:

    .. code-block:: bash

        salt '*' virt.node_info
    '''
    conn = __get_conn()
    raw = conn.getInfo()
    info = {'cpucores': raw[6],
            'cpumhz': raw[3],
            'cpumodel': str(raw[0]),
            'cpus': raw[2],
            'cputhreads': raw[7],
            'numanodes': raw[4],
            'phymemory': raw[1],
            'sockets': raw[5]}
    return info


def get_nics(vm_):
    '''
    Return info about the network interfaces of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_nics <vm name>
    '''
    nics = {}
    doc = minidom.parse(_StringIO(get_xml(vm_)))
    for node in doc.getElementsByTagName('devices'):
        i_nodes = node.getElementsByTagName('interface')
        for i_node in i_nodes:
            nic = {}
            nic['type'] = i_node.getAttribute('type')
            for v_node in i_node.getElementsByTagName('*'):
                if v_node.tagName == 'mac':
                    nic['mac'] = v_node.getAttribute('address')
                if v_node.tagName == 'model':
                    nic['model'] = v_node.getAttribute('type')
                if v_node.tagName == 'target':
                    nic['target'] = v_node.getAttribute('dev')
                # driver, source, and address can all have optional attributes
                if re.match('(driver|source|address)', v_node.tagName):
                    temp = {}
                    for key, value in v_node.attributes.items():
                        temp[key] = value
                    nic[str(v_node.tagName)] = temp
                # virtualport needs to be handled separately, to pick up the
                # type attribute of the virtualport itself
                if v_node.tagName == 'virtualport':
                    temp = {}
                    temp['type'] = v_node.getAttribute('type')
                    for key, value in v_node.attributes.items():
                        temp[key] = value
                    nic['virtualport'] = temp
            if 'mac' not in nic:
                continue
            nics[nic['mac']] = nic
    return nics


def get_macs(vm_):
    '''
    Return a list of MAC addresses from the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_macs <vm name>
    '''
    macs = []
    doc = minidom.parse(_StringIO(get_xml(vm_)))
    for node in doc.getElementsByTagName('devices'):
        i_nodes = node.getElementsByTagName('interface')
        for i_node in i_nodes:
            for v_node in i_node.getElementsByTagName('mac'):
                macs.append(v_node.getAttribute('address'))
    return macs


def get_graphics(vm_):
    '''
    Returns the information on vnc for a given vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_graphics <vm name>
    '''
    out = {'autoport': 'None',
           'keymap': 'None',
           'listen': 'None',
           'port': 'None',
           'type': 'vnc'}
    xml = get_xml(vm_)
    ssock = _StringIO(xml)
    doc = minidom.parse(ssock)
    for node in doc.getElementsByTagName('domain'):
        g_nodes = node.getElementsByTagName('graphics')
        for g_node in g_nodes:
            for key, value in g_node.attributes.items():
                out[key] = value
    return out


def get_disks(vm_):
    '''
    Return the disks of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_disks <vm name>
    '''
    disks = {}
    doc = minidom.parse(_StringIO(get_xml(vm_)))
    for elem in doc.getElementsByTagName('disk'):
        sources = elem.getElementsByTagName('source')
        targets = elem.getElementsByTagName('target')
        if len(sources) > 0:
            source = sources[0]
        else:
            continue
        if len(targets) > 0:
            target = targets[0]
        else:
            continue
        if target.hasAttribute('dev'):
            qemu_target = ''
            if source.hasAttribute('file'):
                qemu_target = source.getAttribute('file')
            elif source.hasAttribute('dev'):
                qemu_target = source.getAttribute('dev')
            elif source.hasAttribute('protocol') and \
                    source.hasAttribute('name'):  # For rbd network
                qemu_target = '{0}:{1}'.format(
                    source.getAttribute('protocol'),
                    source.getAttribute('name'))
            if qemu_target:
                disks[target.getAttribute('dev')] = {
                    'file': qemu_target}
    for dev in disks:
        try:
            hypervisor = __salt__['config.get']('libvirt:hypervisor', 'kvm')
            if hypervisor not in ['qemu', 'kvm']:
                break

            output = []
            qemu_output = subprocess.Popen(['qemu-img', 'info',
                                            disks[dev]['file']],
                                           shell=False,
                                           stdout=subprocess.PIPE).communicate()[0]
            snapshots = False
            columns = None
            lines = qemu_output.strip().split('\n')
            for line in lines:
                if line.startswith('Snapshot list:'):
                    snapshots = True
                    continue

                # If this is a copy-on-write image, then the backing file
                # represents the base image
                #
                # backing file: base.qcow2 (actual path: /var/shared/base.qcow2)
                elif line.startswith('backing file'):
                    matches = re.match(r'.*\(actual path: (.*?)\)', line)
                    if matches:
                        output.append('backing file: {0}'.format(matches.group(1)))
                    continue

                elif snapshots:
                    if line.startswith('ID'):  # Do not parse table headers
                        line = line.replace('VM SIZE', 'VMSIZE')
                        line = line.replace('VM CLOCK', 'TIME VMCLOCK')
                        columns = re.split(r'\s+', line)
                        columns = [c.lower() for c in columns]
                        output.append('snapshots:')
                        continue
                    fields = re.split(r'\s+', line)
                    for i, field in enumerate(fields):
                        sep = ' '
                        if i == 0:
                            sep = '-'
                        output.append(
                            '{0} {1}: "{2}"'.format(
                                sep, columns[i], field
                            )
                        )
                    continue
                output.append(line)
            output = '\n'.join(output)
            disks[dev].update(yaml.safe_load(output))
        except TypeError:
            disks[dev].update(yaml.safe_load('image: Does not exist'))
    return disks


def setmem(vm_, memory, config=False):
    '''
    Changes the amount of memory allocated to a VM. The VM must be shut down
    for this to work.

    memory is to be specified in MB

    If config is True then we ask libvirt to modify the config as well

    CLI Example:

    .. code-block:: bash

        salt '*' virt.setmem myvm 768
    '''
    if vm_state(vm_) != 'shutdown':
        return False

    dom = _get_dom(vm_)

    # libvirt has a funny bitwise system for the flags in that the flag
    # to affect the "current" setting is 0, which means that to set the
    # current setting we have to call it a second time with just 0 set
    flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM
    if config:
        flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG

    ret1 = dom.setMemoryFlags(memory * 1024, flags)
    ret2 = dom.setMemoryFlags(memory * 1024, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    # return True if both calls succeeded
    return ret1 == ret2 == 0


def setvcpus(vm_, vcpus, config=False):
    '''
    Changes the number of vcpus allocated to a VM. The VM must be shut down
    for this to work.

    vcpus is an int representing the number to be assigned

    If config is True then we ask libvirt to modify the config as well

    CLI Example:

    .. code-block:: bash

        salt '*' virt.setvcpus myvm 2
    '''
    if vm_state(vm_) != 'shutdown':
        return False

    dom = _get_dom(vm_)

    # see notes in setmem
    flags = libvirt.VIR_DOMAIN_VCPU_MAXIMUM
    if config:
        flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG

    ret1 = dom.setVcpusFlags(vcpus, flags)
    ret2 = dom.setVcpusFlags(vcpus, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    return ret1 == ret2 == 0


def freemem():
    '''
    Return an int representing the amount of memory that has not been given
    to virtual machines on this node

    CLI Example:

    .. code-block:: bash

        salt '*' virt.freemem
    '''
    conn = __get_conn()
    mem = conn.getInfo()[1]
    # Take off just enough to sustain the hypervisor
    mem -= 256
    for vm_ in list_vms():
        dom = _get_dom(vm_)
        if dom.ID() > 0:
            mem -= dom.info()[2] / 1024
    return mem


def freecpu():
    '''
    Return an int representing the number of unallocated cpus on this
    hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.freecpu
    '''
    conn = __get_conn()
    cpus = conn.getInfo()[2]
    for vm_ in list_vms():
        dom = _get_dom(vm_)
        if dom.ID() > 0:
            cpus -= dom.info()[3]
    return cpus


def full_info():
    '''
    Return the node_info, vm_info and freemem

    CLI Example:

    .. code-block:: bash

        salt '*' virt.full_info
    '''
    return {'freecpu': freecpu(),
            'freemem': freemem(),
            'node_info': node_info(),
            'vm_info': vm_info()}


def get_xml(vm_):
    '''
    Returns the XML for a given vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_xml <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.XMLDesc(0)


def get_profiles(hypervisor=None):
    '''
    Return the virt profiles for hypervisor.

    Currently there are profiles for:

    - nic
    - disk

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_profiles
        salt '*' virt.get_profiles hypervisor=esxi
    '''
    ret = {}
    if not hypervisor:
        hypervisor = __salt__['config.get']('libvirt:hypervisor', VIRT_DEFAULT_HYPER)
    virtconf = __salt__['config.get']('virt', {})
    for typ in ['disk', 'nic']:
        _func = getattr(sys.modules[__name__], '_{0}_profile'.format(typ))
        ret[typ] = {'default': _func('default', hypervisor)}
        if typ in virtconf:
            ret.setdefault(typ, {})
            for prf in virtconf[typ]:
                ret[typ][prf] = _func(prf, hypervisor)
    return ret


def shutdown(vm_):
    '''
    Send a soft shutdown signal to the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.shutdown <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.shutdown() == 0


def pause(vm_):
    '''
    Pause the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.pause <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.suspend() == 0


def resume(vm_):
    '''
    Resume the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.resume <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.resume() == 0


def create(vm_):
    '''
    Start a defined domain

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.create() == 0


def start(vm_):
    '''
    Alias for the obscurely named 'create' function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.start <vm name>
    '''
    return create(vm_)


def stop(vm_):
    '''
    Alias for the obscurely named 'destroy' function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.stop <vm name>
    '''
    return destroy(vm_)


def reboot(vm_):
    '''
    Reboot a domain via ACPI request

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reboot <vm name>
    '''
    dom = _get_dom(vm_)

    # reboot has a few modes of operation, passing 0 in means the
    # hypervisor will pick the best method for rebooting
    return dom.reboot(0) == 0


def reset(vm_):
    '''
    Reset a VM by emulating the reset button on a physical machine

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reset <vm name>
    '''
    dom = _get_dom(vm_)

    # reset takes a flag, like reboot, but it is not yet used
    # so we just pass in 0
    # see: http://libvirt.org/html/libvirt-libvirt.html#virDomainReset
    return dom.reset(0) == 0


def ctrl_alt_del(vm_):
    '''
    Sends CTRL+ALT+DEL to a VM

    CLI Example:

    .. code-block:: bash

        salt '*' virt.ctrl_alt_del <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.sendKey(0, 0, [29, 56, 111], 3, 0) == 0


def create_xml_str(xml):
    '''
    Start a domain based on the XML passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create_xml_str <XML in string format>
    '''
    conn = __get_conn()
    return conn.createXML(xml, 0) is not None


def create_xml_path(path):
    '''
    Start a domain based on the XML-file path passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create_xml_path <path to XML file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return create_xml_str(salt.utils.fopen(path, 'r').read())


def define_xml_str(xml):
    '''
    Define a domain based on the XML passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_xml_str <XML in string format>
    '''
    conn = __get_conn()
    return conn.defineXML(xml) is not None


def define_xml_path(path):
    '''
    Define a domain based on the XML-file path passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_xml_path <path to XML file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return define_xml_str(salt.utils.fopen(path, 'r').read())


def define_vol_xml_str(xml):
    '''
    Define a volume based on the XML passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_vol_xml_str <XML in string format>
    '''
    poolname = __salt__['config.get']('libvirt:storagepool', 'default')
    conn = __get_conn()
    pool = conn.storagePoolLookupByName(str(poolname))
    return pool.createXML(xml, 0) is not None


def define_vol_xml_path(path):
    '''
    Define a volume based on the XML-file path passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_vol_xml_path <path to XML file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return define_vol_xml_str(salt.utils.fopen(path, 'r').read())


def migrate_non_shared(vm_, target, ssh=False):
    '''
    Attempt to execute non-shared storage "all" migration

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate_non_shared <vm name> <target hypervisor>
    '''
    cmd = _get_migrate_command() + ' --copy-storage-all ' + vm_\
        + _get_target(target, ssh)

    return subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE).communicate()[0]


def migrate_non_shared_inc(vm_, target, ssh=False):
    '''
    Attempt to execute an incremental non-shared storage migration

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate_non_shared_inc <vm name> <target hypervisor>
    '''
    cmd = _get_migrate_command() + ' --copy-storage-inc ' + vm_\
        + _get_target(target, ssh)

    return subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE).communicate()[0]


def migrate(vm_, target, ssh=False):
    '''
    Shared storage migration

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate <vm name> <target hypervisor>
    '''
    cmd = _get_migrate_command() + ' ' + vm_\
        + _get_target(target, ssh)

    return subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE).communicate()[0]


def seed_non_shared_migrate(disks, force=False):
    '''
    Non shared migration requires that the disks be present on the migration
    destination, pass the disks information via this function, to the
    migration destination before executing the migration.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.seed_non_shared_migrate <disks>
    '''
    for _, data in disks.items():
        fn_ = data['file']
        form = data['file format']
        size = data['virtual size'].split()[1][1:]
        if os.path.isfile(fn_) and not force:
            # the target exists, check to see if it is compatible
            pre = yaml.safe_load(subprocess.Popen('qemu-img info ' + fn_,
                                                  shell=True,
                                                  stdout=subprocess.PIPE).communicate()[0])
            if pre['file format'] != data['file format']\
                    and pre['virtual size'] != data['virtual size']:
                return False
        if not os.path.isdir(os.path.dirname(fn_)):
            os.makedirs(os.path.dirname(fn_))
        if os.path.isfile(fn_):
            os.remove(fn_)
        cmd = 'qemu-img create -f ' + form + ' ' + fn_ + ' ' + size
        subprocess.call(cmd, shell=True)
        creds = _libvirt_creds()
        cmd = 'chown ' + creds['user'] + ':' + creds['group'] + ' ' + fn_
        subprocess.call(cmd, shell=True)
    return True


def set_autostart(vm_, state='on'):
    '''
    Set the autostart flag on a VM so that the VM will start with the host
    system on reboot.

    CLI Example:

    .. code-block:: bash

        salt "*" virt.set_autostart <vm name> <on | off>
    '''
    dom = _get_dom(vm_)

    if state == 'on':
        return dom.setAutostart(1) == 0

    elif state == 'off':
        return dom.setAutostart(0) == 0

    else:
        # return False if state is set to something other than on or off
        return False


def destroy(vm_):
    '''
    Hard power down the virtual machine, this is equivalent to pulling the
    power

    CLI Example:

    .. code-block:: bash

        salt '*' virt.destroy <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.destroy() == 0


def undefine(vm_):
    '''
    Remove a defined vm, this does not purge the virtual machine image, and
    this only works if the vm is powered down

    CLI Example:

    .. code-block:: bash

        salt '*' virt.undefine <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.undefine() == 0


def purge(vm_, dirs=False):
    '''
    Recursively destroy and delete a virtual machine, pass True for dirs to
    also delete the directories containing the virtual machine disk images -
    USE WITH EXTREME CAUTION!

    CLI Example:

    .. code-block:: bash

        salt '*' virt.purge <vm name>
    '''
    disks = get_disks(vm_)
    try:
        if not destroy(vm_):
            return False
    except libvirt.libvirtError:
        # This is thrown if the machine is already shut down
        pass
    directories = set()
    for disk in disks:
        os.remove(disks[disk]['file'])
        directories.add(os.path.dirname(disks[disk]['file']))
    if dirs:
        for dir_ in directories:
            shutil.rmtree(dir_)
    undefine(vm_)
    return True


def virt_type():
    '''
    Returns the virtual machine type as a string

    CLI Example:

    .. code-block:: bash

        salt '*' virt.virt_type
    '''
    return __grains__['virtual']


def is_kvm_hyper():
    '''
    Returns a bool whether or not this node is a KVM hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.is_kvm_hyper
    '''
    try:
        if 'kvm_' not in salt.utils.fopen('/proc/modules').read():
            return False
    except IOError:
        # No /proc/modules? Are we on Windows? Or Solaris?
        return False
    return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])


def is_xen_hyper():
    '''
    Returns a bool whether or not this node is a XEN hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.is_xen_hyper
    '''
    try:
        if __grains__['virtual_subtype'] != 'Xen Dom0':
            return False
    except KeyError:
        # virtual_subtype isn't set everywhere.
        return False
    try:
        if 'xen_' not in salt.utils.fopen('/proc/modules').read():
            return False
    except IOError:
        # No /proc/modules? Are we on Windows? Or Solaris?
        return False
    return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])


def is_hyper():
    '''
    Returns a bool whether or not this node is a hypervisor of any kind

    CLI Example:

    .. code-block:: bash

        salt '*' virt.is_hyper
    '''
    try:
        import libvirt  # pylint: disable=import-error
    except ImportError:
        # not a usable hypervisor without libvirt module
        return False
    return is_xen_hyper() or is_kvm_hyper()


def vm_cputime(vm_=None):
    '''
    Return cputime used by the vms on this hyper in a
    list of dicts:

    .. code-block:: python

        [
            'your-vm': {
                'cputime' <int>
                'cputime_percent' <int>
                },
            ...
            ]

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_cputime
    '''
    host_cpus = __get_conn().getInfo()[2]

    def _info(vm_):
        dom = _get_dom(vm_)
        raw = dom.info()
        vcpus = int(raw[3])
        cputime = int(raw[4])
        cputime_percent = 0
        if cputime:
            # Divide by vcpus to always return a number between 0 and 100
            cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
        return {
            'cputime': int(raw[4]),
            'cputime_percent': int('{0:.0f}'.format(cputime_percent))
        }
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def vm_netstats(vm_=None):
    '''
    Return combined network counters used by the vms on this hyper in a
    list of dicts:

    .. code-block:: python

        [
            'your-vm': {
                'rx_bytes' : 0,
                'rx_packets' : 0,
                'rx_errs' : 0,
                'rx_drop' : 0,
                'tx_bytes' : 0,
                'tx_packets' : 0,
                'tx_errs' : 0,
                'tx_drop' : 0
                },
            ...
            ]

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_netstats
    '''
    def _info(vm_):
        dom = _get_dom(vm_)
        nics = get_nics(vm_)
        ret = {
            'rx_bytes': 0,
            'rx_packets': 0,
            'rx_errs': 0,
            'rx_drop': 0,
            'tx_bytes': 0,
            'tx_packets': 0,
            'tx_errs': 0,
            'tx_drop': 0
        }
        for attrs in six.itervalues(nics):
            if 'target' in attrs:
                dev = attrs['target']
                stats = dom.interfaceStats(dev)
                ret['rx_bytes'] += stats[0]
                ret['rx_packets'] += stats[1]
                ret['rx_errs'] += stats[2]
                ret['rx_drop'] += stats[3]
                ret['tx_bytes'] += stats[4]
                ret['tx_packets'] += stats[5]
                ret['tx_errs'] += stats[6]
                ret['tx_drop'] += stats[7]
        return ret
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def vm_diskstats(vm_=None):
    '''
    Return disk usage counters used by the vms on this hyper in a
    list of dicts:

    .. code-block:: python

        [
            'your-vm': {
                'rd_req' : 0,
                'rd_bytes' : 0,
                'wr_req' : 0,
                'wr_bytes' : 0,
                'errs' : 0
                },
            ...
            ]

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_blockstats
    '''
    def get_disk_devs(vm_):
        doc = minidom.parse(_StringIO(get_xml(vm_)))
        disks = []
        for elem in doc.getElementsByTagName('disk'):
            targets = elem.getElementsByTagName('target')
            target = targets[0]
            disks.append(target.getAttribute('dev'))
        return disks

    def _info(vm_):
        dom = _get_dom(vm_)
        # Do not use get_disks, since it uses qemu-img and is very slow
        # and unsuitable for any sort of real time statistics
        disks = get_disk_devs(vm_)
        ret = {'rd_req': 0,
               'rd_bytes': 0,
               'wr_req': 0,
               'wr_bytes': 0,
               'errs': 0
               }
        for disk in disks:
            stats = dom.blockStats(disk)
            ret['rd_req'] += stats[0]
            ret['rd_bytes'] += stats[1]
            ret['wr_req'] += stats[2]
            ret['wr_bytes'] += stats[3]
            ret['errs'] += stats[4]
        return ret
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        # Can not run function blockStats on inactive VMs
        for vm_ in list_active_vms():
            info[vm_] = _info(vm_)
    return info