【Nova】nova-compute code study (3): creating a virtual machine

Date: 2021-10-16 10:37:23

This post walks through how nova-compute creates a virtual machine instance.

First, some background. When an instance is created we need to give it block devices. OpenStack distinguishes four kinds of block device: root device, swap device, ephemeral device and cinder volume. The first three correspond to the root disk, swap disk and ephemeral disk specified when creating a flavor. Since I have never used cinder, I will skip it whenever it comes up and study it separately later.

root device: holds the boot loader and is what the virtual machine boots from; there can be more than one bootable device, but one of them must be given boot_index = 0

swap device: used as the swap partition (i.e. for virtual memory); there can be only one

ephemeral device: an additional temporary disk

Each block device has a corresponding block_device_mapping record in the database. As the name suggests, it holds the mapping information for that block device, including the following fields:

device_type: the device type (disk/cdrom/floppy/lun)

device_name: the device name, e.g. /dev/vda; a default is assigned when none is specified

boot_index: the boot order

disk_bus: the bus type; when not specified, the virtualization driver's default is used

source_type: image/snapshot/volume/blank; determines where the block device comes from. For example, image means the device is created from an image, in which case the corresponding image_id is also recorded

destination_type: local/volume; determines where the block device actually lives, either on a cinder-provided volume or locally on the hypervisor

guest_format: how the device is formatted for the guest (e.g. swap)
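As a purely illustrative example (all values below are made up, not taken from a real deployment), a block_device_mapping record for an image-backed root disk might look roughly like this:

# Hypothetical block_device_mapping record (illustrative values only)
bdm_example = {
    'device_name': '/dev/vda',    # a default is assigned when not specified
    'device_type': 'disk',
    'disk_bus': 'virtio',
    'boot_index': 0,
    'source_type': 'image',       # built from an image ...
    'image_id': '<image uuid>',   # ... so the image id is recorded as well
    'destination_type': 'local',  # stored on the hypervisor, not on a cinder volume
    'guest_format': None,
}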

So how do we tell, from the fields of a block_device_mapping record, which kind of block device it describes? (A small sketch of these checks follows the list below.)

swap device: source_type is blank, destination_type is local, and guest_format is swap

ephemeral device: source_type is blank and it is not a swap device

root device: source_type is not blank and boot_index = 0
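A minimal sketch of these three checks, assuming a bdm is handled as a plain dict (the helper names here are invented for illustration; Nova keeps its own versions of these checks in nova/block_device.py, e.g. new_format_is_swap / new_format_is_ephemeral used further below):

def is_swap(bdm):
    return (bdm.get('source_type') == 'blank' and
            bdm.get('destination_type') == 'local' and
            bdm.get('guest_format') == 'swap')

def is_ephemeral(bdm):
    return bdm.get('source_type') == 'blank' and not is_swap(bdm)

def is_root(bdm):
    return bdm.get('source_type') != 'blank' and bdm.get('boot_index') == 0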

In the earlier analysis of nova-scheduler we saw that, once scheduling is done, nova-scheduler kicks off the actual build by calling the run_instance RPC API of the nova-compute service on the target host. So the analysis starts from that run_instance RPC API. Creating an instance breaks down roughly into the following steps:

1. BUILDING stage: validate the instance group policy;

2. NETWORKING stage: asynchronously allocate network resources for the instance;

3. BLOCK_DEVICE_MAPPING stage: assign default device names to the instance's block devices;

4. SPAWNING stage: cache the image, create the instance's disk files (including file injection), perform the volume-related work, set up the instance's iptables firewall rules, create and start the virtual machine through the virtualization driver, and wait for its power state to become RUNNING, at which point creation is complete.

File injection can be done in two ways: by attaching an ISO file through a cdrom (the config drive), or by writing the files directly into the VM's guest filesystem.
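Before diving into the code, the overall call chain covered below is roughly the following (the listing starts bottom-up, with the blockinfo helper functions used by the libvirt driver):

ComputeManager.run_instance
-> _run_instance -> _prebuild_instance, _build_instance
-> _build_instance: _validate_instance_group_policy, _allocate_network, _default_block_device_names, _prep_block_device, _spawn
-> _spawn -> LibvirtDriver.spawn: blockinfo.get_disk_info, _create_image, to_xml, _create_domain_and_network, then wait for the power state to become RUNNING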

# Check whether the given disk_bus is valid for the given virt_type
def is_disk_bus_valid_for_virt(virt_type, disk_bus):
valid_bus = {
'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc'],
'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc'],
'xen': ['xen', 'ide'],
'uml': ['uml'],
'lxc': ['lxc'],
}

if virt_type not in valid_bus:
raise exception.UnsupportedVirtType(virt=virt_type)

return disk_bus in valid_bus[virt_type]
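# A quick sanity check against the table above (illustrative, not from the source):
#   is_disk_bus_valid_for_virt('kvm', 'virtio')   -> True
#   is_disk_bus_valid_for_virt('xen', 'virtio')   -> False
#   is_disk_bus_valid_for_virt('vmware', 'ide')   -> raises UnsupportedVirtType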

# Get the disk bus for a given device type
def get_disk_bus_for_device_type(virt_type, image_meta=None, device_type="disk"):
if image_meta:
key = "hw_" + device_type + "_bus"
disk_bus = image_meta.get('properties', {}).get(key)
# First try to find a disk_bus in the image metadata
if disk_bus is not None:
# If the image metadata specifies a disk_bus, check that it is valid
if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
raise exception.UnsupportedHardware(model=disk_bus,
virt=virt_type)
# If it is valid, return it directly
return disk_bus

# Otherwise fall back to the hypervisor's default disk_bus
if virt_type == "uml":
if device_type == "disk":
return "uml"
elif virt_type == "lxc":
return "lxc"
elif virt_type == "xen":
if device_type == "cdrom":
return "ide"
elif device_type == "disk":
return "xen"
elif virt_type in ("qemu", "kvm"):
if device_type == "cdrom":
arch = libvirt_utils.get_arch(image_meta)
if arch in ("ppc", "ppc64"):
return "scsi"
else:
return "ide"
elif device_type == "disk":
return "virtio"
elif device_type == "floppy":
return "fdc"

return None

# Derive the bus from a device name
def get_disk_bus_for_disk_dev(virt_type, disk_dev):
if disk_dev[:2] == 'hd':
return "ide"
elif disk_dev[:2] == 'sd':
if virt_type == "xen":
return "xen"
else:
return "scsi"
elif disk_dev[:2] == 'vd':
return "virtio"
elif disk_dev[:2] == 'fd':
return "fdc"
elif disk_dev[:3] == 'xvd':
return "xen"
elif disk_dev[:3] == 'ubd':
return "uml"
else:
raise exception.NovaException(
_("Unable to determine disk bus for '%s'") %
disk_dev[:1])

# Get the device name prefix for a disk bus type
def get_dev_prefix_for_disk_bus(disk_bus):
# If disk_prefix is configured, just return it
if CONF.libvirt.disk_prefix:
return CONF.libvirt.disk_prefix
if disk_bus == "ide":
return "hd"
elif disk_bus == "virtio":
return "vd"
elif disk_bus == "xen":
return "sd"
elif disk_bus == "scsi":
return "sd"
elif disk_bus == "usb":
return "sd"
elif disk_bus == "fdc":
return "fd"
elif disk_bus == "uml":
return "ubd"
elif disk_bus == "lxc":
return None
else:
raise exception.NovaException(
_("Unable to determine disk prefix for %s") %
disk_bus)

# Get the maximum number of devices supported on a disk bus
def get_dev_count_for_disk_bus(disk_bus):
# The ide bus supports only 4 devices
if disk_bus == "ide":
return 4
else:
return 26

# Find a free device name for a given bus type
def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
# Get the device name prefix for the bus type
dev_prefix = get_dev_prefix_for_disk_bus(bus)
if dev_prefix is None:
return None

# Get the maximum number of devices supported on the bus
max_dev = get_dev_count_for_disk_bus(bus)
# last_device indicates that we want the last possible device
if last_device:
devs = [max_dev - 1]
else:
devs = range(max_dev)

for idx in devs:
# Device names on Linux follow a pattern: for devices of the same kind, names start at '<prefix>a' and increment
disk_dev = dev_prefix + chr(ord('a') + idx)
# Also make sure the name is not already used in the existing mapping
if not has_disk_dev(mapping, disk_dev):
return disk_dev

raise exception.NovaException(
_("No free disk device names for prefix '%s'"),
dev_prefix)
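# For example, on the virtio bus with an empty mapping this returns 'vda';
# if 'vda' is already mapped it returns 'vdb', and so on up to 'vdz'.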

# Get the root block device's info
def get_root_info(virt_type, image_meta, root_bdm, disk_bus, cdrom_bus, root_device_name=None):
# i.e. there is no root bdm, or its source_type is image and destination_type is local
no_root_bdm = (not root_bdm or (
root_bdm.get('source_type') == 'image' and
root_bdm.get('destination_type') == 'local'))
if no_root_bdm:
if (image_meta and image_meta.get('disk_format') == 'iso'):
# If the image's disk_format is iso, we boot from a cdrom
root_device_bus = cdrom_bus
root_device_type = 'cdrom'
else:
# Boot from disk
root_device_bus = disk_bus
root_device_type = 'disk'
if root_device_name:
# As Linux users will know, the device name usually tells you the device type and hence its bus type.
# Derive the bus type from the root device name; a vd* name means the virtio bus
root_device_bus = get_disk_bus_for_disk_dev(virt_type,
root_device_name)
else:
# Conversely, we can derive a device name from the bus type, as long as it does not clash with an existing device
root_device_name = find_disk_dev_for_disk_bus({}, root_device_bus)

# Return the root block device info
return {'bus': root_device_bus,
'type': root_device_type,
'dev': block_device.strip_dev(root_device_name),
'boot_index': '1'}
else:
# If the root bdm has no device name but root_device_name was given
if not get_device_name(root_bdm) and root_device_name:
# Copy root_bdm before modifying it, so the original is not polluted
root_bdm = root_bdm.copy()
root_bdm['device_name'] = root_device_name
# Return root_bdm's info: bus, dev, type, format, boot_index, etc.
return get_info_from_bdm(virt_type, root_bdm, {}, disk_bus)

# Build the instance's final disk mapping
def get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
block_device_info=None,
image_meta=None, rescue=False):
# Get the instance's flavor
inst_type = flavors.extract_flavor(instance)

mapping = {}

# Collect the names of all block devices that already have one, e.g. ['sda', 'sdb']
pre_assigned_device_names = \
[block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
driver.block_device_info_get_ephemerals(block_device_info), # list of DriverEphemeralBlockDevice objects
[driver.block_device_info_get_swap(block_device_info)], # a single DriverSwapBlockDevice (only one element)
driver.block_device_info_get_mapping(block_device_info)) # list of DriverSnapshotBlockDevice / DriverVolumeBlockDevice objects
if get_device_name(bdm)]

if virt_type == "lxc":
root_disk_bus = disk_bus
root_device_type = 'disk'

# Take the next free device on this bus as the root block device
root_info = get_next_disk_info(mapping,
root_disk_bus,
root_device_type,
boot_index=1)
mapping['root'] = root_info
mapping['disk'] = root_info

return mapping

if rescue:
# Rescue mode
# The next free device on the bus serves both as the root block device and as the rescue disk
rescue_info = get_next_disk_info(mapping,
disk_bus, boot_index=1)
mapping['disk.rescue'] = rescue_info
mapping['root'] = rescue_info

os_info = get_next_disk_info(mapping,
disk_bus)
mapping['disk'] = os_info

return mapping

# From the DriverSnapshotBlockDevice/DriverVolumeBlockDevice list, pick the root block device, i.e. the one with boot_index 0 (only one is returned)
root_bdm = block_device.get_root_bdm(
driver.block_device_info_get_mapping(block_device_info))

# Get the root block device's name, without the /dev/ prefix
root_device_name = block_device.strip_dev(
driver.block_device_info_get_root(block_device_info))
# Get the root block device info; when root_bdm is None this is the default info
root_info = get_root_info(virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name)

mapping['root'] = root_info
if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
block_device_info):
# If there is no root bdm and root_info's device name is not already present in block_device_info,
# record the default root disk info
mapping['disk'] = root_info

# Is there a default ephemeral block device?
default_eph = has_default_ephemeral(instance, disk_bus, block_device_info,
mapping)
if default_eph:
mapping['disk.local'] = default_eph

for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
# Get eph's info: bus, dev, type, format, boot_index, etc.
eph_info = get_info_from_bdm(
virt_type, eph, mapping, disk_bus,
assigned_devices=pre_assigned_device_names)
# get_eph_disk -> 'disk.eph' + str(index); record eph's info under that key
mapping[get_eph_disk(idx)] = eph_info
# Write eph_info back into the eph object
update_bdm(eph, eph_info)

# Get the swap block device driver object
swap = driver.block_device_info_get_swap(block_device_info)
if swap and swap.get('swap_size', 0) > 0:
# Get the swap device's info
swap_info = get_info_from_bdm(virt_type, swap, mapping, disk_bus)
mapping['disk.swap'] = swap_info
# Write swap_info back into the swap object
update_bdm(swap, swap_info)
elif inst_type['swap'] > 0:
# If block_device_info has no swap info (or its swap_size <= 0)
# but the flavor's swap size > 0, we still need to create a swap disk for the instance;
# get the default swap disk info
swap_info = get_next_disk_info(mapping,
disk_bus)
# Record it in the mapping only if the swap device name is not already in block_device_info
if not block_device.volume_in_mapping(swap_info['dev'],
block_device_info):
mapping['disk.swap'] = swap_info

# Handle the block devices other than ephemeral and swap
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
# Get vol's info, assigning a default device name if necessary
vol_info = get_info_from_bdm(
virt_type, vol, mapping,
assigned_devices=pre_assigned_device_names)
mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
update_bdm(vol, vol_info)

# config drive: a drive used for file injection
if configdrive.required_by(instance):
# Get the config drive type: cdrom/disk
device_type = get_config_drive_type()
disk_bus = get_disk_bus_for_device_type(virt_type,
image_meta,
device_type)
config_info = get_next_disk_info(mapping,
disk_bus,
device_type,
last_device=True)
mapping['disk.config'] = config_info

# Return the final disk mapping
return mapping

def get_disk_info(virt_type, instance, block_device_info=None,
image_meta=None, rescue=False):

# Get the bus for disk devices
disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, "disk")
# Get the bus for cdrom devices
cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, "cdrom")
# Build the instance's final disk mapping
mapping = get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
block_device_info,
image_meta, rescue)

return {'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
'mapping': mapping}
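# For orientation, an illustrative (not captured) result for a KVM instance booted from a
# qcow2 image whose flavor also defines an ephemeral disk and swap might look roughly like:
#   {'disk_bus': 'virtio', 'cdrom_bus': 'ide',
#    'mapping': {'root':       {'bus': 'virtio', 'type': 'disk', 'dev': 'vda', 'boot_index': '1'},
#                'disk':       {'bus': 'virtio', 'type': 'disk', 'dev': 'vda', 'boot_index': '1'},
#                'disk.local': {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'},
#                'disk.swap':  {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}}}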

# Assign default names to the block devices
def default_device_names(virt_type, context, instance, root_device_name, ephemerals, swap, block_device_mapping):

block_device_info = {
'root_device_name': root_device_name,
'swap': driver_block_device.get_swap( # convert the swap info into DriverSwapBlockDevice objects and return the single instance
driver_block_device.convert_swap(swap)), # because there can only be one swap block device
'ephemerals': driver_block_device.convert_ephemerals(ephemerals), # convert ephemeral info into a list of DriverEphemeralBlockDevice
'block_device_mapping': ( # convert the remaining devices into DriverVolumeBlockDevice and DriverSnapshotBlockDevice lists
driver_block_device.convert_volumes( # DriverSnapshotBlockDevice is a subclass of DriverVolumeBlockDevice
block_device_mapping) +
driver_block_device.convert_snapshots(
block_device_mapping))
}

# Build the final disk mapping; the changes are saved back into the driver block device objects inside block_device_info
get_disk_info(virt_type, instance, block_device_info)

for driver_bdm in itertools.chain(block_device_info['ephemerals'],
[block_device_info['swap']] if
block_device_info['swap'] else [],
block_device_info['block_device_mapping']):
# Save each driver block device object's info to the database
driver_bdm.save(context)

class LibvirtDriver(driver.ComputeDriver):

# Get the default name of the root block device
def default_root_device_name(self, instance, image_meta, root_bdm):

# Get the bus type for disk devices
disk_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "disk")
# Get the bus type for cdrom devices
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "cdrom")
# Get the root block device info
root_info = blockinfo.get_root_info(
CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
cdrom_bus)
# Return the root device name, e.g. /dev/vda
return block_device.prepend_dev(root_info['dev'])

# Assign default names to the instance's block devices
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
ephemerals, swap, block_device_mapping = block_device_lists[:3]

# Assign the default names and save them to the database
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance, root_device_name,
ephemerals, swap,
block_device_mapping)

# Create a raw-format image
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))

# Create the ephemeral image in the cache directory, only if it does not already exist; one cached image is needed per size and filesystem type,
# e.g. a 20GB ephemeral disk produces an ephemeral_20_default image
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None):
if not is_block_dev:
# Create a raw ephemeral image of ephemeral_size
self._create_local(target, ephemeral_size)

# Format the raw ephemeral image as needed
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)

# Create the swap image in the cache directory, only if it does not already exist; one cached image is needed per size,
# e.g. a 1024MB swap disk produces a swap_1024 image
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)

# Inject the instance's metadata, network config, admin password, files, etc. into the instance's disk
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None

if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance['key_data'])
else:
key = None

if not CONF.libvirt.inject_password:
admin_pass = None

# The network configuration template to inject
net = netutils.get_injected_network_template(network_info)

metadata = instance.get('metadata')

image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_path = self.image_backend.image(
instance,
'disk' + suffix,
image_type).path
img_id = instance['image_ref']

try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)

def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True):
if not suffix:
suffix = ''

# Check whether the instance boots from a volume
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)

def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)

def raw(fname):
return image(fname, image_type='raw')

# Create the directory on the hypervisor that holds the instance's files, referred to below as the instance directory
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))

LOG.info(_('Creating image'), instance=instance)

# If console.log exists in the instance directory, change its owner to nova
self._chown_console_log_for_instance(instance)

# If disk.config exists in the instance directory, change its owner to nova
self._chown_disk_config_for_instance(instance)

# Truncate the console.log file
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)

if not disk_images:
# Gather the image ids
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}

# Download the images referenced by kernel_id and ramdisk_id from Glance into the local cache directory,
# and create the corresponding files named kernel and ramdisk in the instance directory
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])

# Get the instance's flavor
inst_type = flavors.extract_flavor(instance)

if not booted_from_volume:
# If the instance does not boot from a volume
# Get the cache filename for image_id (a sha1 hash of the id)
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * units.Gi

if size == 0 or suffix == '.rescue':
size = None

# Download the image referenced by image_id from Glance into the local cache directory under root_fname,
# and create a qcow2 overlay named disk in the instance directory
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])

# Get the filesystem type for the instance's guest OS; the fallback is 'default'
os_type_with_default = disk.get_fs_type_for_os_type(
instance['os_type'])

# ephemeral_gb as specified when the instance was created
# Note: if ephemeral_gb is 0 there is no disk.local in disk_mapping
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * units.Gi
# Create the raw ephemeral image in the cache directory if it does not exist,
# and create a qcow2 overlay named disk.local in the instance directory
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)

# As above, create the ephemeral image in the cache directory,
# and a qcow2 overlay named disk.eph<idx> in the instance directory
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])

# As above, create the swap image in the cache directory,
# and a qcow2 overlay named disk.swap in the instance directory
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0

swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']

if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)

if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass

inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
# <instance directory>/disk.config
configdrive_path = self._get_disk_config_path(instance)
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)

try:
# Use genisoimage to pack the metadata files into an iso-format disk.config file
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)

elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
LOG.warn(_('File injection into a boot from volume '
'instance is not supported'), instance=instance)
# Inject the instance's metadata, network config, admin password, files, etc. into the instance's disk
self._inject_data(
instance, network_info, admin_pass, files, suffix)

if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')

# Enable hairpin mode on the bridge ports the instance is attached to;
# with hairpin off, the bridge will not forward traffic back out of the port it arrived on
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
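# Roughly equivalent to running, for every interface of the instance:
#   echo 1 > /sys/class/net/<interface>/brport/hairpin_mode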

# Define the domain and, if requested, start it
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)

if CONF.libvirt.virt_type == 'lxc':
if not inst_path:
inst_path = None

container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
container_root_device = disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)

if container_root_device:
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': container_root_device})

if xml:
try:
# Define the domain from the XML
domain = self._conn.defineXML(xml)
except Exception as e:
LOG.error(_("An error occurred while trying to define a domain"
" with xml: %s") % xml)
raise e

if power_on:
try:
# If power_on was requested, start the domain
domain.createWithFlags(launch_flags)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while trying to launch a "
"defined domain with xml: %s") %
domain.XMLDesc(0))

if not utils.is_neutron():
try:
# hairpin on
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while enabling hairpin "
"mode on domain with xml: %s")
% domain.XMLDesc(0))

if CONF.libvirt.virt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)

return domain

def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
reboot=False, vifs_already_plugged=False):

block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)

# Handle the volume-related block devices
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)

if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)

if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)

if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)

timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused() and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []

launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
domain = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
# With nova-network this only needs to make sure the bridge exists, creating it if necessary
self.plug_vifs(instance, network_info)
# Create and apply the instance's iptables firewall rules
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# Define and start the domain
domain = self._create_domain(
xml, instance=instance,
launch_flags=launch_flags,
power_on=power_on)
# noop
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
except eventlet.timeout.Timeout:
LOG.warn(_('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance['uuid']})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()

if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
# If the launch flags include VIR_DOMAIN_START_PAUSED, resume the domain here
domain.resume()
return domain

def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None):
# Get the instance's disk info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
# Cache the images and create the instance's disk files
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
# Generate the libvirt XML that defines the domain
xml = self.to_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)

# Create and start the domain, and set up its network
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)

def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']

if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()

# Wait for the domain's power state to become RUNNING
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()

class ComputeManager(manager.Manager):

def __init__(self, compute_driver=None, *args, **kwargs):
......
self._resource_tracker_dict = {}
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)

# _resource_tracker_dict maps the nodes managed by this service to their resource trackers;
# given a node name, return its resource tracker, creating it if necessary
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)

rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nodename)
self._resource_tracker_dict[nodename] = rt
return rt

def _update_resource_tracker(self, context, instance):
# Check that the instance's host is this host
# and that this compute service manages the instance's node
if (instance['host'] == self.host and
self.driver.node_is_available(instance['node'])):
# Get the node's resource tracker
rt = self._get_resource_tracker(instance.get('node'))
# Update the resource usage tracked for that node
rt.update_usage(context, instance)

# Decode the contents of the injected files
def _decode_files(self, injected_files):
if not injected_files:
return []

def _decode(f):
path, contents = f
try:
# Injected file contents are base64-encoded
decoded = base64.b64decode(contents)
# Return the file path and the decoded contents
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)

return [_decode(f) for f in injected_files]
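# For illustration (hypothetical values): injected_files arrives as (path, base64 content)
# pairs such as [('/etc/motd', 'aGVsbG8=')], and _decode_files turns that into
# [('/etc/motd', 'hello')].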

# Check whether the instance already exists on the hypervisor
def _check_instance_exists(self, context, instance):
# Existence is checked by instance name; a name collision is essentially impossible
# because the name is generated from the instance id, which is auto-incremented
if self.driver.instance_exists(instance['name']):
# If it exists, raise InstanceExists
raise exception.InstanceExists(name=instance['name'])

# Update the instance's information
def _instance_update(self, context, instance_uuid, **kwargs):
# Update the instance's database record
instance_ref = self.conductor_api.instance_update(context,
instance_uuid,
**kwargs)
# Update the resource usage of the node the instance lives on
self._update_resource_tracker(context, instance_ref)

return instance_ref

def _start_building(self, context, instance):
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
# Set the instance's vm_state to BUILDING
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))

# Pre-build steps
def _prebuild_instance(self, context, instance):
# Check whether the instance already exists on the hypervisor
self._check_instance_exists(context, instance)

try:
# Start building; this really just updates the instance's vm_state/task_state
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
raise exception.BuildAbortException(instance_uuid=instance['uuid'],
reason=msg)

# Validate the instance group policy
def _validate_instance_group_policy(self, context, instance,
filter_properties):
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group')
# If no instance group was specified, there is nothing to check
if not group_hint:
return

@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = instance_group_obj.InstanceGroup.get_by_hint(
context, group_hint)
# If the group's policies do not include anti-affinity, no validation is needed
if 'anti-affinity' not in group.policies:
return

group_hosts = group.get_hosts(context, exclude=[instance['uuid']])
if self.host in group_hosts:
# With an anti-affinity policy, if this host already runs any member of the group,
# validation fails because anti-affinity would be violated, so raise RescheduledException
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance['uuid'],
reason=msg)

_do_validation(context, instance, group_hint)

# Allocates network resources asynchronously
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
LOG.debug(_("Allocating IP information in the background."),
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warn(_("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
# When network_allocate_retries <= 1 the number of attempts is 1; otherwise it is network_allocate_retries + 1
attempts = retries > 1 and retries + 1 or 1
retry_time = 1
for attempt in range(1, attempts + 1):
try:
# Ask nova-network over RPC to allocate network resources for the instance
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug(_('Instance network_info: |%s|'), nwinfo,
instance=instance)
# Update the instance's system metadata and save it to the database
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
sys_meta['network_allocated'] = 'True'
self._instance_update(context, instance['uuid'],
system_metadata=sys_meta)
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warn(_('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
# Sleep for a while before the next retry -- is it certain this does not block the thread?
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
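# For example, assuming network_allocate_retries is set to 3: four attempts in total,
# sleeping 1s, 2s and 4s between them (the sleep is capped at 30s).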

# Allocate network resources for the instance
def _allocate_network(self, context, instance, requested_networks, macs, security_groups, dhcp_options):
# Set the instance's task_state to NETWORKING
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=[None])
# Check whether the instance's image is the configured VPN image
is_vpn = pipelib.is_vpn_image(instance['image_ref'])
# Allocate the network resources asynchronously
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)

# Get the default root device name
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
# Ask the virtualization driver for the root device's default name
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
# If the driver does not implement default_root_device_name, fall back to the generic helper in compute_utils
return compute_utils.get_next_device_name(instance, [])

def _default_device_names_for_instance(self, instance, root_device_name, *block_device_lists):
try:
# Ask the virtualization driver for the block devices' default names
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
# If the driver does not implement default_device_names_for_instance, fall back to the generic helper in compute_utils
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)

# Make sure every block device has a device_name, providing a default when missing
def _default_block_device_names(self, context, instance, image_meta, block_devices):
# Find the root block device among all block devices, i.e. the one with boot_index 0;
# if several have boot_index 0, pick one of them
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return

root_device_name = None
update_instance = False
update_root_bdm = False

if root_bdm.device_name:
# If the root bdm record in the database specifies a device_name,
# copy it into the instance record's root_device_name
root_device_name = root_bdm.device_name
instance['root_device_name'] = root_device_name
update_instance = True
elif instance['root_device_name']:
# If the root bdm record has no device_name but the instance record has a root_device_name,
# copy that root_device_name into the root bdm record's device_name
root_device_name = instance['root_device_name']
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
# Otherwise compute a default root device name
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)

instance['root_device_name'] = root_device_name
root_bdm.device_name = root_device_name
update_instance = update_root_bdm = True

if update_instance:
# If the instance record was changed, save it to the database
self._instance_update(context, instance['uuid'],
root_device_name=root_device_name)
if update_root_bdm:
# If the root bdm was changed, save it to the database as well
root_bdm.save()

def _is_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot') and
driver_block_device.is_implemented(bdm))

# Pick out the ephemeral block devices
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
# Pick out the swap block devices
swap = filter(block_device.new_format_is_swap,
block_devices)
# Keep the block devices whose source_type is not blank and whose type is implemented
block_device_mapping = filter(_is_mapping, block_devices)

# Assign default names to the instance's block devices
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)

# Prepare the block devices
def _prep_block_device(self, context, instance, bdms):
try:
block_device_info = {
'root_device_name': instance['root_device_name'],
'swap': driver_block_device.convert_swap(bdms),
'ephemerals': driver_block_device.convert_ephemerals(bdms),
'block_device_mapping': (
# volume-backed block devices need to be initialized through volume_api
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
self.driver) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created))
}

if self.use_legacy_block_device_info:
# If legacy block device info is in use, convert to the legacy format; this is not used with libvirt
for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
block_device_info[bdm_type] = \
driver_block_device.legacy_block_devices(
block_device_info[bdm_type])

# Pull the single swap entry out of the list, since only one swap block device is allowed
block_device_info['swap'] = driver_block_device.get_swap(
block_device_info['swap'])
return block_device_info

except exception.OverQuota:
msg = ('Failed to create block device for instance due to being '
'over volume resource quota')
LOG.debug(msg, instance=instance)
raise exception.InvalidBDM()

except Exception:
LOG.exception(_('Instance failed block device setup'),
instance=instance)
raise exception.InvalidBDM()

@object_compat
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password,
set_access_ip=False):
# Set vm_state to BUILDING and task_state to SPAWNING
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)

try:
# Call the virtualization driver's spawn method
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)

current_power_state = self._get_power_state(context, instance)

instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()

# Build the instance
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
context = context.elevated()

# If neutron security groups are in use, take the security groups chosen at boot time from request_spec
if request_spec and self.is_neutron_security_groups:
security_groups = request_spec.get('security_group')
else:
security_groups = []

if node is None:
# If no node was specified, pick one of the nodes managed by this service
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug(_("No node specified, defaulting to %s"), node)

network_info = None
# Load the instance's block device mappings from the database
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])


injected_files_orig = injected_files
# Decode the contents of the injected files
injected_files = self._decode_files(injected_files)

# Get this node's resource tracker
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
# Create a Claim context manager; on an exception the claimed resources are returned to the resource tracker
with rt.instance_claim(context, instance, limits):
# Validate the instance group policy. As mentioned earlier, an instance can belong to an instance group
# whose policy says whether members should be affine or anti-affine; with anti-affinity an instance must not land on a host that already runs a group member
self._validate_instance_group_policy(context, instance,
filter_properties)
# The macs returned here are None
macs = self.driver.macs_for_instance(instance)
# dhcp_options is also None
dhcp_options = self.driver.dhcp_options_for_instance(instance)

# Allocate network resources for the instance
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups,
dhcp_options)

# Set the instance's task_state to BLOCK_DEVICE_MAPPING
self._instance_update(
context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)

# Make sure all block devices have device names, providing defaults where missing
self._default_block_device_names(context, instance, image_meta,
bdms)

# Prepare the block devices
block_device_info = self._prep_block_device(
context, instance, bdms)

set_access_ip = (is_first_time and
not instance['access_ip_v4'] and
not instance['access_ip_v6'])

# Spawn the instance
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password,
set_access_ip=set_access_ip)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared during build")
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _('Failed to dealloc network '
'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
instance_uuid=instance['uuid'],
reason=msg)
except (exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException) as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
if network_info is not None:
network_info.wait(do_raise=False)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _('Failed to dealloc network '
'for failed instance')
LOG.exception(msg, instance=instance)
except Exception:
exc_info = sys.exc_info()
if network_info is not None:
network_info.wait(do_raise=False)
rescheduled = self._reschedule_or_error(context, instance,
exc_info, requested_networks, admin_password,
injected_files_orig, is_first_time, request_spec,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
self._log_original_error(exc_info, instance['uuid'])
raise exception.RescheduledException(
instance_uuid=instance['uuid'],
reason=unicode(exc_info[1]))
else:
raise exc_info[0], exc_info[1], exc_info[2]

return instance, network_info

def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec):

extra_usage_info = {}

def notify(status, msg="", fault=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
info['message'] = unicode(msg)
self._notify_about_instance_usage(context, instance, type_,
extra_usage_info=info, fault=fault, **kwargs)

try:
# Do the pre-build work
self._prebuild_instance(context, instance)

if request_spec and request_spec.get('image'):
image_meta = request_spec['image']
else:
image_meta = {}

extra_usage_info = {"image_name": image_meta.get('name', '')}

# Notify that the build has started
notify("start")

# Build the instance
instance, network_info = self._build_instance(context,
request_spec, filter_properties, requested_networks,
injected_files, admin_password, is_first_time, node,
instance, image_meta, legacy_bdm_in_spec)
# Notify that the build succeeded
notify("end", msg=_("Success"), network_info=network_info)

except exception.RescheduledException as e:
notify("error", fault=e)

except exception.BuildAbortException as e:
LOG.info(e)
notify("end", msg=unicode(e)) # notify that build is done

except Exception as e:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance['uuid'])
notify("error", fault=e) # notify that build failed

@wrap_exception() # outermost decorator: catches exceptions and reports them through the notifier
@reverts_task_state # reverts the instance's task_state when the operation fails
@wrap_instance_event # records the operation on the instance before it runs and records the result afterwards
@wrap_instance_fault # catches instance-related exceptions, records the fault in the database, then re-raises
def run_instance(self, context, instance, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node, legacy_bdm_in_spec):

if filter_properties is None:
filter_properties = {}

# Building an instance has to be serialized with a lock on the instance uuid
@utils.synchronized(instance['uuid'])
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec)
# Do the actual build
do_run_instance()