## @file\r
# Common routines used by all tools\r
#\r
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>\r
+# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
# which accompanies this distribution. The full text of the license may be found at\r
import re\r
import cPickle\r
import array\r
+import shutil\r
+from struct import pack\r
from UserDict import IterableUserDict\r
from UserList import UserList\r
\r
from CommonDataClass.DataClass import *\r
from Parsing import GetSplitValueList\r
from Common.LongFilePathSupport import OpenLongFilePath as open\r
+from Common.MultipleWorkspace import MultipleWorkspace as mws\r
\r
## Regular expression used to find out place holders in string template\r
-gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE|re.UNICODE)\r
+gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)\r
\r
## Dictionary used to store file time stamp for quick re-access\r
gFileTimeStampCache = {} # {file path : file time stamp}\r
## Dictionary used to store dependencies of files\r
gDependencyDatabase = {} # arch : {file path : [dependent files list]}\r
\r
+def GetVariableOffset(mapfilepath, efifilepath, varnames):\r
+ """ Parse map file to get variable offset in current EFI file \r
+ @param mapfilepath Map file absolute path
+ @param efifilepath: EFI binary file full path\r
+ @param varnames iterable container whose elements are variable names to be searched
+ \r
+ @return List whose elements are tuples of variable name and raw offset
+ """\r
+ lines = []\r
+ try:\r
+ f = open(mapfilepath, 'r')\r
+ lines = f.readlines()\r
+ f.close()\r
+ except:\r
+ return None\r
+ \r
+ if len(lines) == 0: return None\r
+ firstline = lines[0].strip()\r
+ if (firstline.startswith("Archive member included ") and\r
+ firstline.endswith(" file (symbol)")):\r
+ return _parseForGCC(lines, efifilepath, varnames)\r
+ return _parseGeneral(lines, efifilepath, varnames)\r
+\r
+def _parseForGCC(lines, efifilepath, varnames):\r
+ """ Parse map file generated by GCC linker """\r
+ status = 0\r
+ sections = []\r
+ varoffset = []\r
+ for line in lines:\r
+ line = line.strip()\r
+ # state machine transition
+ if status == 0 and line == "Memory Configuration":\r
+ status = 1\r
+ continue\r
+ elif status == 1 and line == 'Linker script and memory map':\r
+ status = 2\r
+ continue\r
+ elif status ==2 and line == 'START GROUP':\r
+ status = 3\r
+ continue\r
+\r
+ # status handler\r
+ if status == 2:\r
+ m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$', line)\r
+ if m != None:\r
+ sections.append(m.groups(0))\r
+ for varname in varnames:\r
+ m = re.match("^([\da-fA-Fx]+) +[_]*(%s)$" % varname, line)\r
+ if m != None:\r
+ varoffset.append((varname, int(m.groups(0)[0], 16) , int(sections[-1][1], 16), sections[-1][0]))\r
+\r
+ if not varoffset:\r
+ return []\r
+ # get section information from efi file\r
+ efisecs = PeImageClass(efifilepath).SectionHeaderList\r
+ if efisecs == None or len(efisecs) == 0:\r
+ return []\r
+ #redirection\r
+ redirection = 0\r
+ for efisec in efisecs:\r
+ for section in sections:\r
+ if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':\r
+ redirection = int(section[1], 16) - efisec[1]\r
+\r
+ ret = []\r
+ for var in varoffset:\r
+ for efisec in efisecs:\r
+ if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:\r
+ ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))\r
+ return ret\r
+\r
+def _parseGeneral(lines, efifilepath, varnames):\r
+ status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table\r
+ secs = [] # key = section name\r
+ varoffset = []\r
+ secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)\r
+ symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)\r
+\r
+ for line in lines:\r
+ line = line.strip()\r
+ if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):\r
+ status = 1\r
+ continue\r
+ if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):\r
+ status = 2\r
+ continue\r
+ if re.match("^entry point at", line):\r
+ status = 3\r
+ continue \r
+ if status == 1 and len(line) != 0:\r
+ m = secRe.match(line)\r
+ assert m != None, "Fail to parse the section in map file , line is %s" % line\r
+ sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)\r
+ secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])\r
+ if status == 2 and len(line) != 0:\r
+ for varname in varnames:\r
+ m = symRe.match(line)\r
+ assert m != None, "Fail to parse the symbol in map file, line is %s" % line\r
+ sec_no, sym_offset, sym_name, vir_addr = m.groups(0)\r
+ sec_no = int(sec_no, 16)\r
+ sym_offset = int(sym_offset, 16)\r
+ vir_addr = int(vir_addr, 16)\r
+ m2 = re.match('^[_]*(%s)' % varname, sym_name)\r
+ if m2 != None:\r
+ # found a binary pcd entry in map file
+ for sec in secs:\r
+ if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):\r
+ varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])\r
+\r
+ if not varoffset: return []\r
+\r
+ # get section information from efi file\r
+ efisecs = PeImageClass(efifilepath).SectionHeaderList\r
+ if efisecs == None or len(efisecs) == 0:\r
+ return []\r
+\r
+ ret = []\r
+ for var in varoffset:\r
+ index = 0\r
+ for efisec in efisecs:\r
+ index = index + 1\r
+ if var[1].strip() == efisec[0].strip():\r
+ ret.append((var[0], hex(efisec[2] + var[2])))\r
+ elif var[4] == index:\r
+ ret.append((var[0], hex(efisec[2] + var[2])))\r
+\r
+ return ret\r
+\r
+## Routine to process duplicated INF\r
+#\r
+# This function is called by following two cases:\r
+# Case 1 in DSC:\r
+# [components.arch]\r
+# Pkg/module/module.inf\r
+# Pkg/module/module.inf {\r
+# <Defines>\r
+# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836\r
+# }\r
+# Case 2 in FDF:\r
+# INF Pkg/module/module.inf\r
+# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf\r
+#\r
+# This function copies Pkg/module/module.inf to\r
+# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf\r
+#\r
+# @param Path Original PathClass object\r
+# @param BaseName New file base name\r
+#\r
+# @retval return the new PathClass object\r
+#\r
+def ProcessDuplicatedInf(Path, BaseName, Workspace):\r
+ Filename = os.path.split(Path.File)[1]\r
+ if '.' in Filename:\r
+ Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]\r
+ else:\r
+ Filename = BaseName + Path.BaseName\r
+\r
+ #\r
+ # If -N is specified on command line, cache is disabled\r
+ # The directory has to be created\r
+ #\r
+ DbDir = os.path.split(GlobalData.gDatabasePath)[0]\r
+ if not os.path.exists(DbDir):\r
+ os.makedirs(DbDir)\r
+ #\r
+ # A temporary INF is copied to database path which must have write permission\r
+ # The temporary will be removed at the end of build\r
+ # In case of name conflict, the file name is \r
+ # FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)\r
+ #\r
+ TempFullPath = os.path.join(DbDir,\r
+ Filename)\r
+ RtPath = PathClass(Path.File, Workspace)\r
+ #\r
+ # Modify the full path to temporary path, keep other unchanged\r
+ #\r
+ # To build same module more than once, the module path with FILE_GUID overridden has\r
+ # the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path\r
+ # in DSC which is used as relative path by C files and other files in INF. \r
+ # A trick was used: all module paths are PathClass instances, after the initialization\r
+ # of PathClass, the PathClass.Path is overridden by the temporary INF path.\r
+ #\r
+ # The reason for creating a temporary INF is:\r
+ # Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,\r
+ # the key is the full path of INF, the value is an object to save overridden library instances, PCDs.\r
+ # A different key for the same module is needed to create different output directory,\r
+ # retrieve overridden PCDs, library instances.\r
+ #\r
+ # The BaseName is the FILE_GUID which is also the output directory name.\r
+ #\r
+ #\r
+ RtPath.Path = TempFullPath\r
+ RtPath.BaseName = BaseName\r
+ #\r
+ # If file exists, compare contents\r
+ #\r
+ if os.path.exists(TempFullPath):\r
+ with open(str(Path), 'rb') as f1: Src = f1.read()\r
+ with open(TempFullPath, 'rb') as f2: Dst = f2.read()\r
+ if Src == Dst:\r
+ return RtPath\r
+ GlobalData.gTempInfs.append(TempFullPath)\r
+ shutil.copy2(str(Path), TempFullPath)\r
+ return RtPath\r
+\r
+## Remove temporary created INFs whose paths were saved in gTempInfs\r
+#\r
+def ClearDuplicatedInf():\r
+ for File in GlobalData.gTempInfs:\r
+ if os.path.exists(File):\r
+ os.remove(File)\r
+\r
## callback routine for processing variable option\r
#\r
# This function can be used to process variable number of option values. The\r
def GuidStringToGuidStructureString(Guid):\r
GuidList = Guid.split('-')\r
Result = '{'\r
- for Index in range(0,3,1):\r
+ for Index in range(0, 3, 1):\r
Result = Result + '0x' + GuidList[Index] + ', '\r
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]\r
- for Index in range(0,12,2):\r
- Result = Result + ', 0x' + GuidList[4][Index:Index+2]\r
+ for Index in range(0, 12, 2):\r
+ Result = Result + ', 0x' + GuidList[4][Index:Index + 2]\r
Result += '}}'\r
return Result\r
\r
Fd.write(Content)\r
Fd.close()\r
except IOError, X:\r
- EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s'%X)\r
+ EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)\r
\r
return True\r
\r
#\r
# @retval A list of all files\r
#\r
-def GetFiles(Root, SkipList=None, FullPath = True):\r
+def GetFiles(Root, SkipList=None, FullPath=True):\r
OriPath = Root\r
FileList = []\r
for Root, Dirs, Files in os.walk(Root):\r
return NewFile\r
\r
def RealPath2(File, Dir='', OverrideDir=''):\r
+ NewFile = None\r
if OverrideDir:\r
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]\r
if NewFile:\r
if OverrideDir[-1] == os.path.sep:\r
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]\r
else:\r
- return NewFile[len(OverrideDir)+1:], NewFile[0:len(OverrideDir)]\r
+ return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]\r
if GlobalData.gAllFiles:\r
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]\r
- else:\r
+ if not NewFile:\r
NewFile = os.path.normpath(os.path.join(Dir, File))\r
+ if not os.path.exists(NewFile):\r
+ return None, None\r
if NewFile:\r
if Dir:\r
if Dir[-1] == os.path.sep:\r
return NewFile[len(Dir):], NewFile[0:len(Dir)]\r
else:\r
- return NewFile[len(Dir)+1:], NewFile[0:len(Dir)]\r
+ return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]\r
else:\r
return NewFile, ''\r
\r
# Replace the default dir to current dir\r
if Dir == '.':\r
Dir = os.getcwd()\r
- Dir = Dir[len(Workspace)+1:]\r
+ Dir = Dir[len(Workspace) + 1:]\r
\r
# First check if File has Edk definition itself\r
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:\r
# Dir is current module dir related to workspace\r
if Dir == '.':\r
Dir = os.getcwd()\r
- Dir = Dir[len(Workspace)+1:]\r
+ Dir = Dir[len(Workspace) + 1:]\r
\r
NewFile = File\r
RelaPath = AllFiles[os.path.normpath(Dir)]\r
#\r
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint\r
#\r
- for PlaceHolder,Start,End in PlaceHolderList:\r
+ for PlaceHolder, Start, End in PlaceHolderList:\r
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])\r
self._SubSectionList.append(TemplateSection[Start:End])\r
self._PlaceHolderList.append(PlaceHolder)\r
if len(key) > 1:\r
RestKeys = key[1:]\r
elif self._Level_ > 1:\r
- RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]\r
+ RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]\r
else:\r
FirstKey = key\r
if self._Level_ > 1:\r
- RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]\r
+ RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]\r
\r
if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:\r
FirstKey = self._Wildcard\r
if len(key) > 1:\r
RestKeys = key[1:]\r
else:\r
- RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]\r
+ RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]\r
else:\r
FirstKey = key\r
if self._Level_ > 1:\r
- RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]\r
+ RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]\r
\r
if FirstKey in self._ValidWildcardList:\r
FirstKey = self._Wildcard\r
Pair += 1\r
elif ch == ')' and not InStr:\r
Pair -= 1\r
- \r
+\r
if (Pair > 0 or InStr) and ch == TAB_VALUE_SPLIT:\r
NewStr += '-'\r
else:\r
IsValid = (len(FieldList) <= 3)\r
else:\r
IsValid = (len(FieldList) <= 1)\r
- return [Value, Type, Size], IsValid, 0 \r
+ return [Value, Type, Size], IsValid, 0\r
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):\r
VpdOffset = FieldList[0]\r
Value = Size = ''\r
return [VpdOffset, Size, Value], IsValid, 2\r
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):\r
HiiString = FieldList[0]\r
- Guid = Offset = Value = ''\r
+ Guid = Offset = Value = Attribute = ''\r
if len(FieldList) > 1:\r
Guid = FieldList[1]\r
if len(FieldList) > 2:\r
Offset = FieldList[2]\r
if len(FieldList) > 3:\r
Value = FieldList[3]\r
- IsValid = (3 <= len(FieldList) <= 4)\r
- return [HiiString, Guid, Offset, Value], IsValid, 3\r
+ if len(FieldList) > 4:\r
+ Attribute = FieldList[4]\r
+ IsValid = (3 <= len(FieldList) <= 5)\r
+ return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3\r
return [], False, 0\r
\r
## AnalyzePcdData\r
# \r
# @retval ValueList: A List contain value, datum type and toke number. \r
#\r
-def AnalyzePcdData(Setting): \r
- ValueList = ['', '', ''] \r
- \r
- ValueRe = re.compile(r'^\s*L?\".*\|.*\"')\r
+def AnalyzePcdData(Setting):\r
+ ValueList = ['', '', '']\r
+\r
+ ValueRe = re.compile(r'^\s*L?\".*\|.*\"')\r
PtrValue = ValueRe.findall(Setting)\r
\r
ValueUpdateFlag = False\r
\r
if len(PtrValue) >= 1:\r
Setting = re.sub(ValueRe, '', Setting)\r
- ValueUpdateFlag = True \r
+ ValueUpdateFlag = True\r
\r
TokenList = Setting.split(TAB_VALUE_SPLIT)\r
ValueList[0:len(TokenList)] = TokenList\r
# \r
# @retval ValueList: A List contain VpdOffset, MaxDatumSize and InitialValue. \r
#\r
-def AnalyzeVpdPcdData(Setting): \r
- ValueList = ['', '', ''] \r
- \r
- ValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')\r
+def AnalyzeVpdPcdData(Setting):\r
+ ValueList = ['', '', '']\r
+\r
+ ValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')\r
PtrValue = ValueRe.findall(Setting)\r
\r
ValueUpdateFlag = False\r
\r
if len(PtrValue) >= 1:\r
Setting = re.sub(ValueRe, '', Setting)\r
- ValueUpdateFlag = True \r
+ ValueUpdateFlag = True\r
\r
TokenList = Setting.split(TAB_VALUE_SPLIT)\r
ValueList[0:len(TokenList)] = TokenList\r
#\r
def CheckPcdDatum(Type, Value):\r
if Type == "VOID*":\r
- ValueRe = re.compile(r'\s*L?\".*\"\s*$')\r
+ ValueRe = re.compile(r'\s*L?\".*\"\s*$')\r
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))\r
or (Value.startswith('{') and Value.endswith('}'))\r
):\r
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\\r
- ", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type) \r
+ ", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)\r
elif ValueRe.match(Value):\r
# Check the chars in UnicodeString or CString is printable\r
if Value.startswith("L"):\r
\r
if CurrentChar in ["/", "-"] and LastChar in [" ", "\t", "\r", "\n"]:\r
if Index > OptionStart:\r
- OptionList.append(OptionString[OptionStart:Index-1])\r
+ OptionList.append(OptionString[OptionStart:Index - 1])\r
OptionStart = Index\r
LastChar = CurrentChar\r
OptionList.append(OptionString[OptionStart:])\r
return os.path.sep.join(P1[:Index])\r
return os.path.sep.join(P1)\r
\r
+#\r
+# Convert string to C format array\r
+#\r
+def ConvertStringToByteArray(Value):\r
+ Value = Value.strip()\r
+ if not Value:\r
+ return None\r
+ if Value[0] == '{':\r
+ if not Value.endswith('}'):\r
+ return None\r
+ Value = Value.replace(' ', '').replace('{', '').replace('}', '')\r
+ ValFields = Value.split(',')\r
+ try:\r
+ for Index in range(len(ValFields)):\r
+ ValFields[Index] = str(int(ValFields[Index], 0))\r
+ except ValueError:\r
+ return None\r
+ Value = '{' + ','.join(ValFields) + '}'\r
+ return Value\r
+\r
+ Unicode = False\r
+ if Value.startswith('L"'):\r
+ if not Value.endswith('"'):\r
+ return None\r
+ Value = Value[1:]\r
+ Unicode = True\r
+ elif not Value.startswith('"') or not Value.endswith('"'):\r
+ return None\r
+\r
+ Value = eval(Value) # translate escape character\r
+ NewValue = '{'\r
+ for Index in range(0,len(Value)):\r
+ if Unicode:\r
+ NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ','\r
+ else:\r
+ NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ','\r
+ Value = NewValue + '0}'\r
+ return Value\r
+\r
class PathClass(object):\r
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,\r
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):\r
\r
# Remove any '.' and '..' in path\r
if self.Root:\r
+ self.Root = mws.getWs(self.Root, self.File)\r
self.Path = os.path.normpath(os.path.join(self.Root, self.File))\r
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))\r
# eliminate the side-effect of 'C:'\r
if self.Root[-1] == os.path.sep:\r
self.File = self.Path[len(self.Root):]\r
else:\r
- self.File = self.Path[len(self.Root)+1:]\r
+ self.File = self.Path[len(self.Root) + 1:]\r
else:\r
self.Path = os.path.normpath(self.File)\r
\r
RealFile = os.path.join(self.AlterRoot, self.File)\r
elif self.Root:\r
RealFile = os.path.join(self.Root, self.File)\r
- return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)\r
+ if len (mws.getPkgPath()) == 0:\r
+ return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)\r
+ else:\r
+ return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))\r
\r
ErrorCode = 0\r
ErrorInfo = ''\r
\r
self.AvailableSkuIds = sdict()\r
self.SkuIdSet = []\r
- \r
+ self.SkuIdNumberSet = []\r
if SkuIdentifier == '' or SkuIdentifier is None:\r
self.SkuIdSet = ['DEFAULT']\r
+ self.SkuIdNumberSet = ['0U']\r
elif SkuIdentifier == 'ALL':\r
self.SkuIdSet = SkuIds.keys()\r
+ self.SkuIdNumberSet = [num.strip() + 'U' for num in SkuIds.values()]\r
else:\r
r = SkuIdentifier.split('|') \r
self.SkuIdSet=[r[k].strip() for k in range(len(r))] \r
+ k = None\r
+ try: \r
+ self.SkuIdNumberSet = [SkuIds[k].strip() + 'U' for k in self.SkuIdSet] \r
+ except Exception:\r
+ EdkLogger.error("build", PARAMETER_INVALID,\r
+ ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"\r
+ % (k, " ".join(SkuIds.keys())))\r
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet and SkuIdentifier != 'ALL':\r
self.SkuIdSet.remove('DEFAULT')\r
- \r
+ self.SkuIdNumberSet.remove('0U')\r
for each in self.SkuIdSet:\r
if each in SkuIds:\r
self.AvailableSkuIds[each] = SkuIds[each]\r
return self.SkuIdSet[0]\r
else:\r
return 'DEFAULT'\r
- \r
+ def __GetAvailableSkuIdNumber(self):\r
+ return self.SkuIdNumberSet\r
SystemSkuId = property(__GetSystemSkuID)\r
AvailableSkuIdSet = property(__GetAvailableSkuIds)\r
SkuUsageType = property(__SkuUsageType)\r
+ AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)\r
+\r
+#\r
+# Pack a registry format GUID\r
+#\r
+def PackRegistryFormatGuid(Guid):\r
+ Guid = Guid.split('-')\r
+ return pack('=LHHBBBBBBBB',\r
+ int(Guid[0], 16),\r
+ int(Guid[1], 16),\r
+ int(Guid[2], 16),\r
+ int(Guid[3][-4:-2], 16),\r
+ int(Guid[3][-2:], 16),\r
+ int(Guid[4][-12:-10], 16),\r
+ int(Guid[4][-10:-8], 16),\r
+ int(Guid[4][-8:-6], 16),\r
+ int(Guid[4][-6:-4], 16),\r
+ int(Guid[4][-4:-2], 16),\r
+ int(Guid[4][-2:], 16)\r
+ )\r
\r
##\r
#\r