初始提交

This commit is contained in:
2025-05-13 22:00:58 +08:00
commit e4c030b0c0
564 changed files with 78858 additions and 0 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

223
build/script/nv/build_utils.py Executable file
View File

@ -0,0 +1,223 @@
#!/usr/bin/env python3
# coding=utf-8
# Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2021-2022. All rights reserved.
"""
* Description: Utilities of compile system.
* Create: 2020-1-2
"""
import os
import sys
import stat
import time
import subprocess
import re
import shutil
import logging
"""
Colors defines. To highlight important output.
"""
__colors__ = {'purple':'\033[95m', 'red':'\033[91m', 'blue':'\033[94m', 'green':'\033[92m', 'end':'\033[0m'}
"""
Error handling, highlight in red.
"""
class BuildError(Exception):
    """Build failure raised by the compile scripts; message is shown in red."""
    def __init__(self, err):
        super().__init__("%s%s%s" % (color_red(), err, color_end()))
# End of class BuildError
"""
timer
"""
class BuildTimer:
    """Simple wall-clock stopwatch used to time build steps."""

    def __init__(self, name='A'):
        self._name = name
        self._start = -1  # -1 means "not running"

    def start(self):
        """Record the current time as the measurement origin."""
        self._start = time.time()

    def stop(self):
        """Return elapsed seconds since start() and reset the timer.

        Raises BuildError if the timer was never started.
        """
        if self._start == -1:
            raise BuildError("Timer %s never been started!" % self._name)
        elapsed = time.time() - self._start
        self._start = -1
        return elapsed
# End of class BuildTimer
def color_red():
    """ANSI escape for red text."""
    return __colors__['red']

def color_purple():
    """ANSI escape for purple text."""
    return __colors__['purple']

def color_blue():
    """ANSI escape for blue text."""
    return __colors__['blue']

def color_green():
    """ANSI escape for green text."""
    return __colors__['green']

def color_end():
    """ANSI reset code that terminates a colored span."""
    return __colors__['end']
def print_info(msg):
    """Plain, uncolored console message."""
    print(msg)

def print_tips(msg):
    """Console message highlighted in purple."""
    print(f"{color_purple()}{msg}{color_end()}")

def print_warning(msg):
    """Console message highlighted in green."""
    print(f"{color_green()}{msg}{color_end()}")

def print_alert(msg):
    """Console message highlighted in red."""
    print(f"{color_red()}{msg}{color_end()}")
def fn_filter_dirs(dirs, filters=None):
    """Return a copy of *dirs* without any path containing a filtered folder.

    Args:
        dirs: iterable of directory/file paths.
        filters: folder names to exclude; each is matched as a whole path
            component (i.e. surrounded by os.sep). None/empty keeps all.

    Returns:
        New list with matching paths removed.
    """
    # The original used a mutable default argument (filters=[]).
    filters = filters or []
    retval = list(dirs)
    for dir_path in dirs:
        for item in filters:
            fstr = "%s%s%s" % (os.sep, item, os.sep)
            if fstr in dir_path:
                print("remove dir_path:%s" % dir_path)
                retval.remove(dir_path)
                # Already removed: matching a second filter would raise
                # ValueError (the original caught and printed it instead).
                break
    return retval
def fn_search_all_files(top_dir, file_name, excludes=None):
    """Traverse sub-folders to find all files named *file_name*.

    Hidden folders (names starting with '.') are pruned from the walk, and
    results whose paths contain a folder listed in *excludes* are dropped.

    Args:
        top_dir: root directory of the search (symlinks are followed).
        file_name: exact file name to look for.
        excludes: folder names to filter out of the results.

    Returns:
        List of full paths to the matching files.
    """
    excludes = excludes or []
    retval = []
    for dir_path, dir_names, file_names in os.walk(top_dir, followlinks=True):
        # Prune hidden folders in place so os.walk skips them. The original
        # removed items while iterating (skipping entries) and then rebound
        # dir_names to a list of None, which defeated the pruning entirely.
        dir_names[:] = [d for d in dir_names if not d.startswith(".")]
        if file_name in file_names:
            retval.append(os.path.join(dir_path, file_name))
    return fn_filter_dirs(retval, excludes)
def fn_search_all_dirs(top_dir, dir_name, excludes=None):
    """Traverse sub-folders to find all directories named *dir_name*.

    Hidden folders (names starting with '.') are skipped, and results whose
    paths contain a folder listed in *excludes* are dropped.

    Args:
        top_dir: root directory of the search (symlinks are followed).
        dir_name: exact directory name to look for.
        excludes: folder names to filter out of the results.

    Returns:
        List of full paths to the matching directories.
    """
    excludes = excludes or []
    retval = []
    for dir_path, dir_names, file_names in os.walk(top_dir, followlinks=True):
        # Prune hidden folders in place so os.walk does not descend into
        # them (the original only filtered a copy, so they were still
        # traversed despite the "remove useless folder first" intent).
        dir_names[:] = [d for d in dir_names if not d.startswith(".")]
        if dir_name in dir_names:
            retval.append(os.path.join(dir_path, dir_name))
    return fn_filter_dirs(retval, excludes)
def fn_get_subdirs(dir_path):
    """Return the sorted names of non-hidden immediate sub-directories."""
    subdirs = []
    for entry in os.listdir(dir_path):
        if entry.startswith('.'):
            continue
        if os.path.isdir(os.path.join(dir_path, entry)):
            subdirs.append(entry)
    subdirs.sort()
    return subdirs
def fn_str_to_int(text, num=None):
    """Parse *text* as an integer.

    If *num* (a radix) is given it is used directly; otherwise a leading
    '0x' / '0X' (optionally preceded by whitespace) selects base 16 and
    anything else is parsed as base 10.
    """
    if num is not None:
        return int(text, num)
    looks_hex = re.match(r'\s*0[xX]', text)
    return int(text, 16) if looks_hex else int(text, 10)
"""
Convert build error from scons to string.
"""
def bf_to_str(bf):
    """Render a SCons BuildError-like object as a readable message.

    Prefers the failing node, then the file name, then str(bf).
    """
    if bf is None:
        return '(unknown targets product None in list)'
    if bf.node:
        return '%s: %s' % (str(bf.node), bf.errstr)
    if bf.filename:
        return '%s: %s' % (bf.filename, bf.errstr)
    return str(bf)
"""
call shell
"""
def exec_shell(cmd, logfile=None):
    """Run *cmd* without a shell, streaming combined stdout/stderr through
    the root logger.

    Args:
        cmd: command as an argv list, or a string split on single spaces.
        logfile: optional path; when given, output is also written there
            (any existing file is removed first).

    Returns:
        The subprocess return code.

    Raises:
        Exception: if the command cannot be started or execution fails.
    """
    cmdlist = cmd.split(' ') if isinstance(cmd, str) else cmd
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler(sys.stdout))
    logfp = None
    if logfile:
        if os.path.isfile(logfile):
            os.unlink(logfile)
        logfp = logging.FileHandler(logfile, encoding='utf-8')
        logger.addHandler(logfp)
    try:
        logging.info(str(cmdlist))
        logging.info('\n')
        subp = subprocess.Popen(cmdlist, shell=False, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        while True:
            # errors='replace' makes UnicodeDecodeError impossible here, so
            # the old per-line try/except was dropped as dead code.
            output = subp.stdout.readline().decode(encoding='UTF-8', errors='replace')
            if output == '' and subp.poll() is not None:
                break
            if output:
                logging.info(output.strip())
        return subp.returncode
    except Exception as err:
        logging.error(err)
        raise Exception(err) from err
    finally:
        # Detach the per-call file handler; the original closed it but left
        # it on the root logger, so later calls skipped file logging and
        # wrote to a closed stream.
        if logfp:
            logger.removeHandler(logfp)
            logfp.close()
def add_temp_sys_path(path):
    """Prepend *path* to the PATH environment variable if not already there.

    Args:
        path: absolute path, or a path relative to the current directory.

    Returns:
        The resulting value of os.environ['PATH'].
    """
    env_path = os.environ.get('PATH', '')
    if path.startswith(os.sep):
        work_path = path
    else:
        work_path = os.path.join(os.getcwd(), path)
    # Compare whole PATH entries — the original substring test could
    # false-match a longer entry — and join with os.pathsep instead of a
    # hard-coded ':' so this also works on Windows.
    if work_path not in env_path.split(os.pathsep):
        os.environ['PATH'] = os.pathsep.join([work_path, env_path])
    return os.environ.get('PATH')
def rm_all(items):
    """Delete every path in *items*: directories recursively, files directly.

    Paths that are neither (e.g. missing) are silently skipped.
    """
    for path in items:
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.isfile(path):
            os.unlink(path)
def rm_pyc(root):
    """Remove every __pycache__ directory found under *root*."""
    pyc_dirs = fn_search_all_dirs(root, "__pycache__")
    rm_all(pyc_dirs)
def get_diff(list0, list1):
    """Return the elements of *list0* absent from *list1* (order undefined,
    duplicates collapsed)."""
    return list(set(list0) - set(list1))

174
build/script/nv/conf_parser.py Executable file
View File

@ -0,0 +1,174 @@
#!/usr/bin/env python3
# coding=utf-8
# Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2021-2022. All rights reserved.
"""
* Description: Settings parsers.
* Create: 2020-1-2
"""
import os
import json
from build_utils import color_red
from build_utils import color_end
__all__ = ["MconfParser", "BuildConfParser"]
def nv_repeat_check(pairs):
    """json object_pairs_hook that rejects duplicate keys.

    Args:
        pairs: list of (key, value) tuples from the JSON parser.

    Returns:
        dict built from *pairs*.

    Raises:
        Exception: when a key appears more than once.
    """
    # Set membership is O(1); the original used a list (O(n) per key).
    seen = set()
    for key, _ in pairs:
        if key in seen:
            raise Exception("nv items(%s) repeat" % key)
        seen.add(key)
    return dict(pairs)
class ParserError(Exception):
    """Configuration-parsing failure; message is highlighted in red."""
    def __init__(self, err):
        super().__init__("%s%s%s" % (color_red(), err, color_end()))
"""
Json format config file parser
"""
class BuildConfParser:
    """JSON-format config file parser.

    Loads the file twice: once plainly (conf_data) and once with duplicate
    NV-key detection via nv_repeat_check (nvconf_data). String values that
    start with '###' are evaluated as restricted os.path.join expressions.
    """

    def __init__(self, conf_path):
        # Raises ParserError when the file is missing or is not valid JSON.
        if not os.path.isfile(conf_path):
            raise ParserError("Configration file %s NOT found!" % conf_path)
        with open(conf_path, 'r', encoding='utf-8') as conf:
            try:
                myread = conf.read()
                self.conf_data = json.loads(myread)
                # Second parse rejects repeated keys in the same object.
                self.nvconf_data = json.loads(myread, object_pairs_hook=nv_repeat_check)
            except Exception as err:
                msg = "%s\nParsing file:%s" % (err, conf_path)
                raise ParserError(msg)
        # Expand '###' path expressions in both copies (mutates in place).
        self.conf_data = self._parse(self.conf_data)
        self.nvconf_data = self._parse(self.nvconf_data)

    def get_conf_data(self):
        """Return the fully parsed configuration dict."""
        return self.conf_data

    def get_nvconf_data(self):
        """Return the duplicate-checked configuration dict."""
        return self.nvconf_data

    def get(self, option):
        """Return a single top-level option value (None when absent)."""
        return self.conf_data.get(option)

    def _parse(self, data):
        """Recursively expand the '###'-prefixed python sentences in *data*.

        Mutates and returns the same dict.
        """
        for key, value in data.items():
            if isinstance(value, dict):
                # Recursion
                value = self._parse(value)
            if isinstance(value, list):
                # Recursion
                data[key] = self._parse_list(value)
            if isinstance(value, int):
                data[key] = value
            if isinstance(value, str) and value.startswith('###'):
                value = self._exec(value)
                data[key] = value
        return data

    def _parse_list(self, values):
        """Expand '###' expressions in every element of a list."""
        new_list = []
        for val in values:
            if type(val) is str and val.startswith('###'):
                value = self._exec(val)
                new_list.append(value)
            elif isinstance(val, dict):
                new_list.append(self._parse(val))
            else:
                new_list.append(val)
        return new_list

    def _exec(self, code):
        """
        Execute the simple python sentence.
        For the security reason, only allows 'os.path.join' to be input, as a path string
        to support multiple platforms.
        If it needs to support more python features, please use compile and eval, but careful about
        the security issues.
        """
        start = code.find("os.path.join")
        if start < 0:
            raise ParserError("The input doesn't support!")
        lpt = code.find("(")
        if lpt < 0 or lpt < start:
            raise ParserError("The input doesn't support!")
        rpt = code.find(")")
        if rpt < 0 or rpt < lpt:
            raise ParserError("The input doesn't support!")
        # Join the comma-separated arguments, stripping quotes and spaces.
        path_parts = code[lpt + 1:rpt].split(",")
        ret = ""
        for part in path_parts:
            ret = os.path.join(ret, part.lstrip(" '\"").rstrip(" '\""))
        return ret
"""
Menuconfig format config file parser
"""
class MconfParser:
    """Parser for menuconfig-style "KEY=value" settings files."""

    def __init__(self, conf_path):
        """Load and parse *conf_path*; raises ParserError when it is missing."""
        if not os.path.isfile(conf_path):
            raise ParserError("Configration file %s NOT found!" % conf_path)
        with open(conf_path, 'r', encoding='utf-8') as conf:
            self.conf_data = conf.readlines()
        self.conf_data = self._parse(self.conf_data)

    def get(self, option):
        """Return the value of *option*; absent options read as 'n' (false).

        Surrounding double quotes are stripped from string values.
        """
        value = self.conf_data.get(option)
        if value is None:
            return 'n'
        return value.replace('"', '')

    def _parse(self, lines):
        """Build a {key: value} dict from the valid KEY=value lines."""
        settings = {}
        for line in lines:
            if not self._option_is_valid(line):
                continue
            key, value = self._parse_option(line)
            settings[key] = value.strip().replace('\n', '').replace('\r', '')
        return settings

    def _option_is_valid(self, option):
        """False for blank lines and '#' comments."""
        stripped = option.strip()
        return bool(stripped) and not stripped.startswith('#')

    def _parse_option(self, option):
        """Split a 'KEY=value' line; raises ParserError on any other shape."""
        parts = option.split('=')
        if len(parts) != 2:
            raise ParserError("Unknow format of the option:%s" % option)
        return parts[0], parts[1]
def test():
    """Manual smoke test; requires the build config files to be present."""
    conf_parser = BuildConfParser("build/config/riscv32_toolchain.json")
    print(conf_parser.get('TargetFolder'))
    menu_parser = MconfParser("build/config/settings.json")
    print(menu_parser.get('CONFIG_TARGET_SOFT_VER'))

if __name__ == "__main__":
    test()

View File

@ -0,0 +1,423 @@
#!/usr/bin/env python3
# coding=utf-8
# Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2021-2022. All rights reserved.
"""
* Description: NV binary create.
* Create: 2020-3-10
"""
from ctypes import *
import os
import re
import ctypes
import sys
import pycparser
from parse_msgdefs import Visitor
from conf_parser import ParserError
# Module-level bitfield packing state shared by generate_data_stream
# (updated by get_bitfield_value / recursion_parse between members).
nextIsBitfield = 0    # 1 when the following struct member is also a bitfield
bitField = 0          # bits accumulated but not yet flushed to the stream
bitLength = 0         # number of bits currently held in bitField
lastByteLength = 0    # sizeof() of the previous bitfield member's base type
totalByteLen = 0      # bytes flushed for the current bitfield run
baseByteSize = 0      # alignment unit (bytes) for the current bitfield run
# type define
class generate_data_stream:
    """Serialise configured NV values into little-endian byte streams.

    Structure layouts come from ctypes type definitions collected by a
    parse_msgdefs.Visitor from pycparser ASTs; values come from the JSON NV
    configuration. Bitfield packing state lives in module-level globals.
    """

    def __init__(self):
        # Accumulated typedef-name -> ctypes-type mapping.
        self.v = Visitor()

    def phase_etypes(self, file):
        """Parse a preprocessed C file and merge its typedefs into self.v."""
        code = pycparser.parse_file(file)
        tmp = Visitor()
        tmp.visit(code)
        self.v.typedefs.update(tmp.typedefs)

    def is_dec(self, s):
        """Return True when *s* parses as a base-10 integer."""
        try:
            int(s)
            return True
        except ValueError:
            pass
        return False

    def is_hex(self, s):
        """Return True when *s* parses as a base-16 integer."""
        try:
            int(s, 16)
            return True
        except ValueError:
            pass
        return False

    def byte_len(self, value):
        """Smallest of 1/2/4/8 bytes that can hold non-negative *value*."""
        if value < 256:
            return 1
        elif value < 65536:
            return 2
        elif value < 4294967296:
            return 4
        else:
            return 8

    # A configured value can take several shapes, each handled separately:
    # 1. plain number          2. plain string
    # 3. enum                  4. simple array
    # 5. struct                6. struct array
    # 7. pointer               8. pointer array
    def get_value(self, value_str):
        """Parse a decimal or hex scalar; returns None when neither matches."""
        if self.is_dec(value_str):
            value = int(value_str)
            return value
        elif self.is_hex(value_str):
            value = int(value_str, 16)
            return value
        return None

    def get_char_value(self, value_str):
        """Return the byte value of an int or of a single-character string."""
        if type(value_str) is int:
            return value_str
        if len(value_str) == 1:
            return ord(value_str)
        return None

    def get_value_for_array(self, value_str):
        """Split an "[a, b, c]"-style string into its element tokens."""
        if '[' not in value_str or ']' not in value_str:
            return None
        value_line_list = list(filter(None, re.split(r'[;,\s\"\[\]]\s*', value_str)))
        return value_line_list

    def get_value_for_char_array(self, value_str):
        """Split a double-quoted string into a list of its characters."""
        if '\"' not in value_str:
            return None
        value_str = value_str.replace('\"', '')
        value_line_list = [x for x in value_str]
        return value_line_list

    def get_value_str(self, value):
        """Extract the right-hand sides of "name = value" lines."""
        value_line_list = list(filter(None, value.split('\n')))
        value_list = []
        for i in range(len(value_line_list)):
            tmp_list = list(filter(None, value_line_list[i].split(' = ')))
            value_list.append(tmp_list[1])
        return value_list

    def get_enum_value(self, enum_fields, enum_str):
        """Look up *enum_str* in an enum type; raises ParserError if absent."""
        if 1:
            enum_value = enum_fields.members.get(enum_str)
            if enum_value is None:
                msg = "[error] [%s] not a enum value, please check!!" % enum_str
                raise ParserError(msg)
            return enum_value
        # NOTE(review): unreachable fallback left over from an earlier version.
        for field in enum_fields.members:
            if enum_str == field:
                return enum_fields.members[field]

    def get_bitfield_value(self, fields, typename, value):
        """Pack *value* into the running bitfield accumulator.

        Uses the module-level globals to carry state between consecutive
        bitfield members; returns whatever bytes became complete.

        Raises ParserError when *value* exceeds the declared bit width.
        """
        global nextIsBitfield
        global bitField
        global bitLength
        global lastByteLength
        global totalByteLen
        global baseByteSize
        bufferData = b''
        bitsize = self.get_value(fields.bitsize)
        bitLength += bitsize
        if value.bit_length() > bitsize:
            msg = "[error] [%s]'s value exceeds its bit width!!" % typename
            raise ParserError(msg)
        if bitLength == bitsize:
            # First member of a new bitfield run: its base type sets alignment.
            baseByteSize = sizeof(fields)
            totalByteLen = 0
            writeLen = 0
        # Same base type as the previous member: no cross-type packing.
        if bitLength > bitsize and sizeof(fields) == lastByteLength:
            if bitLength > baseByteSize * 8:
                writelen = max(self.byte_len(bitField), lastByteLength)
                bufferData += bitField.to_bytes(writelen, byteorder="little", signed=True) if bitField < 0 \
                    else bitField.to_bytes(writelen, byteorder="little", signed=False)
                totalByteLen += writelen
                bitField = 0
                bitLength = bitsize  # remember the bits not yet written
        # Base type differs from the previous member: consider packing.
        elif bitLength > bitsize and sizeof(fields) != lastByteLength:
            baseByteSize = sizeof(fields) if sizeof(fields) > lastByteLength else lastByteLength
            if bitLength > baseByteSize * 8:  # overflow: flush without packing
                writelen = max(self.byte_len(bitField), lastByteLength)
                bufferData += bitField.to_bytes(writelen, byteorder="little", signed=True) if bitField < 0 \
                    else bitField.to_bytes(writelen, byteorder="little", signed=False)
                totalByteLen += writelen
                # Alignment padding.
                if totalByteLen % sizeof(fields) != 0:
                    bitField = 0
                    alignByteLen = sizeof(fields) - totalByteLen if sizeof(fields) > totalByteLen \
                        else totalByteLen - sizeof(fields)
                    bufferData += bitField.to_bytes(alignByteLen, byteorder="little", signed=True) if bitField < 0 \
                        else bitField.to_bytes(alignByteLen, byteorder="little", signed=False)
                    totalByteLen += alignByteLen
                bitField = 0
                bitLength = bitsize
            if totalByteLen % baseByteSize != 0:  # unaligned case: no packing
                writelen = max(self.byte_len(bitField), lastByteLength)
                bufferData += bitField.to_bytes(writelen, byteorder="little", signed=True) if bitField < 0 \
                    else bitField.to_bytes(writelen, byteorder="little", signed=False)
                totalByteLen += writelen
                bitField = 0
                bitLength = bitsize
        lastByteLength = sizeof(fields)
        if bitLength > bitsize:
            bitField = (bitField | (value << (bitLength - bitsize)))
        else:
            bitField = value
        if bitLength == (sizeof(fields) * 8):
            # The accumulator exactly fills the base type: flush it.
            writelen = max(self.byte_len(bitField), sizeof(fields))
            bufferData += bitField.to_bytes(writelen, byteorder="little", signed=True) if bitField < 0 \
                else bitField.to_bytes(writelen, byteorder="little", signed=False)
            totalByteLen += writelen
            bitField = 0
            bitLength = 0
            return bufferData
        if nextIsBitfield == 0:
            # Next member is not a bitfield: flush now. If a nested struct
            # follows and itself starts with a bitfield, packing may still
            # need to be considered.
            writelen = max(self.byte_len(bitField), sizeof(fields))
            bufferData += bitField.to_bytes(writelen, byteorder="little", signed=True) if bitField < 0 \
                else bitField.to_bytes(writelen, byteorder="little", signed=False)
            totalByteLen += writelen
            bitField = 0
            bitLength = 0
            if totalByteLen % baseByteSize != 0:
                bitField = 0
                alignByteLen = baseByteSize - (totalByteLen % baseByteSize)
                bufferData += bitField.to_bytes(alignByteLen, byteorder="little", signed=True) if bitField < 0 \
                    else bitField.to_bytes(alignByteLen, byteorder="little", signed=False)
        return bufferData

    def print_type(self, typename, value, isBaseType, isEnum, isSomeKindOfArray, isUnion, isPointer):
        """Debug helper: print which classification matched for *typename*."""
        print("%s, value: %s, isBaseType :" % (typename, value), isBaseType) if isBaseType else None
        print("%s, value: %s, isEnum :" % (typename, value), isEnum) if isEnum else None
        print("%s, value: %s, isSomeKindOfArray :" % (typename, value), isSomeKindOfArray) if isSomeKindOfArray else None
        print("%s, value: %s, isUnion :" % (typename, value), isUnion) if isUnion else None
        print("%s, value: %s, isPointer :" % (typename, value), isPointer) if isPointer else None

    def recursion_parse(self, fields, type_name, value):
        """Recursively serialise *value* according to the ctypes type *fields*.

        Returns the little-endian byte stream for this member; raises
        ParserError when the value shape does not match the type.
        """
        global nextIsBitfield
        isBitfield = hasattr(fields, "bitsize")
        isBaseType = not hasattr(fields, "_fields_")
        isEnum = hasattr(fields, "members")
        isSomeKindOfArray = issubclass(fields, ctypes.Array)
        isUnion = isinstance(fields, ctypes.Union)
        isPointer = hasattr(fields, "contents")
        #self.print_type(type_name, value, isBaseType, isEnum, isSomeKindOfArray, isUnion, isPointer)
        if ((isBaseType and not isSomeKindOfArray) or isEnum or isPointer) and \
                (type(value) is list or type(value) is dict):
            msg = "[error] [%s] is not a array or a structrue, the value cannot be a list or a dict!!" % type_name
            raise ParserError(msg)
        if not ((isBaseType and not isSomeKindOfArray) or isEnum or isPointer) and \
                not (type(value) is list or type(value) is dict):
            msg = "[error] [%s] is a array or a structrue, the value must be a list or a dict!!" % type_name
            raise ParserError(msg)
        if isUnion and not isSomeKindOfArray and type(value) is list:
            msg = "[error] [%s] is a union, the value must be a hex or int or a dict!!" % type_name
            raise ParserError(msg)
        buffer = b''
        if isEnum:
            # Enums serialise as their integer value, little-endian.
            buffer += self.get_enum_value(fields, value).to_bytes(sizeof(fields), byteorder="little")
            return buffer
        if isBaseType and not isSomeKindOfArray:
            if sizeof(fields) == 1 and \
                    isinstance(value, str) and \
                    not value.startswith('0x') and not value.startswith('0X'):
                # A single byte given as a character rather than a number.
                value = self.get_char_value(value)
            else:
                value = self.get_value(value)
            if isBitfield:
                buffer += self.get_bitfield_value(fields, type_name, value)
            else:
                buffer += value.to_bytes(sizeof(fields), byteorder="little", signed=True) if value < 0 \
                    else value.to_bytes(sizeof(fields), byteorder="little", signed=False)
            return buffer
        if isUnion and not isSomeKindOfArray:
            # A union is assigned either as a whole value or via a member
            # dict; a plain list cannot identify which member to set.
            if type(value) is not dict:
                buffer += self.get_value(value).to_bytes(sizeof(fields), byteorder="little")
            else:
                # TODO: add parsing of dict-style union member assignment.
                pass
            return buffer
        if isSomeKindOfArray:
            if type(value) is not list:
                msg = "[error] [%s] is a array, the value must be a list!!" % type_name
                raise ParserError(msg)
            idx = 0
            for val in value:
                buffer += self.recursion_parse(fields._type_, '%s[%d]' % (type_name, idx), val)
                idx += 1
            if len(buffer) > sizeof(fields):
                msg = "[error] the value is oversized the array: [%s]!!" % type_name
                raise ParserError(msg)
            # Zero-pad up to the declared array size.
            buffer += bytearray(sizeof(fields) - len(buffer))
            return buffer
        all_types = fields._fields_
        if type(value) is list:
            # Positional struct assignment: consume values in field order;
            # remaining members default to zero.
            typesLen = len(all_types)
            typesIdx = 0
            for (item, item_class) in all_types:
                if len(value) == 0:
                    buffer += bytearray(sizeof(item_class))
                    continue
                if typesIdx + 1 < typesLen:
                    # Tell the bitfield packer whether another bitfield follows.
                    nextField = all_types[typesIdx + 1][1]
                    nextIsBitfield = hasattr(nextField, "bitsize")
                else:
                    nextIsBitfield = 0
                buffer += self.recursion_parse(item_class, item, value[0])
                del(value[0])
                typesIdx += 1
            if len(value) != 0:
                msg = "[error] the value is not match the type: [%s]!!" % type_name
                raise ParserError(msg)
            return buffer
        if type(value) is dict:
            # Named struct assignment: absent members default to zero.
            typesLen = len(all_types)
            typesIdx = 0
            for (item, item_class) in all_types:
                item_value = value.get(item)
                if item_value is None:
                    buffer += bytearray(sizeof(item_class))
                    continue
                if typesIdx + 1 < typesLen:
                    nextField = all_types[typesIdx + 1][1]
                    nextIsBitfield = hasattr(nextField, "bitsize")
                else:
                    nextIsBitfield = 0
                buffer += self.recursion_parse(item_class, item, item_value)
                typesIdx += 1
            return buffer

    def generate(self, struct_name, value):
        """Serialise *value* for typedef *struct_name*.

        Returns (byte stream, length); raises ParserError for unknown types.
        """
        fields = self.v.typedefs.get(struct_name)
        if fields is None:
            msg = "[error] not found the type [%s]!" % struct_name
            raise ParserError(msg)
        buffer = self.recursion_parse(fields, struct_name, value)
        return buffer, len(buffer)
#DEBUG
if __name__ == "__main__":
    # Debug entry point: python generate_data_stream.py <struct_name> <etypes_file>
    class g_env:
        # Stand-in for the build environment's flash layout constants.
        flash_size = 0x3000
        protocolKvIndex = 0
        appKvIndex = 0x1000
        secureKvindex = 0x2000
    KV_STORE_DIR = {'security' : 0, 'protocol' : 1, 'application' : 2, 'asset' : 3}
    KV_STATUS_DIR = {'alive' : 0xffff, 'reserved' : 0, 'deprecated' : 1}
    KV_PAGE_ID = {'security' : 0xcb7e, 'protocol' : 0xda81, 'application' : 0x254d, 'backup' : 0x34b2}
    # Sample key/value definitions used to exercise the serialiser.
    g_kv_pairs = {
        'yml_test_eg' : {
            'value' : {
                'num_supported_bands' : 1,
                'band_ids' : [8, 7, 6]
            },
            'permanence': False
        },
        'yml_test_eg2_mixed' : {
            'value' : {
                'param1' : 1,
                'param2' : [[1,2,], [3,4], [5]]
            },
            'permanence': False
        },
        'yml_test_eg2' : {
            'value' : [1, [[1,2,], [3,4]]],
            'permanence': False
        },
        'test_nv_type_nest_deep' : {
            'value' : [1, "C", 0,
                [[1, 0, 0x2222, [["TEST_NV_ENUM_1"], ["TEST_NV_ENUM_2"]]],
                [3, 0, 0x4444, [["TEST_NV_ENUM_3"], ["TEST_NV_ENUM_4"]]]]
            ],
            'permanence': False
        },
        'test_nv_type_nest_deep____pure_value' : {
            'value' : [1, 0, "C",
                [[1, 0x2222, 0, [[0x1111], [0x2222]]]],
                [3, 0x4444, 0, [[0x33334444], [0x5555]]]
            ],
            'permanence': False
        },
        'test_nv_type_nest_deep___dict' : {
            'value' : [1, [[1,2,], [3,4]]],
            'permanence': False
        },
        'test_nv_type_nest_deep___mixed' : {
            'value' : [1, [[1,2,], [3,4]]],
            'permanence': False
        }
    }
    struct_name = sys.argv[1]
    etypes = sys.argv[2]
    test = generate_data_stream()
    test.phase_etypes(etypes)
    # Pre-fill the NV image with the flash erase value 0xFF.
    nv_file_Bin = bytearray(g_env.flash_size)
    for i in range(0, g_env.flash_size):
        nv_file_Bin[i] = 0xFF
    # SetKvPageHead(nv_file_Bin)
    for keyValueItem in g_kv_pairs:
        if keyValueItem != struct_name:
            continue
        # Serialise only the requested structure and dump it to nv.bin.
        value, value_len = test.generate(struct_name, g_kv_pairs[struct_name]['value'])
        print("value: ", value)
        print("value_len: ", value_len)
        with open('nv.bin', 'wb') as f:
            f.write(value)
        break

View File

@ -0,0 +1,34 @@
#!/usr/bin/env python3
# coding=utf-8
# Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2021-2023. All rights reserved.
"""
* Description: Utilities of compile system.
* Change: 2023-3-17
"""
import os
import sys
import glob
def generate_nv_source(input, output):
    """Write a dummy C file that #includes every header found.

    Args:
        input: directory to scan for *.h files, or the literal 'noneed'
            to scan the current working directory.
        output: path of the generated C source file.
    """
    if input == 'noneed':
        # The original wrapped "*.h" in a pointless single-arg os.path.join.
        headers = glob.glob("*.h")
    else:
        headers = glob.glob(os.path.join(input, "*.h"))
    # Sort so the output is deterministic regardless of filesystem order.
    lines = ['#include <%s>' % os.path.basename(h) for h in sorted(headers)]
    with open(output, "w") as f:
        f.write("\n".join(lines))  # for compatible representation to old implementation.
def generate_nv_dir(nv_c_path):
    """Create *nv_c_path* (and any missing parents) if it does not exist."""
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(nv_c_path, exist_ok=True)
if __name__ == "__main__":
    # argv: [1] mode ('NV' generates the include-all source, 'MKPATH' creates
    #       a directory), [2] input dir or target path, [3] output file (NV only).
    if sys.argv[1] == 'NV':
        generate_nv_source(sys.argv[2], sys.argv[3])
    if sys.argv[1] == 'MKPATH':
        generate_nv_dir(sys.argv[2])

667
build/script/nv/nv_binary.py Executable file
View File

@ -0,0 +1,667 @@
#!/usr/bin/env python3
# coding=utf-8
# Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2021-2022. All rights reserved.
"""
* Description: NV binary create.
* Create: 2021-11-30
"""
import os
import re
import sys
import stat
import json
import hashlib
import struct
import shutil
import zlib
import binascii
g_root = os.path.realpath(__file__)
g_root = os.path.dirname(g_root)
g_root = os.path.realpath(os.path.join(g_root, "..", "..", ".."))
sys.path.append(os.path.join(g_root, 'build'))
sys.path.append(os.path.join(g_root, 'build', 'script'))
# print(g_root)
from conf_parser import BuildConfParser, ParserError
from build_utils import fn_str_to_int
from generate_data_stream import generate_data_stream
from ctypes import c_char, c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, Structure, sizeof
TD_CHAR = c_char
TD_S8 = c_byte
TD_U8 = c_ubyte
TD_S16 = c_short
TD_U16 = c_ushort
TD_S32 = c_int
TD_U32 = c_uint
g_u8_max = 0xFF
g_u16_max = 0xFFFF
class KeyHead(Structure):
    """On-flash header that precedes every NV key item."""
    _fields_ = [
        ("magic", TD_U8),    # Magic number to indicate the start of the item
        ("valid", TD_U8),    # flag to indicate whether the value is valid
        ("length", TD_U16),  # Length of the key_data field in bytes
        ("type", TD_U8),     # Normal (0xFF) or permanent (0x00) or keep (0x04)
        ("upgrade", TD_U8),
        ("key_id", TD_U16),  # The Key ID
        ("enc_key", TD_U16), # AES key selector: 0x0 - key_data is plaintext, others - key_data is encrypted
        ("version", TD_U16), # Version of the key
        ("rnd", TD_U32)      # Key header crc calculated from length
    ]
class NvPageHead(Structure):
    """Header at the start of each NV flash page (16 bytes = four 32-bit words)."""
    _fields_ = [
        ("id", TD_U16),
        ("reserved", TD_U8),
        ("num_pages", TD_U8),
        ("inverted_details_word", TD_U32),
        ("last_write", TD_U32),  # last_write
        ("unused", TD_U32),      # We want this header to be 4 words long - this allows us to alias anything after it
    ]
# 1. Merge all configuration files according to the given alias.
#    1.1 Parse every type=Nv configuration in the alias.
# 2. Optionally extract the NV item IDs from the merged configuration into an ID
#    enum header for the sources (the build must not modify source code, so the
#    header is only generated on demand by this script).
# 3. Precompile all NV structures.
# 4. Combine each NV item's configured data with its structure to generate the bin.
class BuildNv:
def __init__(self, alias, root=None, targets=None, backup=False, use_crc16=False):
self.alias = alias
self.root = root if root is not None else g_root
self.targets = targets
self.is_backup = backup
self.use_crc16 = use_crc16
self.tmp_path = os.path.join(self.root, json_conf["BUILD_TEMP_PATH"])
self.nv_relative_path = os.path.join(self.root, json_conf["NV_RELATIVE_PATH"])
self.nv_root = os.path.join(self.root, json_conf["NV_DEFAULT_CFG_DIR"])
self.nv_output_dir = os.path.join(self.root, json_conf["OUT_BIN_DIR"])
if not backup:
self.nv_output_name = json_conf["OUT_BIN_NAME"]
else:
self.nv_output_name = json_conf["OUT_BACKUP_BIN_NAME"]
self.nv_ver_src_dict = dict()
self.nv_ver_dict = dict()
self.nv_flash_cfg = None
self.nv_cores_ver_bin = dict()
self.nv_chip_ver_bin = dict()
self.nv_flash_page_index = dict()
def set_nv_output_dir(self, path):
self.nv_output_dir = path
def start_work(self):
self._merge_cfgs()
self._load_nv_flash_cfg()
self._parse_etypes()
self._gen_binary()
self._create_header()
def _merge_cfgs(self):
'''
Merge config sources in self.nv_ver_src_dict.
This will build the self.nv_ver_dict like the following tree:
|--- ver1 : {
| "merged_cfg" : json file after merge all nv configuration with the same product type.
| "prod_type" : "XXXX" }
|
chip|---target1---|--- ver2 :
| |
| |--- ver3 :
| |
| |--- core : "Each target corresponds to one core."
|---targetN...
'''
for target in self.alias:
if self._nv_ver_prepare(target) is False:
continue
# print('nv_ver_src_dict: ', self.nv_ver_src_dict)
for chip in self.nv_ver_src_dict:
src_chip_dict = self.nv_ver_src_dict[chip]
# print("src_chip_dict =",src_chip_dict)
self.nv_ver_dict[chip] = {}
chip_dict = self.nv_ver_dict[chip]
for target in src_chip_dict:
if chip_dict.get(target) is None:
chip_dict[target] = {'core':src_chip_dict[target]['core']}
nv_tmp_dir = os.path.join(self.nv_relative_path)
for ver_name in src_chip_dict[target]:
if ver_name == 'core':
continue
cfg_file_prefix = os.path.join(nv_tmp_dir, 'cfg', '%s_nv' % (target)) # 生成中间文件路径
# print("cfg_file_prefix = ", cfg_file_prefix)
chip_dict[target][ver_name] = self._merge_ver_cfg(cfg_file_prefix, src_chip_dict[target][ver_name])
def _parse_etypes(self):
for chip in self.nv_ver_dict:
chip_dict = self.nv_ver_dict[chip]
for target in chip_dict:
# TODO: scons in chip dir or nv_config dir? etypes path depends on scons path
nv_tmp_dir = os.path.join(self.tmp_path, target)
etypes_path = os.path.join(nv_tmp_dir, "%s.etypes" % target) # 中间文件xxx.etypes路径
if os.path.exists(etypes_path) is not True: # 判断中间文件是否存在,如果不在说明该模块没有被编译,需要加入到编译链接中
etypes_path = os.path.join(self.tmp_path, "etypes", "%s.etypes" % target)
if os.path.exists(etypes_path) is not True:
msg = "[error] [%s] need add nv_config module in alias! %s" % (target, etypes_path)
raise ParserError(msg)
stream_gen = generate_data_stream()
stream_gen.phase_etypes(etypes_path)
chip_dict[target]["stream_gen"] = stream_gen
dtabase_txt = os.path.join(self.root, json_conf['DATABASE_TXT_FILE'])
shutil.copy(etypes_path, dtabase_txt)
def _gen_binary(self):
'''
|--- ver1 : binary file of ver1.
| (version name : product_type + version name)
chip|---core1---|--- ver2 :
| |
| |--- ver3 :
|---coreN...
'''
self._gen_binary_prepare()
self._gen_target_version_binary()
self._gen_chip_nv_binary()
def _gen_target_version_binary(self):
for chip in self.nv_cores_ver_bin:
cores = self.nv_cores_ver_bin[chip]
# print("cores =", cores)
for core in cores:
cores[core] = self._gen_version_binary(self.nv_ver_dict[chip], chip, core)
def _gen_version_binary(self, chip_ver_dict, chip, core):
ver_binary_dict = dict()
# print("chip_ver_dict = ", chip_ver_dict) # 字典信息包含核名字,配置文件路径
if chip_ver_dict is None:
return ver_binary_dict
for target in chip_ver_dict:
if chip_ver_dict[target].get('core') != core: # 判断字典里面的target读出来的core是否和core相同
print("chip_ver_dict[target].get('core') = ", chip_ver_dict[target].get('core'))
continue
stream_gen = chip_ver_dict[target].get('stream_gen')
for ver in chip_ver_dict[target]:
# print("ver =", ver)
if ver == 'core' or ver == 'stream_gen':
continue
stream_gen = chip_ver_dict[target].get('stream_gen')
cfg_file = chip_ver_dict[target][ver]["merged_cfg"]
# print("cfg_file =",cfg_file)
stream = self._gen_nv_stream(cfg_file, stream_gen, chip, core)
nv_ver_bin = \
os.path.join(self.nv_relative_path, 'bin', '%s_nv.bin' % (core))
# print("nv_ver_bin = ", nv_ver_bin) # 生成nvbin文件的路径
prod_type = chip_ver_dict[target][ver]["prod_type"]
prod_type = "all" if prod_type is None else prod_type
ver_binary_dict["%s_%s" % (chip, prod_type)] = self._write_binary_to_file(nv_ver_bin, stream)
return ver_binary_dict
def _write_binary_to_file(self, file_path, stream):
if os.path.exists(os.path.dirname(file_path)) is False:
os.makedirs(os.path.dirname(file_path))
if os.path.exists(file_path) is True:
os.remove(file_path)
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
modes = stat.S_IWUSR | stat.S_IRUSR
with os.fdopen(os.open(file_path, flags, modes), 'wb') as fout:
fout.write(stream)
return file_path
def _gen_binary_prepare(self):
for chip in self.nv_ver_dict:
self.nv_chip_ver_bin[chip] = dict()
self.nv_cores_ver_bin[chip] = dict()
self.nv_flash_page_index[chip] = dict()
nv_flash_chip_cfg = self.nv_flash_cfg[chip]
cores = nv_flash_chip_cfg["cores"].keys()
chip_nv_ver = self.nv_ver_dict[chip]
for target in chip_nv_ver:
core = chip_nv_ver[target].get('core')
for core in cores:
if core not in cores:
msg = "[error] [%s] [%s] not a core cfg in nv_storage_cfg.json!" % (target, core)
raise ParserError(msg)
if self.nv_cores_ver_bin[chip].get(core) is None:
self.nv_cores_ver_bin[chip][core] = dict()
page_size = fn_str_to_int(nv_flash_chip_cfg["size"]["page_size"])
total_size = 0
for core in cores:
core_page_nums = nv_flash_chip_cfg['cores'][core]['page_nums']
total_size += core_page_nums * page_size
self.nv_flash_page_index[chip][core] = [(sizeof(NvPageHead) + num * page_size, (num + 1) * page_size) \
for num in range(0, core_page_nums)]
if total_size > fn_str_to_int(nv_flash_chip_cfg["size"]['flash_size']):
msg = "[error] cores size: %s, over total flash size: %s!" % \
(total_size, nv_flash_chip_cfg["size"]['flash_size'])
raise ParserError(msg)
def _gen_nv_stream(self, cfg_file, stream_gen, chip, core):
core_nv_bin = self._init_nv_page_head(chip, core)
cfg_data = BuildConfParser(cfg_file).get_conf_data()
key_id_list = []
last_key_item_start_addr = 0
for module in cfg_data:
for key_item in cfg_data[module]:
if key_item == 'module_id':
continue
key_struct_name = cfg_data[module][key_item].get("structure_type")
key_value = cfg_data[module][key_item].get("value")
key_attr = cfg_data[module][key_item].get("attributions")
key_id = cfg_data[module][key_item].get("key_id")
key_id = fn_str_to_int(key_id) if type(key_id) is not int else key_id
if key_struct_name is None or key_value is None or key_attr is None or key_value == []:
msg = "[error] 'structure_type' 'value' 'attributions' must be configured!"
raise ParserError(msg)
if key_id in key_id_list:
msg = "[error] key id:%d repeated, please check!" % key_id
raise ParserError(msg)
key_id_list.append(key_id)
key_data, key_data_len = stream_gen.generate(key_struct_name, key_value)
page_index, key_item_start_addr = self._find_usable_addr(chip, core, key_data, key_data_len)
core_nv_bin, key_item_start_addr = \
self._init_key_head(core_nv_bin, key_item_start_addr, key_data_len, key_id, key_attr)
core_nv_bin, key_item_start_addr = \
self._set_key_data(core_nv_bin, key_item_start_addr, key_data, key_data_len)
core_nv_bin, key_item_start_addr = \
self._set_key_hash(core_nv_bin, key_item_start_addr, key_data_len)
self._update_core_index(chip, core, page_index, key_item_start_addr)
last_key_item_start_addr = max(last_key_item_start_addr, key_item_start_addr)
core_nv_bin = self._set_unused_page(chip, core_nv_bin, last_key_item_start_addr)
self._reset_core_index(chip, core)
return core_nv_bin
def _set_unused_page(self, chip, core_nv_bin, key_item_start_addr):
    """Fill the tail of the last used page with 0xFF and drop trailing pages.

    Assumes page_size is a power of two (required by the bitmask round-up).
    """
    page_size = fn_str_to_int(self.nv_flash_cfg[chip]['size']['page_size'])
    # Round the end address up to the next page boundary.
    end_addr = (key_item_start_addr + (page_size - 1)) & ~(page_size - 1)
    core_nv_bin[key_item_start_addr:end_addr] = b'\xFF' * (end_addr - key_item_start_addr)
    return core_nv_bin[:end_addr]
def _gen_chip_nv_binary(self):
for chip in self.nv_cores_ver_bin:
chip_bins = self.nv_cores_ver_bin[chip]
ver_list = []
for core in chip_bins:
ver_list.extend(chip_bins[core].keys())
ver_list = set(ver_list)
for ver in ver_list:
self.nv_chip_ver_bin[chip][ver] = self._assemble_ver_bins(chip, ver)
def _assemble_ver_bins(self, chip, ver):
    """Concatenate every core's NV image of *ver* into one chip binary file.

    Cores without an image for this version get a freshly initialized
    (blank) image. Returns the result of writing the binary file.
    """
    flash_size = fn_str_to_int(self.nv_flash_cfg[chip]['size']['flash_size'])
    # Erased-flash fill in one bulk operation (was a per-byte Python loop).
    flash_bin = bytearray(b'\xFF') * flash_size
    start_addr = 0
    chip_bins = self.nv_cores_ver_bin[chip]
    for core in chip_bins:
        ver_bin = chip_bins[core].get(ver)
        flash_bin, start_addr = self._append_file_to_stream(flash_bin, start_addr, chip, core, ver_bin)
    chip_ver_bin_file = os.path.join(self.nv_output_dir, self.nv_output_name)
    # TODO: only the single-core case is handled for now; trailing unused
    # 0xFF bytes are stripped from the NV bin before writing.
    return self._write_binary_to_file(chip_ver_bin_file, flash_bin[0 : start_addr])
def _append_file_to_stream(self, flash_bin, start_addr, chip, core, ver_bin):
core_bin_size = fn_str_to_int(self.nv_flash_cfg[chip]['size']["page_size"]) * \
self.nv_flash_cfg[chip]["cores"][core]['page_nums']
core_nv_bin = b''
if ver_bin is None:
core_nv_bin = self._init_nv_page_head(chip, core)
else:
with open(ver_bin, 'rb') as f:
core_nv_bin = f.read()
#print('core_nv_bin = ', core_nv_bin)
tail_addr = start_addr + len(core_nv_bin)
flash_bin[start_addr : tail_addr] = core_nv_bin
return flash_bin, tail_addr
def _reset_core_index(self, chip, core):
    """Rebuild the per-page (write_ptr, page_end) table for *core*."""
    chip_cfg = self.nv_flash_cfg[chip]
    page_size = fn_str_to_int(chip_cfg['size']["page_size"])
    page_count = chip_cfg['cores'][core]['page_nums']
    # Each page becomes writable right after its NvPageHead.
    self.nv_flash_page_index[chip][core] = [
        (sizeof(NvPageHead) + page * page_size, (page + 1) * page_size)
        for page in range(page_count)
    ]
def _update_core_index(self, chip, core, index, addr):
(start_addr, page_max_addr) = self.nv_flash_page_index[chip][core][index]
if start_addr >= addr or addr > page_max_addr:
msg = "[error] addr %s invalid!" % addr
raise ParserError(msg)
self.nv_flash_page_index[chip][core][index] = (addr, page_max_addr)
#print("update page index: \n", self.nv_flash_page_index)
def _find_usable_addr(self, chip, core, key_data, key_data_len):
    """Find the first page with room for a whole key item.

    Returns (page_index, write_addr) of the first page whose free space fits
    header + padded data + checksum.

    Raises:
        ParserError: if the item exceeds one page or no page has room.
    """
    page_size = fn_str_to_int(self.nv_flash_cfg[chip]['size'].get('page_size'))
    key_item_total_len = self._get_key_item_len(key_data_len)
    if key_item_total_len > page_size - sizeof(NvPageHead):
        # Fixed: the original message used an undefined 'key_id' and a format
        # string with no placeholder, raising NameError instead of ParserError.
        msg = "[error] key item length %d over page size!" % key_item_total_len
        raise ParserError(msg)
    index = 0
    for (start_addr, page_max_addr) in self.nv_flash_page_index[chip][core]:
        if start_addr + key_item_total_len > page_max_addr:
            index += 1
            continue
        return index, start_addr
    msg = "[error] no more enough space for [%s]!" % core
    raise ParserError(msg)
def _get_key_type_from_attr(self, key_id, attr):
if (attr & 1) and (not (attr & ~1)):
return 0xFF
elif (attr & 2) and (not (attr & ~2)):
return 0x00
elif (attr & 4) and (not (attr & ~4)):
return 0xFF
else:
msg = "[error] attribution config err: [id-%s] [attr-%s] !" % (key_id, attr)
raise ParserError(msg)
def _get_key_upgrade_from_attr(self, key_id, attr):
if (attr & 1) and (not (attr & ~1)):
return 0xFF
elif (attr & 2) and (not (attr & ~2)):
return 0xFF
elif (attr & 4) and (not (attr & ~4)):
return 0x00
else:
msg = "[error] attribution config err: [id-%s] [attr-%s] !" % (key_id, attr)
raise ParserError(msg)
def _get_key_item_len(self, key_data_len):
    """Total on-flash size of one key item: header + 4-byte checksum + padded data."""
    padded_len = key_data_len + ((-key_data_len) % 4)
    return sizeof(KeyHead) + 4 + padded_len
def _set_key_hash(self, nv_bin, key_item_start_addr, key_data_len):
    """Append an integrity checksum (CRC32 or CRC16-CCITT) after the key data.

    *key_item_start_addr* points just past the padded key data; the checksum
    covers the key header plus the padded payload.
    """
    # Mirror the 4-byte padding applied by _set_key_data.
    if key_data_len % 4 != 0:
        key_data_len += 4 - key_data_len % 4
    hash_start_addr = key_item_start_addr - key_data_len - sizeof(KeyHead)
    hash_end_addr = hash_start_addr + key_data_len + sizeof(KeyHead)
    if not self.use_crc16:
        crc32num = zlib.crc32(nv_bin[hash_start_addr : hash_end_addr])
    else:
        # CRC-16/CCITT; the value still fits the 8-hex-digit formatting below.
        crc32num = binascii.crc_hqx(nv_bin[hash_start_addr : hash_end_addr], 0)
    crc32ret = '{:0>8X}'.format(crc32num)
    # Turn "AABBCCDD" into "0xAA 0xBB 0xCC 0xDD" so it can be parsed per byte.
    crc32ret = re.sub(r"(?<=\w)(?=(?:\w\w)+$)", " 0x", crc32ret)
    crc32ret = '0x' + crc32ret
    crc32list = [int(x,16) for x in crc32ret.split(" ")]
    # NOTE(review): name is historical - this holds the CRC bytes, not SHA-256.
    sha256bytearray = bytes(crc32list)
    tail_addr = key_item_start_addr + len(sha256bytearray)
    nv_bin[key_item_start_addr : tail_addr] = sha256bytearray
    return nv_bin, tail_addr
def _set_key_data(self, nv_bin, key_item_start_addr, key_data, key_data_len):
if key_data_len % 4 != 0:
for i in range(0, 4 - key_data_len % 4) :
key_data += b'\x00'
key_data_len += 4 - key_data_len % 4
tail_addr = key_item_start_addr + key_data_len
nv_bin[key_item_start_addr : tail_addr] = key_data
return nv_bin, tail_addr
def _init_key_head(self, nv_bin, key_item_start_addr, key_data_len, key_id, key_attr):
    """Write a KeyHead structure for one key item; return (nv_bin, data_offset)."""
    # from_buffer operates on a slice copy (slicing a bytearray copies), so
    # the populated struct is explicitly written back into nv_bin below.
    nv_key_st = KeyHead.from_buffer(nv_bin[key_item_start_addr:])
    nv_key_st.magic = 0xA9
    nv_key_st.length = key_data_len
    nv_key_st.type = self._get_key_type_from_attr(key_id, key_attr)
    nv_key_st.upgrade = self._get_key_upgrade_from_attr(key_id, key_attr)
    nv_key_st.key_id = key_id
    nv_key_st.version = 65535
    # TODO: encryption is not supported yet; NV encryption is not handled by
    # this script and may be moved to the initialization stage.
    nv_key_st.enc_key = 0
    tail_addr = key_item_start_addr + sizeof(KeyHead)
    nv_bin[key_item_start_addr : tail_addr] = nv_key_st
    return nv_bin, tail_addr
def _init_nv_page_head(self, chip, core):
    """Create a blank (all 0xFF, erased-flash) NV image for *core*.

    Returns a bytearray of page_nums * page_size bytes in which every page
    carries an initialized NvPageHead.

    Note: an unused `default_page_nums` lookup was removed, and the per-byte
    0xFF fill loop was replaced with a single bulk fill.
    """
    nv_flash_chip_cfg = self.nv_flash_cfg[chip]
    page_size = fn_str_to_int(nv_flash_chip_cfg['size']['page_size'])
    page_nums = nv_flash_chip_cfg['cores'][core]['page_nums']
    if not self.is_backup:
        page_id_start = nv_flash_chip_cfg['cores'][core]['page_id_start']
    else:
        # Backup images always use this fixed page id.
        page_id_start = '0x34B2'
    core_nv_size = page_nums * page_size
    core_nv_bin = bytearray(b'\xFF') * core_nv_size
    for i in range(0, page_nums):
        start_addr = i * page_size
        # from_buffer works on a slice copy; the struct is copied back below.
        nv_page_head = NvPageHead.from_buffer(core_nv_bin[start_addr:])
        nv_page_head.id = fn_str_to_int(page_id_start)
        nv_page_head.reserved = 1
        nv_page_head.num_pages = i
        # Bitwise complement of the packed (id, reserved, num_pages) word.
        nv_page_head.inverted_details_word = ~int.from_bytes(struct.pack('HBB', \
            nv_page_head.id, nv_page_head.reserved, nv_page_head.num_pages), 'little')
        nv_page_head.last_write = 0
        nv_page_head.unused = ~nv_page_head.last_write
        core_nv_bin[start_addr : start_addr + sizeof(NvPageHead)] = nv_page_head
    return core_nv_bin
def _load_nv_flash_cfg(self):
    """Load the flash layout configuration for every chip in self.nv_ver_dict.

    NOTE(review): cfg_file does not depend on *chip*, so every chip currently
    loads the same NV_TARGET_JSON_PATH file - confirm this is intended.
    """
    self.nv_flash_cfg = dict()
    for chip in self.nv_ver_dict:
        cfg_file = os.path.join(self.root, json_conf["NV_TARGET_JSON_PATH"])
        self.nv_flash_cfg[chip] = BuildConfParser(cfg_file).get_conf_data()
def _add_nv_ver(self, chip, target, core, ver, common_cfg, ver_cfg, prod_type=None):
'''
Add version config into self.nv_ver_src_dict.
There are three configuration scenarios.One target may correspond to multiple NV versions.
|--- ver1: { srcs:[default, common, cfg1], prod_type: }
|
chip|---target1---|--- ver2: { srcs:[default, cfg2], prod_type: }
| |
| |--- ver3: { srcs:[default, common], prod_type: }
| |
| |--- core : "Each target corresponds to one core."
|---targetN...
'''
ver_cfgs = []
if os.path.exists(common_cfg) is True:
ver_cfgs.append(common_cfg)
if ver_cfg is not None and os.path.exists(ver_cfg):
ver_cfgs.append(ver_cfg)
if self.nv_ver_src_dict.get(chip) is None:
self.nv_ver_src_dict[chip] = dict()
chip_dict = self.nv_ver_src_dict[chip]
if chip_dict.get(target) is not None and chip_dict[target].get(ver) is not None:
msg = "[error] Ver config Repeate!"
raise ParserError(msg)
if chip_dict.get(target) is None:
chip_dict[target] = {ver:{"srcs":ver_cfgs, 'prod_type': prod_type}}
else:
chip_dict[target].update({ver:{"srcs":ver_cfgs, 'prod_type': prod_type}})
if chip_dict[target].get('core') is None:
chip_dict[target]['core'] = core
elif chip_dict[target].get('core') != core:
msg = "[error] [%s] core not match!" % target
raise ParserError(msg)
def _nv_ver_prepare(self, target):
'''
1. Check nv configurations.
2. Add all correct config into self.nv_ver_src_dict..
'''
if type(self.alias[target]) is list:
return False
target_type = self.alias[target].get("TYPE")
if target_type is None or target_type != 'nv':
return False
core = self.alias[target].get("CORE")
if core is None:
msg = "[error] core name not exist!"
raise ParserError(msg)
chip = self.alias[target].get("CHIP")
#default_cfg = os.path.join(self.nv_root, '%s_default.json' % core)
'''
if chip is None or os.path.exists(default_cfg) is False:
msg = "[error] chip name OR %s not exist!" % default_cfg
raise ParserError(msg)
'''
kernel_name = self.alias[target].get("KERNEL_BIN")
if kernel_name is None:
msg = "[error] KERNEL is null!"
raise ParserError(msg)
if self.targets is not None and kernel_name not in self.targets:
return False
cfgs = self.alias[target].get("COMPONENT")
if cfgs is None:
msg = "[error] COMPONENT is null!"
raise ParserError(msg)
prod_type = self.alias[target].get("PRODUCT_TYPE")
if prod_type == "":
prod_type = None
if prod_type is not None and type(prod_type) is not str:
msg = "[error] PRODUCT_TYPE must be a string type, one kernel only suuport one product type!"
raise ParserError(msg)
cfg_dir = os.path.join(self.nv_root, kernel_name)
common_cfg = os.path.join(cfg_dir, 'common.json')
for cfg in cfgs:
cfg_file = os.path.join(cfg_dir, '%s.json' % cfg) if cfg != 'common' else None
self._add_nv_ver(chip, kernel_name, core, cfg, common_cfg, cfg_file, prod_type)
def _prod_type_filter(self, srcs, prod_type = None):
    """Merge the key configs in *srcs*, keeping keys that match *prod_type*.

    A key entry is kept when its 'key_status' is 'alive' and its
    'product_type' equals (or contains) *prod_type*. Module ids must be
    unique per module name and consistent across all source files; entries
    from later files override earlier ones for the same key item.

    Returns:
        dict mapping module name -> {'module_id': int, <key_item>: cfg, ...}

    Raises:
        ParserError: on missing, oversized or conflicting module/key ids.
    """
    combination = dict()
    module_dict = dict()  # module_id -> module name, to detect duplicates
    for src in srcs:
        src_conf = BuildConfParser(src).get_conf_data()
        for module in src_conf:
            module_id = src_conf.get(module).get('module_id')
            if module_id is None:
                msg = "[error][file:%s][%s] module_id is null!" % (src, module)
                raise ParserError(msg)
            # module_id may be an int or a numeric string.
            module_id = fn_str_to_int(module_id) if type(module_id) is not int else module_id
            if module_id > g_u8_max :
                msg = "[error][file:%s][%s] module_id is more than 0xFF!" % (src, module)
                raise ParserError(msg)
            # Different modules must be configured with different module_ids.
            if module_id in module_dict:
                if module_dict[module_id] != module:
                    msg = "[error][file:%s][%s] module_id is the same to [%s]!" % (src, module, module_dict[module_id])
                    raise ParserError(msg)
            else:
                module_dict[module_id] = module
            if module not in combination:
                combination[module] = {'module_id': module_id}
            elif combination.get(module).get('module_id') != module_id:
                msg = "[error][%s][%s] module_id is not same as other file!" % (src, module)
                raise ParserError(msg)
            for item in src_conf.get(module):
                key_cfg = src_conf.get(module).get(item)
                if item == 'module_id':
                    continue
                key_id = key_cfg.get('key_id')
                key_id = None if key_id is None else (fn_str_to_int(key_id) if type(key_id) is not int else key_id)
                if key_id is None or key_id > g_u16_max :
                    msg = "[error][file:%s][%s][%s] key_id is null or more than unsighed 16 or not match with module_id!" % (src, module, item)
                    raise ParserError(msg)
                item_prod_type = key_cfg.get('product_type')
                key_status = key_cfg.get('key_status')
                # Keep the key only for matching product type and live status.
                if (prod_type == item_prod_type or (item_prod_type is not None and prod_type in item_prod_type)) \
                    and key_status == 'alive':
                    combination[module].update({item:key_cfg})
    return combination
def _nv_cfg_writer(self, dst_file, combination):
if os.path.exists(os.path.dirname(dst_file)) is False:
os.makedirs(os.path.dirname(dst_file))
if os.path.exists(dst_file):
os.remove(dst_file)
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
modes = stat.S_IWUSR | stat.S_IRUSR
with os.fdopen(os.open(dst_file, flags, modes), 'w') as fout:
fout.write(json.dumps(combination, indent=4))
def _merge_ver_cfg(self, file_prefix, ver_cfg):
srcs = ver_cfg.get('srcs')
if srcs is None:
msg = "[error] ver cfg file is null!"
raise ParserError(msg)
prod_type = ver_cfg.get('prod_type')
combination = self._prod_type_filter(srcs, prod_type)
dst_file = '%s.json' % file_prefix
self._nv_cfg_writer(dst_file, combination)
return { "merged_cfg" : dst_file, "prod_type" : prod_type}
def _create_header(self):
    """Placeholder for NV C-header generation; not implemented yet."""
    pass
# Verify that the build configuration contains every mandatory entry.
def check_key(json_conf):
    """Raise ParserError if any required configuration key is missing.

    Args:
        json_conf: dict loaded from the build configuration JSON file.

    Note: the loop variable previously shadowed the function name
    'check_key'; the membership test now uses the dict directly instead of
    the unidiomatic dict.keys(json_conf).
    """
    required = ['OUT_BIN_DIR', 'BUILD_TEMP_PATH', 'NV_TARGET_JSON_PATH',
                'NV_RELATIVE_PATH', 'NV_DEFAULT_CFG_DIR', 'DATABASE_TXT_FILE']
    for item in required:
        if item not in json_conf:
            msg = "[error] [nv_binary] need add ConfigMap (%s) in json_conf!" % (item)
            raise ParserError(msg)
def test(targets, flag, backup, use_crc16):
    """Build NV binaries for *targets*.

    When *flag* is set the output directory is redirected to OUT_BIN_DIR;
    *backup* selects the backup-image variant.
    """
    root = g_root
    target_cfg = os.path.join(root, json_conf["NV_TARGET_JSON_PATH"])
    alias_conf = BuildConfParser(target_cfg).get_conf_data()
    worker = BuildNv(alias_conf, root, targets, backup, use_crc16)
    if flag:
        worker.set_nv_output_dir(os.path.join(root, json_conf["OUT_BIN_DIR"]))
    worker.start_work()
def nv_begin(in_path, targets, flag, gen_backup=False, use_crc16=False):
    """Entry point: load the build config JSON, then build the NV binaries.

    The primary NV image is always built; when *gen_backup* is set a backup
    image is produced in a second pass.
    """
    global json_conf
    with open(in_path, 'r') as cfg_fp:
        json_conf = json.load(cfg_fp)
    check_key(json_conf)
    test(targets, flag, False, use_crc16)
    if gen_backup:
        test(targets, flag, True, use_crc16)
    print("build nv bin success!!")
if __name__ == "__main__":
    # NOTE(review): argv handling looks inconsistent - argv[2] and argv[3]
    # are read (requires at least 4 arguments), yet 'flag' tests
    # len(sys.argv) == 3, which is always False in that case. Confirm the
    # intended CLI contract before changing.
    in_path = sys.argv[2]
    targets = sys.argv[3].split()
    flag = len(sys.argv) == 3
    nv_begin(in_path, targets, flag)

282
build/script/nv/parse_msgdefs.py Executable file
View File

@ -0,0 +1,282 @@
#!/usr/bin/env python3
# coding=utf-8
# Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2021-2022. All rights reserved.
"""
* Description: NV binary create.
* Create: 2020-3-10
"""
import pycparser
import ctypes
# C binary operators used when constant-folding expressions from parsed
# headers. NOTE: "/" and "%" use Python floor semantics, which match C only
# for non-negative operands (header constants are normally non-negative).
# Added operators relative to the original set: "^", "%", "<", ">", "!=".
binary_operators = {
    "&": lambda x, y: x & y,
    "|": lambda x, y: x | y,
    "^": lambda x, y: x ^ y,
    "*": lambda x, y: x * y,
    "+": lambda x, y: x + y,
    "-": lambda x, y: x - y,
    "/": lambda x, y: x // y,
    "%": lambda x, y: x % y,
    "<<": lambda x, y: x << y,
    ">>": lambda x, y: x >> y,
    "<": lambda x, y: int(x < y),
    ">": lambda x, y: int(x > y),
    "<=": lambda x, y: int(x <= y),
    ">=": lambda x, y: int(x >= y),
    "==": lambda x, y: int(x == y),
    "!=": lambda x, y: int(x != y),
    "&&": lambda x, y: int(bool(x and y)),
    "||": lambda x, y: int(bool(x or y)),
}
# C unary operators used for constant folding; "sizeof" receives a ctypes type.
unary_operators = {
    "-": lambda x: -x,
    # C logical NOT: !x is 1 when x is zero, 0 otherwise. (The previous
    # lambda returned the operand's truth value itself, i.e. the inverse.)
    "!": lambda x: 0 if x else 1,
    "sizeof": lambda x: ctypes.sizeof(x),
}
# Fundamental C types keyed by their canonical specifier string as produced
# by get_typename() (specifiers sorted alphabetically, 'signed'/'int'
# dropped). Plain "char" maps to the signed c_byte; "void" maps to None,
# which marks a type as unusable inside message definitions.
fundamental_types = {
    "": ctypes.c_int,
    "_Bool": ctypes.c_bool,
    "char unsigned": ctypes.c_ubyte,
    "char": ctypes.c_byte,
    "double": ctypes.c_double,
    "double long": ctypes.c_longdouble,
    "float": ctypes.c_float,
    "long long unsigned": ctypes.c_ulonglong,
    "long long": ctypes.c_longlong,
    "long unsigned": ctypes.c_ulong,
    "long": ctypes.c_long,
    "short unsigned": ctypes.c_ushort,
    "short": ctypes.c_short,
    "unsigned": ctypes.c_uint,
    "void": None,
}
def get_typename(names):
    """Canonicalize a list of C type specifiers into a lookup key.

    Specifiers are sorted and the redundant 'signed'/'int' words dropped, so
    e.g. ['unsigned', 'char'] and ['char', 'unsigned'] both yield
    'char unsigned', matching the keys of fundamental_types.
    """
    return " ".join(
        name for name in sorted(names) if name not in ("signed", "int")
    )
def create_enum_type(enumname, enums):
    """Create a ctypes-based enum type named *enumname*.

    The enum size is fixed to 4 bytes (ctypes.c_uint); the enumerator
    name-to-value mapping is attached as the ``members`` class attribute.

    Note: an earlier variant sized the enum from its value range
    (byte/short/int); that code was unreachable behind the unconditional
    return and has been removed.
    """
    return type(enumname, (ctypes.c_uint,), {"members": enums})
class Visitor(pycparser.c_ast.NodeVisitor):
    """Expands C typedefs and enums from a pycparser AST into ctypes types.

    After visiting a FileAST:
      * ``typedefs`` maps typedef names to ctypes types (None marks void),
      * ``enums`` maps every enumerator name to its integer value.
    Any construct without a handler aborts via SystemExit (see generic_visit).

    Fixes relative to the original:
      * visit_Union incremented anon_struct_count instead of
        anon_union_count, so successive anonymous unions shared one name;
      * a local named 'type' shadowed the builtin in visit_ArrayDecl.
    """
    def __init__(self):
        self.enums = {}
        # Seed with the fundamental C types; visited typedefs are added.
        self.typedefs = dict(fundamental_types)
        # The context in which ID names are resolved; visit_StructRef
        # temporarily rebinds this to a struct's fields.
        self.id_context = self.enums
        self.anon_struct_count = 1
        self.anon_enum_count = 1
        self.anon_union_count = 1

    def generic_visit(self, node):
        # Dump ast to see what is not implemented.
        raise SystemExit("Unhandled ast element at %s: %s" % (node.coord, node))

    def visit_Decl(self, node):
        # Plain declarations carry no typedef information of interest.
        return

    def visit_FuncDef(self, node):
        return

    def visit_FuncDecl(self, node):
        return

    def visit_FileAST(self, node):
        for c in node:
            self.visit(c)

    def visit_ID(self, node):
        try:
            return self.id_context[node.name]
        except KeyError:
            raise SystemExit("Failed to resolve identifier '%s' at %s" % (node.name, node.coord))

    def visit_Typename(self, node):
        return self.visit(node.type)

    def visit_TypeDecl(self, node):
        return self.visit(node.type)

    def visit_CompoundLiteral(self, node):
        return self.visit(node.type)

    def visit_PtrDecl(self, node):
        return ctypes.POINTER(self.visit(node.type))

    def visit_Typedef(self, node):
        if node.name in self.typedefs:
            raise SystemExit("Duplicate typedef '%s' at %s" % (node.name, node.coord))
        value = self.visit(node.type)
        self.typedefs[node.name] = value

    def visit_ArrayRef(self, node):
        # For accessing the type of an array element.
        array = self.visit(node.name)
        return array._type_

    def visit_StructRef(self, node):
        # This is needed to get access to types inside a struct.
        struct = self.visit(node.name)
        self.id_context = dict(struct._fields_)
        try:
            return self.visit(node.field)
        finally:
            # Always restore the global enum context.
            self.id_context = self.enums

    def visit_BinaryOp(self, node):
        try:
            op = binary_operators[node.op]
        except KeyError:
            raise SystemExit("Unhandled binary operator '%s' at %s" % (node.op, node.coord))
        leftval = self.visit(node.left)
        rightval = self.visit(node.right)
        return op(leftval, rightval)

    def visit_UnaryOp(self, node):
        value = self.visit(node.expr)
        try:
            op = unary_operators[node.op]
        except KeyError:
            raise SystemExit("Unhandled unary operator '%s' at %s" % (node.op, node.coord))
        return op(value)

    def visit_Enum(self, node):
        # Mapping of enum names to enum values from all parsed enums.
        value = -1
        enums = {}
        for enum in node.values:
            if enum.value is None:
                # Implicit value: previous value + 1 (C enum semantics).
                value += 1
            else:
                value = self.visit(enum.value)
            self.enums[enum.name] = enums[enum.name] = value
        if node.name is None:
            enumname = "enum_anon_%d" % self.anon_enum_count
            self.anon_enum_count += 1
        else:
            enumname = "enum_%s" % str(node.name)
        return create_enum_type(enumname, enums)

    def visit_Constant(self, node):
        if node.type not in ["int", "unsigned int"]:
            raise SystemExit("Unhandled Constant node type '%s' at %s" % (node.type, node.coord))
        # Strip C integer suffixes (L/U) before conversion.
        value = node.value.rstrip("LlUu")
        if value.startswith(("0x", "0X")):
            return int(value, 16)
        return int(value)

    def visit_IdentifierType(self, node):
        name = get_typename(node.names)
        try:
            return self.typedefs[name]
        except KeyError:
            raise SystemExit("Invalid type specifier '%s' at %s" % (name, node.coord))

    def visit_Struct(self, node):
        fields = []
        # node.decls can be None when the struct declaration is outside the typedef.
        if node.decls is not None:
            for decl in node.decls:
                if decl.bitsize is not None:
                    # Bitfields: record the width as a 'bitsize' attribute on
                    # a derived type (not as a ctypes bitfield tuple).
                    temp = self.visit(decl.type)
                    value = type(decl.name, (temp,), {"bitsize": decl.bitsize.value})
                else:
                    value = self.visit(decl.type)
                if value is None:
                    # This is the void type - indicates an invalid message definition type.
                    return None
                fields.append((decl.name, value))
        if node.name is None:
            structname = "struct_anon_%d" % self.anon_struct_count
            self.anon_struct_count += 1
        else:
            structname = "struct_%s" % str(node.name)
        return type(structname, (ctypes.Structure,), {"_fields_": fields})

    def visit_Union(self, node):
        fields = []
        for decl in node.decls:
            value = self.visit(decl.type)
            if value is None:
                return None
            fields.append((decl.name, value))
        if node.name is None:
            unionname = "union_anon_%d" % self.anon_union_count
            # Fixed: previously incremented anon_struct_count here.
            self.anon_union_count += 1
        else:
            unionname = "union_%s" % str(node.name)
        return type(unionname, (ctypes.Union,), {"_fields_": fields})

    def visit_ArrayDecl(self, node):
        length = self.visit(node.dim)
        elem_type = self.visit(node.type)  # renamed from 'type' (shadowed the builtin)
        if length is None or length < 0:
            raise SystemExit("Invalid array len %s at %s" % (length, node.dim.coord))
        if elem_type is None:
            raise SystemExit("Invalid array type %s at %s" % (elem_type, node.type.coord))
        return elem_type * length
def message_enum_name(elemName):
    """Derive the message-id enumerator name from a typedef name.

    Returns None when the name matches no known message naming pattern.
    """
    suffix_rules = (
        ("_t", lambda n: n[:-2].upper()),
        ("_s", lambda n: n[:-2]),  # NAS messages, already uppercase
        ("_STRUCT", lambda n: n[:-7]),
    )
    for suffix, derive in suffix_rules:
        if elemName.endswith(suffix):
            return derive(elemName)
    if "LOG_MESSAGE" in elemName:
        return elemName + '_ID'
    return None
def parse_preprocessed_headers(source):
    """Parse a preprocessed C header and collect message definitions.

    Returns a sorted list of (enum_name, struct_name, message_id,
    ctypes_type) tuples for every typedef whose name maps to a message-id
    enumerator. Exits via SystemExit on parse errors or undefined types.
    """
    try:
        ast = pycparser.parse_file(source)
    except pycparser.plyparser.ParseError as e:
        raise SystemExit("ERROR parsing msgdefs %s: %s" % (source, e))
    visitor = Visitor()
    visitor.visit(ast)
    messages = []
    for struct_name, fields in sorted(visitor.typedefs.items()):
        enum_name = message_enum_name(struct_name)
        if enum_name is None:
            # Not a valid message definition name.
            continue
        if enum_name not in visitor.enums:
            # No associated message id, so not a message definition.
            continue
        if fields is None:
            raise SystemExit("Message definition contains undefined type: %s" % struct_name)
        messages.append((enum_name, struct_name, visitor.enums[enum_name], fields))
    return messages