mirror of https://github.com/ddaodan/bgi-scripts.git synced 2025-11-01 13:34:13 +08:00

Add files via upload

ddaodan
2025-09-15 19:19:12 +08:00
committed by GitHub
parent 24119920fb
commit e1ed012991
10 changed files with 2117 additions and 0 deletions

59
build/author_config.json Normal file

@@ -0,0 +1,59 @@
{
"rename": {
"起个名字好难": "起个名字好难的喵",
"this-Fish": "蜜柑魚"
},
"links": {
"秋云": "https://github.com/physligl",
"起个名字好难的喵": "https://github.com/MisakaAldrich",
"火山": "https://github.com/RRRR623",
"mno": "https://github.com/Bedrockx",
"汐": "https://github.com/jiegedabaobei",
"Tool_tingsu": "https://github.com/Tooltingsu",
"吉吉喵": "https://github.com/JJMdzh",
"曦": "https://github.com/cx05121",
"ddaodan": "https://github.com/ddaodan",
"LCB-茶包": "https://github.com/kaedelcb",
"蜜柑魚": "https://github.com/this-Fish",
"彩虹QQ人": "https://github.com/KRdingsan",
"mfkvfhpdx": "https://github.com/mfkvfhpdx",
"提瓦特钓鱼玳师": "https://github.com/Hijiwos",
"柒叶子": "https://github.com/5117600049",
"不瘦五十斤不改名": "https://github.com/PanZic",
"½": "https://github.com/Traveler07",
"Patrick-Ze (AyakaMain)": "https://github.com/Patrick-Ze",
"AyakaMain": "https://github.com/Patrick-Ze",
"Patrick-Ze": "https://github.com/Patrick-Ze",
"风埠": "https://github.com/jhkif",
"jbcaaa": "https://github.com/jbcaaa",
"johsang": "https://github.com/johsang",
"寒烟": "https://github.com/214-hanyan",
"灰林鸮": "https://github.com/Strix-nivicolum",
"Tim": "https://github.com/Limint",
"花见木易": "https://github.com/Flower-MUYi",
"无限不循环": "https://github.com/non-repeating001",
"wjdsg": "https://gitee.com/wangjian0327",
"HZYgrandma": "https://github.com/HZYgrandma",
"huiyadanli": "https://github.com/huiyadanli",
"呱呱z": "https://github.com/jidingcai",
"Yang-z": "https://github.com/Yang-z",
"Tzi": "https://github.com/T888T",
"lifrom": "https://github.com/pkjsjq",
"愚溪": "https://github.com/Kupder",
"1xygyty1": "https://github.com/1xygyty1",
"miludelongwang": "https://github.com/miludelongwang",
"Alkaid": "https://github.com/Rosefinch-zzz",
"听雨♪": "https://github.com/TingYu-lulumi",
"小鹰划船不用桨": "https://github.com/Kotagan",
"阿城同学": "https://github.com/shilic",
"FuYeqi": "https://github.com/FuYeqi",
"Mochi麻糬": "https://github.com/ryanlin594529",
"下流山汉": "https://github.com/vulgar-rustic",
"XS": "https://github.com/xwsqy6",
"C-01-11011": "https://github.com/C-01-11011",
"baixi": "https://github.com/baixi232310",
"SmomoL": "https://github.com/SmomoL",
"小大": "https://github.com/xd1145",
"今天下雨w": "https://github.com/aqing11520"
}
}
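
For reference, here is how these two maps are meant to be consumed (a minimal sketch, not part of this commit; the Python author scripts later in this commit implement the same lookup): rename maps a raw author name to its canonical form, and links maps canonical names to profile URLs.

import json

def resolve_author(raw_name, config):
    # Canonicalize the name first, then look up the profile link
    name = config.get("rename", {}).get(raw_name, raw_name)
    author = {"name": name}
    link = config.get("links", {}).get(name)
    if link:
        author["links"] = link
    return author

with open("build/author_config.json", encoding="utf-8") as f:
    cfg = json.load(f)
print(resolve_author("this-Fish", cfg))
# -> {'name': '蜜柑魚', 'links': 'https://github.com/this-Fish'}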

662
build/build.js Normal file

@@ -0,0 +1,662 @@
const fs = require('fs');
const path = require('path');
const zlib = require('zlib');
const { execSync } = require('child_process');
// Parse command-line arguments
const args = process.argv.slice(2);
const forceFullUpdate = args.includes('--force') || args.includes('-f');
const enableGzip = args.includes('--gzip') || args.includes('-g');
// Global variable, declared up front
const pathingDirsWithoutIcon = new Set();
// Check for an existing repo.json file
const repoJsonPath = path.resolve(__dirname, '..', 'repo.json');
let existingRepoJson = null;
let modifiedFiles = [];
// Try to load the existing repo.json
try {
if (fs.existsSync(repoJsonPath) && !forceFullUpdate) {
existingRepoJson = JSON.parse(fs.readFileSync(repoJsonPath, 'utf8'));
console.log('找到现有的repo.json文件将执行增量更新');
// Get the files modified in Git
try {
// Get the current branch name
const currentBranch = execSync('git rev-parse --abbrev-ref HEAD').toString().trim();
console.log(`当前分支: ${currentBranch}`);
// Get the list of files changed in this commit - using the safer approach
let cmd = 'git diff --name-only HEAD~1 HEAD';
const changedFiles = execSync(cmd).toString().trim().split('\n');
modifiedFiles = changedFiles.filter(file => file.startsWith('repo/'));
console.log(`检测到 ${modifiedFiles.length} 个修改的文件:`);
modifiedFiles.forEach(file => console.log(` - ${file}`));
} catch (e) {
console.warn('无法获取Git修改文件列表将执行全量更新', e);
modifiedFiles = [];
}
} else {
if (forceFullUpdate) {
console.log('检测到--force参数将执行全量更新');
} else {
console.log('未找到现有的repo.json文件将执行全量更新');
}
}
} catch (e) {
console.warn('读取现有repo.json文件出错将执行全量更新', e);
}
function getGitTimestamp(filePath) {
try {
// Normalize the path and handle any special characters in it
const relativePath = path.relative(path.resolve(__dirname, '..'), filePath).replace(/\\/g, '/');
let cmd;
if (process.platform === 'win32') {
// Windows: use double quotes
cmd = `git log -1 --format="%ai" -- "${relativePath.replace(/"/g, '\\"')}"`;
} else {
// Linux/Mac: use single quotes
const quotedPath = relativePath.replace(/'/g, "'\\''"); // escape single quotes
cmd = `git log -1 --format="%ai" -- '${quotedPath}'`;
}
const time = execSync(cmd).toString().trim();
if (!time) {
console.warn(`未找到文件 ${filePath} 的提交记录`);
return null;
}
return time;
} catch (e) {
console.warn(`无法通过 Git 获取时间: ${filePath}`, e);
// On failure, fall back to the file's modification time
try {
const stats = fs.statSync(filePath);
const modTime = stats.mtime;
const formattedTime = modTime.toISOString().replace('T', ' ').replace(/\.\d+Z$/, ' +0800');
console.log(`使用文件修改时间作为替代: ${formattedTime}`);
return formattedTime;
} catch (fsErr) {
console.warn(`无法获取文件修改时间: ${filePath}`, fsErr);
return null;
}
}
}
function formatTime(timestamp) {
if (!timestamp) return null;
// 将 "2023-01-01 12:00:00 +0800" 格式化为 "20230101120000"
return timestamp.replace(/[-: ]/g, '').split('+')[0];
}
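// Illustrative example (not in the original source): a Git timestamp collapses
// into the sortable numeric string used as a fallback version number:
// formatTime('2023-01-01 12:00:00 +0800') === '20230101120000'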
// Format the last-updated time as standard Beijing time (YYYY-MM-DD HH:MM:SS)
function formatLastUpdated(timestamp) {
if (!timestamp) {
// Without a timestamp, default to the current time
const now = new Date();
const year = now.getFullYear();
const month = String(now.getMonth() + 1).padStart(2, '0');
const day = String(now.getDate()).padStart(2, '0');
const hour = String(now.getHours()).padStart(2, '0');
const minute = String(now.getMinutes()).padStart(2, '0');
const second = String(now.getSeconds()).padStart(2, '0');
return `${year}-${month}-${day} ${hour}:${minute}:${second}`;
}
try {
// Parse the Git timestamp format (e.g. "2023-01-01 12:00:00 +0800")
const dateMatch = timestamp.match(/(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/);
if (dateMatch) {
const [_, year, month, day, hour, minute, second] = dateMatch;
return `${year}-${month}-${day} ${hour}:${minute}:${second}`;
}
// Try parsing the timestamp as a Date object
const date = new Date(timestamp);
if (!isNaN(date.getTime())) {
const year = date.getFullYear();
const month = String(date.getMonth() + 1).padStart(2, '0');
const day = String(date.getDate()).padStart(2, '0');
const hour = String(date.getHours()).padStart(2, '0');
const minute = String(date.getMinutes()).padStart(2, '0');
const second = String(date.getSeconds()).padStart(2, '0');
return `${year}-${month}-${day} ${hour}:${minute}:${second}`;
}
return timestamp;
} catch (e) {
console.warn(`格式化时间戳出错 ${timestamp}:`, e);
return timestamp;
}
}
function convertNewlines(text) {
return text.replace(/\\n/g, '\n');
}
// Generic author-info helper - returns a plain string (backward compatible)
function processAuthorInfo(authorInfo) {
if (!authorInfo) return '';
// Strings are returned as-is
if (typeof authorInfo === 'string') {
return authorInfo.trim();
}
// Arrays: return the first author's name
if (Array.isArray(authorInfo)) {
const firstAuthor = authorInfo[0];
if (typeof firstAuthor === 'string') {
return firstAuthor.trim();
} else if (typeof firstAuthor === 'object' && firstAuthor.name) {
return firstAuthor.name.trim();
}
}
// Objects
if (typeof authorInfo === 'object' && authorInfo.name) {
return authorInfo.name.trim();
}
return '';
}
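// Illustrative examples (not in the original source):
// processAuthorInfo('秋云') === '秋云'
// processAuthorInfo([{ name: 'ddaodan', link: 'https://github.com/ddaodan' }, 'mno']) === 'ddaodan'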
// Detailed author-info helper - returns the full author objects
function processDetailedAuthorInfo(authorInfo) {
if (!authorInfo) return null;
// Strings are wrapped in object form
if (typeof authorInfo === 'string') {
return [{ name: authorInfo.trim() }];
}
// Arrays: handle multiple authors
if (Array.isArray(authorInfo)) {
const authors = authorInfo.map(author => {
if (typeof author === 'string') {
return { name: author.trim() };
} else if (typeof author === 'object' && author.name) {
const authorObj = { name: author.name.trim() };
if (author.link) {
authorObj.link = author.link;
} else if (author.links) {
authorObj.link = author.links;
}
return authorObj;
}
return null;
}).filter(author => author !== null);
return authors.length > 0 ? authors : null;
}
// Objects
if (typeof authorInfo === 'object' && authorInfo.name) {
const authorObj = { name: authorInfo.name.trim() };
if (authorInfo.link) {
authorObj.link = authorInfo.link;
} else if (authorInfo.links) {
authorObj.link = authorInfo.links;
}
return [authorObj];
}
return null;
}
// Generic long-tag filter; a CJK character counts as two characters
function filterLongTags(tags) {
return tags.filter(tag => {
// Special case: version tags starting with "bgi≥" are always kept, whatever their length
if (tag && tag.startsWith('bgi≥')) {
return true;
}
// Compute the real length; a CJK character counts as two characters
const realLength = [...tag].reduce((acc, c) => {
return acc + (c.charCodeAt(0) > 127 ? 2 : 1);
}, 0);
return realLength <= 10; // drop tags longer than 10 characters
});
}
// Extract the minimum version requirement and format it as a tag
function formatMinVersionTag(minVersion) {
if (!minVersion) return null;
// Normalized as bgi≥x.xx.xx
return `bgi≥${minVersion.trim()}`;
}
// Move the version tag to the front of the tag list
function prioritizeVersionTag(tags) {
if (!tags || !Array.isArray(tags)) return [];
// Find tags starting with "bgi≥"
const versionTags = tags.filter(tag => tag && tag.startsWith('bgi≥'));
const otherTags = tags.filter(tag => !tag || !tag.startsWith('bgi≥'));
// If there are several version tags, keep only the first
if (versionTags.length > 0) {
return [versionTags[0], ...otherTags];
}
return otherTags;
}
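// Illustrative example (not in the original source): long tags are dropped first,
// then the version tag jumps to the front:
// prioritizeVersionTag(filterLongTags(['挖矿', 'bgi≥0.44.0', '战斗'])) -> ['bgi≥0.44.0', '挖矿', '战斗']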
function extractInfoFromCombatFile(filePath) {
const content = fs.readFileSync(filePath, 'utf8');
const authorMatch = content.match(/\/\/\s*作者\s*:(.*)/);
const descriptionMatch = content.match(/\/\/\s*描述\s*:(.*)/);
const versionMatch = content.match(/\/\/\s*版本\s*:(.*)/);
const characterMatches = content.match(/^(?!\/\/).*?(\S+)(?=\s|$)/gm);
let tags = [...new Set(characterMatches || [])]
.map(char => char.trim())
.filter(char => char.length > 0 && !char.match(/^[,.]$/)); // drop lone commas and periods
// Get the last-updated time
const gitTimestamp = getGitTimestamp(filePath);
const lastUpdated = formatLastUpdated(gitTimestamp);
// Prefer the version recorded in the file; fall back to the commit time
const version = versionMatch ? versionMatch[1].trim() :
(gitTimestamp ? formatTime(gitTimestamp) : '');
const authorString = authorMatch ? authorMatch[1].trim() : '';
return {
author: processAuthorInfo(authorString) || '',
authors: processDetailedAuthorInfo(authorString),
description: descriptionMatch ? convertNewlines(descriptionMatch[1].trim()) : '',
tags: prioritizeVersionTag(filterLongTags(tags)),
version: version,
lastUpdated: lastUpdated
};
}
function extractInfoFromJSFolder(folderPath) {
const manifestPath = path.join(folderPath, 'manifest.json');
if (fs.existsSync(manifestPath)) {
try {
let manifestContent = fs.readFileSync(manifestPath, 'utf8');
manifestContent = manifestContent.replace(/,(\s*[}\]])/g, '$1');
const manifest = JSON.parse(manifestContent);
const combinedDescription = `${manifest.name || ''}~|~${manifest.description || ''}`;
// Get manifest.json's modification time (only this one file is checked)
const lastUpdatedTimestamp = getGitTimestamp(manifestPath);
// Format the last-updated time
const lastUpdated = formatLastUpdated(lastUpdatedTimestamp);
// Extract the minimum version requirement
let tags = manifest.tags || [];
// from the bgi_version field
if (manifest.bgi_version) {
const minVersionTag = formatMinVersionTag(manifest.bgi_version);
if (minVersionTag) {
tags.unshift(minVersionTag);
}
}
// Process author info
const authorString = manifest.authors;
const authors = processDetailedAuthorInfo(manifest.authors);
return {
version: manifest.version || '',
description: convertNewlines(combinedDescription),
author: processAuthorInfo(authorString),
authors: authors,
tags: prioritizeVersionTag(filterLongTags(tags)),
lastUpdated: lastUpdated
};
} catch (error) {
console.error(`解析 ${manifestPath} 时出错:`, error);
console.error('文件内容:', fs.readFileSync(manifestPath, 'utf8'));
return { version: '', description: '', author: '', tags: [], lastUpdated: null };
}
}
return { version: '', description: '', author: '', tags: [], lastUpdated: null };
}
function extractInfoFromPathingFile(filePath, parentFolders) {
let content = fs.readFileSync(filePath, 'utf8');
// Detect and strip a BOM
if (content.charCodeAt(0) === 0xFEFF) {
content = content.replace(/^\uFEFF/, '');
try {
fs.writeFileSync(filePath, content, 'utf8');
console.log(`已移除文件BOM标记: ${filePath}`);
} catch (error) {
console.error(`移除BOM标记时出错 ${filePath}:`, error);
}
}
const contentObj = JSON.parse(content);
// Get the last-updated time
const gitTimestamp = getGitTimestamp(filePath);
const lastUpdated = formatLastUpdated(gitTimestamp);
// Prefer the version recorded in the file; fall back to the commit time
const version = contentObj.info?.version ||
(gitTimestamp ? formatTime(gitTimestamp) : '');
// Default tags come from the parent folders
let tags = parentFolders.slice(2)
.filter(tag => !tag.includes('@'))
.filter((tag, index, self) => self.indexOf(tag) === index);
// Merge any custom tags with the defaults
if (contentObj.info && contentObj.info.tags && Array.isArray(contentObj.info.tags)) {
tags = [...tags, ...contentObj.info.tags];
}
// Extract the minimum version requirement from the bgi_version field
if (contentObj.info && contentObj.info.bgi_version) {
const minVersionTag = formatMinVersionTag(contentObj.info.bgi_version);
if (minVersionTag) {
tags.unshift(minVersionTag);
}
}
// Marker for split monster loot
if (contentObj.info && contentObj.info.enable_monster_loot_split) {
tags.unshift("区分怪物拾取");
}
if (contentObj.positions && Array.isArray(contentObj.positions)) {
const actions = contentObj.positions.map(pos => pos.action);
if (actions.includes('nahida_collect')) tags.push('纳西妲');
if (actions.includes('hydro_collect')) tags.push('水元素力收集');
if (actions.includes('anemo_collect')) tags.push('风元素力收集');
if (actions.includes('electro_collect')) tags.push('雷元素力收集');
if (actions.includes('up_down_grab_leaf')) tags.push('四叶印');
if (actions.includes('mining')) tags.push('挖矿');
if (actions.includes('fight')) tags.push('战斗');
if (actions.includes('log_output')) tags.push('有日志');
if (actions.includes('pick_around')) tags.push('转圈拾取');
if (actions.includes('fishing')) tags.push('钓鱼');
if (actions.includes('set_time')) tags.push('时间调整');
const move_modes = contentObj.positions.map(pos => pos.move_mode);
if (move_modes.includes('climb')) tags.push("有攀爬");
}
// Deduplicate the tag array
tags = [...new Set(tags)];
// Drop tags longer than 10 characters and keep the version tag first
tags = prioritizeVersionTag(filterLongTags(tags));
// Author info: prefer the authors field, fall back to author
const authorData = contentObj.info?.authors || contentObj.info?.author;
return {
author: processAuthorInfo(authorData) || '',
authors: processDetailedAuthorInfo(authorData),
description: convertNewlines(contentObj.info?.description || ''),
version: version,
tags: tags,
lastUpdated: lastUpdated
};
}
function extractInfoFromTCGFile(filePath, parentFolder) {
const content = fs.readFileSync(filePath, 'utf8');
const authorMatch = content.match(/\/\/\s*作者:(.*)/);
const descriptionMatch = content.match(/\/\/\s*描述:(.*)/);
const versionMatch = content.match(/\/\/\s*版本:(.*)/);
// No minimum-version extraction here: TCG scripts have no minimum version requirement
const characterMatches = content.match(/角色\d+\s?=([^|\r\n{]+)/g);
// Get the last-updated time
const gitTimestamp = getGitTimestamp(filePath);
const lastUpdated = formatLastUpdated(gitTimestamp);
let tags = characterMatches
? characterMatches.map(match => match.split('=')[1].trim())
.filter(tag => tag && !tag.startsWith('角色'))
: [];
if (filePath.includes('惊喜牌组')) {
tags = ['惊喜牌组', ...tags];
}
if (filePath.includes('酒馆挑战')) {
tags = ['酒馆挑战', ...tags];
}
// Prefer the version recorded in the file; fall back to the commit time
const version = versionMatch ? versionMatch[1].trim() :
(gitTimestamp ? formatTime(gitTimestamp) : '');
return {
author: processAuthorInfo(authorMatch ? authorMatch[1].trim() : '') || '',
authors: processDetailedAuthorInfo(authorMatch ? authorMatch[1].trim() : ''),
description: descriptionMatch ? convertNewlines(descriptionMatch[1].trim()) : '',
tags: prioritizeVersionTag(filterLongTags([...new Set(tags)])), // dedupe and filter long tags
version: version,
lastUpdated: lastUpdated
};
}
// Check whether a file needs processing (incremental-update mode)
function shouldProcessFile(filePath) {
// Without an existing repo.json or a modified-file list, process every file
if (!existingRepoJson || modifiedFiles.length === 0) {
return true;
}
// Convert filePath into a path relative to the repo root
const relativeFilePath = path.relative(path.resolve(__dirname, '..'), filePath).replace(/\\/g, '/');
// Check whether this file, or the directory containing it, is in the modified list
return modifiedFiles.some(modifiedFile => {
return relativeFilePath === modifiedFile ||
relativeFilePath.startsWith(path.dirname(modifiedFile) + '/') ||
modifiedFile.startsWith(relativeFilePath + '/');
});
}
// Helper for finding a node in the directory tree
function findNodeInTree(tree, nodePath, currentPath = '') {
if (!tree) return null;
if (tree.type === 'directory') {
const newPath = currentPath ? `${currentPath}/${tree.name}` : tree.name;
if (newPath === nodePath) {
return tree;
}
if (tree.children) {
for (const child of tree.children) {
const result = findNodeInTree(child, nodePath, newPath);
if (result) {
return result;
}
}
}
}
return null;
}
function generateDirectoryTree(dir, currentDepth = 0, parentFolders = []) {
// Check whether this directory needs processing in incremental mode
const shouldProcess = shouldProcessFile(dir);
// If not, try to find the corresponding node in the existing repo.json
if (!shouldProcess && existingRepoJson && existingRepoJson.indexes) {
const category = parentFolders[0];
const relativePath = parentFolders.join('/');
// Look this directory node up in the existing repo.json
const categoryTree = existingRepoJson.indexes.find(index => index.name === category);
if (categoryTree) {
const existingNode = findNodeInTree(categoryTree, relativePath);
if (existingNode) {
console.log(`使用现有数据: ${relativePath}`);
return existingNode;
}
}
}
const stats = fs.statSync(dir);
const info = {
name: path.basename(dir),
type: stats.isDirectory() ? 'directory' : 'file'
};
if (stats.isDirectory()) {
// Revised logic for checking pathing directory icons
if (parentFolders[0] === 'pathing') {
const hasIcon = fs.readdirSync(dir).some(file =>
file.toLowerCase() === 'icon.ico'
);
if (!hasIcon) {
// Use path.join to get the correct path separator
const relativePath = path.join('pathing', path.basename(dir));
pathingDirsWithoutIcon.add(relativePath);
// console.log(`pathing directory without icon.ico: ${relativePath}`);
}
}
if (parentFolders[0] === 'js' && currentDepth === 1) {
// Direct children of the js folder are not recursed into
const manifestPath = path.join(dir, 'manifest.json');
if (fs.existsSync(manifestPath)) {
const jsInfo = extractInfoFromJSFolder(dir);
info.version = jsInfo.version || '';
info.author = jsInfo.author;
info.authors = jsInfo.authors;
info.description = jsInfo.description;
info.tags = jsInfo.tags;
info.lastUpdated = jsInfo.lastUpdated;
}
} else {
info.children = fs.readdirSync(dir)
.filter(child => {
// Skip desktop.ini and icon.ico
if (['desktop.ini', 'icon.ico'].includes(child)) {
return false;
}
// In pathing, keep only .json files and directories
if (parentFolders[0] === 'pathing') {
const childPath = path.join(dir, child);
const isDir = fs.statSync(childPath).isDirectory();
if (!isDir && !child.toLowerCase().endsWith('.json')) {
return false;
}
}
return true;
})
.map(child => {
const childPath = path.join(dir, child);
return generateDirectoryTree(childPath, currentDepth + 1, [...parentFolders, info.name]);
})
.filter(child => child !== null); // drop nulls
}
} else {
// desktop.ini and icon.ico files return null directly
if (['desktop.ini', 'icon.ico'].includes(info.name)) {
return null;
}
// In pathing, only files with a .json suffix are processed
if (parentFolders[0] === 'pathing' && !info.name.toLowerCase().endsWith('.json')) {
return null;
}
info.version = '';
const category = parentFolders[0];
try {
switch (category) {
case 'combat':
Object.assign(info, extractInfoFromCombatFile(dir));
break;
case 'pathing':
Object.assign(info, extractInfoFromPathingFile(dir, parentFolders));
info.tags = info.tags.filter(tag => tag !== 'pathing');
break;
case 'tcg':
Object.assign(info, extractInfoFromTCGFile(dir, parentFolders[1]));
break;
}
} catch (error) {
console.error(`处理文件 ${dir} 时出错:`, error);
info.error = error.message;
}
}
return info;
}
const repoPath = path.resolve(__dirname, '..', 'repo');
// The expected folder order
const folderOrder = ['pathing', 'js', 'combat', 'tcg', 'onekey'];
// Read every folder under repoPath
const topLevelFolders = fs.readdirSync(repoPath)
.filter(item => fs.statSync(path.join(repoPath, item)).isDirectory());
// Call generateDirectoryTree on each top-level folder, in the order given above
const result = folderOrder
.filter(folder => topLevelFolders.includes(folder))
.map(folder => {
const folderPath = path.join(repoPath, folder);
const tree = generateDirectoryTree(folderPath, 0, [folder]);
// For pathing, sort its subdirectories
if (folder === 'pathing' && tree.children) {
tree.children.sort((a, b) => {
const aPath = path.join('pathing', a.name);
const bPath = path.join('pathing', b.name);
const aHasNoIcon = pathingDirsWithoutIcon.has(aPath);
const bHasNoIcon = pathingDirsWithoutIcon.has(bPath);
// If the two directories differ in icon presence, sort by that first
if (aHasNoIcon !== bHasNoIcon) {
return aHasNoIcon ? 1 : -1;
}
// then sort by pinyin
return a.name.localeCompare(b.name, 'zh-CN', {
numeric: true,
sensitivity: 'accent',
caseFirst: false
});
});
}
return tree;
});
const repoJson = {
"time": new Date().toLocaleString('zh-CN', {
year: 'numeric',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit',
second: '2-digit',
hour12: false,
timeZone: 'Asia/Shanghai'
}).replace(/[/\s,:]/g, ''),
"url": "https://github.com/babalae/bettergi-scripts-list/archive/refs/heads/main.zip",
"file": "repo.json",
"indexes": result
};
fs.writeFileSync(repoJsonPath, JSON.stringify(repoJson, null, 2));
console.log('repo.json 文件已创建并保存在 repo 同级目录中。');
// Create the gzip archive (only when the gzip flag is set)
if (enableGzip) {
const gzipPath = repoJsonPath + '.gz';
const jsonContent = fs.readFileSync(repoJsonPath);
const compressedContent = zlib.gzipSync(jsonContent);
fs.writeFileSync(gzipPath, compressedContent);
console.log('repo.json.gz 压缩文件已创建并保存。');
} else {
console.log('未启用gzip压缩跳过创建repo.json.gz文件。如需启用请使用--gzip或-g参数。');
}

40
build/icon/1.filter.js Normal file

@@ -0,0 +1,40 @@
// Run this script once per Genshin Impact update
const fs = require('fs');
const path = require('path');
// Read the JSON file
const jsonPath = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\Material.json';
const jsonData = JSON.parse(fs.readFileSync(jsonPath, 'utf8'));
// Source and target folders
const sourceDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\ItemIcon-tiny';
const targetDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\newPng';
// Make sure the target folder exists
if (!fs.existsSync(targetDir)) {
fs.mkdirSync(targetDir, { recursive: true });
}
// Read every file in the source folder
fs.readdirSync(sourceDir).forEach(file => {
const fileName = path.parse(file).name; // file name without extension
// Find all matching entries
const matchedItems = jsonData.filter(item => item.Icon === fileName);
if (matchedItems.length > 0) {
const sourcePath = path.join(sourceDir, file);
// Create a file for each match
matchedItems.forEach(matchedItem => {
const targetPath = path.join(targetDir, `${matchedItem.Name}.png`);
// Copy and rename the file
fs.copyFileSync(sourcePath, targetPath);
console.log(`已复制并重命名: ${file} -> ${matchedItem.Name}.png`);
});
}
});
console.log('处理完成');

45
build/icon/2.match.js Normal file

@@ -0,0 +1,45 @@
// Run this script whenever there are new script directories
const fs = require('fs');
const path = require('path');
// Paths
const pathingDir = 'D:\\HuiPrograming\\Projects\\CSharp\\MiHoYo\\bettergi-scripts-list\\repo\\pathing';
const pngDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\newPng';
const outputDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\matchedPng';
// Make sure the output directory exists
if (!fs.existsSync(outputDir)) {
fs.mkdirSync(outputDir);
}
// Read every folder name under the pathing directory
fs.readdir(pathingDir, { withFileTypes: true }, (err, entries) => {
if (err) {
console.error('读取 pathing 目录时出错:', err);
return;
}
// Keep directories only
const directories = entries.filter(entry => entry.isDirectory()).map(dir => dir.name);
// Walk the directory names
directories.forEach(dirName => {
const pngPath = path.join(pngDir, `${dirName}.png`);
const outputPath = path.join(outputDir, `${dirName}.png`);
// Check whether the matching PNG exists
if (fs.existsSync(pngPath)) {
// Copy it
fs.copyFile(pngPath, outputPath, err => {
if (err) {
console.error(`复制 ${dirName}.png 时出错:`, err);
} else {
console.log(`成功复制 ${dirName}.png 到 matchedPng 文件夹`);
}
});
} else {
console.log(`未找到对应的 PNG 文件: ${dirName}.png`);
}
});
});

59
build/icon/3.icon.js Normal file

@@ -0,0 +1,59 @@
// Adds an icon to each script directory
const fs = require('fs');
const path = require('path');
const { exec } = require('child_process');
// Paths
const sourcePath = 'D:\\HuiPrograming\\Projects\\CSharp\\MiHoYo\\bettergi-scripts-list\\repo\\pathing';
const iconSourcePath = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\matchedIco';
const desktopIniPath = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\desktop.ini';
// Read the source directory
fs.readdir(sourcePath, { withFileTypes: true }, (err, entries) => {
if (err) {
console.error('读取源目录时出错:', err);
return;
}
// Walk each directory
entries.filter(entry => entry.isDirectory()).forEach(dir => {
const dirPath = path.join(sourcePath, dir.name);
const iconSourceFile = path.join(iconSourcePath, `${dir.name}.ico`);
const iconDestFile = path.join(dirPath, 'icon.ico');
const desktopIniDestFile = path.join(dirPath, 'desktop.ini');
// Check that the source icon exists
if (!fs.existsSync(iconSourceFile)) {
console.log(`警告:${dir.name} 的图标文件不存在,跳过所有操作`);
return; // skip everything else for this directory
}
// Copy the icon file
fs.copyFile(iconSourceFile, iconDestFile, (err) => {
if (err) {
console.error(`复制图标文件到 ${dir.name} 时出错:`, err);
return; // if the icon copy fails, stop here
}
console.log(`成功复制图标文件到 ${dir.name}`);
// Copy the desktop.ini file
fs.copyFile(desktopIniPath, desktopIniDestFile, (err) => {
if (err) {
console.error(`复制desktop.ini到 ${dir.name} 时出错:`, err);
return; // if the desktop.ini copy fails, stop here
}
console.log(`成功复制desktop.ini到 ${dir.name}`);
// Run the attrib command
exec(`attrib +R "${dirPath}"`, (err, stdout, stderr) => {
if (err) {
console.error(`执行attrib命令时出错 ${dir.name}:`, err);
return;
}
console.log(`成功为 ${dir.name} 设置只读属性`);
});
});
});
});
});


@@ -0,0 +1,38 @@
// Find only the new PNG files and copy them into the diffPng directory
const fs = require('fs');
const path = require('path');
// Paths
const pngDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\matchedPng';
const iconDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\matchedIco';
// Output directory
const outputDir = 'E:\\HuiTask\\更好的原神\\2.资料\\图标处理\\diffPng';
// Make sure the output directory exists
if (!fs.existsSync(outputDir)) {
fs.mkdirSync(outputDir, { recursive: true });
}
// List the files in both directories, stripping extensions
const getPureFilenames = (dir) => {
return fs.readdirSync(dir).map(file => path.parse(file).name);
};
const pngFiles = getPureFilenames(pngDir);
const icoFiles = getPureFilenames(iconDir);
// Files present in the PNG directory but missing from the ICO directory
const unmatchedPng = pngFiles.filter(name => !icoFiles.includes(name));
// Copy the unmatched PNGs into the output directory
unmatchedPng.forEach(filename => {
const sourcePath = path.join(pngDir, `${filename}.png`);
const destPath = path.join(outputDir, `${filename}.png`);
fs.copyFileSync(sourcePath, destPath);
});
console.log(`找到并复制了 ${unmatchedPng.length} 个不匹配的 PNG 文件到: ${outputDir}`);
// Then go to https://offlineconverter.com/ to convert them to .ico

112
build/js_authors.py Normal file

@@ -0,0 +1,112 @@
import os
import sys
import json
# Config file path (same directory as this script)
script_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(script_dir, "author_config.json")
# Folder to process, from the command line
if len(sys.argv) < 2:
print("❌ 用法python js_authors.py <JSON目录路径>")
sys.exit(1)
folder_path = sys.argv[1]
if not os.path.exists(folder_path):
print(f"❌ JSON目录不存在{folder_path}")
sys.exit(1)
if not os.path.exists(config_path):
print(f"❌ 配置文件不存在:{config_path}")
sys.exit(1)
# Load the config
try:
with open(config_path, "r", encoding="utf-8") as f:
config = json.load(f)
except Exception as e:
print(f"❌ 配置文件加载失败:{e}")
sys.exit(1)
author_rename = config.get("rename", {})
author_links = config.get("links", {})
print(f"🚀 启动,处理目录:{folder_path}")
count_total = 0
count_modified = 0
for root, dirs, files in os.walk(folder_path):
for filename in files:
if filename.endswith("manifest.json"):
count_total += 1
file_path = os.path.join(root, filename)
print(f"\n🔍 处理文件:{file_path}")
try:
with open(file_path, "r", encoding="utf-8") as f:
data = json.load(f)
except Exception as e:
print(f"❌ 解析失败:{e}")
continue
# info = data.get("info")
# if not isinstance(info, dict):
# print("⚠️ 缺少 info 字段")
# continue
author_field = data.get("authors")
if author_field is None:
print("⚠️ 缺少 authors 字段")
continue
modified = False
# String form
if isinstance(author_field, str):
names = [name.strip() for name in author_field.split("&")]
new_authors = []
for name in names:
new_name = author_rename.get(name, name)
author_obj = {"name": new_name}
if new_name in author_links:
author_obj["links"] = author_links[new_name]
new_authors.append(author_obj)
data["authors"] = new_authors
modified = True
print("✅ 替换为结构化 author")
# List form
elif isinstance(author_field, list):
for author_obj in author_field:
if not isinstance(author_obj, dict):
continue
name = author_obj.get("name")
if not name:
continue
new_name = author_rename.get(name, name)
if name != new_name:
author_obj["name"] = new_name
modified = True
print(f"📝 重命名:{name} → {new_name}")
# Normalize the link field name
existing_link = author_obj.pop("link", None) or author_obj.pop("url", None) or author_obj.get("links")
if new_name in author_links:
if author_obj.get("links") != author_links[new_name]:
author_obj["links"] = author_links[new_name]
modified = True
print(f"🔧 更新链接:{new_name} → {author_links[new_name]}")
elif "links" not in author_obj and existing_link:
author_obj["links"] = existing_link
modified = True
print(f"🔄 标准化已有链接字段为 links → {existing_link}")
if modified:
with open(file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
count_modified += 1
print("✅ 写入完成")
else:
print("⏭️ 无需修改")
print(f"\n🎉 处理完成:共 {count_total} 个 JSON 文件,修改了 {count_modified}")
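
To make the migration concrete, here is the shape change the script performs, hand-traced against the author_config.json entries above (an illustration, not captured program output). A legacy string field

"authors": "起个名字好难 & this-Fish"

is split on "&", renamed via the rename map, linked via the links map, and rewritten as

"authors": [
  {"name": "起个名字好难的喵", "links": "https://github.com/MisakaAldrich"},
  {"name": "蜜柑魚", "links": "https://github.com/this-Fish"}
]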

52
build/other/firstPosTp.js Normal file

@@ -0,0 +1,52 @@
const fs = require('fs');
const path = require('path');
// Path to the pathing directory
const pathingDir = path.resolve(__dirname, '..', '..', 'repo', 'pathing');
// Recursively read every JSON file under a directory
function readJsonFilesRecursively(dir) {
const files = fs.readdirSync(dir, { withFileTypes: true });
files.forEach(file => {
const filePath = path.join(dir, file.name);
if (file.isDirectory()) {
readJsonFilesRecursively(filePath);
} else if (path.extname(file.name).toLowerCase() === '.json') {
processJsonFile(filePath);
}
});
}
// Process a single JSON file
function processJsonFile(filePath) {
try {
const data = fs.readFileSync(filePath, 'utf8');
const jsonData = JSON.parse(data);
// Check, and if needed change, the first position's type
if (jsonData.positions && jsonData.positions.length > 0) {
const firstPosition = jsonData.positions[0];
if (firstPosition.type !== 'teleport') {
firstPosition.type = 'teleport';
console.log(`文件 ${filePath} 中的第一个position的type已更改为teleport`);
// Write the modified data back to the file
fs.writeFileSync(filePath, JSON.stringify(jsonData, null, 2), 'utf8');
console.log(`文件 ${filePath} 已成功更新`);
} else {
console.log(`文件 ${filePath} 中的第一个position的type已经是teleport`);
}
} else {
console.log(`文件 ${filePath} 中没有positions数组或数组为空`);
}
} catch (err) {
console.error(`处理文件 ${filePath} 时出错:`, err);
}
}
// Start the recursive walk
console.log(`开始处理 ${pathingDir} 目录下的所有JSON文件`);
readJsonFilesRecursively(pathingDir);
console.log('处理完成');

173
build/pathing_authors.py Normal file

@@ -0,0 +1,173 @@
import os
import json
def process_json_authors(input_path, verbose=True):
"""
Process author info in JSON files (supports author → authors structured migration, author renaming, and link unification).
Arguments:
input_path (str): file or directory path to process
verbose (bool): whether to print detailed log output
The config file (author_config.json) is expected next to this script.
Returns:
dict: statistics with the number of files processed and modified
"""
result = {
"total_files": 0,
"modified_files": 0,
"errors": []
}
# Config file path (same directory as this script)
script_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(script_dir, "author_config.json")
if not os.path.exists(input_path):
raise FileNotFoundError(f"路径不存在:{input_path}")
if not os.path.exists(config_path):
raise FileNotFoundError(f"配置文件不存在:{config_path}")
# Load the config
try:
with open(config_path, "r", encoding="utf-8") as f:
config = json.load(f)
except Exception as e:
raise RuntimeError(f"配置文件加载失败:{e}")
author_rename = config.get("rename", {})
author_links = config.get("links", {})
# Build the list of files to process
file_list = []
if os.path.isfile(input_path) and input_path.endswith(".json"):
file_list.append(input_path)
elif os.path.isdir(input_path):
for root, dirs, files in os.walk(input_path):
for filename in files:
if filename.endswith(".json"):
file_list.append(os.path.join(root, filename))
else:
raise ValueError("输入路径必须是 .json 文件或目录")
for file_path in file_list:
result["total_files"] += 1
if verbose:
print(f"\n🔍 处理文件:{file_path}")
try:
with open(file_path, "r", encoding="utf-8") as f:
data = json.load(f)
except Exception as e:
msg = f"❌ 解析失败:{e}"
if verbose:
print(msg)
result["errors"].append((file_path, str(e)))
continue
info = data.get("info")
if not isinstance(info, dict):
if verbose:
print("⚠️ 缺少 info 字段")
continue
modified = False
author_field = info.get("author")
if author_field is not None:
if isinstance(author_field, str):
names = [name.strip() for name in author_field.split("&")]
new_authors = []
for name in names:
new_name = author_rename.get(name, name)
author_obj = {"name": new_name}
if new_name in author_links:
author_obj["links"] = author_links[new_name]
new_authors.append(author_obj)
data["info"]["authors"] = new_authors
modified = True
if verbose:
print("✅ 替换为结构化 authors")
elif isinstance(author_field, list):
for author_obj in author_field:
if not isinstance(author_obj, dict):
continue
name = author_obj.get("name")
if not name:
continue
new_name = author_rename.get(name, name)
if name != new_name:
author_obj["name"] = new_name
modified = True
if verbose:
print(f"📝 重命名:{name} → {new_name}")
existing_link = author_obj.pop("link", None) or author_obj.pop("url", None) or author_obj.get("links")
if new_name in author_links:
if author_obj.get("links") != author_links[new_name]:
author_obj["links"] = author_links[new_name]
modified = True
if verbose:
print(f"🔧 更新链接:{new_name} → {author_links[new_name]}")
elif "links" not in author_obj and existing_link:
author_obj["links"] = existing_link
modified = True
if verbose:
print(f"🔄 标准化已有链接字段为 links → {existing_link}")
else:
authors_field = info.get("authors")
if isinstance(authors_field, list):
for author_obj in authors_field:
if not isinstance(author_obj, dict):
continue
name = author_obj.get("name")
if not name:
continue
new_name = author_rename.get(name, name)
if name != new_name:
author_obj["name"] = new_name
modified = True
if verbose:
print(f"📝 重命名authors{name} → {new_name}")
existing_link = author_obj.pop("link", None) or author_obj.pop("url", None) or author_obj.get("links")
if new_name in author_links:
if author_obj.get("links") != author_links[new_name]:
author_obj["links"] = author_links[new_name]
modified = True
if verbose:
print(f"🔧 更新链接authors{new_name} → {author_links[new_name]}")
elif "links" not in author_obj and existing_link:
author_obj["links"] = existing_link
modified = True
if verbose:
print(f"🔄 标准化已有链接字段为 links → {existing_link}")
else:
if verbose:
print("⚠️ 缺少 author 字段,且 authors 非标准格式")
if modified:
with open(file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
result["modified_files"] += 1
if verbose:
print("✅ 写入完成")
else:
if verbose:
print("⏭️ 无需修改")
if verbose:
print(f"\n🎉 处理完成:共 {result['total_files']} 个 JSON 文件,修改了 {result['modified_files']}")
return result
# When run as a standalone script
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("❌ 用法python pathing_authors.py <JSON文件或目录路径>")
else:
process_json_authors(sys.argv[1])

877
build/validate.py Normal file

@@ -0,0 +1,877 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import subprocess
import re
import chardet
from packaging.version import parse
from semver import VersionInfo
# ==================== Configuration and constants ====================
# Valid type and move_mode values
VALID_TYPES = ["teleport", "path", "target", "orientation"]
VALID_MOVE_MODES = ["swim", "walk", "fly", "climb", "run", "dash", "jump"]
# Minimum compatible version for each action
ACTION_VERSION_MAP = {
"fight": "0.42.0",
"mining": "0.43.0",
"fishing": "0.43.0",
"force_tp": "0.42.0",
"log_output": "0.42.0",
"anemo_collect": "0.42.0",
"combat_script": "0.42.0",
"hydro_collect": "0.42.0",
"pick_around": "0.42.0",
"pyro_collect": "0.43.0",
"stop_flying": "0.42.0",
"normal_attack": "0.42.0",
"electro_collect": "0.42.0",
"nahida_collect": "0.42.0",
"up_down_grab_leaf": "0.42.0",
"set_time": "0.45.0",
"exit_and_relogin": "0.46.0",
"use_gadget": "0.48.1"
}
# Minimum compatible version and regex validation for action_params
ACTION_PARAMS_VERSION_MAP = {
"stop_flying": {
"params": {"version": "0.44.0", "regex": r"^\d+(\.\d+)?$"}
},
"pick_around": {
"params": {"version": "0.42.0", "regex": r"^\d+$"}
},
"combat_script": {
"params": {"version": "0.42.0", "regex": r"^.+$"} # any non-empty string
},
"log_output": {
"params": {"version": "0.42.0", "regex": r"^.+$"} # any non-empty string
}
# Other action types have no explicit action_params format requirements
}
# Default version numbers
DEFAULT_BGI_VERSION = "0.42.0"
DEFAULT_VERSION = "1.0"
# ==================== File operations ====================
def get_original_file(file_path):
"""Fetch the original file content from the upstream repo, falling back to the local copy on failure"""
# The return value carries a source marker: "upstream", "pr_submitted", or None
try:
result = subprocess.run(['git', 'show', f'upstream/main:{file_path}'],
capture_output=True, text=True, encoding='utf-8')
if result.returncode == 0:
return json.loads(result.stdout), "upstream"
except Exception as e:
print(f"从上游仓库获取原始文件失败: {str(e)}")
try:
with open(file_path, 'r', encoding='utf-8') as f:
current_data = json.load(f)
# Make a copy to avoid sharing the same object
return json.loads(json.dumps(current_data)), "pr_submitted"
except Exception as e:
print(f"读取当前文件失败: {str(e)}")
return None, None
def load_json_file(file_path):
"""Load a JSON file"""
try:
with open(file_path, encoding='utf-8') as f:
return json.load(f), None
except Exception as e:
return None, f"❌ JSON 格式错误: {str(e)}"
def save_json_file(file_path, data):
"""Save a JSON file"""
try:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
return True
except Exception as e:
print(f"保存文件失败: {str(e)}")
return False
# ==================== Version handling ====================
def process_version(current, original, is_new):
"""Version-bump logic"""
if is_new:
return DEFAULT_VERSION
if not original:
return DEFAULT_VERSION
try:
cv = parse(current)
ov = parse(original)
# Force a version bump, whether or not the current version exceeds the original
return f"{ov.major}.{ov.minor + 1}"
except Exception:
# If parsing fails, fall back to simple numeric handling
parts = original.split('.')
if len(parts) >= 2:
try:
major = int(parts[0])
minor = int(parts[1])
return f"{major}.{minor + 1}"
except ValueError:
pass
return f"{original}.1"
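# Illustrative examples (not in the original source):
# process_version("1.5", None, is_new=True) -> "1.0"
# process_version("1.5", "1.2", is_new=False) -> "1.3" (the original minor version
# is always bumped, regardless of what the submitted file claims)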
def extract_required_version(compatibility_issues):
"""Extract the highest required version from the compatibility issues"""
required_versions = []
for issue in compatibility_issues:
parts = issue.split(">=")
if len(parts) > 1:
version_part = parts[1].split(",")[0].strip()
version_match = re.search(r'(\d+\.\d+\.\d+)', version_part)
if version_match:
required_versions.append(version_match.group(1))
if not required_versions:
return None
try:
return max(required_versions, key=lambda v: VersionInfo.parse(v))
except ValueError:
return None
def parse_bgi_version(version_str):
"""Parse a BGI version number"""
try:
# Make sure any leading "v" is removed
return VersionInfo.parse(version_str.lstrip('v'))
except ValueError:
return None
# ==================== Field validation ====================
def check_action_compatibility(action_type, action_params, bgi_version):
"""Check action and action_params compatibility against the BGI version"""
issues = []
validation_issues = []
# Skip the check when action_type is empty
if not action_type:
return issues, validation_issues
# Make sure bgi_version is in a valid format
bgi_ver = parse_bgi_version(bgi_version)
if not bgi_ver:
validation_issues.append(f"无效的 bgi_version 格式: {bgi_version}")
return issues, validation_issues
# Check action compatibility
if action_type in ACTION_VERSION_MAP:
min_version = ACTION_VERSION_MAP[action_type]
try:
if bgi_ver < VersionInfo.parse(min_version):
issues.append(f"action '{action_type}' 需要 BGI 版本 >= {min_version},当前为 {bgi_version}")
except ValueError:
validation_issues.append(f"无法比较版本: {min_version} 与 {bgi_version}")
else:
validation_issues.append(f"未知的 action 类型: '{action_type}',已知类型: {', '.join(sorted(ACTION_VERSION_MAP.keys()))}")
# Check action_params compatibility and format
if action_type in ACTION_PARAMS_VERSION_MAP and action_params:
param_info = ACTION_PARAMS_VERSION_MAP[action_type]["params"]
min_version = param_info["version"]
regex_pattern = param_info["regex"]
# Version compatibility check
try:
if bgi_ver < VersionInfo.parse(min_version):
issues.append(f"action '{action_type}' 的参数需要 BGI 版本 >= {min_version},当前为 {bgi_version}")
except ValueError:
validation_issues.append(f"无法比较版本: {min_version} 与 {bgi_version}")
# Parameter format validation
if not re.match(regex_pattern, str(action_params)):
validation_issues.append(f"action '{action_type}' 的参数格式不正确: '{action_params}',应匹配模式: {regex_pattern}")
return issues, validation_issues
def process_coordinates(positions):
"""Round coordinates to four decimal places"""
coord_changed = False
for pos in positions:
for axis in ['x', 'y']:
if axis in pos and isinstance(pos[axis], (int, float)):
original = pos[axis]
pos[axis] = round(float(pos[axis]), 4)
if original != pos[axis]:
coord_changed = True
return coord_changed
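# Illustrative example (not in the original source):
# process_coordinates([{"x": 1.23456, "y": 2.0}]) rounds x to 1.2346 and
# returns True; y is already within four decimal places and stays 2.0.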
def ensure_required_fields(info, filename):
"""Ensure the required info fields are present and valid"""
corrections = []
if info["name"] != filename:
info["name"] = filename
corrections.append(f"name 自动修正为 {filename}")
if info["type"] not in ["collect", "fight"]:
info["type"] = "collect"
corrections.append("type 自动修正为 collect")
if not info["authors"]:
author_name = os.getenv("GITHUB_ACTOR", "未知作者")
author_link = "https://github.com/" + os.getenv("GITHUB_ACTOR", "babalae/bettergi-scripts-list")
info["authors"] = [{"name": author_name, "links": author_link}]
corrections.append(f"authors 自动设置为 {info['authors']}")
return corrections
def check_position_fields(positions):
"""Validate the position fields
Auto-fix behaviour:
1. A missing type field is set to 'path'
2. An invalid type field is corrected to 'path'
3. When type is 'path' or 'target' and move_mode is missing, it is set to 'walk'
4. An invalid move_mode field is corrected to 'walk'
"""
validation_issues = []
notices = []
corrections = [] # list of applied fixes
for idx, pos in enumerate(positions):
# Required fields
required_fields = ["x", "y", "type"]
missing_fields = [field for field in required_fields if field not in pos]
if missing_fields:
validation_issues.append(f"位置 {idx+1} 缺少必需字段: {', '.join(missing_fields)}")
# Auto-add a missing type field
if "type" in missing_fields:
pos["type"] = "path" # auto-fix: a missing type defaults to path
corrections.append(f"位置 {idx+1} 缺少 type 字段,已设置为默认值 'path'")
# A path type also needs a move_mode
if "move_mode" not in pos:
pos["move_mode"] = "walk" # auto-fix: default move_mode for a path type
corrections.append(f"位置 {idx+1} 缺少 move_mode 字段,已设置为默认值 'walk'")
# The continue was removed so the remaining checks still run
# continue
# Validate the type field
if "type" in pos:
pos_type = pos["type"]
if pos_type not in VALID_TYPES:
validation_issues.append(f"位置 {idx+1}: type '{pos_type}' 无效,有效值为: {', '.join(VALID_TYPES)}")
# Auto-correct an invalid type field
pos["type"] = "path" # auto-fix: an invalid type becomes path
corrections.append(f"位置 {idx+1} 的 type '{pos_type}' 无效,已修正为 'path'")
pos_type = "path" # keep pos_type in sync for the checks below
# When type is path or target, validate move_mode
if pos_type in ["path", "target"]:
if "move_mode" not in pos:
validation_issues.append(f"位置 {idx+1}: type 为 '{pos_type}' 时必须指定 move_mode")
# Auto-add the missing move_mode
pos["move_mode"] = "walk" # auto-fix: a missing move_mode defaults to walk
corrections.append(f"位置 {idx+1} 缺少 move_mode 字段,已设置为默认值 'walk'")
elif pos["move_mode"] not in VALID_MOVE_MODES:
old_mode = pos["move_mode"]
validation_issues.append(f"位置 {idx+1}: move_mode '{old_mode}' 无效,有效值为: {', '.join(VALID_MOVE_MODES)}")
# Auto-correct an invalid move_mode; capture the old value first so the
# correction message does not report the already-overwritten value
pos["move_mode"] = "walk" # auto-fix: an invalid move_mode becomes walk
corrections.append(f"位置 {idx+1} 的 move_mode '{old_mode}' 无效,已修正为 'walk'")
# Check that the first position is a teleport
if idx == 0 and pos.get("type") != "teleport":
notices.append("⚠️ 第一个 position 的 type 不是 teleport")
return validation_issues, notices, corrections
def check_bgi_version_compatibility(bgi_version, auto_fix=False):
"""Check BGI version compatibility"""
corrections = []
# Strip a possible leading "v"
if bgi_version.startswith('v'):
bgi_version = bgi_version.lstrip('v')
corrections.append(f"bgi_version 前缀 'v' 已删除")
bgi_ver = parse_bgi_version(bgi_version)
if not bgi_ver:
if auto_fix:
corrections.append(f"bgi_version {bgi_version} 格式无效,自动更新为 {DEFAULT_BGI_VERSION}")
return DEFAULT_BGI_VERSION, corrections
return bgi_version, []
if bgi_ver < VersionInfo.parse(DEFAULT_BGI_VERSION):
if auto_fix:
corrections.append(f"bgi_version {bgi_version} 自动更新为 {DEFAULT_BGI_VERSION} (原版本低于要求)")
return DEFAULT_BGI_VERSION, corrections
return bgi_version, corrections
def check_position_ids(positions):
"""Check and repair the continuity of position id numbering
Auto-fix behaviour:
1. Missing id fields are added in order
2. Non-consecutive ids are renumbered
3. Ids that do not start at 1 are adjusted
4. Invalid (non-numeric) id values are corrected
"""
corrections = []
validation_issues = []
if not positions:
return validation_issues, corrections
# Check that every position has an id field and collect the existing values
current_ids = []
missing_ids = []
invalid_ids = []
for idx, pos in enumerate(positions):
if "id" not in pos:
missing_ids.append(idx)
current_ids.append(None)
else:
try:
id_val = int(pos["id"])
current_ids.append(id_val)
except (ValueError, TypeError):
# Non-numeric ids are recorded as invalid
invalid_ids.append(idx)
current_ids.append(None)
# Record positions that were missing an id
if missing_ids:
corrections.append(f"{len(missing_ids)} 个位置自动添加了 id 字段")
# Record invalid ids
if invalid_ids:
corrections.append(f"修正了 {len(invalid_ids)} 个无效的 id 值")
# The expected id sequence (starting at 1)
expected_ids = list(range(1, len(positions) + 1))
# Check whether the current ids match the expectation
needs_reorder = False
# Filter out None values to inspect the valid existing ids
valid_current_ids = [id_val for id_val in current_ids if id_val is not None]
if len(valid_current_ids) != len(positions):
needs_reorder = True
elif valid_current_ids != expected_ids:
needs_reorder = True
else:
# Check for duplicate ids
if len(set(valid_current_ids)) != len(valid_current_ids):
needs_reorder = True
duplicates = [id_val for id_val in set(valid_current_ids) if valid_current_ids.count(id_val) > 1]
corrections.append(f"检测到重复的 id: {duplicates}")
# Auto-fix when a renumbering is needed
if needs_reorder:
id_issues = []
# Diagnose the specific problems
if missing_ids or invalid_ids:
if missing_ids:
id_issues.append("存在缺少id的位置")
if invalid_ids:
id_issues.append("存在无效id值")
if valid_current_ids:
if min(valid_current_ids) != 1:
id_issues.append("id不是从1开始")
# Continuity check
sorted_valid_ids = sorted(valid_current_ids)
expected_sorted = list(range(1, len(valid_current_ids) + 1))
if sorted_valid_ids != expected_sorted:
id_issues.append("id编号不连续")
# Reassign ids in order and put the id field first
for idx, pos in enumerate(positions):
new_id = idx + 1
# Build a new dict with id as the first key
new_pos = {"id": new_id}
# then the remaining fields
for key, value in pos.items():
if key != "id":
new_pos[key] = value
# update the position in place
pos.clear()
pos.update(new_pos)
if id_issues:
corrections.append(f"id编号已重新排序并置于首位 (问题: {', '.join(id_issues)})")
else:
corrections.append("id编号已按顺序重新分配并置于首位")
return validation_issues, corrections
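# Illustrative example (not in the original source): positions
# [{"id": 3, "x": 1, "y": 1}, {"x": 2, "y": 2}] come out renumbered as ids 1 and 2,
# with the id key moved to the front of each dict.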
# ==================== Detect and fix file encodings ====================
def detect_encoding(file_path, read_size=2048):
try:
with open(file_path, 'rb') as f:
raw = f.read(read_size)
result = chardet.detect(raw)
return result['encoding'], result['confidence']
except:
return None, 0
def fix_encoding_name(enc, file_path=None):
if not enc:
return None
enc = enc.lower()
if enc in ['ascii']:
try:
with open(file_path, 'rb') as f:
raw = f.read()
raw.decode('utf-8')
return 'utf-8'
except:
return 'gb18030'
if enc in ['gb2312', 'gbk', 'windows-1252', 'iso-8859-1', 'gb18030']:
return 'gb18030'
return enc
def convert_to_utf8(file_path, original_encoding):
try:
encoding = fix_encoding_name(original_encoding, file_path)
with open(file_path, 'r', encoding=encoding, errors='replace') as f:
content = f.read()
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
print(f"[✔] Converted to UTF-8: {file_path} (from {original_encoding} → {encoding})")
except Exception as e:
print(f"[✖] Failed to convert: {file_path} | Error: {e}")
def process_file(file_path, target_extensions=None):
if target_extensions and not any(file_path.lower().endswith(ext) for ext in target_extensions):
return
encoding, confidence = detect_encoding(file_path)
if encoding is None or confidence < 0.7:
print(f"[⚠️] Unknown encoding: {file_path} | Detected: {encoding}, Conf: {confidence:.2f}")
return
if encoding.lower() == 'utf-8':
return # Skip already UTF-8
convert_to_utf8(file_path, encoding)
def scan_and_convert(path, target_extensions=None):
if os.path.isfile(path):
process_file(path, target_extensions)
elif os.path.isdir(path):
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
process_file(filepath, target_extensions)
else:
print(f"❌ Path not found: {path}")
# ==================== Validate and fix author info ====================
def process_json_authors(input_path, verbose=False):
"""
Process author info in JSON files (supports author → authors structured migration, author renaming, and link unification).
Arguments:
input_path (str): file or directory path to process
verbose (bool): whether to print detailed log output
The config file (author_config.json) is expected next to this script.
Returns:
dict: statistics with the number of files processed and modified
"""
result = {
"total_files": 0,
"modified_files": 0,
"errors": []
}
# Config file path (same directory as this script)
script_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(script_dir, "author_config.json")
if not os.path.exists(input_path):
raise FileNotFoundError(f"路径不存在:{input_path}")
if not os.path.exists(config_path):
raise FileNotFoundError(f"配置文件不存在:{config_path}")
# Load the config
try:
with open(config_path, "r", encoding="utf-8") as f:
config = json.load(f)
except Exception as e:
raise RuntimeError(f"配置文件加载失败:{e}")
author_rename = config.get("rename", {})
author_links = config.get("links", {})
# Build the list of files to process
file_list = []
if os.path.isfile(input_path) and input_path.endswith(".json"):
file_list.append(input_path)
elif os.path.isdir(input_path):
for root, dirs, files in os.walk(input_path):
for filename in files:
if filename.endswith(".json"):
file_list.append(os.path.join(root, filename))
else:
raise ValueError("输入路径必须是 .json 文件或目录")
for file_path in file_list:
result["total_files"] += 1
if verbose:
print(f"\n🔍 处理文件:{file_path}")
try:
with open(file_path, "r", encoding="utf-8") as f:
data = json.load(f)
except Exception as e:
msg = f"❌ 解析失败:{e}"
if verbose:
print(msg)
result["errors"].append((file_path, str(e)))
continue
info = data.get("info")
if not isinstance(info, dict):
if verbose:
print("⚠️ 缺少 info 字段")
continue
modified = False
author_field = info.get("author")
if author_field is not None:
if isinstance(author_field, str):
names = [name.strip() for name in author_field.split("&")]
new_authors = []
for name in names:
new_name = author_rename.get(name, name)
author_obj = {"name": new_name}
if new_name in author_links:
author_obj["links"] = author_links[new_name]
new_authors.append(author_obj)
data["info"]["authors"] = new_authors
modified = True
if verbose:
print("✅ 替换为结构化 authors")
elif isinstance(author_field, list):
for author_obj in author_field:
if not isinstance(author_obj, dict):
continue
name = author_obj.get("name")
if not name:
continue
new_name = author_rename.get(name, name)
if name != new_name:
author_obj["name"] = new_name
modified = True
if verbose:
print(f"📝 重命名:{name} → {new_name}")
existing_link = author_obj.pop("link", None) or author_obj.pop("url", None) or author_obj.get("links")
if new_name in author_links:
if author_obj.get("links") != author_links[new_name]:
author_obj["links"] = author_links[new_name]
modified = True
if verbose:
print(f"🔧 更新链接:{new_name} → {author_links[new_name]}")
elif "links" not in author_obj and existing_link:
author_obj["links"] = existing_link
modified = True
if verbose:
print(f"🔄 标准化已有链接字段为 links → {existing_link}")
else:
authors_field = info.get("authors")
if isinstance(authors_field, list):
for author_obj in authors_field:
if not isinstance(author_obj, dict):
continue
name = author_obj.get("name")
if not name:
continue
new_name = author_rename.get(name, name)
if name != new_name:
author_obj["name"] = new_name
modified = True
if verbose:
print(f"📝 重命名authors{name} → {new_name}")
existing_link = author_obj.pop("link", None) or author_obj.pop("url", None) or author_obj.get("links")
if new_name in author_links:
if author_obj.get("links") != author_links[new_name]:
author_obj["links"] = author_links[new_name]
modified = True
if verbose:
print(f"🔧 更新链接authors{new_name} → {author_links[new_name]}")
elif "links" not in author_obj and existing_link:
author_obj["links"] = existing_link
modified = True
if verbose:
print(f"🔄 标准化已有链接字段为 links → {existing_link}")
else:
# if verbose:
print("⚠️ 缺少 author 字段,且 authors 非标准格式")
if modified:
with open(file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
result["modified_files"] += 1
if verbose:
print("✅ 写入完成")
else:
if verbose:
print("⏭️ 无需修改")
if verbose:
print(f"\n🎉 处理完成:共 {result['total_files']} 个 JSON 文件,修改了 {result['modified_files']}")
# ==================== Main validation logic ====================
def initialize_data(data, file_path):
"""Initialize the data structure, making sure the required fields exist"""
messages = []
if "info" not in data:
data["info"] = {}
messages.append(f"⚠️ 文件缺少 info 字段,已添加默认值")
info = data["info"]
filename = os.path.splitext(os.path.basename(file_path))[0]
# Check and add the required fields
if "name" not in info:
info["name"] = filename
messages.append(f"⚠️ 文件缺少 name 字段,已设置为文件名: {info['name']}")
if "type" not in info:
info["type"] = "collect"
messages.append(f"⚠️ 文件缺少 type 字段,已设置为默认值: collect")
if "authors" not in info:
author_name = os.getenv("GITHUB_ACTOR", "未知作者")
author_link = "https://github.com/" + os.getenv("GITHUB_ACTOR", "babalae/bettergi-scripts-list")
info["authors"] = [{"name": author_name, "links": author_link}]
messages.append(f"⚠️ 文件缺少 authors 字段,已设置为: {info['authors']}")
if "version" not in info:
info["version"] = DEFAULT_VERSION
messages.append(f"⚠️ 文件缺少 version 字段,已设置为默认值: {DEFAULT_VERSION}")
if "bgi_version" not in info:
info["bgi_version"] = DEFAULT_BGI_VERSION
messages.append(f"⚠️ 文件缺少 bgi_version 字段,已设置为默认值: {DEFAULT_BGI_VERSION}")
if "positions" not in data:
data["positions"] = []
messages.append(f"⚠️ 文件缺少 positions 字段,已添加空数组")
return data
def check_actions_compatibility(positions, bgi_version):
"""Check action compatibility for every position"""
compatibility_issues = []
validation_issues = []
for idx, pos in enumerate(positions):
action_type = pos.get("action", "")
action_params = pos.get("params", "")
if action_type:
compat_issues, valid_issues = check_action_compatibility(action_type, action_params, bgi_version)
for issue in compat_issues:
compatibility_issues.append(f"位置 {idx+1}: {issue}")
for issue in valid_issues:
validation_issues.append(f"位置 {idx+1}: {issue}")
return compatibility_issues, validation_issues
def update_bgi_version_for_compatibility(info, compatibility_issues, auto_fix):
"""Bump the BGI version according to the compatibility issues"""
corrections = []
if auto_fix and compatibility_issues:
max_required = extract_required_version(compatibility_issues)
if max_required:
# Make sure max_required carries no "v" prefix
max_required = max_required.lstrip('v')
try:
current_bgi = parse_bgi_version(info["bgi_version"])
if current_bgi and current_bgi < VersionInfo.parse(max_required):
# Capture the old version first so the message does not report the new value twice
old_version = info["bgi_version"]
info["bgi_version"] = max_required
corrections.append(f"bgi_version {old_version} 自动更新为 {max_required} 以兼容所有功能")
return [], corrections
except ValueError as e:
# print(f"警告: 版本号解析失败 - {e}")
info["bgi_version"] = DEFAULT_BGI_VERSION
corrections.append(f"bgi_version 自动更新为 {DEFAULT_BGI_VERSION} (版本解析失败)")
return [], corrections
return compatibility_issues, corrections
def validate_file(file_path, auto_fix=False):
"""Validate and repair a JSON file"""
# Load the file
data, error = load_json_file(file_path)
if error:
print(error)
return []
# Fetch the original file
original_data, source = get_original_file(file_path) if auto_fix else (None, None)
is_new = not original_data if auto_fix else True
# Initialize the data structure
data = initialize_data(data, file_path)
info = data["info"]
filename = os.path.splitext(os.path.basename(file_path))[0]
# Collect all corrections - fix: this line was added to define the all_corrections variable
all_corrections = []
# Required fields
corrections = ensure_required_fields(info, filename)
all_corrections.extend(corrections)
# Coordinates
coord_changed = process_coordinates(data["positions"])
if coord_changed:
all_corrections.append("坐标值自动保留四位小数")
# BGI version compatibility
bgi_version, corrections = check_bgi_version_compatibility(info["bgi_version"], auto_fix)
if corrections:
info["bgi_version"] = bgi_version
all_corrections.extend(corrections)
# Check the position fields - now returns three values
position_issues, notices, pos_corrections = check_position_fields(data["positions"])
if auto_fix and pos_corrections:
all_corrections.extend(pos_corrections)
# Position id numbering
if auto_fix:
id_validation_issues, id_corrections = check_position_ids(data["positions"])
if id_corrections:
all_corrections.extend(id_corrections)
position_issues.extend(id_validation_issues)
# Action compatibility
compatibility_issues, action_validation_issues = check_actions_compatibility(data["positions"], info["bgi_version"])
position_issues.extend(action_validation_issues)
# Bump the BGI version for compatibility
compatibility_issues, corrections = update_bgi_version_for_compatibility(info, compatibility_issues, auto_fix)
all_corrections.extend(corrections)
# Version bump - only files fetched from the upstream repo get their version bumped
# if auto_fix:
if False:
has_original_version = False
original_version = None
if original_data and "info" in original_data and "version" in original_data["info"]:
original_version = original_data["info"]["version"]
has_original_version = True
print(f"成功获取原始版本号: {original_version}")
else:
print("未找到原始版本号,将视为新文件处理")
# Treat as a new file only when there is no original version
is_new = not has_original_version
print(f"原始版本号: {original_version}, 当前版本号: {info['version']}, 是否新文件: {is_new}, 来源: {source}")
# Only bump the version when the file came from upstream
if source == "upstream":
new_version = process_version(info["version"], original_version, is_new)
if new_version != info["version"]:
info["version"] = new_version
all_corrections.append(f"version 自动更新为 {new_version}")
print(f"版本号已更新: {info['version']}")
else:
print(f"版本号未变化: {info['version']}")
else:
print(f"这是PR提交的文件保持版本号不变: {info['version']}(合并后再更新版本)")
# Merge all notices
for issue in compatibility_issues:
notices.append(issue)
for issue in position_issues:
notices.append(issue)
# Save the corrections
if auto_fix:
if all_corrections or position_issues:
if save_json_file(file_path, data):
print("✅ 文件已保存")
else:
notices.append("❌ 保存文件失败")
return notices
def main():
import argparse
parser = argparse.ArgumentParser(description='校验 BetterGI 脚本文件')
parser.add_argument('path', help='要校验的文件或目录路径')
parser.add_argument('--fix', action='store_true', help='自动修复问题')
args = parser.parse_args()
path = args.path
auto_fix = args.fix
all_notices = [] # collects notices across all files
if os.path.isfile(path) and path.endswith('.json'):
scan_and_convert(path)
process_json_authors(path)
# print(f"\n🔍 校验文件: {path}")
notices = validate_file(path, auto_fix)
if notices:
all_notices.extend([f"{path}: {n}" for n in notices]) # add to all_notices
print("\n校验注意事项:")
for notice in notices:
print(f"- {notice}")
else:
print("✅ 校验完成,没有发现问题")
elif os.path.isdir(path):
for root, _, files in os.walk(path):
for file in files:
if file.endswith('.json'):
file_path = os.path.join(root, file)
print(f"\n🔍 校验文件: {file_path}")
scan_and_convert(file_path)
process_json_authors(file_path)
notices = validate_file(file_path, auto_fix)
if notices:
all_notices.extend([f"{file_path}: {n}" for n in notices])
if all_notices:
print("\n所有校验注意事项:")
for notice in all_notices:
print(f"- {notice}")
else:
print("\n✅ 所有文件校验完成,没有发现问题")
else:
print(f"❌ 无效的路径: {path}")
if __name__ == "__main__":
main()
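
Typical invocations, judging from the argparse setup above (how CI calls this is not shown in this commit): `python build/validate.py <file-or-directory>` converts encodings to UTF-8, normalizes author info (these steps always write), and reports validation notices; adding `--fix` also saves the structural auto-repairs described above back to the files.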