Latest lex_scan, script to copy to SME11, program to count references
This commit is contained in:
parent 8c3a0529a3
commit 032c544c53
87  copylex.sh  Executable file
@@ -0,0 +1,87 @@
#!/bin/bash

# Usage/help function
usage() {
    echo "Usage: $0 ModuleName"
    echo " ModuleName must start with a capital letter and match a directory under:"
    echo " ~/SME11/usr/share/smanager/lib/SrvMngr/I18N/Modules/"
    echo "Example: $0 Useraccounts"
    exit 1
}

# Check parameter
if [ -z "$1" ]; then
    echo "Error: No module name provided."
    usage
fi

MODULE_NAME="$1"

# Check if parameter starts with a capital letter
if [[ ! "$MODULE_NAME" =~ ^[A-Z] ]]; then
    echo "Error: Module name must start with a capital letter."
    usage
fi

# Check if directory exists
SRC_DIR=~/SME11/usr/share/smanager/lib/SrvMngr/I18N/Modules
MODULE_DIR="${SRC_DIR}/${MODULE_NAME}"
if [ ! -d "$MODULE_DIR" ]; then
    echo "Error: Directory '${MODULE_DIR}' does not exist."
    usage
fi

# Convert to lowercase for filenames
MODULE_NAME_LC=$(echo "$MODULE_NAME" | tr '[:upper:]' '[:lower:]')

# Paths
LEX_FILE="${MODULE_DIR}/${MODULE_NAME_LC}_en.lex"
BAK_FILE="${LEX_FILE}.bak"
NEW_LEX_FILE="output/${MODULE_NAME}/${MODULE_NAME_LC}_en.lex.new1"

GENERAL_LEX_FILE="${SRC_DIR}/General/general_en.lex"
GENERAL_BAK_FILE="${GENERAL_LEX_FILE}.bak"
GENERAL_NEW_LEX_FILE="output/General/general_en.lex.new1"

# Move to backup only if .bak does not exist (module file)
if [ ! -f "$BAK_FILE" ]; then
    mv -v "$LEX_FILE" "$BAK_FILE"
fi

# Move to backup only if .bak does not exist (general file)
if [ ! -f "$GENERAL_BAK_FILE" ]; then
    mv -v "$GENERAL_LEX_FILE" "$GENERAL_BAK_FILE"
fi

# Copy new lex files
cp -v "$NEW_LEX_FILE" "$LEX_FILE"
cp -v "$GENERAL_NEW_LEX_FILE" "$GENERAL_LEX_FILE"


# Copy template files from output/<ModuleName>/<subdir>
TEMPLATE_SRC_PARENT="output/${MODULE_NAME}"

TEMPLATE_TARGET_BASE=~/SME11/usr/share/smanager/themes

if [ -d "$TEMPLATE_SRC_PARENT" ]; then
    for SUBDIR in "$TEMPLATE_SRC_PARENT"/*/; do
        [ ! -d "$SUBDIR" ] && continue  # skip if no subdirs
        SUBDIR_NAME=$(basename "$SUBDIR")
        find "$SUBDIR" -type f | while read -r FILE; do
            BASENAME=$(basename "$FILE")
            # Remove the first .new in the name (if any), e.g., foo.html.new.ep => foo.html.ep
            NEWNAME="${BASENAME/.new/}"
            if [[ "$BASENAME" == _* ]]; then
                DEST="$TEMPLATE_TARGET_BASE/$SUBDIR_NAME/templates/partials/"
            else
                DEST="$TEMPLATE_TARGET_BASE/$SUBDIR_NAME/templates/"
            fi
            mkdir -p "$DEST"
            cp -v "$FILE" "$DEST/$NEWNAME"
        done
    done
fi


ssh -p 1234 root@SME11.thereadclan.me.uk 'signal-event smanager-refresh'
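
For reference: the script takes the module name as its single positional argument, e.g. ./copylex.sh Useraccounts (the example from its own usage text), and finishes by triggering signal-event smanager-refresh on the target host over SSH.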
264  count-references.py  Normal file
@@ -0,0 +1,264 @@
#!/usr/bin/env python3
"""
Lex ID Reference Counter

This program parses a lex file containing quoted lex-ids and strings,
then searches through *.ep and *.pm files in a directory hierarchy
to count references to those lex-ids and prints a sorted table.
Files with "I18N" or "AdminLTE" in their pathname are excluded from the search.
Results are sorted by Weighted Score (Total Refs × File Count) highest to lowest.

Usage: python3 count-references.py <lex_file> <search_directory>
"""

import os
import re
import sys
import glob
from collections import defaultdict
from pathlib import Path


def parse_lex_file(lex_file_path):
    """
    Parse the lex file to extract lex-ids.
    Expected format: "lex-id" => "quoted string",

    Returns:
        set: A set of lex-ids (without quotes)
    """
    lex_ids = set()

    try:
        with open(lex_file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Pattern to match "lex-id" => "quoted string",
        # Captures the lex-id (first quoted string)
        pattern = r"""['"]([^'"]+)['"]\s*=>\s*['"][^'"]*['"]\s*,"""
        matches = re.findall(pattern, content)

        for match in matches:
            lex_ids.add(match)

        print(f"Found {len(lex_ids)} lex-ids in {lex_file_path}")

    except FileNotFoundError:
        print(f"Error: Lex file '{lex_file_path}' not found.")
        sys.exit(1)
    except Exception as e:
        print(f"Error reading lex file: {e}")
        sys.exit(1)

    return lex_ids


def find_target_files(search_directory):
    """
    Find all *.ep and *.pm files in the directory hierarchy.
    Excludes files whose pathname includes "I18N" or "AdminLTE".

    Args:
        search_directory (str): Root directory to search

    Returns:
        list: List of file paths (excluding I18N and AdminLTE files)
    """
    target_files = []

    if not os.path.exists(search_directory):
        print(f"Error: Search directory '{search_directory}' not found.")
        sys.exit(1)

    # Use glob to find all .ep and .pm files recursively
    ep_files = glob.glob(os.path.join(search_directory, '**', '*.ep'), recursive=True)
    pm_files = glob.glob(os.path.join(search_directory, '**', '*.pm'), recursive=True)

    all_files = ep_files + pm_files

    # Filter out files with "I18N" or "AdminLTE" in their pathname
    target_files = [f for f in all_files if "I18N" not in f and "AdminLTE" not in f]

    excluded_count = len(all_files) - len(target_files)
    i18n_excluded = [f for f in all_files if "I18N" in f]
    adminlte_excluded = [f for f in all_files if "AdminLTE" in f]

    print(f"Found {len(all_files)} total files (.ep and .pm)")
    if excluded_count > 0:
        print(f"Excluded {len(i18n_excluded)} files containing 'I18N' in pathname")
        print(f"Excluded {len(adminlte_excluded)} files containing 'AdminLTE' in pathname")
        print(f"Total excluded: {excluded_count} files")
    print(f"Processing {len(target_files)} target files")

    return target_files


def count_lex_references(lex_ids, target_files):
    """
    Count references to lex-ids in target files and track file counts.
    Looks for quoted lex-ids in the files.

    Args:
        lex_ids (set): Set of lex-ids to search for
        target_files (list): List of file paths to search in

    Returns:
        dict: Dictionary with lex-id as key and dict containing 'total_refs' and 'file_count' as value
    """
    # Structure: {lex_id: {'total_refs': count, 'file_count': count, 'files': set()}}
    reference_data = defaultdict(lambda: {'total_refs': 0, 'file_count': 0, 'files': set()})

    for file_path in target_files:
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()

            # Search for each lex-id in quotes
            for lex_id in lex_ids:
                # Pattern to match the lex-id in quotes
                quoted_pattern = f"['\"]{re.escape(lex_id)}['\"]"
                matches = re.findall(quoted_pattern, content)

                if matches:
                    # Add to total reference count
                    reference_data[lex_id]['total_refs'] += len(matches)
                    # Add file to the set of files containing this lex_id
                    reference_data[lex_id]['files'].add(file_path)

        except Exception as e:
            print(f"Warning: Could not read file {file_path}: {e}")
            continue

    # Calculate file counts from the sets
    for lex_id in reference_data:
        reference_data[lex_id]['file_count'] = len(reference_data[lex_id]['files'])

    return reference_data


def print_results_table(reference_data):
    """
    Print the results in a table format, sorted by Weighted Score (Total Refs × File Count) highest to lowest.

    Args:
        reference_data (dict): Dictionary with lex-id as key and data dict as value
    """
    if not reference_data:
        print("No references found.")
        return

    # Calculate weighted score for each lex_id and sort by it (descending), then by lex-id (ascending) for ties
    def get_weighted_score(item):
        lex_id, data = item
        return data['total_refs'] * data['file_count']

    sorted_items = sorted(reference_data.items(), key=lambda x: (-get_weighted_score(x), x[0]))

    # Calculate column widths
    max_lex_id_width = max(len(lex_id) for lex_id in reference_data.keys()) if reference_data else 0
    max_total_refs_width = max(len(str(data['total_refs'])) for data in reference_data.values()) if reference_data else 0
    max_file_count_width = max(len(str(data['file_count'])) for data in reference_data.values()) if reference_data else 0

    # Calculate refs per file and weighted score values for width determination
    refs_per_file_values = []
    weighted_score_values = []
    for data in reference_data.values():
        if data['file_count'] > 0:
            refs_per_file = data['total_refs'] / data['file_count']
            refs_per_file_values.append(f"{refs_per_file:.1f}")
        else:
            refs_per_file_values.append("0.0")

        weighted_score = data['total_refs'] * data['file_count']
        weighted_score_values.append(str(weighted_score))

    max_refs_per_file_width = max(len(val) for val in refs_per_file_values) if refs_per_file_values else 0
    max_weighted_score_width = max(len(val) for val in weighted_score_values) if weighted_score_values else 0

    # Ensure minimum widths for headers
    lex_id_width = max(max_lex_id_width, len("Lex ID"))
    total_refs_width = max(max_total_refs_width, len("Total Refs"))
    file_count_width = max(max_file_count_width, len("Files"))
    refs_per_file_width = max(max_refs_per_file_width, len("Refs/File"))
    weighted_score_width = max(max_weighted_score_width, len("Weighted Score"))

    # Calculate total table width
    table_width = lex_id_width + total_refs_width + file_count_width + refs_per_file_width + weighted_score_width + 16  # 16 for separators

    # Print header
    print("\nReference Count Results (sorted by Weighted Score, excluding I18N and AdminLTE files):")
    print("=" * table_width)
    print(f"{'Lex ID':<{lex_id_width}} | {'Total Refs':>{total_refs_width}} | {'Files':>{file_count_width}} | {'Refs/File':>{refs_per_file_width}} | {'Weighted Score':>{weighted_score_width}}")
    print("-" * table_width)

    # Print results
    total_references = 0
    total_files_with_refs = set()
    total_weighted_score = 0

    for lex_id, data in sorted_items:
        refs_per_file = data['total_refs'] / data['file_count'] if data['file_count'] > 0 else 0.0
        weighted_score = data['total_refs'] * data['file_count']
        print(f"{lex_id:<{lex_id_width}} | {data['total_refs']:>{total_refs_width}} | {data['file_count']:>{file_count_width}} | {refs_per_file:>{refs_per_file_width}.1f} | {weighted_score:>{weighted_score_width}}")
        total_references += data['total_refs']
        total_files_with_refs.update(data['files'])
        total_weighted_score += weighted_score

    # Calculate overall refs per file
    overall_refs_per_file = total_references / len(total_files_with_refs) if total_files_with_refs else 0.0

    print("-" * table_width)
    print(f"{'Total':<{lex_id_width}} | {total_references:>{total_refs_width}} | {len(total_files_with_refs):>{file_count_width}} | {overall_refs_per_file:>{refs_per_file_width}.1f} | {total_weighted_score:>{weighted_score_width}}")

    # Print summary
    print(f"\nSummary:")
    print(f"- Total lex-id references found: {total_references}")
    print(f"- Total unique files with references: {len(total_files_with_refs)}")
    print(f"- Total lex-ids with at least one reference: {len([data for data in reference_data.values() if data['total_refs'] > 0])}")
    print(f"- Average references per file: {overall_refs_per_file:.1f}")
    print(f"- Total weighted score: {total_weighted_score}")
    print(f"- Results sorted by Weighted Score (Total Refs × File Count, highest to lowest)")
    print(f"- Files with 'I18N' or 'AdminLTE' in pathname were excluded from search")


def main():
    """Main function to orchestrate the program."""
    if len(sys.argv) != 3:
        print("Usage: python3 count-references.py <lex_file> <search_directory>")
        print("\nExample:")
        print("  python3 count-references.py lexicon.lex /path/to/search")
        print("\nNote: Files with 'I18N' or 'AdminLTE' in their pathname will be excluded from the search.")
        print("Results are sorted by Weighted Score (Total Refs × File Count, highest to lowest).")
        sys.exit(1)

    lex_file_path = sys.argv[1]
    search_directory = sys.argv[2]

    print(f"Parsing lex file: {lex_file_path}")
    print(f"Searching directory: {search_directory}")
    print()

    # Step 1: Parse the lex file to get lex-ids
    lex_ids = parse_lex_file(lex_file_path)

    if not lex_ids:
        print("No lex-ids found in the lex file.")
        sys.exit(1)

    # Step 2: Find all target files (.ep and .pm), excluding I18N and AdminLTE files
    target_files = find_target_files(search_directory)

    if not target_files:
        print("No .ep or .pm files found in the search directory (after exclusions).")
        sys.exit(1)

    # Step 3: Count references to lex-ids in target files
    print("Counting references...")
    reference_data = count_lex_references(lex_ids, target_files)

    # Step 4: Print results table
    print_results_table(reference_data)


if __name__ == "__main__":
    main()
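
A minimal sketch (not from the commit) of the entry format parse_lex_file() expects, run against a made-up two-line lexicon; the lex-ids shown are hypothetical:

import re

sample = """
'usr_FIRSTNAME' => 'First name',
"grp_DESCRIPTION" => "Group description",
"""
pattern = r"""['"]([^'"]+)['"]\s*=>\s*['"][^'"]*['"]\s*,"""
print(re.findall(pattern, sample))  # ['usr_FIRSTNAME', 'grp_DESCRIPTION']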
479  lex_scan.py
@@ -4,6 +4,10 @@ import os
 import re
 import sys
 import json
+from pathlib import Path
+from datetime import datetime
+import glob
+

 # Configure logger
@@ -23,7 +27,7 @@ file_handler.setFormatter(formatter)

 # Console handler (WARNING and above)
 console_handler = logging.StreamHandler()
-console_handler.setLevel(logging.WARNING)
+console_handler.setLevel(logging.INFO)
 console_handler.setFormatter(formatter)

 # Add handlers to the logger
@@ -31,10 +35,12 @@ logger.addHandler(file_handler)
 logger.addHandler(console_handler)

 missing_files = []
+themes = ["default", "AdminLTE"]
+

 def validate_panel_name(panel_name):
     if not panel_name[0].isupper():
-        logger.error(f"Error: Panel name \'{panel_name}\' must start with a capital letter.")
+        logger.error(f"❌Error: Panel name \'{panel_name}\' must start with a capital letter.")
         sys.exit(1)

 def get_full_base_path(system):
@@ -44,7 +50,7 @@ def check_controller_file_exists(system, panel):
     full_base_path = get_full_base_path(system)
     controller_path = os.path.join(full_base_path, "lib/SrvMngr/Controller", f"{panel}.pm")
     if not os.path.exists(controller_path):
-        logger.error(f"Error: Controller file \'{controller_path}\' does not exist.")
+        logger.error(f"❌Error: Controller file \'{controller_path}\' does not exist.")
         sys.exit(1)
     return controller_path

@@ -88,9 +94,27 @@ def find_matching_files_variable_part(input_string, directory):
         return matching_files
     return []

+
+def extract_string_for_exclusion(filename, prefix):
+    """
+    Extracts the pattern _<prefix>_<alphan> (terminated by a dot) from the basename of the filename.
+    The prefix is passed as a variable. Returns the matched string or None if not found.
+    Logs when a string is added to the exclude list.
+    """
+    #logger.info(f"extract:{prefix} {filename}")
+    base = os.path.basename(filename)
+    # Match: start, _<prefix>_, one or more alphanumerics, then a dot
+    pattern = rf'^_({re.escape(prefix)}_[a-zA-Z0-9]+)(?=\.)'
+    match = re.match(pattern, base)
+    if match:
+        result = match.group(1)
+        #logger.info(f"Returning '{result}' to exclude_list from file: {filename}")
+        return result
+    return None
+

 def scan_application_files(system, panel, prefix, scan_general=False):
     extracted_strings = {}
+    exclude_list = []

     full_base_path = get_full_base_path(system)

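
A minimal sketch (not from the commit) of what the new exclusion helper matches, using made-up partial-template filenames and the prefix 'usr':

import os, re

def extract(filename, prefix):
    # Same pattern as extract_string_for_exclusion(): _<prefix>_<alphanumerics> terminated by a dot
    base = os.path.basename(filename)
    m = re.match(rf'^_({re.escape(prefix)}_[a-zA-Z0-9]+)(?=\.)', base)
    return m.group(1) if m else None

print(extract("themes/default/templates/partials/_usr_list.html.ep", "usr"))        # usr_list
print(extract("themes/default/templates/partials/_usr_list_table.html.ep", "usr"))  # None (second underscore stops the match)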
@@ -105,7 +129,6 @@ def scan_application_files(system, panel, prefix, scan_general=False):
         scan_file_for_lexical_strings(controller_custom_path, prefix, extracted_strings, scan_general)

     # Template files
-    themes = ["default", "AdminLTE"]
     for theme in themes:
         template_base_path = os.path.join(full_base_path, "themes", theme, "templates")
         if panel in ['Backup','Yum','Bugreport']:
@@ -114,7 +137,7 @@ def scan_application_files(system, panel, prefix, scan_general=False):
             # print(f"Matching template files: {panel.lower()!r} -> Matches: {[os.path.basename(m) for m in template_files]}")
             for file_path in template_files:
                 panel_template_path = os.path.join(template_base_path, f"{file_path}")
-                logger.warning(f"Scanning panel template file: {panel_template_path}")
+                logger.info(f"Scanning panel template file: {panel_template_path}")
                 scan_file_for_lexical_strings(panel_template_path, prefix, extracted_strings, scan_general)
         else:
             panel_template_path = os.path.join(template_base_path, f"{panel.lower()}.html.ep")
@@ -130,20 +153,33 @@ def scan_application_files(system, panel, prefix, scan_general=False):
                 partial_path = os.path.join(partials_dir, filename)
                 logger.info(f"Scanning partial template file: {partial_path}")
                 scan_file_for_lexical_strings(partial_path, prefix, extracted_strings, scan_general)
+                # and add the <_prefix_<name>_> bit to the exclude list
+                result = extract_string_for_exclusion(filename, prefix)
+                if result:
+                    if result not in exclude_list:
+                        logger.info(f"Adding {result}")
+                        exclude_list.append(result)

-    # Deduplicate lists of dicts in extracted_strings
-    for key, value in extracted_strings.items():
-        if isinstance(value, list) and value and isinstance(value[0], dict):
-            # Deduplicate list of dicts using JSON serialization
-            seen = set()
-            deduped = []
-            for d in value:
-                ser = json.dumps(d, sort_keys=True)
-                if ser not in seen:
-                    seen.add(ser)
-                    deduped.append(d)
-            extracted_strings[key] = deduped
+    # # Deduplicate lists of dicts in extracted_strings
+    # for key, value in extracted_strings.items():
+    #     if isinstance(value, list) and value and isinstance(value[0], dict):
+    #         # Deduplicate list of dicts using JSON serialization
+    #         seen = set()
+    #         deduped = []
+    #         for d in value:
+    #             ser = json.dumps(d, sort_keys=True)
+    #             if ser not in seen:
+    #                 seen.add(ser)
+    #                 deduped.append(d)
+    #         extracted_strings[key] = deduped
+
+    # And take out the excluded ones
+    # Assumes extracted_strings is a dict where values are lists of dicts or strings
+    if exclude_list:
+        logger.info(f"Found {len(exclude_list)} items in exclude list")
+        for key in list(extracted_strings.keys()):
+            if key in exclude_list:
+                del extracted_strings[key]
     return extracted_strings

 def scan_file_for_lexical_strings(filepath, prefix, extracted_strings_dict, scan_general):
@@ -184,7 +220,7 @@ def scan_file_for_lexical_strings(filepath, prefix, extracted_strings_dict, scan_general):
                     if filepath not in extracted_strings_dict[s]:
                         extracted_strings_dict[s].append(filepath)
                 else:
-                    logger.error(f"Unexpected chars ({s}) found in {filepath}")
+                    logger.error(f"❌Unexpected chars ({s}) found in {filepath}")
                     continue
         else:
             pattern = re.compile(
@@ -204,37 +240,48 @@ def scan_file_for_lexical_strings(filepath, prefix, extracted_strings_dict, scan_general):
                     if filepath not in extracted_strings_dict[s]:
                         extracted_strings_dict[s].append(filepath)
                 else:
-                    logger.error(f"Unexpected chars ({s}) found in {filepath}")
+                    logger.error(f"❌Unexpected chars ({s}) found in {filepath}")
                     continue

 def read_lex_file(filepath):
     logger.info(f"Reading file: {filepath}")
     lex_data = {}
+    if not os.path.exists(filepath):
+        logger.warning(f"⚠️ File does not exist: {filepath}. Returning empty dictionary.")
+        return lex_data
     with open(filepath, 'r', encoding='utf-8') as f:
         content = f.read()
     # Improved regex: handles single/double quotes and escaped quotes in value
     pattern = r"""
        (['"])(.*?)\1 # key in quotes
        \s*=>\s*
        (['"])((?:\\.|(?!\3).)*)\3 # value in quotes, allowing escaped chars
     """
     matches = re.findall(pattern, content, re.DOTALL | re.VERBOSE)
     for _, key, quote, value in matches:
         # Unescape the quote character and backslashes in value
         value = value.replace(f"\\{quote}", quote).replace("\\\\", "\\")
         lex_data[key] = value
     return lex_data

 def write_lex_file(filepath, lex_data):
     """
     Writes a dictionary to a lex file, sorted alphabetically by key (case-insensitive).
+    Adds a header with the current date and time.
     """
     # Sort the dictionary by key, case-insensitive
     sorted_items = sorted(lex_data.items(), key=lambda item: item[0].lower())
+    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+    # Extract panel name using regex
+    match = re.search(r'.*/output/([^/]+)/', filepath)
+    panel_name = match.group(1) if match else 'Unknown'
+    logger.info(f"{filepath} {panel_name}")
+    header = f"#\n# Lex file for {panel_name} generated on {now}\n#\n"
     with open(filepath, 'w', encoding='utf-8') as f:
+        f.write(header)
         for key, value in sorted_items:
-            value = value.replace("'",'"')
+            value = value.replace("'", '"')
             f.write(f"'{key}' => '{value}',{os.linesep}")


 def read_languages_json(filepath):
@@ -245,18 +292,61 @@ def read_languages_json(filepath):
         languages = json.load(f)
     return languages

-def update_file_with_new_lexical_string(filepath, old_string, new_string):
-    try:
-        with open(filepath, 'r') as f:
-            content = f.read()
-        new_content = content.replace(old_string, new_string)
-        with open(filepath, 'w') as f:
-            f.write(new_content)
-        #map any single quotes to double
-        logger.info(f"Updated \'{old_string}\' to \'{new_string}\' in file: {filepath}")
-    except Exception as e:
-        logger.error(f"Error updating file {filepath}: {e}")
+def convert_single_to_double_quotes(text):
+    """
+    Replace all strings in single quotes with double quotes,
+    while preserving single-quoted apostrophes (e.g., escaped or internal).
+    Example: 'It\\'s fine' → "It's fine"
+    """
+    def replacer(match):
+        content = match.group(1)
+        # Unescape escaped single quotes to real single quotes
+        content = content.replace("\\'", "'")
+        # Escape any existing double quotes inside content
+        content = content.replace('"', r'\"')
+        return f'"{content}"'
+
+    # Regex explanation:
+    # (?<!\\) : negative lookbehind to ensure the quote is not escaped
+    # '((?:\\'|[^'])*?)' : capture content inside the quotes, allowing escaped quotes inside
+    pattern = r"'((?:\\'|[^'])*?)'"
+    return re.sub(pattern, replacer, text)
+
+def update_file_with_new_lexical_string(filepath, old_string, new_string, filepath_new=None):
+    """
+    Update occurrences of old_string with new_string in the file at filepath,
+    and write the result to filepath_new (defaults to filepath).
+    """
+    if filepath_new is None:
+        filepath_new = filepath
+    try:
+        with open(filepath, 'r', encoding='utf-8') as f:
+            content = f.read()
+        if old_string not in content:
+            logger.warning(f"⚠️ No occurrences of '{old_string}' found in {filepath}")
+            return False
+        new_content = content.replace(old_string, new_string)
+        if old_string in new_content:
+            logger.warning(f"⚠️ Still occurrences of '{old_string}' found in {filepath}")
+            return False
+        # Optionally, map any single quotes to double quotes in the result
+        new_content = convert_single_to_double_quotes(new_content)
+        with open(filepath_new, 'w', encoding='utf-8') as f:
+            f.write(new_content)
+
+        # ✅ Read back and verify
+        with open(filepath_new, 'r', encoding='utf-8') as f:
+            saved_output = f.read()
+        if new_string in saved_output and old_string not in saved_output:
+            logger.info(f"✅ Successfully replaced and verified '{old_string}' → '{new_string}' in {filepath_new}")
+            return True
+        else:
+            logger.error(f"❌ Replacement failed to appear in written file {filepath_new}")
+            return False
+
+        #logger.info(f"Updated '{old_string}' to '{new_string}' in file: {filepath_new}")
+    except Exception as e:
+        logger.error(f"❌Error updating file {filepath} (writing to {filepath_new}): {e}")

 def export_sorted_missing_lex(input_file1, input_file2, output_file):
     """
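
A minimal sketch (not from the commit) of the quote conversion the new helper performs, on made-up lex-style values:

import re

def convert(text):
    # Same regex/replacer idea as convert_single_to_double_quotes()
    def replacer(m):
        content = m.group(1).replace("\\'", "'").replace('"', r'\"')
        return f'"{content}"'
    return re.sub(r"'((?:\\'|[^'])*?)'", replacer, text)

print(convert("'Account created'"))   # "Account created"
print(convert(r"'It\'s fine'"))       # "It's fine"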
@@ -280,13 +370,71 @@ def export_sorted_missing_lex(input_file1, input_file2, output_file):
     #for k in sorted_missing_keys:
     #    print(f"'{k}' => '{dict1[k]}',")

+
+def get_new_filepath(filepath, panel_lex_output_dir, themes):
+    """
+    Constructs a new file path in panel_lex_output_dir:
+    - Adds `.new` before the extension.
+    - If any theme from `themes` appears in the filepath,
+      the file is placed inside a subdirectory named after the theme
+      under panel_lex_output_dir.
+
+    Args:
+        filepath (str): Original file path.
+        panel_lex_output_dir (str): Output directory path.
+        themes (list of str): Theme names to scan for in filepath.
+
+    Returns:
+        str: The constructed new file path.
+    """
+    original = Path(filepath)
+    new_name = original.stem + '.new' + original.suffix
+
+    theme_subdir = None
+    for theme in themes:
+        if theme in filepath:
+            theme_subdir = theme
+            break
+
+    output_dir = Path(panel_lex_output_dir)
+    if theme_subdir:
+        output_dir = output_dir / theme_subdir
+
+    # Ensure directory exists
+    os.makedirs(output_dir, exist_ok=True)
+
+    filepath_new = output_dir / new_name
+    return str(filepath_new)
+
+def delete_lex_output_files(panel_lex_output_dir):
+    """
+    Recursively deletes all .html.new.ep and .new.pm files in the given directory.
+    Returns a list of deleted file paths.
+    """
+    patterns = ['**/*.html.new.ep', '**/*.new.pm']
+    deleted_files = []
+
+    for pattern in patterns:
+        # Use glob with recursive=True
+        files = glob.glob(os.path.join(panel_lex_output_dir, pattern), recursive=True)
+        for file_path in files:
+            if os.path.isfile(file_path):
+                try:
+                    os.remove(file_path)
+                    deleted_files.append(file_path)
+                except Exception as e:
+                    print(f"Error deleting {file_path}: {e}")
+
+    return deleted_files
+
+
 def main():
-    parser = argparse.ArgumentParser(description="Scan Mojolicious application files for lexical strings.")
+    parser = argparse.ArgumentParser(description="Scan and audit Mojolicious application files for lexical strings.")
     parser.add_argument("-p", "--panel", required=True, help="Name of the Mojolicious panel (e.g., MyPanel).")
     parser.add_argument("-s", "--system", default="SME11", help="System name (default: SME11).")
-    parser.add_argument("-e", "--edit", action="store_true", help="Enable editing of original files (default: False).")
+    parser.add_argument("-a", "--audit", action="store_true", help="Enable audit of all strings (default: False).")
+    parser.add_argument("-e", "--edit", action="store_true", help="Enable audit of single words (default: False).")
    parser.add_argument("-l", "--lang", action="store_true", help="Enable other language processing (default: False).")


     args = parser.parse_args()

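
A minimal sketch (not from the commit) of the naming scheme get_new_filepath() produces, with made-up input paths; unlike the committed helper, this sketch does not create the output directory:

from pathlib import Path

def new_path(filepath, out_dir, themes):
    # Mirror of get_new_filepath() naming: insert ".new" before the final suffix and
    # nest under a theme subdirectory when a theme name occurs in the path.
    p = Path(filepath)
    name = p.stem + '.new' + p.suffix
    sub = next((t for t in themes if t in filepath), None)
    out = Path(out_dir) / sub if sub else Path(out_dir)
    return str(out / name)

themes = ["default", "AdminLTE"]
print(new_path("themes/default/templates/useraccounts.html.ep", "output/Useraccounts", themes))
# output/Useraccounts/default/useraccounts.html.new.ep
print(new_path("lib/SrvMngr/Controller/Useraccounts.pm", "output/Useraccounts", themes))
# output/Useraccounts/Useraccounts.new.pm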
@@ -294,8 +442,10 @@ def main():
     system = args.system
     edit_files = args.edit
     do_lang = args.lang
+    do_audit = args.audit

-    logger.warning(f"Lex scan for panel: {panel}, system: {system} edit: {edit_files} lang: {do_lang}\n")
+    logger.info(f"Lex scan for panel: {panel}, system: {system} audit: {do_audit} One word audit: {edit_files} Other lang: {do_lang}")

     validate_panel_name(panel)
     controller_path = check_controller_file_exists(system, panel)
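
With these options wired in, a typical run (panel name purely illustrative) would be: python3 lex_scan.py -p Useraccounts -a for the audit pass, adding -e for the single-word pass and -l for other-language processing, per the argparse help above.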
@@ -304,46 +454,137 @@ def main():
     if prefix:
         logger.info(f"Scanning application files for strings with prefix \'{prefix}\'...")
         extracted_panel_strings = scan_application_files(system, panel, prefix)
-        logger.info(f"Deduplicated extracted panel strings: {len(extracted_panel_strings)} unique strings found.")
+        logger.info(f"Deduplicated extracted panel strings: {len(extracted_panel_strings)} unique strings found")

-        # Process panel-specific English lexical file
-        # Output to current working directory
-        panel_lex_output_dir = os.path.join(os.getcwd(), "output", panel.capitalize())
-        os.makedirs(panel_lex_output_dir, exist_ok=True)
-
         full_base_path = get_full_base_path(system)
+        panel_lex_output_dir = os.path.join(os.getcwd(), "output", panel.capitalize())
+        general_lex_output_dir = os.path.join(os.getcwd(), "output", "General")

-        # Corrected capitalization for panel in path
-        en_lex_path = os.path.join(full_base_path, "lib/SrvMngr/I18N/Modules", panel, f"{panel.lower()}_en.lex.bak")
-        en_lex_new_path = os.path.join(panel_lex_output_dir, f"{panel.lower()}_en.lex.new")
-
-        en_lex_data = read_lex_file(en_lex_path)
-        logger.info(f"Original English lex file lines: {len(en_lex_data)}")
-        new_en_lex_data = {}
-
-        for lex_string in extracted_panel_strings.keys():
-            if lex_string in en_lex_data:
-                new_en_lex_data[lex_string] = en_lex_data[lex_string]
-            else:
-                #Replace rhs by the lhs less the prefix and no underlines, in lowercase (but capitalised)
-                # this may make a reasonable message, derived from the lex string id.
-                sometext = lex_string.replace(f"{prefix}_", "").replace("_", " ")
-                # Split into words
-                words = sometext.split()
-                # Lowercase all words, capitalize the first
-                if words:
-                    words = [words[0].capitalize()] + [w.lower() for w in words[1:]]
-                    sometext = ' '.join(words)
-                new_en_lex_data[lex_string] = sometext
-
-        write_lex_file(en_lex_new_path, new_en_lex_data)
-        logger.info(f"Generated {en_lex_new_path}. Lines in new file: {len(new_en_lex_data)}, Lines in original file: {len(en_lex_data)}")
-
-        #Create file of the ones not in the new lex file
-        output_diff_file = os.path.join(panel_lex_output_dir, f"{panel.lower()}_en.lex.diff")
-        export_sorted_missing_lex(en_lex_path, en_lex_new_path, output_diff_file)
+        if do_audit:
+            # Process panel-specific English lexical file
+            # Output to current working directory
+            os.makedirs(panel_lex_output_dir, exist_ok=True)
+
+            # Corrected capitalization for panel in path
+            en_lex_path = os.path.join(full_base_path, "lib/SrvMngr/I18N/Modules", panel, f"{panel.lower()}_en.lex.bak")
+            en_lex_new_path = os.path.join(panel_lex_output_dir, f"{panel.lower()}_en.lex.new")
+
+            en_lex_data = read_lex_file(en_lex_path)
+            logger.info(f"Original English lex file lines: {len(en_lex_data)}")
+            new_en_lex_data = {}
+
+            for lex_string in extracted_panel_strings.keys():
+                if lex_string in en_lex_data:
+                    new_en_lex_data[lex_string] = en_lex_data[lex_string]
+                else:
+                    #Replace rhs by the lhs less the prefix and no underlines, in lowercase (but capitalised)
+                    # this may make a reasonable message, derived from the lex string id.
+                    sometext = lex_string.replace(f"{prefix}_", "").replace("_", " ")
+                    # Split into words
+                    words = sometext.split()
+                    # Lowercase all words, capitalize the first
+                    if words:
+                        words = [words[0].capitalize()] + [w.lower() for w in words[1:]]
+                        sometext = ' '.join(words)
+                    new_en_lex_data[lex_string] = sometext
+
+            write_lex_file(en_lex_new_path, new_en_lex_data)
+            logger.info(f"Generated {en_lex_new_path}. Lines in new file: {len(new_en_lex_data)}, Lines in original file: {len(en_lex_data)}")
+
+            #Create file of the ones not in the new lex file
+            output_diff_file = os.path.join(panel_lex_output_dir, f"{panel.lower()}_en.lex.diff")
+            export_sorted_missing_lex(en_lex_path, en_lex_new_path, output_diff_file)
+
+            logger.info("Scanning application files for general lexical strings...")
+            extracted_general_strings = scan_application_files(system, panel, prefix, scan_general=True)
+            logger.info(f"Deduplicated extracted general strings: {len(extracted_general_strings)} unique strings found.")
+
+            os.makedirs(general_lex_output_dir, exist_ok=True)
+
+            general_en_lex_path_orig = os.path.join(full_base_path, "lib/SrvMngr/I18N/Modules", "General", "general_en.lex.bak")
+            general_en_lex_new_path = os.path.join(general_lex_output_dir, "general_en.lex.new")
+
+            general_en_lex_data_orig = read_lex_file(general_en_lex_path_orig)
+            logger.info(f"Original general English lex file lines: {len(general_en_lex_data_orig)}")
+            new_general_en_lex_data = read_lex_file(general_en_lex_new_path)
+
+            for lex_string in extracted_general_strings.keys():
+                if lex_string in general_en_lex_data_orig:
+                    new_general_en_lex_data[lex_string] = general_en_lex_data_orig[lex_string]
+                else:
+                    sometext = lex_string.replace("_", " ")
+                    sometext = sometext.replace("'",'"')
+                    # Split into words
+                    words = sometext.split()
+                    # Lowercase all words, capitalize the first
+                    if words:
+                        words = [words[0].capitalize()] + [w.lower() for w in words[1:]]
+                        sometext = ' '.join(words)
+                    new_general_en_lex_data[lex_string] = sometext
+            write_lex_file(general_en_lex_new_path, new_general_en_lex_data)
+            logger.info(f"Generated {general_en_lex_new_path}. Lines in new file: {len(new_general_en_lex_data)}, Lines in original file: {len(general_en_lex_data_orig)}")
+
+        logger.info("")
+        if edit_files:
+            logger.info("Handling single-word lexical strings...")
+
+            # Paths for original and new lex files
+            en_lex_path = os.path.join(full_base_path, "lib/SrvMngr/I18N/Modules", panel, f"{panel.lower()}_en.lex")
+            en_lex_new_path = os.path.join(panel_lex_output_dir, f"{panel.lower()}_en.lex.new1")
+            en_lex_data = read_lex_file(en_lex_path)
+            logger.info(f"Original English panel specific lex file lines: {len(en_lex_data)}")
+
+            general_en_lex_path_orig = os.path.join(general_lex_output_dir, "general_en.lex.new")
+            general_en_lex_data_orig = read_lex_file(general_en_lex_path_orig)
+
+            general_en_lex_new_path = os.path.join(general_lex_output_dir, "general_en.lex.new1")
+            new_general_en_lex_data = read_lex_file(general_en_lex_new_path) or general_en_lex_data_orig
+
+            logger.info(f"General English general lex file lines: {len(general_en_lex_data_orig)}")
+
+            # Delete temp .html.new.ep / .new.pm files
+            delete_lex_output_files(panel_lex_output_dir)
+
+            # Find one-word entries based on the string value, not key
+            for lex_string, filepaths in extracted_panel_strings.items():
+                # Check if the lex_string exists in panel lex data
+                if lex_string in en_lex_data:
+                    actual_string = en_lex_data[lex_string]
+                    # Look for clean, single-word values (e.g. "Save" or "DeleteMe")
+                    if actual_string.isalnum():
+                        just_one_word = actual_string
+
+                        # Move it to the general lex file if it's not there
+                        if just_one_word not in new_general_en_lex_data:
+                            new_general_en_lex_data[just_one_word] = just_one_word
+                            logger.info(f"Added '{just_one_word}' to general lex: {general_en_lex_new_path}")
+
+                        # Update source files that refer to this lex string
+                        for filepath in filepaths:
+                            # Compute a themed output filepath
+                            filepath_new = get_new_filepath(filepath, panel_lex_output_dir, themes)
+
+                            # Use existing modified version if available
+                            filepath_old = filepath_new if os.path.isfile(filepath_new) else filepath
+
+                            logger.info(f"Changing {lex_string} to {just_one_word} in file {filepath_old} → {filepath_new}")
+
+                            # Replace old lex_string with the actual string value
+                            update_file_with_new_lexical_string(filepath_old, lex_string, just_one_word, filepath_new)
+
+                        # Remove the entry from the panel lex file
+                        en_lex_data.pop(lex_string)
+
+            # Write updated lex files
+            write_lex_file(general_en_lex_new_path, new_general_en_lex_data)
+            write_lex_file(en_lex_new_path, en_lex_data)
+
+            logger.info(f"New General English general lex file lines: {len(new_general_en_lex_data)} written to {general_en_lex_new_path}")
+            logger.info(f"New English panel-specific lex file lines: {len(en_lex_data)} written to {en_lex_new_path}")

+        logger.info("")
         if do_lang:
+            # Panel specific lex files
             languages_json_path = os.path.join(".", "Templates", "languages.json") # Corrected path
             languages = read_languages_json(languages_json_path)

@@ -370,39 +611,7 @@ def main():
             write_lex_file(lang_lex_new_path, new_lang_lex_data)
             logger.info(f"Generated {lang_lex_new_path}. Lines in new file: {len(new_lang_lex_data)}, Lines in original file: {len(lang_lex_data)}")

-        logger.info("")
-        logger.info("Scanning application files for general lexical strings...")
-        extracted_general_strings = scan_application_files(system, panel, prefix, scan_general=True)
-        logger.info(f"Deduplicated extracted general strings: {len(extracted_general_strings)} unique strings found.")
-
-        general_lex_output_dir = os.path.join(os.getcwd(), "output", "General")
-        os.makedirs(general_lex_output_dir, exist_ok=True)
-
-        general_en_lex_path_orig = os.path.join(full_base_path, "lib/SrvMngr/I18N/Modules", "General", "general_en.lex.bak")
-        general_en_lex_new_path = os.path.join(general_lex_output_dir, "general_en.lex.new")
-
-        general_en_lex_data_orig = read_lex_file(general_en_lex_path_orig)
-        logger.info(f"Original general English lex file lines: {len(general_en_lex_data_orig)}")
-        new_general_en_lex_data = read_lex_file(general_en_lex_new_path)
-
-        for lex_string in extracted_general_strings.keys():
-            if lex_string in general_en_lex_data_orig:
-                new_general_en_lex_data[lex_string] = general_en_lex_data_orig[lex_string]
-            else:
-                sometext = lex_string.replace("_", " ")
-                sometext = sometext.replace("'",'"')
-                # Split into words
-                words = sometext.split()
-                # Lowercase all words, capitalize the first
-                if words:
-                    words = [words[0].capitalize()] + [w.lower() for w in words[1:]]
-                    sometext = ' '.join(words)
-                new_general_en_lex_data[lex_string] = sometext
-        write_lex_file(general_en_lex_new_path, new_general_en_lex_data)
-        logger.info(f"Generated {general_en_lex_new_path}. Lines in new file: {len(new_general_en_lex_data)}, Lines in original file: {len(general_en_lex_data_orig)}")
-
-        logger.info("")
-        if do_lang:
+            # General lex for for each language
             for lang_entry in languages:
                 lang_code = lang_entry["code"]
                 if lang_code == "en":
@@ -425,36 +634,6 @@ def main():
                 write_lex_file(general_lang_lex_new_path, new_general_lang_lex_data)
                 logger.info(f"Generated {general_lang_lex_new_path}. Lines in new file: {len(new_general_lang_lex_data)}, Lines in original file: {len(general_lang_lex_data)}")

-        logger.info("")
-        if edit_files:
-            logger.info("Handling single-word lexical strings...")
-            for lex_string, filepaths in extracted_panel_strings.items():
-                if lex_string.startswith(f"{prefix}_"):
-                    sometext_part = lex_string[len(prefix) + 1:]
-                    if "_" not in sometext_part:
-                        just_one_word = sometext_part
-
-                        if just_one_word not in new_general_en_lex_data:
-                            new_general_en_lex_data[just_one_word] = just_one_word
-                            logger.info(f"Added \'{just_one_word}\' to {general_en_lex_new_path}")
-                            write_lex_file(general_en_lex_new_path, new_general_en_lex_data)
-
-                        for lang_entry in languages:
-                            lang_code = lang_entry["code"]
-                            if lang_code == "en":
-                                continue
-                            general_lang_lex_path = os.path.join(full_base_path, "lib/SrvMngr/I18N/Modules", "General", f"general_{lang_code}.lex")
-                            general_lang_lex_new_path = os.path.join(general_lex_output_dir, f"general_{lang_code}.lex.new")
-
-                            current_general_lang_lex_data = read_lex_file(general_lang_lex_new_path)
-                            if just_one_word not in current_general_lang_lex_data:
-                                current_general_lang_lex_data[just_one_word] = just_one_word
-                                write_lex_file(general_lang_lex_new_path, current_general_lang_lex_data)
-                                logger.info(f"Added \'{just_one_word}\' to {general_lang_lex_new_path}")
-
-                            for filepath in filepaths:
-                                update_file_with_new_lexical_string(filepath, lex_string, just_one_word)
-
     else:
         logger.error("Could not determine prefix, exiting.")
         sys.exit(1)
@@ -462,7 +641,7 @@ def main():
     if missing_files:
        logger.warning("The following files were not found:")
        for f in missing_files:
-           logger.warning(f"- {f}")
+           logger.warning(f"⚠️ - {f}")

 if __name__ == "__main__":
     main()