1
0

Compare commits

...

2 Commits

Author SHA1 Message Date
440bc63432 refactor: refactor debugging tools 2026-01-24 22:26:49 +08:00
f7acb3bfa9 chore: update doc build 2026-01-24 21:25:10 +08:00
12 changed files with 274 additions and 124 deletions

View File

@@ -1,4 +0,0 @@
# Ignore test used 3d Object
*.bin
*.obj
*.mtl

View File

@@ -1,92 +0,0 @@
# Legacy mesh converter CLI (this script predates main.py).
# NOTE(review): `math` is imported but never used in this file.
import argparse, io, math, struct
# setup parser
# NOTE(review): the long option '--in-vbml' looks like a typo for '--in-vnml';
# dest='in_vnml' keeps the rest of the script working regardless.
parser = argparse.ArgumentParser(description='The Mesh Converter.')
parser.add_argument('-p', '--in-vpos', required=True, type=str, action='store', dest='in_vpos', metavar='vpos.bin')
parser.add_argument('-n', '--in-vbml', required=True, type=str, action='store', dest='in_vnml', metavar='vnml.bin')
parser.add_argument('-u', '--in-vuv', required=True, type=str, action='store', dest='in_vuv', metavar='vuv.bin')
parser.add_argument('-i', '--in-findices', required=True, type=str, action='store', dest='in_findices', metavar='findices.bin')
parser.add_argument('-o', '--out-obj', required=True, type=str, action='store', dest='out_obj', metavar='mesh.obj')
parser.add_argument('-m', '--out-mtl', required=True, type=str, action='store', dest='out_mtl', metavar='mesh.mtl')
# NOTE(review): `tuple[float]` means "a 1-tuple of float"; `tuple[float, ...]`
# (variable length) was probably intended for both aliases.
Vector = tuple[float]
Indices = tuple[int]
def GetFileLength(fs: io.BufferedReader) -> int:
    """Return the total length of `fs` in bytes without disturbing its position."""
    saved_pos = fs.tell()
    fs.seek(0, io.SEEK_END)
    total_size = fs.tell()
    # Rewind to where the caller left the stream.
    fs.seek(saved_pos, io.SEEK_SET)
    return total_size
def EvaluateCount(filename: str, unit_size: int) -> int:
    """Return how many `unit_size`-byte items fit exactly in the file.

    Raises if the file size is not an exact multiple of `unit_size`.
    """
    with open(filename, 'rb') as fs:
        total_bytes = GetFileLength(fs)
    whole_units, leftover = divmod(total_bytes, unit_size)
    if leftover != 0:
        raise Exception("invalid file length")
    return whole_units
def AssertFileSize(fs: io.BufferedReader, expected_size: int):
    """Raise if the stream's total length differs from `expected_size`."""
    actual_size = GetFileLength(fs)
    if actual_size != expected_size:
        raise Exception("invalid file length")
def ReadFloats(filename: str, count: int) -> tuple[float, ...]:
    """Read exactly `count` little-endian 32-bit floats from `filename`.

    Raises if the file size is not exactly `count * 4` bytes.
    """
    with open(filename, 'rb') as fs:
        # construct class
        cstruct = struct.Struct(f'<{count}f')
        # assert file size
        AssertFileSize(fs, cstruct.size)
        # read
        return cstruct.unpack(fs.read(cstruct.size))
def ReadShorts(filename: str, count: int) -> tuple[int, ...]:
    """Read exactly `count` little-endian unsigned 16-bit integers from `filename`.

    Raises if the file size is not exactly `count * 2` bytes.
    (Return annotation corrected: '<H' unpacks to int, not float.)
    """
    with open(filename, 'rb') as fs:
        # construct class
        cstruct = struct.Struct(f'<{count}H')
        # assert file size
        AssertFileSize(fs, cstruct.size)
        # read
        return cstruct.unpack(fs.read(cstruct.size))
def RecoupleTuple(fulllist: tuple, couple_count: int) -> tuple[tuple]:
    """Split a flat tuple into consecutive groups of `couple_count` items each.

    Raises if the tuple length is not an exact multiple of `couple_count`.
    """
    _, remainder = divmod(len(fulllist), couple_count)
    if remainder != 0:
        raise Exception("invalid tuple length")
    return tuple(
        tuple(fulllist[start:start + couple_count])
        for start in range(0, len(fulllist), couple_count)
    )
def GenerateObj(filename: str, vpos: tuple[Vector], vnml: tuple[Vector], vuv: tuple[Vector], findices: tuple[Indices]):
    """Serialize mesh components into a Wavefront OBJ file at `filename`.

    Face indices are shifted from 0-based to the 1-based indexing OBJ requires,
    and each face vertex reuses the same index for position, UV and normal.
    """
    with open(filename, 'w', encoding='utf-8') as fs:
        for pos in vpos:
            fs.write('v {0} {1} {2}\n'.format(pos[0], pos[1], pos[2]))
        for nml in vnml:
            fs.write('vn {0} {1} {2}\n'.format(nml[0], nml[1], nml[2]))
        for uv in vuv:
            fs.write('vt {0} {1}\n'.format(uv[0], uv[1]))
        for face in findices:
            fs.write('f {0}/{0}/{0} {1}/{1}/{1} {2}/{2}/{2}\n'.format(face[0] + 1, face[1] + 1, face[2] + 1))
        # Group statement closing the object.
        fs.write('g obj\n')
if __name__ == '__main__':
    # parse arg
    args = parser.parse_args()
    # The vertex count is derived from the position file alone:
    # 3 floats of 4 bytes per vertex.
    #input("Prepare VertexPositions please.")
    vertexcount = EvaluateCount(args.in_vpos, 3 * 4) # 3 float(4 bytes)
    print(f'Vertex Count Evaluated: {vertexcount}')
    vpos = RecoupleTuple(ReadFloats(args.in_vpos, 3 * vertexcount), 3)
    # Normals and UVs must describe the same vertex count; ReadFloats raises
    # if their file sizes disagree with that expectation.
    #input("Prepare VertexNormals please.")
    vnml = RecoupleTuple(ReadFloats(args.in_vnml, 3 * vertexcount), 3)
    #input("Prepare VertexUVs please.")
    vuv = RecoupleTuple(ReadFloats(args.in_vuv, 2 * vertexcount), 2)
    #input("Prepare FaceIndices please.")
    # Faces are triangles of 16-bit indices: 3 WORDs of 2 bytes each.
    facecount = EvaluateCount(args.in_findices, 3 * 2) # 3 WORD(2 bytes)
    print(f'Face Count Evaluated: {facecount}')
    findices = RecoupleTuple(ReadShorts(args.in_findices, 3 * facecount), 3)
    # NOTE(review): args.out_mtl is required by the parser but never consumed —
    # GenerateObj only receives out_obj and no MTL file is ever written.
    GenerateObj(args.out_obj, vpos, vnml, vuv, findices)
    print('Done')

View File

@@ -1,20 +0,0 @@
# Tools
The developer needs to know whether the loaded data is correct when testing LibCmo. So we created this folder: you can use Unvirt together with the tools located in this folder to verify the correctness of loaded data.
Unvirt can show the data of each CKObject, such as Texture, Mesh and etc. For example, Unvirt can provide vertex's position, normal, UV, even the face's indices data for CKMesh. You can use tools to broswer memory to get them, but you couldn't evaluate them how they shape a mesh. This is the reason why this folder existed and in this README I will tell you how to debug the loaded data.
I suggest you to use HxD to broswer memory, but if you have other softwares, use it freely.
## CKTexture
* Install [PixelViewer](https://github.com/carina-studio/PixelViewer) first.
* Change profile to `BGRA_8888` (actually is little-endian RGBA8888, but I think the developer of PixelViewer get confused).
* The image resolution can be gotten from Uvirt. Set it in PixelViewer.
* The image address also can be gotten from Unvirt. Save the memory image data to local file and open it by PixelViewer.
## CKMesh
* Have a executable Python.
* Save VertexPosition, VertexNormal, VertexUV, FaceIndices data into file according to the given memory address by Unvirt.
* Call `MeshConv.py`, set the argument properly, then you will get a converted Wavefront OBJ file.

18
Assets/Tools/MeshConv/.gitignore vendored Normal file
View File

@@ -0,0 +1,18 @@
## ======== Personal ========
# Ignore test used 3d Object
*.bin
*.obj
*.mtl
## ======== Python ========
# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info
# Virtual environments
.venv

View File

@@ -0,0 +1 @@
3.11

View File

@@ -0,0 +1,13 @@
# MeshConv
Build complete Wavefront OBJ file from separated data for debugging libcmo21.
## Usage
- Restore this project by Astral UV.
- Save all mesh components into separate files in this directory.
* Vertex position as `VertexPosition.bin` for example.
* Vertex normal as `VertexNormal.bin` for example.
* Vertex UV as `VertexUV.bin` for example.
* Face indices as `FaceIndices.bin` for example.
- Execute `uv run main.py -p VertexPosition.bin -n VertexNormal.bin -u VertexUV.bin -i FaceIndices.bin -o mesh.obj -m mesh.mtl` for example. It will utilize previous saved file to generate a Wavefront OBJ file `mesh.obj` and corresponding material file `mesh.mtl`. For the usage of these switches, please refer to the source code.

View File

@@ -0,0 +1,178 @@
import argparse
import io
import struct
import typing
import itertools
from pathlib import Path
from dataclasses import dataclass
# region: Kernel
# Generic item type used by batched_tuple.
T = typing.TypeVar('T')
# A vector of floats: positions/normals use 3 components, UVs use 2.
Vector = tuple[float, ...]
# A group of integer indices (one triangle's three vertex indices).
Indices = tuple[int, ...]
def get_file_length(fs: typing.BinaryIO) -> int:
    """
    Measure the total size of a seekable stream in bytes.

    The stream's current position is saved and restored, so the caller can
    continue reading from where it was.

    :param fs: File stream for measuring.
    :return: File length in bytes.
    """
    saved_pos = fs.tell()
    fs.seek(0, io.SEEK_END)
    total_size = fs.tell()
    fs.seek(saved_pos, io.SEEK_SET)
    return total_size
def evaluate_count(filename: Path, unit_size: int) -> int:
    """
    Evaluate the count of fixed-size items stored in given file.

    The size is taken from the filesystem directly; the file is not opened.

    :param filename: File name to evaluate.
    :param unit_size: Size of each item in bytes.
    :return: Count of items in given file.
    :raises ValueError: If the file size is not an exact multiple of ``unit_size``.
    """
    # stat() gives the size without the open/seek/tell dance.
    # Path(...) also tolerates a plain str argument.
    file_size = Path(filename).stat().st_size
    count, remainder = divmod(file_size, unit_size)
    if remainder != 0:
        # ValueError is still caught by callers using "except Exception".
        raise ValueError(f'size of "{filename}" ({file_size} bytes) is not a multiple of {unit_size}')
    return count
def assert_file_size(fs: typing.BinaryIO, expected_size: int):
    """
    Verify that a stream holds exactly the expected number of bytes.

    :param fs: File stream to check.
    :param expected_size: Expected file size.
    :raises Exception: If the actual length differs from the expectation.
    """
    actual_size = get_file_length(fs)
    if actual_size != expected_size:
        raise Exception("invalid file length")
def read_f32s(filename: Path, count: int) -> tuple[float, ...]:
    """
    Read ``count`` little-endian 32-bit floats from given file.

    The file must contain exactly ``count`` floats and nothing else.

    :param filename: File to read.
    :param count: Number of floats expected.
    :return: The decoded float values.
    """
    layout = struct.Struct(f'<{count}f')
    with open(filename, 'rb') as fs:
        # The file must hold exactly the bytes the layout describes.
        assert_file_size(fs, layout.size)
        return layout.unpack(fs.read(layout.size))
def read_u16s(filename: Path, count: int) -> tuple[int, ...]:
    """
    Read ``count`` little-endian unsigned 16-bit integers from given file.

    The file must contain exactly ``count`` values and nothing else.

    :param filename: File to read.
    :param count: Number of integers expected.
    :return: The decoded integer values.
    """
    layout = struct.Struct(f'<{count}H')
    with open(filename, 'rb') as fs:
        # The file must hold exactly the bytes the layout describes.
        assert_file_size(fs, layout.size)
        return layout.unpack(fs.read(layout.size))
def batched_tuple(full_list: tuple[T, ...], couple_count: int) -> tuple[tuple[T, ...], ...]:
    """
    Batch a tuple into a tuple of tuples.

    This function checks whether given tuple can be batched without any remnants.
    If it can not, an exception is raised; otherwise the batched tuple is returned.
    For example, given `('roses', 'red', 'violets', 'blue', 'sugar', 'sweet')` with count 2,
    it will produce `(('roses', 'red'), ('violets', 'blue'), ('sugar', 'sweet'))`.

    :param full_list: The tuple to batch.
    :param couple_count: The count of items in each batch.
    :return: The batched tuple.
    :raises ValueError: If the tuple length is not a multiple of `couple_count`.
    """
    # TODO: Replace the whole body with itertools.batched once we upgrade into Python 3.12
    # return tuple(itertools.batched(full_list, couple_count, strict=True))
    if len(full_list) % couple_count != 0:
        # ValueError is still caught by callers using "except Exception".
        raise ValueError("invalid tuple length")
    return tuple(
        tuple(full_list[start:start + couple_count])
        for start in range(0, len(full_list), couple_count)
    )
def build_obj_file(filename: Path, vpos: tuple[Vector, ...], vnml: tuple[Vector, ...], vuv: tuple[Vector, ...], findices: tuple[Indices, ...]):
    """
    Combine mesh components into a Wavefront OBJ file.

    Vertex positions, normals and UVs are written first, then the faces.
    OBJ indices are 1-based, so every 0-based face index is shifted by one;
    each face vertex reuses the same index for position, UV and normal.

    :param filename: Output OBJ file path.
    :param vpos: Vertex positions (x, y, z).
    :param vnml: Vertex normals (x, y, z).
    :param vuv: Vertex UVs (u, v).
    :param findices: 0-based face index triples.
    """
    with open(filename, 'w', encoding='utf-8') as fs:
        for pos in vpos:
            fs.write(f'v {pos[0]} {pos[1]} {pos[2]}\n')
        for nml in vnml:
            fs.write(f'vn {nml[0]} {nml[1]} {nml[2]}\n')
        for uv in vuv:
            fs.write(f'vt {uv[0]} {uv[1]}\n')
        for face in findices:
            a, b, c = face[0] + 1, face[1] + 1, face[2] + 1
            fs.write(f'f {a}/{a}/{a} {b}/{b}/{b} {c}/{c}/{c}\n')
        # Group statement closing the object.
        fs.write('g obj\n')
# endregion
# region Command Line Processor
@dataclass
class Cli:
    """Command Line Arguments"""
    in_vpos: Path
    """The path to file storing vertex positions"""
    in_vnml: Path
    """The path to file storing vertex normals"""
    in_vuv: Path
    """The path to file storing vertex UVs"""
    in_findices: Path
    """The path to file storing face indices"""
    out_obj: Path
    """The path to output OBJ file"""
    out_mtl: Path
    """The path to output MTL file"""

def parse() -> Cli:
    """
    Parse command line arguments into a `Cli` instance.

    :return: Parsed command line arguments.
    """
    # construct parser
    parser = argparse.ArgumentParser(description='The mesh data combinator for libcmo21 debugging.')
    # argparse converts each value straight to Path (type=Path),
    # so no manual wrapping is needed when building the Cli below.
    parser.add_argument('-p', '--in-vpos', required=True, type=Path, action='store', dest='in_vpos', metavar='vpos.bin',
                        help='''The path to file storing vertex positions''')
    parser.add_argument('-n', '--in-vnml', required=True, type=Path, action='store', dest='in_vnml', metavar='vnml.bin',
                        help='''The path to file storing vertex normals''')
    parser.add_argument('-u', '--in-vuv', required=True, type=Path, action='store', dest='in_vuv', metavar='vuv.bin',
                        help='''The path to file storing vertex UVs''')
    parser.add_argument('-i', '--in-findices', required=True, type=Path, action='store', dest='in_findices', metavar='findices.bin',
                        help='''The path to file storing face indices''')
    parser.add_argument('-o', '--out-obj', required=True, type=Path, action='store', dest='out_obj', metavar='mesh.obj',
                        help='''The path to output OBJ file''')
    parser.add_argument('-m', '--out-mtl', required=True, type=Path, action='store', dest='out_mtl', metavar='mesh.mtl',
                        help='''The path to output MTL file''')
    # parse arg
    args = parser.parse_args()
    # return value (keyword arguments guard against field reordering)
    return Cli(
        in_vpos=args.in_vpos,
        in_vnml=args.in_vnml,
        in_vuv=args.in_vuv,
        in_findices=args.in_findices,
        out_obj=args.out_obj,
        out_mtl=args.out_mtl,
    )
# endregion
def main():
    """Entry point: read the separate mesh component files and build the OBJ output."""
    # parse arguments
    opts = parse()
    # The vertex count is derived from the position file: 3 floats of 4 bytes per vertex.
    vertex_count = evaluate_count(opts.in_vpos, 3 * 4) # 3 float(4 bytes)
    print(f'Vertex Count Evaluated: {vertex_count}')
    # Normals and UVs must describe the same vertex count as positions;
    # read_f32s raises if their file sizes disagree with that expectation.
    vpos = batched_tuple(read_f32s(opts.in_vpos, 3 * vertex_count), 3)
    vnml = batched_tuple(read_f32s(opts.in_vnml, 3 * vertex_count), 3)
    vuv = batched_tuple(read_f32s(opts.in_vuv, 2 * vertex_count), 2)
    # Faces are triangles of 16-bit indices: 3 WORDs of 2 bytes each.
    face_count = evaluate_count(opts.in_findices, 3 * 2) # 3 WORD(2 bytes)
    print(f'Face Count Evaluated: {face_count}')
    findices = batched_tuple(read_u16s(opts.in_findices, 3 * face_count), 3)
    # NOTE(review): opts.out_mtl is accepted by the CLI but never consumed here —
    # build_obj_file only receives out_obj and no MTL file is written.
    # Confirm whether MTL generation is still intended.
    build_obj_file(opts.out_obj, vpos, vnml, vuv, findices)
    print('Done')

if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,7 @@
[project]
name = "mesh-conv"
version = "0.1.0"
description = "Build a complete Wavefront OBJ file from separated mesh data for debugging libcmo21"
readme = "README.md"
requires-python = ">=3.11"
dependencies = []

8
Assets/Tools/MeshConv/uv.lock generated Normal file
View File

@@ -0,0 +1,8 @@
version = 1
revision = 2
requires-python = ">=3.11"
[[package]]
name = "mesh-conv"
version = "0.1.0"
source = { virtual = "." }

24
Assets/Tools/README.md Normal file
View File

@@ -0,0 +1,24 @@
# Tools
The developer needs to know whether the loaded data is correct when testing LibCmo.
So we created this folder: you can use Unvirt together with the tools located in this folder to verify the correctness of loaded data.
Unvirt can show the data of each CKObject, such as Texture, Mesh and etc.
For example, Unvirt can provide vertex's position, normal, UV, even the face's indices data for CKMesh.
You can use tools to browse memory to get them, but you cannot easily evaluate how they shape a mesh.
This is why this folder exists; in this README I will explain how to debug the loaded data.
## Memory Inspector
I suggest using HxD to browse memory, but if there is other software you are already familiar with, feel free to use it.
## CKTexture Debugging
* Install [PixelViewer](https://github.com/carina-studio/PixelViewer) first.
* Change profile to `BGRA_8888` (actually is little-endian RGBA8888, but I think the developer of PixelViewer get confused).
* The image resolution can be obtained from Unvirt. Set it in PixelViewer.
* The image address also can be gotten from Unvirt. Save the image data from memory to local file and open it by PixelViewer.
## CKMesh Debugging
See [MeshConv README](./MeshConv/README.md)

View File

@@ -1,19 +1,36 @@
# NOTE(review): this span looks like a unified-diff dump with the +/- markers
# stripped — lines of the removed NeMoDocuments target and of its replacement
# LibCmoDocuments are interleaved below. Reconcile against the real file before editing.
# Configure Doxygen config file
# Extract all public macros defined in LibCmo
# However, you should note that these extracted macros have generator expressions.
get_target_property(LIBCMO_COMPILE_DEFINITIONS LibCmo COMPILE_DEFINITIONS)
if (LIBCMO_COMPILE_DEFINITIONS STREQUAL "LIBCMO_COMPILE_DEFINITIONS-NOTFOUND")
    message(FATAL_ERROR "Cannot extract compile definitions from LibCmo.")
endif ()
# Convert list to string for expanding in future.
list(JOIN LIBCMO_COMPILE_DEFINITIONS " " LIBCMO_MACRO_GENERATOR_EXPRESSIONS)
# We simply configure Doxygen config file first.
configure_file(
    ${CMAKE_CURRENT_LIST_DIR}/Doxyfile.in
    ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
    @ONLY
)
# Add custom target
# NOTE(review): the next two lines appear to be leftovers of the removed target
# (no closing parenthesis) — presumably deleted lines of the diff.
add_custom_target (NeMoDocuments
doxygen ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
# Then we use "file GENERATE" syntax to generate per-config truly Doxyfile used by Doxygen.
# Because there is no "$<>" syntax in Doxyfile, so we can safely use it.
# Please note that the generation of "file GENERATE" syntax will be postponed until the build stage.
file(GENERATE
    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/Doxyfile"
    INPUT "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile"
    TARGET LibCmo
)
# Add custom target using per-config Doxyfile
add_custom_target (LibCmoDocuments
    Doxygen::doxygen "${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/Doxyfile"
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    # NOTE(review): duplicated COMMENT lines — one is presumably the pre-change version.
    COMMENT "Generating LibCmo documentation" VERBATIM
    COMMENT "Generating documentation" VERBATIM
)
# Install built documentation
install (DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html
    CONFIGURATIONS Release RelWithDebInfo MinSizeRel
    DESTINATION ${NEMO_INSTALL_DOC_PATH}
)

View File

@@ -2310,7 +2310,7 @@ PERLMOD_MAKEVAR_PREFIX =
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = NO
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
@@ -2360,7 +2360,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = YYCC_DOXYGEN
PREDEFINED = @LIBCMO_MACRO_GENERATOR_EXPRESSIONS@
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The