Major XCI/XCZ rewrite to support multiple XCI partitions #160

Merged · 26 commits · Dec 17, 2023
Commits
5394184
Keep all XCI partitions if --keep is specified
nicoboss Dec 3, 2023
a88575b
Implemented multiple fixes for the new --keep feature to keep all XCI…
nicoboss Dec 8, 2023
5772bd3
Fixed XCI/XCZ verification and tried figuring out why XCZ decompress…
nicoboss Dec 9, 2023
172388b
Fixed XCI/XCZ decompression crash by removing accidentally added endl…
nicoboss Dec 9, 2023
e5a0654
Removed debugging code after fixing the XCI/XCZ decompression crash
nicoboss Dec 9, 2023
4e61a75
XCZ to XCI decompression now results in identical files as in the pre…
nicoboss Dec 9, 2023
35b62c4
XCI to XCZ compression now results in identical files as in the previ…
nicoboss Dec 9, 2023
b445f66
Achieved hactoolnet compatibility for XCZ to XCI decompression
nicoboss Dec 9, 2023
dccf9c0
Fixed a major issue with the new HFS0 creation caused by using the sa…
nicoboss Dec 10, 2023
f763bc4
Fixed a major issue with the new PFS0 creation caused by using the sa…
nicoboss Dec 10, 2023
906b5c8
Improved console output when adding files to a container
nicoboss Dec 10, 2023
7f30a2f
Improved --info console output by using f-strings and hexadecimal values
nicoboss Dec 10, 2023
1f4f56c
HFS0 partitions should never be smaller than 0x200 bytes
nicoboss Dec 10, 2023
9404d45
Fixed XCI compression issue resulting in multiple HFS0 partition at t…
nicoboss Dec 16, 2023
dbe3551
Fixed HFS0 offset issue by using f.tell() instead of addpos
nicoboss Dec 16, 2023
17ce2eb
Fixed PFS0 and HFS0 addpos logic so it works for both compression an…
nicoboss Dec 16, 2023
87e0a86
Fixed PFS0 and HFS0 addpos logic so it works for block compression
nicoboss Dec 16, 2023
c7e2ae5
Fixed major root hfs0 issue which caused hfs0 files inside the root hf…
nicoboss Dec 17, 2023
28096b5
Improved console output to show more but not too much information to …
nicoboss Dec 17, 2023
fb70f41
Add empty partitions instead of skipping them as the Yuzu Emulator ca…
nicoboss Dec 17, 2023
2d06d70
Apply the root HFS0 fixes to block compression
nicoboss Dec 17, 2023
378aacb
Fixed NCA hash verification not being able to deal with partitions ot…
nicoboss Dec 17, 2023
3f06ba5
Re-enabled solid compression as everything with multi-partition HFS0 …
nicoboss Dec 17, 2023
bf4ba02
Fixed major block decompression issue caused by calculating too many …
nicoboss Dec 17, 2023
7ed0981
Improved the console output by removing debug output and improving th…
nicoboss Dec 17, 2023
e98c405
Separated ThreadSafeCounter into ThreadSafeCounterManager for Android…
nicoboss Dec 17, 2023
29 changes: 21 additions & 8 deletions nsz/BlockCompressor.py
@@ -4,13 +4,18 @@
from pathlib import Path
from traceback import format_exc
from zstandard import ZstdCompressionParameters, ZstdCompressor
from nsz.ThreadSafeCounter import Counter
from nsz.SectionFs import isNcaPacked, sortedFs
from multiprocessing import Process, Manager
from nsz.Fs import Pfs0, Hfs0, Nca, Type, Ticket, Xci, factory
from nsz.PathTools import *
import enlighten
#import sys
import sys

if hasattr(sys, 'getandroidapilevel'):
from nsz.ThreadSafeCounterManager import Counter
else:
from nsz.ThreadSafeCounterSharedMemory import Counter


def compressBlockTask(in_queue, out_list, readyForWork, pleaseKillYourself, blockSize):
while True:
@@ -220,22 +225,30 @@ def blockCompressNsp(filePath, compressionLevel, keep, fixPadding, useLongDistan

container.close()
return nszPath


def allign0x200(n):
return 0x200-n%0x200

def blockCompressXci(filePath, compressionLevel, keep, fixPadding, useLongDistanceMode, blockSizeExponent, outputDir, threads):
filePath = filePath.resolve()
container = factory(filePath)
container.open(str(filePath), 'rb')
secureIn = container.hfs0['secure']
xczPath = outputDir.joinpath(filePath.stem + '.xcz')

Print.info(f'Block compressing (level {compressionLevel}{" ldm" if useLongDistanceMode else ""}) {filePath} -> {xczPath}')

try:
with Xci.XciStream(str(xczPath), originalXciPath = filePath) as xci: # need filepath to copy XCI container settings
with Hfs0.Hfs0Stream(xci.hfs0.add('secure', 0), xci.f.tell()) as secureOut:
blockCompressContainer(secureIn, secureOut, compressionLevel, keep, useLongDistanceMode, blockSizeExponent, threads)

xci.hfs0.resize('secure', secureOut.actualSize)
for partitionIn in container.hfs0:
xci.hfs0.written = False
hfsPartitionOut = xci.hfs0.add(partitionIn._path, 0)
with Hfs0.Hfs0Stream(hfsPartitionOut, xci.f) as partitionOut:
if keep == True or partitionIn._path == 'secure':
blockCompressContainer(partitionIn, partitionOut, compressionLevel, keep, useLongDistanceMode, blockSizeExponent, threads)
alignedSize = partitionOut.actualSize + allign0x200(partitionOut.actualSize)
xci.hfs0.resize(partitionIn._path, alignedSize)
print(f'[RESIZE] {partitionIn._path} to {hex(alignedSize)}')
xci.hfs0.addpos += alignedSize
except BaseException as ex:
if not ex is KeyboardInterrupt:
Print.error(format_exc())
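
A minimal, self-contained sketch of the 0x200 padding arithmetic used by blockCompressXci above (illustration only, not part of the diff; alignedPartitionSize is a hypothetical helper name):

def allign0x200(n):
    return 0x200 - n % 0x200

def alignedPartitionSize(actualSize):
    # space reserved for an HFS0 partition entry, mirroring the resize/addpos lines above
    return actualSize + allign0x200(actualSize)

assert alignedPartitionSize(0x1FF) == 0x200
assert alignedPartitionSize(0x201) == 0x400
assert alignedPartitionSize(0x400) == 0x600  # an already aligned size still gains a full 0x200 of padding
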
4 changes: 2 additions & 2 deletions nsz/BlockDecompressorReader.py
@@ -16,7 +16,7 @@ def __init__(self, nspf, BlockHeader):
self.BlockSize = 2**BlockHeader.blockSizeExponent
self.CompressedBlockOffsetList = [initialOffset]

for compressedBlockSize in BlockHeader.compressedBlockSizeList:
for compressedBlockSize in BlockHeader.compressedBlockSizeList[:-1]:
self.CompressedBlockOffsetList.append(self.CompressedBlockOffsetList[-1] + compressedBlockSize)

self.CompressedBlockSizeList = BlockHeader.compressedBlockSizeList
@@ -28,7 +28,7 @@ def __decompressBlock(self, blockID):
if blockID >= len(self.CompressedBlockOffsetList) - 1:
if blockID >= len(self.CompressedBlockOffsetList):
raise EOFError("BlockID exceeds the amounts of compressed blocks in that file!")
decompressedBlockSize = self.BlockHeader.decompressedSize % BlockSize
decompressedBlockSize = self.BlockHeader.decompressedSize % self.BlockSize
self.nspf.seek(self.CompressedBlockOffsetList[blockID])
if self.CompressedBlockSizeList[blockID] < decompressedBlockSize:
self.CurrentBlock = ZstdDecompressor().decompress(self.nspf.read(decompressedBlockSize))
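
The two hunks above fix the block offset table (one offset per compressed block, so the last block size must not generate an extra entry) and qualify BlockSize with self. A standalone sketch of the corrected offset calculation (illustration only, not the shipped class):

def buildBlockOffsets(initialOffset, compressedBlockSizeList):
    # block i starts at initialOffset plus the sizes of all blocks before it
    offsets = [initialOffset]
    for compressedBlockSize in compressedBlockSizeList[:-1]:
        offsets.append(offsets[-1] + compressedBlockSize)
    return offsets  # exactly len(compressedBlockSizeList) entries

assert buildBlockOffsets(0x4000, [10, 20, 30]) == [0x4000, 0x400A, 0x401E]
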
24 changes: 8 additions & 16 deletions nsz/FileExistingChecks.py
@@ -7,23 +7,15 @@
from nsz.PathTools import *
import os

def ExtractHashes(gamePath):
def ExtractHashes(container):
fileHashes = set()
gamePath = gamePath.resolve()
container = factory(gamePath)
container.open(str(gamePath), 'rb')
if isXciXcz(gamePath):
container = container.hfs0['secure']
try:
for nspf in container:
if isinstance(nspf, Nca.Nca) and nspf.header.contentType == Type.Content.META:
for section in nspf:
if isinstance(section, Pfs0.Pfs0):
Cnmt = section.getCnmt()
for entry in Cnmt.contentEntries:
fileHashes.add(entry.hash.hex())
finally:
container.close()
for nspf in container:
if isinstance(nspf, Nca.Nca) and nspf.header.contentType == Type.Content.META:
for section in nspf:
if isinstance(section, Pfs0.Pfs0):
Cnmt = section.getCnmt()
for entry in Cnmt.contentEntries:
fileHashes.add(entry.hash.hex())
return fileHashes

def ExtractTitleIDAndVersion(gamePath, args = None):
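
ExtractHashes now receives an already opened container (for XCI/XCZ, a single HFS0 partition) instead of a path, so opening, partition selection and closing move to the caller. A hedged sketch of one possible caller, reusing the module's existing factory/isXciXcz imports (the wrapper function itself is hypothetical):

def extractHashesFromPath(gamePath):
    gamePath = gamePath.resolve()
    container = factory(gamePath)
    container.open(str(gamePath), 'rb')
    try:
        if isXciXcz(gamePath):
            fileHashes = set()
            for partition in container.hfs0:  # hash every HFS0 partition
                fileHashes |= ExtractHashes(partition)
            return fileHashes
        return ExtractHashes(container)
    finally:
        container.close()
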
16 changes: 8 additions & 8 deletions nsz/Fs/BaseFs.py
@@ -148,17 +148,17 @@ def getCnmt(self):

def printInfo(self, maxDepth = 3, indent = 0):
tabs = '\t' * indent
Print.info(tabs + 'magic = ' + str(self.magic))
Print.info(tabs + 'fsType = ' + str(self.fsType))
Print.info(tabs + 'cryptoType = ' + str(self.cryptoType))
Print.info(tabs + 'size = ' + str(self.size))
Print.info(tabs + 'headerSize = %s' % (str(self._headerSize)))
Print.info(tabs + 'offset = %s - (%s)' % (str(self.offset), str(self.sectionStart)))
Print.info(f'{tabs}magic = {self.magic}')
Print.info(f'{tabs}fsType = {self.fsType}')
Print.info(f'{tabs}cryptoType = {self.cryptoType}')
Print.info(f'{tabs}size = {hex(self.size)}')
Print.info(f'{tabs}headerSize = {self._headerSize}')
Print.info(f'{tabs}offset = {hex(self.offset)} - ({hex(self.sectionStart)})')
if self.cryptoCounter:
Print.info(tabs + 'cryptoCounter = ' + str(hx(self.cryptoCounter)))
Print.info(f'{tabs}cryptoCounter = {hx(self.cryptoCounter)}')

if self.cryptoKey:
Print.info(tabs + 'cryptoKey = ' + str(hx(self.cryptoKey)))
Print.info(f'{tabs}cryptoKey = {hx(self.cryptoKey)}')

Print.info('\n%s\t%s\n' % (tabs, '*' * 64))
Print.info('\n%s\tFiles:\n' % (tabs))
8 changes: 4 additions & 4 deletions nsz/Fs/File.py
@@ -277,9 +277,9 @@ def setBktrCounter(self, ctr_val, ofs):
def printInfo(self, maxDepth = 3, indent = 0):
tabs = '\t' * indent
if self._path:
Print.info('%sFile Path: %s' % (tabs, self._path))
Print.info('%sFile Size: %s' % (tabs, self.size))
Print.info('%sFile Offset: %s' % (tabs, self.offset))
Print.info(f'{tabs}File Path: {self._path}')
Print.info(f'{tabs}File Size: {hex(self.size)}')
Print.info(f'{tabs}File Offset: {hex(self.offset)}')

def sha256(self):
hash = hashlib.sha256()
@@ -322,7 +322,7 @@ def read(self, size = None, direct = False):
if pageReadSize > self.size - self._bufferOffset:
pageReadSize = self.size - self._bufferOffset

#Print.info('disk read %s\t\t: relativePos = %x, bufferOffset = %x, align = %x, size = %x, pageReadSize = %x, bufferSize = %x' % (self.__class__.__name__, self._relativePos, self._bufferOffset, self._bufferAlign, size, pageReadSize, self._bufferSize))
#Print.info('disk read %s\t\t: absolutePos %x, relativePos = %x, bufferOffset = %x, align = %x, size = %x, pageReadSize = %x, bufferSize = %x' % (self.__class__.__name__, self.tellAbsolute(), self._relativePos, self._bufferOffset, self._bufferAlign, size, pageReadSize, self._bufferSize))
super(BufferedFile, self).seek(self._bufferOffset)
self._buffer = super(BufferedFile, self).read(pageReadSize)
self.pageRefreshed()
29 changes: 20 additions & 9 deletions nsz/Fs/Hfs0.py
@@ -21,10 +21,10 @@ def __init__(self, f, mode = 'wb'):
super(Hfs0Stream, self).__init__(f, mode)
self.headerSize = 0x8000
self.files = []

self.actualSize = 0

self.seek(self.headerSize)
self.addpos = self.headerSize
self.written = False

def __enter__(self):
return self
@@ -34,18 +34,25 @@ def __exit__(self, type, value, traceback):

def write(self, value, size = None):
super(Hfs0Stream, self).write(value, len(value))
if self.tell() > self.actualSize:
self.actualSize = self.tell()
self.written = True
pos = self.tell()
if pos > self.actualSize:
self.actualSize = pos

def add(self, name, size, pleaseNoPrint = None):
Print.info('[ADDING] {0} {1} bytes to NSP'.format(name, size), pleaseNoPrint)
self.files.append({'name': name, 'size': size, 'offset': self.f.tell()})
return self.partition(self.f.tell(), size, n = BaseFile())
if self.written:
self.addpos = self.tell()
self.written = False
Print.info(f'[ADDING] {name} {hex(size)} bytes to HFS0 at {hex(self.addpos)}', pleaseNoPrint)
partition = self.partition(self.addpos, size, n = BaseFile())
self.files.append({'name': name, 'size': size, 'offset': self.addpos, 'partition': partition})
self.addpos += size
return partition

def get(self, name):
for i in self.files:
if i['name'] == name:
return i
return i['partition']
return None

def resize(self, name, size):
@@ -64,6 +71,9 @@ def close(self):
self.write(self.getHeader())
super(Hfs0Stream, self).close()

def updateHashHeader(self):
pass

def getHeader(self):
stringTable = '\x00'.join(file['name'] for file in self.files)+'\x00'

@@ -78,7 +88,7 @@ def getHeader(self):
stringOffset = 0

for f in self.files:
sizeOfHashedRegion = 0x200 if 0x200 < f['size'] else f['size']
sizeOfHashedRegion = 0 #0x200 if 0x200 < f['size'] else f['size']

h += (f['offset'] - headerSize).to_bytes(8, byteorder='little')
h += f['size'].to_bytes(8, byteorder='little')
Expand Down Expand Up @@ -126,6 +136,7 @@ def open(self, path = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, crypto
nameOffset = self.readInt32() # just the offset
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
Print.info(f'[OPEN ] {name} {hex(size)} bytes at {hex(offset)}')

self.readInt32() # junk data

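
The heart of the Hfs0Stream change is that add() reserves space at a tracked addpos and returns a bounded partition object, while addpos only jumps forward to the current stream position once data has actually been written. A deliberately simplified stand-in model of that bookkeeping (not the real class, which wraps a file object):

class OffsetTracker:
    def __init__(self, headerSize = 0x8000):
        self.pos = headerSize      # current write position of the stream
        self.addpos = headerSize   # offset handed to the next add()
        self.written = False       # set by write(), cleared by add()

    def write(self, data):
        self.pos += len(data)
        self.written = True

    def add(self, name, size):
        if self.written:           # only skip past data that was really written
            self.addpos = self.pos
            self.written = False
        offset = self.addpos
        self.addpos += size
        return {'name': name, 'offset': offset, 'size': size}

tracker = OffsetTracker()
tracker.add('secure', 0)           # placeholder entry at 0x8000
tracker.write(b'\x00' * 0x400)     # data written through the returned partition
assert tracker.add('normal', 0x200)['offset'] == 0x8400
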
19 changes: 13 additions & 6 deletions nsz/Fs/Pfs0.py
@@ -26,6 +26,7 @@ def __init__(self, headerSize, stringTableSize, path, mode = 'wb'):
self.actualSize = 0
self.f.seek(self.headerSize)
self.addpos = self.headerSize
self.written = False

def __enter__(self):
return self
@@ -35,13 +36,18 @@ def __exit__(self, type, value, traceback):

def write(self, value, size = None):
super(Pfs0Stream, self).write(value, len(value))
if self.tell() > self.actualSize:
self.actualSize = self.tell()
self.written = True
pos = self.tell()
if pos > self.actualSize:
self.actualSize = pos

def add(self, name, size, pleaseNoPrint = None):
Print.info('[ADDING] {0} {1} bytes to NSP'.format(name, size), pleaseNoPrint)
partition = self.partition(self.f.tell(), size, n = BaseFile())
self.files.append({'name': name, 'size': size, 'offset': self.f.tell(), 'partition': partition})
if self.written:
self.addpos = self.tell()
self.written = False
Print.info(f'[ADDING] {name} {hex(size)} bytes to PFS0 at {hex(self.addpos)}', pleaseNoPrint)
partition = self.partition(self.addpos, size, n = BaseFile())
self.files.append({'name': name, 'size': size, 'offset': self.addpos, 'partition': partition})
self.addpos += size
return partition

Expand Down Expand Up @@ -133,7 +139,7 @@ def tell(self):
return self.pos

def add(self, name, size, pleaseNoPrint = None):
Print.info('[ADDING] {0} {1} bytes to NSP'.format(name, size), pleaseNoPrint)
Print.info(f'[ADDING] {name} {hex(size)} bytes to PFS0 at {hex(self.addpos)}', pleaseNoPrint)
self.files.append({'name': name, 'size': size, 'offset': self.addpos})
self.addpos += size
return self
@@ -251,6 +257,7 @@ def open(self, path = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, crypto
nameOffset = self.readInt32() # just the offset
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
Print.info(f'[OPEN ] {name} {hex(size)} bytes at {hex(offset)}')

self.readInt32() # junk data

11 changes: 6 additions & 5 deletions nsz/Fs/Xci.py
@@ -58,18 +58,19 @@
self.close()

def add(self, name, size, pleaseNoPrint = None):
Print.info('[ADDING] {0} {1} bytes to NSP'.format(name, size), pleaseNoPrint)
self.files.append({'name': name, 'size': size, 'offset': self.f.tell()})
t = {'name': name, 'size': size, 'offset': self.f.tell()}
return self.f
Print.info(f'[ADDING] {name} {hex(size)} bytes to XCI at {hex(self.f.tell())}', pleaseNoPrint)
partition = self.partition(self.f.tell(), size, n = BaseFile())
self.files.append({'name': name, 'size': size, 'offset': self.f.tell(), 'partition': partition})
self.addpos += size
return partition

def currentFileSize(self):
return self.f.tell() - self.files[-1]['offset']

def get(self, name):
for i in self.files:
if i['name'] == name:
return i
return i['partition']
return None

def resize(self, name, size):
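
XciStream.add() now returns a partition (a bounded sub-file at the current offset) instead of the raw underlying file handle, and get() returns that stored partition, so callers such as blockCompressXci and __decompressXcz write through a windowed view. A toy illustration of the idea (PartitionView is a hypothetical stand-in for nsz's BaseFile partition):

import io

class PartitionView:
    def __init__(self, backing, offset, size):
        self.backing, self.offset, self.size = backing, offset, size
        self.pos = 0

    def write(self, data):
        self.backing.seek(self.offset + self.pos)
        self.backing.write(data)
        self.pos += len(data)

backing = io.BytesIO(b'\x00' * 0x200)      # pretend this is the XCI header area
files = []

def add(name, size):
    offset = backing.seek(0, io.SEEK_END)  # current end of the container
    view = PartitionView(backing, offset, size)
    files.append({'name': name, 'size': size, 'offset': offset, 'partition': view})
    return view

secure = add('secure', 0x200)
secure.write(b'HFS0')                      # caller writes through the partition, not backing directly
assert files[0]['offset'] == 0x200
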
37 changes: 22 additions & 15 deletions nsz/NszDecompressor.py
@@ -60,6 +60,7 @@ def __decompressContainer(readContainer, writeContainer, fileHashes, write, rais
writeContainer.add(newFileName, nca_size, pleaseNoPrint)
writeContainer.updateHashHeader()
for nspf in readContainer:
Print.info('[EXISTS] {0}'.format(nspf._path), pleaseNoPrint)
if not nspf._path.endswith('.ncz'):
verifyFile = nspf._path.endswith('.nca') and not nspf._path.endswith('.cnmt.nca')
hash = sha256()
@@ -70,24 +71,27 @@
if write:
writeContainer.get(nspf._path).write(inputChunk)
if verifyFile:
if hash.hexdigest() in fileHashes:
Print.info('[VERIFIED] {0}'.format(nspf._path), pleaseNoPrint)
hashHexdigest = hash.hexdigest()
if hashHexdigest in fileHashes:
Print.info(f'[NCA HASH] {hashHexdigest}', pleaseNoPrint)
Print.info(f'[VERIFIED] {nspf._path} {hashHexdigest}', pleaseNoPrint)
else:
Print.info('[CORRUPTED] {0}'.format(nspf._path), pleaseNoPrint)
Print.info(f'[NCA HASH] {hashHexdigest}', pleaseNoPrint)
Print.info(f'[CORRUPTED] {nspf._path} {hashHexdigest}', pleaseNoPrint)
if raiseVerificationException:
raise VerificationException("Verification detected hash mismatch!")
elif not write:
Print.info('[EXISTS] {0}'.format(nspf._path), pleaseNoPrint)
continue
newFileName = Path(nspf._path).stem + '.nca'
if write:
written, hexHash = __decompressNcz(nspf, writeContainer.get(newFileName), statusReportInfo, pleaseNoPrint)
else:
written, hexHash = __decompressNcz(nspf, None, statusReportInfo, pleaseNoPrint)
if hexHash in fileHashes:
Print.info('[VERIFIED] {0}'.format(nspf._path), pleaseNoPrint)
Print.info(f'[NCA HASH] {hexHash}', pleaseNoPrint)
Print.info(f'[VERIFIED] {nspf._path}', pleaseNoPrint)
else:
Print.info('[CORRUPTED] {0}'.format(nspf._path), pleaseNoPrint)
Print.info(f'[NCA HASH] {hexHash}', pleaseNoPrint)
Print.info(f'[CORRUPTED] {nspf._path}', pleaseNoPrint)
if raiseVerificationException:
raise VerificationException("Verification detected hash mismatch")
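
The verification branch above streams each .nca through sha256 and checks the digest against the hash set collected from the CNMT entries. A tiny standalone illustration of that check (verifyNca and its chunk iterable are hypothetical):

from hashlib import sha256

def verifyNca(chunks, fileHashes):
    hash = sha256()
    for chunk in chunks:           # chunks of one .nca read from the container
        hash.update(chunk)
    return hash.hexdigest() in fileHashes

assert verifyNca([b'abc'], {sha256(b'abc').hexdigest()})
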

@@ -135,7 +139,7 @@ def __decompressNcz(nspf, f, statusReportInfo, pleaseNoPrint):
useBlockCompression = blockMagic == b'NCZBLOCK'
blockSize = -1
if useBlockCompression:
Print.info("[NCZBLOCK] Using Block decompresion")
Print.info(f'[NCZBLOCK] Using Block decompresion for {nspf._path}')
BlockHeader = Header.Block(nspf)
blockDecompressorReader = BlockDecompressorReader.BlockDecompressorReader(nspf, BlockHeader)
pos = nspf.tell()
@@ -210,9 +214,9 @@ def __decompressNcz(nspf, f, statusReportInfo, pleaseNoPrint):


def __decompressNsz(filePath, outputDir, fixPadding, write, raiseVerificationException, raisePfs0Exception, originalFilePath, statusReportInfo, pleaseNoPrint):
fileHashes = FileExistingChecks.ExtractHashes(filePath)
container = factory(filePath)
container.open(str(filePath), 'rb')
fileHashes = FileExistingChecks.ExtractHashes(container)

try:
if write:
@@ -264,20 +268,23 @@


def __decompressXcz(filePath, outputDir, fixPadding, write, raiseVerificationException, raisePfs0Exception, originalFilePath, statusReportInfo, pleaseNoPrint):
fileHashes = FileExistingChecks.ExtractHashes(filePath)
container = factory(filePath)
container.open(str(filePath), 'rb')
secureIn = container.hfs0['secure']

if write:
filePathXci = changeExtension(filePath, '.xci')
outPath = filePathXci if outputDir == None else str(Path(outputDir).joinpath(Path(filePathXci).name))
Print.info('Decompressing %s -> %s' % (filePath, outPath), pleaseNoPrint)
with Xci.XciStream(outPath, originalXciPath = filePath) as xci: # need filepath to copy XCI container settings
with Hfs0.Hfs0Stream(xci.hfs0.add('secure', 0, pleaseNoPrint), xci.f.tell()) as secureOut:
__decompressContainer(secureIn, secureOut, fileHashes, write, raiseVerificationException, raisePfs0Exception, statusReportInfo, pleaseNoPrint)
xci.hfs0.resize('secure', secureOut.actualSize)
for partitionIn in container.hfs0:
fileHashes = FileExistingChecks.ExtractHashes(partitionIn)
hfsPartitionIn = xci.hfs0.add(partitionIn._path, 0x200, pleaseNoPrint)
with Hfs0.Hfs0Stream(hfsPartitionIn, xci.f.tell()) as partitionOut:
__decompressContainer(partitionIn, partitionOut, fileHashes, write, raiseVerificationException, raisePfs0Exception, statusReportInfo, pleaseNoPrint)
xci.hfs0.resize(partitionIn._path, partitionOut.actualSize)
else:
__decompressContainer(secureIn, None, fileHashes, write, raiseVerificationException, raisePfs0Exception, statusReportInfo, pleaseNoPrint)
for partitionIn in container.hfs0:
fileHashes = FileExistingChecks.ExtractHashes(partitionIn)
__decompressContainer(partitionIn, None, fileHashes, write, raiseVerificationException, raisePfs0Exception, statusReportInfo, pleaseNoPrint)

container.close()
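
The XCZ path now iterates over every HFS0 partition of the input, extracting hashes and opening an output Hfs0Stream per partition, instead of handling only the secure partition. A condensed, hedged sketch of that control flow (simplified; the real function also covers the verify-only path, padding fixes and status reporting, and decompressPartition stands in for __decompressContainer):

def decompressXczPerPartition(container, xci, extractHashes, decompressPartition, pleaseNoPrint = None):
    for partitionIn in container.hfs0:                      # e.g. update, normal, secure
        fileHashes = extractHashes(partitionIn)             # hashes gathered per partition
        hfsPartitionOut = xci.hfs0.add(partitionIn._path, 0x200, pleaseNoPrint)
        with Hfs0.Hfs0Stream(hfsPartitionOut, xci.f.tell()) as partitionOut:
            decompressPartition(partitionIn, partitionOut, fileHashes)
            xci.hfs0.resize(partitionIn._path, partitionOut.actualSize)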