Перейти из форума на сайт.

Новости · Файловые архивы
Поиск · Активные темы · Топ лист
Правила · Кто в on-line?
Вход Забыли пароль? Первый раз на этом сайте? Регистрация
Компьютерный форум Ru.Board » Компьютеры » Программы » Расширения для Google Chrome / Google Chrome Extensions

Модерирует : gyra, Maz

gyra (01-08-2019 08:57): Расширения для Google Chrome / Google Chrome Extensions  Версия для печати • ПодписатьсяДобавить в закладки
На первую страницу · к этому сообщению · к последнему сообщению

   

OberStaFF



Junior Member
Редактировать | Профиль | Сообщение | Цитировать | Сообщить модератору


Код:
 
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
 
"""Support for formatting a data pack file used for platform agnostic resource
files.
"""
 
import collections
import exceptions
import os
import struct
import sys
import re
import shutil
 
 
if __name__ == '__main__':
    sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
 
from winreg import *
from shutil import copyfile
 
from grit import util
from grit.node import include
from grit.node import message
from grit.node import structure
 
# disable creation .pyc files
sys.dont_write_bytecode = True
 
PACK_FILE_VERSION = 5
BINARY, UTF8, UTF16 = range(3)
 
 
class WrongFileVersion(Exception):
    pass
 
 
class CorruptDataPack(Exception):
    pass
 
 
class DataPackSizes(object):
    def __init__(self, header, id_table, alias_table, data):
        self.header = header
        self.id_table = id_table
        self.alias_table = alias_table
        self.data = data
 
    @property
    def total(self):
        return sum(v for v in self.__dict__.itervalues())
 
    def __iter__(self):
        yield ('header', self.header)
        yield ('id_table', self.id_table)
        yield ('alias_table', self.alias_table)
        yield ('data', self.data)
 
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
 
    def __repr__(self):
        return self.__class__.__name__ + repr(self.__dict__)
 
 
class DataPackContents(object):
    def __init__(self, resources, encoding, version, aliases, sizes):
        # Map of resource_id -> str.
        self.resources = resources
        # Encoding (int).
        self.encoding = encoding
        # Version (int).
        self.version = version
        # Map of resource_id->canonical_resource_id
        self.aliases = aliases
        # DataPackSizes instance.
        self.sizes = sizes
 
 
def Format(root, lang='en', output_dir='.'):
    """Writes out the data pack file format (platform agnostic resource file)."""
    id_map = root.GetIdMap()
    data = {}
    root.info = []
    for node in root.ActiveDescendants():
        with node:
            if isinstance(node, (include.IncludeNode, message.MessageNode,
                                 structure.StructureNode)):
                value = node.GetDataPackValue(lang, UTF8)
                if value is not None:
                    resource_id = id_map[node.GetTextualIds()[0]]
                    data[resource_id] = value
                    root.info.append('{},{},{}'.format(
                        node.attrs.get('name'), resource_id, node.source))
    return WriteDataPackToString(data, UTF8)
 
 
def ReadDataPack(input_file):
    return ReadDataPackFromString(util.ReadFile(input_file, util.BINARY))
 
 
def ReadDataPackFromString(data):
    """Reads a data pack file and returns a dictionary."""
    # Read the header.
    version = struct.unpack('<I', data[:4])[0]
    if version == 4:
        resource_count, encoding = struct.unpack('<IB', data[4:9])
        alias_count = 0
        header_size = 9
    elif version == 5:
        encoding, resource_count, alias_count = struct.unpack('<BxxxHH', data[4:12])
        header_size = 12
    else:
        raise WrongFileVersion('Found version: ' + str(version))
 
    resources = {}
    kIndexEntrySize = 2 + 4  # Each entry is a uint16 and a uint32.
 
    def entry_at_index(idx):
        offset = header_size + idx * kIndexEntrySize
        return struct.unpack('<HI', data[offset:offset + kIndexEntrySize])
 
    prev_resource_id, prev_offset = entry_at_index(0)
    for i in xrange(1, resource_count + 1):
        resource_id, offset = entry_at_index(i)
        resources[prev_resource_id] = data[prev_offset:offset]
        prev_resource_id, prev_offset = resource_id, offset
 
    id_table_size = (resource_count + 1) * kIndexEntrySize
    # Read the alias table.
    kAliasEntrySize = 2 + 2  # uint16, uint16
 
    def alias_at_index(idx):
        offset = header_size + id_table_size + idx * kAliasEntrySize
        return struct.unpack('<HH', data[offset:offset + kAliasEntrySize])
 
    aliases = {}
    for i in xrange(alias_count):
        resource_id, index = alias_at_index(i)
        aliased_id = entry_at_index(index)[0]
        aliases[resource_id] = aliased_id
        resources[resource_id] = resources[aliased_id]
 
    alias_table_size = kAliasEntrySize * alias_count
    sizes = DataPackSizes(
        header_size, id_table_size, alias_table_size,
        len(data) - header_size - id_table_size - alias_table_size)
    assert sizes.total == len(data), 'original={} computed={}'.format(
        len(data), sizes.total)
    return DataPackContents(resources, encoding, version, aliases, sizes)
 
 
def WriteDataPackToString(resources, encoding):
    """Returns a string with a map of id=>data in the data pack format."""
    ret = []
 
    # Compute alias map.
    resource_ids = sorted(resources)
    # Use reversed() so that for duplicates lower IDs clobber higher ones.
    id_by_data = {resources[k]: k for k in reversed(resource_ids)}
    # Map of resource_id -> resource_id, where value < key.
    alias_map = {k: id_by_data[v] for k, v in resources.iteritems()
                 if id_by_data[v] != k}
 
    # Write file header.
    resource_count = len(resources) - len(alias_map)
    # Padding bytes added for alignment.
    ret.append(struct.pack('<IBxxxHH', PACK_FILE_VERSION, encoding,
                           resource_count, len(alias_map)))
    HEADER_LENGTH = 4 + 4 + 2 + 2
 
    # Each main table entry is: uint16 + uint32 (and an extra entry at the end).
    # Each alias table entry is: uint16 + uint16.
    data_offset = HEADER_LENGTH + (resource_count + 1) * 6 + len(alias_map) * 4
 
    # Write main table.
    index_by_id = {}
    deduped_data = []
    index = 0
    for resource_id in resource_ids:
        if resource_id in alias_map:
            continue
        data = resources[resource_id]
        index_by_id[resource_id] = index
        ret.append(struct.pack('<HI', resource_id, data_offset))
        data_offset += len(data)
        deduped_data.append(data)
        index += 1
 
    assert index == resource_count
    # Add an extra entry at the end.
    ret.append(struct.pack('<HI', 0, data_offset))
 
    # Write alias table.
    for resource_id in sorted(alias_map):
        index = index_by_id[alias_map[resource_id]]
        ret.append(struct.pack('<HH', resource_id, index))
 
    # Write data.
    ret.extend(deduped_data)
    return ''.join(ret)
 
 
def WriteDataPack(resources, output_file, encoding):
    """Writes a map of id=>data into output_file as a data pack."""
    content = WriteDataPackToString(resources, encoding)
    with open(output_file, 'wb') as file:
        file.write(content)
 
 
def RePack(output_file, input_files, whitelist_file=None,
           suppress_removed_key_output=False):
    """Write a new data pack file by combining input pack files.
 
    Args:
        output_file: path to the new data pack file.
        input_files: a list of paths to the data pack files to combine.
        whitelist_file: path to the file that contains the list of resource IDs
                        that should be kept in the output file or None to include
                        all resources.
        suppress_removed_key_output: allows the caller to suppress the output from
                                     RePackFromDataPackStrings.
 
    Raises:
        KeyError: if there are duplicate keys or resource encoding is
        inconsistent.
    """
    input_data_packs = [ReadDataPack(filename) for filename in input_files]
    input_info_files = [filename + '.info' for filename in input_files]
    whitelist = None
    if whitelist_file:
        whitelist = util.ReadFile(whitelist_file, util.RAW_TEXT).strip().split('\n')
        whitelist = set(map(int, whitelist))
    inputs = [(p.resources, p.encoding) for p in input_data_packs]
    resources, encoding = RePackFromDataPackStrings(
        inputs, whitelist, suppress_removed_key_output)
    WriteDataPack(resources, output_file, encoding)
    with open(output_file + '.info', 'w') as output_info_file:
        for filename in input_info_files:
            with open(filename, 'r') as info_file:
                output_info_file.writelines(info_file.readlines())
 
 
def RePackFromDataPackStrings(inputs, whitelist,
                              suppress_removed_key_output=False):
    """Combines all inputs into one.
 
    Args:
        inputs: a list of (resources_by_id, encoding) tuples to be combined.
        whitelist: a list of resource IDs that should be kept in the output string
                   or None to include all resources.
        suppress_removed_key_output: Do not print removed keys.
 
    Returns:
        Returns (resources_by_id, encoding).
 
    Raises:
        KeyError: if there are duplicate keys or resource encoding is
        inconsistent.
    """
    resources = {}
    encoding = None
    for input_resources, input_encoding in inputs:
        # Make sure we have no dups.
        # duplicate_keys = set(input_resources.keys()) & set(resources.keys())
        # if duplicate_keys:
        # raise exceptions.KeyError('Duplicate keys: ' + str(list(duplicate_keys)))
 
        # Make sure encoding is consistent.
        if encoding in (None, BINARY):
            encoding = input_encoding
        elif input_encoding not in (BINARY, encoding):
            raise exceptions.KeyError('Inconsistent encodings: ' + str(encoding) +
                                      ' vs ' + str(input_encoding))
 
        if whitelist:
            whitelisted_resources = dict([(key, input_resources[key])
                                          for key in input_resources.keys()
                                          if key in whitelist])
            resources.update(whitelisted_resources)
            removed_keys = [key for key in input_resources.keys()
                            if key not in whitelist]
            if not suppress_removed_key_output:
                for key in removed_keys:
                    print 'RePackFromDataPackStrings Removed Key:', key
        else:
            resources.update(input_resources)
 
    # Encoding is 0 for BINARY, 1 for UTF8 and 2 for UTF16
    if encoding is None:
        encoding = BINARY
    return resources, encoding
 
def BackupTXT():
    fname, ext = sys.argv[1].split('.')
    d = '%s.txt' % fname  # lang_file.txt
    b = fname + '.txt.bak'  # unpacked_txt.bak
    if os.path.isfile(b):  # check backup txt
        print "Backup file exist"
    else:
        # copyfile(name + '.txt', name + '.txt.bak')
        copyfile(d, fname + '.txt.bak')
 
 
def BackupPAK():
    fname, ext = sys.argv[1].split('.')
    d = '%s.pak' % fname  # file.pak
    b = fname + '.pak.bak'  # file.pak.bak
    if os.path.isfile(b):  # check backup pak
        print "Backup file exists"
    else:
        # os.rename(name + '.pak', name + '.pak.bak')
        # os.rename(d, fname + '.pak.bak')
        copyfile(d, fname + '.pak.bak')
 
 
def UnpackLng():
    data = ReadDataPack(sys.argv[1])
    fname, ext = sys.argv[1].split('.')
    d = '%s.pak' % fname  # lang_file.pak
    # b = fname + '.pak.bak' # packed_lng.pak.bak
    file = open(fname + '.txt', 'wb')
    for (resource_id, text) in data.resources.iteritems():
        file.write("%s: %s\n" % (resource_id, text))
    file.close()
    os.remove(d)
    # BackupPAK(fname)
    # check and create backup file
    # else:
    #  file.close()
    #  if os.path.isfile(b): ## exists backup file lang_file.pak.bak
    #    print "Backup file exist"
    #    os.remove(d)
    #  else:
    #    os.rename(d, fname + '.pak.bak')
 
 
def PackLng():
    data2 = {}
    fname, ext = sys.argv[1].split('.')
    d = '%s.txt' % fname  # lang_file.txt
    # b = fname + '.txt.bak' # unpacked_txt.bak
    z = fname + '.pak'  # packed_lang_file.pak
    p = re.compile(r'^[0-9]{1,5}:')
    ID = 0
    for line in open(d, 'rb'):
        # print type(line)
        #  p = re.compile(r'^[0-9]+:')
        m = p.match(line)
        if m:
            key, value = line.split(':', 1)  # line.strip().split(':',1)
            ID = int(key)
            data2[ID] = value[1:].strip('\n')  # first space trim and line end
        else:
            data2[ID] += '\n' + line.strip('\n')  # if multiline add line end
    # BackupTXT(fname)
    WriteDataPack(data2, sys.argv[1], 1)
    # print "wrote %s" % sys.argv[1]
    if os.path.isfile(z):
        os.remove(z)
    os.rename(d, fname + '.pak')
 
    # check and create backup file
    # else:
    #  if os.path.isfile(b): ## exists backup file lang_file.pak.bak
    #    print "Backup file exist"
    #    WriteDataPack(data2, sys.argv[1], 1)
    #    # print "wrote %s" % sys.argv[1]
    #    os.remove(z)
    #    os.rename(d, fname + '.pak')
    #    return
    #  else:
    #    copyfile(fname + '.txt', fname + '.txt.bak')
    #    WriteDataPack(data2, sys.argv[1], 1)
    #    # print "wrote %s" % sys.argv[1]
    #    os.remove(z)
    #    os.rename(d, fname + '.pak')
 
 
def UnpackRes():
    # rev. from 06/02/2018
    # minor fixes
    data = ReadDataPack(sys.argv[1])
    directory, ext = sys.argv[1].split('.')
    d = '%s.pak' % directory  # packed_file.pak
    # b = directory + '.pak.bak'  # packed_file.pak.bak
    for (resource_id, text) in data.resources.iteritems():
        if text[:15] == '<!DOCTYPE html>' or text[:15] == '<!DOCTYPE HTML>' or text[:15] == '<!doctype html>' or text[:6] == '<html>':
        # html разобран на отдельные по причине выявленных пропусков, игнорирует чтение заголовка(отступы или что-то там ещё) и выдает в формате *.bin
            ext = '.html'
        # <!doctype html> with space in begining
        elif text[:18] == '\xEF\xBB\xBF\x3C\x21\x64\x6F\x63\x74\x79\x70\x65\x20\x68\x74\x6D\x6C\x3E':
            ext = '.html'
        # <!doctype html> without space in begining
        elif text[:14] == '\x21\x64\x6F\x63\x74\x79\x70\x65\x20\x68\x74\x6D\x6C\x3E':
            ext = '.html'
        # <!DOCTYPE HTML> with space in begining
        elif text[:18] == '\xEF\xBB\xBF\x3C\x21\x44\x4F\x43\x54\x59\x50\x45\x20\x48\x54\x4D\x4C\x3E':
            ext = '.html'    
        # <!DOCTYPE HTML> without space in begining
        elif text[:14] == '\x21\x44\x4F\x43\x54\x59\x50\x45\x20\x48\x54\x4D\x4C\x3E':
            ext = '.html'            
        # <!DOCTYPE html> with space in begining
        elif text[:18] == '\xEF\xBB\xBF\x3C\x21\x44\x4F\x43\x54\x59\x50\x45\x20\x68\x74\x6D\x6C\x3E':
            ext = '.html'
        # <!DOCTYPE html> without space in begining
        elif text[:14] == '\x21\x44\x4F\x43\x54\x59\x50\x45\x20\x68\x74\x6D\x6C\x3E':
            ext = '.html'
        # <html> with space in begining
        elif text[:9] == '\xEF\xBB\xBF\x3C\x68\x74\x6D\x6C\x3E':
            ext = '.html'
        # <html> without space in begining
        elif text[:5] == '\x68\x74\x6D\x6C\x3E':
            ext = '.html'      
        elif text[:4] == '\x89PNG':
            ext = '.png'
        elif text[:8] == '\x89\x50\x4E\x47\x0D\x0A\x1A\x0A':
            ext = '.png'
        elif text[:4] == '\x00\x00\x01\x00':
            ext = '.ico'
        elif text[:4] == '\x00\x00\x02\x00':
            ext = '.ico'
        elif text[:4] == '\x49\x49\x2A\x00':
            ext = '.tiff'
        elif text[:4] == '\x4D\x4D\x00\x2A':
            ext = '.tiff'
        # внутри архивов чаще всего исходные Polymer, возможно, и другие данные
        elif text[:4] == '\x1F\x8B\x08\x00':
            ext = '.gz'
        elif text[:6] == 'GIF87a':
            ext = '.gif'
        elif text[:6] == 'GIF89a':
            ext = '.gif'
        elif text[:3] == '\xFF\xD8\xFF':
            ext = '.jpg'
        elif text[:1] == '\0':
            ext = '.bin'
        else:
            ext = '.js'
        if not os.path.exists(directory):
            os.makedirs(directory)
        file = '%s/%s%s' % (directory, str(resource_id), ext)
        open(file, 'wb').write(text)
        # print "%s: " % (str(resource_id) + ext)
    else:
        os.remove(d)
        # check and create backup file
        # else:
        #     if os.path.isfile(b):  # exists backup file packed_file.pak.bak
        #         print "Backup file exist"
        #         os.remove(d)
        #     else:
        #         os.rename(d, directory + '.pak.bak')
 
 
def PackRes():
    resources = {}
    # return 'name_folder' from path
    path, directory = os.path.split(sys.argv[1])
    # string before, in old version
    # directory, ext = sys.argv[1].split('.')
    for d, dirs, files in os.walk(directory):
        if len(files) == 0:
            print "Folder _Resources/%s is empty" % directory
            return
        for file in files:
            key, ext = file.split('.')
            f = '%s/%s' % (directory, file)
            text = open(f, 'rb').read()
            try:
                resources[int(key)] = text
            except TypeError:
                continue
    if len(resources) == 0: sys.exit(0)
    # add sys.argv[1]+'.pak' for creation new file
    # grit.format.data_pack.
    WriteDataPack(resources, sys.argv[1] + '.pak', 1)
    # delete folder after packing
    shutil.rmtree(directory)
    # print "File %s created successfully" % sys.argv[1]
 
 
def Helper():
    file = open("help.md", "w")
    file.write('\n \
    Hello World\n \
    Hello World 2\n \
    ')
    file.close()
    # AddContext('HKCR', 'pakfile\shell\PAK\shell\lng', 'Hello', REG_SZ, 'New Value')
    # os.system('notepad.exe help.md | TASKKILL /F /IM data_pack_all.exe')
    # os.system('cmd /K echo Hello | TASKKILL /F /IM data_pack_all.exe | cls | echo Hello ')
    # os.system('taskkill /f /im data_pack_all.exe')
    # subprocess.Popen(["cmd", "print 'Hello'"])
    # print subprocess.Popen("echo Hello World", shell=True, stdout=subprocess.PIPE).stdout.read()
    return
 
 
# read key from registry
# try:
#     root_key=OpenKey(HKEY_CURRENT_USER, r'SOFTWARE\my path to\Registry', 0, KEY_READ)
#     [Pathname,regtype]=(QueryValueEx(root_key,"Pathname"))
#     CloseKey(root_key)
#     if (""==Pathname):
#         raise WindowsError
# except WindowsError:
#     return [""]
 
def AddContext(root, key_name, name_param, type_value, value):
    # REG_BINARY                       - Binary data in any form.
    # REG_DWORD                        - 32 - bit number.
    # REG_DWORD_LITTLE_ENDIAN          - A 32 - bit number in little - endian format.
    # REG_DWORD_BIG_ENDIAN             - A 32 - bit number in big - endian format.
    # REG_EXPAND_SZ                    - Null - terminated string containing references to environment variables( % PATH %).
    # REG_LINK                         - A Unicode symbolic link.
    # REG_MULTI_SZ                     - A sequence of null - terminated strings, terminated by two null characters.(Python handles this termination automatically.)
    # REG_NONE                         - No defined value type.
    # REG_RESOURCE_LIST                - A device - driver resource list.
    # REG_FULL_RESOURCE_DESCRIPTOR     - A hardware setting.
    # REG_RESOURCE_REQUIREMENTS_LIST   - A hardware resource list.
    # REG_SZ                           - A null - terminated string.
    if root == 'HKLM': root = HKEY_LOCAL_MACHINE
    if root == 'HKCU': root = HKEY_CURRENT_USER
    if root == 'HKCR': root = HKEY_CLASSES_ROOT
    if root == 'HKCC': root = HKEY_CURRENT_CONFIG
    if root == 'HKDD': root = HKEY_DYN_DATA
    if root == 'HKPD': root = HKEY_PERFORMANCE_DATA
    if root == 'HKU':  root = HKEY_USERS
    try:
        keyval = key_name
        # keyval = r"pakfile\shell\PAK\shell\lng"
        if not os.path.exists("keyval"):
            key = CreateKey(root, keyval)
            # key = CreateKey(HKEY_CLASSES_ROOT, keyval)
        Registrykey = OpenKey(root, key_name, 0, KEY_WRITE)
        # Registrykey = OpenKey(HKEY_CLASSES_ROOT, r"pakfile\shell\PAK\shell\lng", 0, KEY_WRITE)
        SetValueEx(Registrykey, name_param, 0, type_value, value)
        # SetValueEx(Registrykey, "Name Parameter", 0, REG_SZ, 'Value')
        CloseKey(Registrykey)
        return True
    except WindowsError:
        return False
 
 
def AddContextMenu():
    # FolderToPak
    AddContext('HKCR', r'Folder\shell\pakres', '', REG_SZ, u'Упаковка ресурсов PAK')
    AddContext('HKCR', r'Folder\shell\pakres', 'Icon', REG_SZ, sys.argv[0] + ',6')
    AddContext('HKCR', r'Folder\shell\pakres\command', '', REG_SZ, sys.argv[0] + ' "%1" -p')
    # TextFile menu
    AddContext('HKCR', r'txtfile\shell\pak', 'MUIVerb', REG_SZ, u'Упаковка PAK(язык)')
    AddContext('HKCR', r'txtfile\shell\pak', 'SubCommands', REG_SZ, '')
    AddContext('HKCR', r'txtfile\shell\pak', 'Icon', REG_SZ, sys.argv[0] + ',6')
    # TxtToLng
    AddContext('HKCR', r'txtfile\shell\pak\shell\paklng', '', REG_SZ, u'Языковой файл')
    AddContext('HKCR', r'txtfile\shell\pak\shell\paklng', 'Icon', REG_SZ, sys.argv[0] + ',2')
    AddContext('HKCR', r'txtfile\shell\pak\shell\paklng\command', '', REG_SZ, sys.argv[0] + ' "%1" -l')
    # TxtToLng with backup
    AddContext('HKCR', r'txtfile\shell\pak\shell\paklngbck', '', REG_SZ, u'Языковой файл(резервная копия)')
    AddContext('HKCR', r'txtfile\shell\pak\shell\paklngbck', 'Icon', REG_SZ, sys.argv[0] + ',2')
    AddContext('HKCR', r'txtfile\shell\pak\shell\paklngbck\command', '', REG_SZ, sys.argv[0] + ' "%1" -lb')
    # Pak file associate
    AddContext('HKCR', r'.pak', '', REG_SZ, u'pakfile')
    AddContext('HKCR', r'pakfile', '', REG_SZ, u'PAK file')
    AddContext('HKCR', r'pakfile\DefaultIcon', '', REG_SZ, sys.argv[0] + ',0')
    # PakFile menu
    AddContext('HKCR', r'pakfile\shell\pak', 'MUIVerb', REG_SZ, u'Распаковка PAK')
    AddContext('HKCR', r'pakfile\shell\pak', 'SubCommands', REG_SZ, '')
    AddContext('HKCR', r'pakfile\shell\pak', 'Icon', REG_SZ, sys.argv[0] + ',5')
    # LngUnpack
    AddContext('HKCR', r'pakfile\shell\PAK\shell\lng', '', REG_SZ, u'Языковой файл')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\lng', 'Icon', REG_SZ, sys.argv[0] + ',1')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\lng\command', '', REG_SZ, sys.argv[0] + ' "%1" -t')
    # LngUnpack with backup
    AddContext('HKCR', r'pakfile\shell\PAK\shell\lngbck', '', REG_SZ, u'Языковой файл(резервная копия)')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\lngbck', 'Icon', REG_SZ, sys.argv[0] + ',1')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\lngbck\command', '', REG_SZ, sys.argv[0] + ' "%1" -tb')
    # ResUnpack
    AddContext('HKCR', r'pakfile\shell\PAK\shell\res', '', REG_SZ, u'Файл ресурсов')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\res', 'Icon', REG_SZ, sys.argv[0] + ',3')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\res\command', '', REG_SZ, sys.argv[0] + ' "%1" -r')
    # ResUnpack with backup
    AddContext('HKCR', r'pakfile\shell\PAK\shell\resbck', '', REG_SZ, u'Файл ресурсов(резервная копия)')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\resbck', 'Icon', REG_SZ, sys.argv[0] + ',3')
    AddContext('HKCR', r'pakfile\shell\PAK\shell\resbck\command', '', REG_SZ, sys.argv[0] + ' "%1" -rb')
 
 
def main():
    # Helper
    # Вызов справки
    if len(sys.argv) == 1:
        # Helper()
        AddContextMenu()
    elif (len(sys.argv) == 2) and (sys.argv[1] == '') or (sys.argv[1] == '-h'):
        Helper()
    # Unpack Lang
    # Распаковка языкового файла
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-t'):
        UnpackLng()
    # Unpack Lang + Backup
    # Распаковка языкового файла + резервная копия
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-tb') or (sys.argv[2] == '-bt'):
        BackupPAK()
        UnpackLng()
    # Pack Lng
    # Упаковка языкового файла
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-l'):
        PackLng()
    # Pack Lng + Backup
    # Упаковка языкового файла + резервная копия
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-lb') or (sys.argv[2] == '-bl'):
        BackupTXT()
        PackLng()
    # Unpack Resources
    # Распаковка ресурсов
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-r'):
        UnpackRes()
    # Unpack Resources + Backup
    # Распаковка ресурсов + резервная копия
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-rb') or (sys.argv[2] == '-br'):
        BackupPAK()
        UnpackRes()
    # Pack Resources
    # Упаковка ресурсов
    elif (len(sys.argv) == 3) and (sys.argv[2] == '-p'):
        PackRes()
    else:
        print "what filename ???"
 
# for test part from original
# def main():
#     if len(sys.argv) > 1:
        # When an argument is given, read and explode the file to text
        # format, for easier diffing.
        # data = ReadDataPack(sys.argv[1])
        # print data.encoding
        # for (resource_id, text) in data.resources.iteritems():
        #     print '%s: %s' % (resource_id, text)
    # else:
        # Write a simple file.
        # data = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
        # WriteDataPack(data, 'datapack1.pak', UTF8)
        # data2 = {1000: 'test', 5: 'five'}
        # WriteDataPack(data2, 'datapack2.pak', UTF8)
        # print 'wrote datapack1 and datapack2 to current directory.'
 
 
if __name__ == '__main__':
    main()
 


Всего записей: 160 | Зарегистр. 12-07-2013 | Отправлено: 16:00 04-03-2018 | Исправлено: OberStaFF, 10:18 07-03-2018
   

На первую страницук этому сообщениюк последнему сообщению

Компьютерный форум Ru.Board » Компьютеры » Программы » Расширения для Google Chrome / Google Chrome Extensions
gyra (01-08-2019 08:57): Расширения для Google Chrome / Google Chrome Extensions


Реклама на форуме Ru.Board.

Powered by Ikonboard "v2.1.7b" © 2000 Ikonboard.com
Modified by Ru.B0ard
© Ru.B0ard 2000-2024

BitCoin: 1NGG1chHtUvrtEqjeerQCKDMUi6S6CG4iC

Рейтинг.ru