Cleanup
224
add_aliases.py
@@ -1,224 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create aliases in target directory.

In addition to links/copies named with aliased sequences, this can also
create canonically named aliases/copies, if requested."""

from __future__ import print_function
import argparse
import glob
import os
from os import path
import shutil
import sys

from nototools import unicode_data


DATA_ROOT = path.dirname(path.abspath(__file__))


def str_to_seq(seq_str):
  res = [int(s, 16) for s in seq_str.split('_')]
  if 0xfe0f in res:
    print('0xfe0f in file name: %s' % seq_str)
    res = [x for x in res if x != 0xfe0f]
  return tuple(res)


def seq_to_str(seq):
  return '_'.join('%04x' % cp for cp in seq)


def read_default_unknown_flag_aliases():
  unknown_flag_path = path.join(DATA_ROOT, 'unknown_flag_aliases.txt')
  return read_emoji_aliases(unknown_flag_path)


def read_default_emoji_aliases():
  alias_path = path.join(DATA_ROOT, 'emoji_aliases.txt')
  return read_emoji_aliases(alias_path)


def read_emoji_aliases(filename):
  result = {}

  with open(filename, 'r') as f:
    for line in f:
      ix = line.find('#')
      if ix > -1:
        line = line[:ix]
      line = line.strip()
      if not line:
        continue
      als, trg = (s.strip() for s in line.split(';'))
      try:
        als_seq = tuple(int(x, 16) for x in als.split('_'))
        trg_seq = tuple(int(x, 16) for x in trg.split('_'))
      except ValueError:
        print('cannot process alias %s -> %s' % (als, trg))
        continue
      result[als_seq] = trg_seq
  return result


def add_aliases(
    srcdir, dstdir, aliasfile, prefix, ext, replace=False, copy=False,
    canonical_names=False, dry_run=False):
  """Use aliasfile to create aliases of files in srcdir matching prefix/ext
  in dstdir. If dstdir is null, use srcdir as dstdir. If replace is false
  and a file already exists in dstdir, report and do nothing. If copy is
  false create a symlink, else create a copy.

  If canonical_names is true, check all source files and generate
  aliases/copies using the canonical name if different from the existing
  name.

  If dry_run is true, report what would be done. Dstdir will be created if
  necessary, even if dry_run is true."""

  if not path.isdir(srcdir):
    print('%s is not a directory' % srcdir, file=sys.stderr)
    return

  if not dstdir:
    dstdir = srcdir
  elif not path.isdir(dstdir):
    os.makedirs(dstdir)

  prefix_len = len(prefix)
  suffix_len = len(ext) + 1
  filenames = [path.basename(f)
               for f in glob.glob(path.join(srcdir, '%s*.%s' % (prefix, ext)))]
  seq_to_file = {
      str_to_seq(name[prefix_len:-suffix_len]): name
      for name in filenames}

  aliases = read_emoji_aliases(aliasfile)
  aliases_to_create = {}
  aliases_to_replace = []
  alias_exists = False

  def check_alias_seq(seq):
    nonlocal alias_exists
    alias_str = seq_to_str(seq)
    alias_name = '%s%s.%s' % (prefix, alias_str, ext)
    alias_path = path.join(dstdir, alias_name)
    if path.exists(alias_path):
      if replace:
        aliases_to_replace.append(alias_name)
      else:
        print('alias %s exists' % alias_str, file=sys.stderr)
        alias_exists = True
        return None
    return alias_name

  canonical_to_file = {}
  for als, trg in sorted(aliases.items()):
    if trg not in seq_to_file:
      print('target %s for %s does not exist' % (
          seq_to_str(trg), seq_to_str(als)), file=sys.stderr)
      continue
    alias_name = check_alias_seq(als)
    if alias_name:
      target_file = seq_to_file[trg]
      aliases_to_create[alias_name] = target_file
      if canonical_names:
        canonical_seq = unicode_data.get_canonical_emoji_sequence(als)
        if canonical_seq and canonical_seq != als:
          canonical_alias_name = check_alias_seq(canonical_seq)
          if canonical_alias_name:
            canonical_to_file[canonical_alias_name] = target_file

  if canonical_names:
    print('adding %d canonical aliases' % len(canonical_to_file))
    for seq, f in seq_to_file.items():
      canonical_seq = unicode_data.get_canonical_emoji_sequence(seq)
      if canonical_seq and canonical_seq != seq:
        alias_name = check_alias_seq(canonical_seq)
        if alias_name:
          canonical_to_file[alias_name] = f

    print('adding %d total canonical sequences' % len(canonical_to_file))
    aliases_to_create.update(canonical_to_file)

  if replace:
    if not dry_run:
      for k in sorted(aliases_to_replace):
        os.remove(path.join(dstdir, k))
    print('replacing %d files' % len(aliases_to_replace))
  elif alias_exists:
    print('aborting, aliases exist.', file=sys.stderr)
    return

  for k, v in sorted(aliases_to_create.items()):
    if dry_run:
      msg = 'replace ' if k in aliases_to_replace else ''
      print('%s%s -> %s' % (msg, k, v))
    else:
      try:
        if copy:
          shutil.copy2(path.join(srcdir, v), path.join(dstdir, k))
        else:
          # fix this to create relative symlinks
          if srcdir == dstdir:
            os.symlink(v, path.join(dstdir, k))
          else:
            raise Exception("can't create cross-directory symlinks yet")
      except Exception as e:
        print('failed to create %s -> %s' % (k, v), file=sys.stderr)
        raise Exception('oops, ' + str(e))
  print('created %d %s' % (
      len(aliases_to_create), 'copies' if copy else 'symlinks'))


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--srcdir', help='directory containing files to alias',
      required=True, metavar='dir')
  parser.add_argument(
      '-d', '--dstdir', help='directory to write aliases, default srcdir',
      metavar='dir')
  parser.add_argument(
      '-a', '--aliasfile', help='alias file (default emoji_aliases.txt)',
      metavar='file', default='emoji_aliases.txt')
  parser.add_argument(
      '-p', '--prefix', help='file name prefix (default emoji_u)',
      metavar='pfx', default='emoji_u')
  parser.add_argument(
      '-e', '--ext', help='file name extension (default png)',
      choices=['ai', 'png', 'svg'], default='png')
  parser.add_argument(
      '-r', '--replace', help='replace existing files/aliases',
      action='store_true')
  parser.add_argument(
      '-c', '--copy', help='create a copy of the file, not a symlink',
      action='store_true')
  parser.add_argument(
      '--canonical_names', help='include extra copies with canonical names '
      '(including fe0f emoji presentation character)', action='store_true')
  parser.add_argument(
      '-n', '--dry_run', help='print out aliases to create only',
      action='store_true')
  args = parser.parse_args()

  add_aliases(
      args.srcdir, args.dstdir, args.aliasfile, args.prefix, args.ext,
      args.replace, args.copy, args.canonical_names, args.dry_run)


if __name__ == '__main__':
  main()
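A minimal usage sketch of read_emoji_aliases above; the file name and
mapping are illustrative, not from the repo. Given an aliases.txt whose one
non-comment line is '1f1e7_1f1fb;1f1f3_1f1f4' (Bouvet Island's flag drawn
as Norway's), the parse yields tuples of integer codepoints:

    aliases = read_emoji_aliases('aliases.txt')  # hypothetical file
    assert aliases[(0x1f1e7, 0x1f1fb)] == (0x1f1f3, 0x1f1f4)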
@@ -1,195 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Modify the Noto Color Emoji font to use GSUB rules for flags and keycaps."""

__author__ = "roozbeh@google.com (Roozbeh Pournader)"


import sys

from fontTools import agl
from fontTools import ttLib
from fontTools.ttLib.tables import otTables

from nototools import font_data


def create_script_list(script_tag='DFLT'):
  """Create a ScriptList for the GSUB table."""
  def_lang_sys = otTables.DefaultLangSys()
  def_lang_sys.ReqFeatureIndex = 0xFFFF
  def_lang_sys.FeatureCount = 1
  def_lang_sys.FeatureIndex = [0]
  def_lang_sys.LookupOrder = None

  script_record = otTables.ScriptRecord()
  script_record.ScriptTag = script_tag
  script_record.Script = otTables.Script()
  script_record.Script.DefaultLangSys = def_lang_sys
  script_record.Script.LangSysCount = 0
  script_record.Script.LangSysRecord = []

  script_list = otTables.ScriptList()
  script_list.ScriptCount = 1
  script_list.ScriptRecord = [script_record]

  return script_list


def create_feature_list(feature_tag, lookup_count):
  """Create a FeatureList for the GSUB table."""
  feature_record = otTables.FeatureRecord()
  feature_record.FeatureTag = feature_tag
  feature_record.Feature = otTables.Feature()
  feature_record.Feature.LookupCount = lookup_count
  feature_record.Feature.LookupListIndex = list(range(lookup_count))
  feature_record.Feature.FeatureParams = None

  feature_list = otTables.FeatureList()
  feature_list.FeatureCount = 1
  feature_list.FeatureRecord = [feature_record]

  return feature_list


def create_lookup_list(lookups):
  """Create a LookupList for the GSUB table."""
  lookup_list = otTables.LookupList()
  lookup_list.LookupCount = len(lookups)
  lookup_list.Lookup = lookups

  return lookup_list


def get_glyph_name_or_create(char, font):
  """Return the glyph name for a character, creating if it doesn't exist."""
  cmap = font_data.get_cmap(font)
  if char in cmap:
    return cmap[char]

  glyph_name = agl.UV2AGL[char]
  assert glyph_name not in font.glyphOrder

  font['hmtx'].metrics[glyph_name] = [0, 0]
  cmap[char] = glyph_name

  if 'glyf' in font:
    from fontTools.ttLib.tables import _g_l_y_f
    empty_glyph = _g_l_y_f.Glyph()
    font['glyf'].glyphs[glyph_name] = empty_glyph

  font.glyphOrder.append(glyph_name)
  return glyph_name


def create_lookup(table, font, flag=0):
  """Create a Lookup based on mapping table."""
  cmap = font_data.get_cmap(font)

  ligatures = {}
  for output, (ch1, ch2) in table.items():
    output = cmap[output]
    ch1 = get_glyph_name_or_create(ch1, font)
    ch2 = get_glyph_name_or_create(ch2, font)

    ligature = otTables.Ligature()
    ligature.CompCount = 2
    ligature.Component = [ch2]
    ligature.LigGlyph = output

    try:
      ligatures[ch1].append(ligature)
    except KeyError:
      ligatures[ch1] = [ligature]

  ligature_subst = otTables.LigatureSubst()
  ligature_subst.ligatures = ligatures

  lookup = otTables.Lookup()
  lookup.LookupType = 4
  lookup.LookupFlag = flag
  lookup.SubTableCount = 1
  lookup.SubTable = [ligature_subst]

  return lookup


def create_simple_gsub(lookups, script='DFLT', feature='ccmp'):
  """Create a simple GSUB table."""
  gsub_class = ttLib.getTableClass('GSUB')
  gsub = gsub_class('GSUB')

  gsub.table = otTables.GSUB()
  gsub.table.Version = 1.0
  gsub.table.ScriptList = create_script_list(script)
  gsub.table.FeatureList = create_feature_list(feature, len(lookups))
  gsub.table.LookupList = create_lookup_list(lookups)
  return gsub


def reg_indicator(letter):
  """Return a regional indicator character from the corresponding capital
  letter."""
  return 0x1F1E6 + ord(letter) - ord('A')


EMOJI_FLAGS = {
    0xFE4E5: (reg_indicator('J'), reg_indicator('P')),  # Japan
    0xFE4E6: (reg_indicator('U'), reg_indicator('S')),  # United States
    0xFE4E7: (reg_indicator('F'), reg_indicator('R')),  # France
    0xFE4E8: (reg_indicator('D'), reg_indicator('E')),  # Germany
    0xFE4E9: (reg_indicator('I'), reg_indicator('T')),  # Italy
    0xFE4EA: (reg_indicator('G'), reg_indicator('B')),  # United Kingdom
    0xFE4EB: (reg_indicator('E'), reg_indicator('S')),  # Spain
    0xFE4EC: (reg_indicator('R'), reg_indicator('U')),  # Russia
    0xFE4ED: (reg_indicator('C'), reg_indicator('N')),  # China
    0xFE4EE: (reg_indicator('K'), reg_indicator('R')),  # Korea
}

KEYCAP = 0x20E3

EMOJI_KEYCAPS = {
    0xFE82C: (ord('#'), KEYCAP),
    0xFE82E: (ord('1'), KEYCAP),
    0xFE82F: (ord('2'), KEYCAP),
    0xFE830: (ord('3'), KEYCAP),
    0xFE831: (ord('4'), KEYCAP),
    0xFE832: (ord('5'), KEYCAP),
    0xFE833: (ord('6'), KEYCAP),
    0xFE834: (ord('7'), KEYCAP),
    0xFE835: (ord('8'), KEYCAP),
    0xFE836: (ord('9'), KEYCAP),
    0xFE837: (ord('0'), KEYCAP),
}

def main(argv):
  """Modify all the fonts given in the command line."""
  for font_name in argv[1:]:
    font = ttLib.TTFont(font_name)

    assert 'GSUB' not in font
    font['GSUB'] = create_simple_gsub([
        create_lookup(EMOJI_KEYCAPS, font),
        create_lookup(EMOJI_FLAGS, font)])

    font_data.delete_from_cmap(
        font, list(EMOJI_FLAGS) + list(EMOJI_KEYCAPS))

    font.save(font_name + '-fixed')


if __name__ == '__main__':
  main(sys.argv)
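A worked example of the flag mapping above: reg_indicator offsets a capital
letter into the regional-indicator block, so the PUA codepoint 0xFE4E6
ligates from the 'U' and 'S' indicators (values checked by hand):

    assert reg_indicator('U') == 0x1F1FA  # 0x1F1E6 + (ord('U') - ord('A'))
    assert reg_indicator('S') == 0x1F1F8
    assert EMOJI_FLAGS[0xFE4E6] == (0x1F1FA, 0x1F1F8)  # United States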
405
add_glyphs.py
@@ -1,405 +0,0 @@
#!/usr/bin/env python3

"""Extend a ttx file with additional data.

Takes a ttx file and one or more directories containing image files named
after sequences of codepoints, extends the cmap, hmtx, GSUB, and GlyphOrder
tables in the source ttx file based on these sequences, and writes out a new
ttx file.

This can also apply aliases from an alias file."""

import argparse
import collections
import os
from os import path
import re
import sys

from fontTools import ttx
from fontTools.ttLib.tables import otTables

import add_emoji_gsub
import add_aliases

sys.path.append(
    path.join(os.path.dirname(__file__), 'third_party', 'color_emoji'))
from png import PNG


def get_seq_to_file(image_dir, prefix, suffix):
  """Return a mapping from codepoint sequences to files in the given
  directory, for files that match the prefix and suffix. File names with
  this prefix and suffix should consist of codepoints in hex separated by
  underscore. 'fe0f' (the codepoint of the emoji presentation variation
  selector) is stripped from the sequence.
  """
  start = len(prefix)
  limit = -len(suffix)
  seq_to_file = {}
  for name in os.listdir(image_dir):
    if not (name.startswith(prefix) and name.endswith(suffix)):
      continue
    try:
      cps = [int(s, 16) for s in name[start:limit].split('_')]
      seq = tuple(cp for cp in cps if cp != 0xfe0f)
    except ValueError:
      raise Exception('could not parse "%s"' % name)
    for cp in cps:
      if not (0 <= cp <= 0x10ffff):
        raise Exception('bad codepoint(s) in "%s"' % name)
    if seq in seq_to_file:
      raise Exception('duplicate sequence for "%s" in %s' % (name, image_dir))
    seq_to_file[seq] = path.join(image_dir, name)
  return seq_to_file


def collect_seq_to_file(image_dirs, prefix, suffix):
  """Return a sequence to file mapping by calling get_seq_to_file on a list
  of directories. When sequences for files in later directories match those
  from earlier directories, the later file replaces the earlier one.
  """
  seq_to_file = {}
  for image_dir in image_dirs:
    seq_to_file.update(get_seq_to_file(image_dir, prefix, suffix))
  return seq_to_file


def remap_values(seq_to_file, map_fn):
  return {k: map_fn(v) for k, v in seq_to_file.items()}


def get_png_file_to_advance_mapper(lineheight):
  def map_fn(filename):
    wid, ht = PNG(filename).get_size()
    return int(round(float(lineheight) * wid / ht))
  return map_fn


def cp_name(cp):
  """Return uniXXXX or uXXXXX(X) as a name for the glyph mapped to this cp."""
  return '%s%04X' % ('u' if cp > 0xffff else 'uni', cp)


def seq_name(seq):
  """Sequences of length one get the cp_name. Others start with 'u' followed
  by two or more 4-to-6-digit hex strings separated by underscore."""
  if len(seq) == 1:
    return cp_name(seq[0])
  return 'u' + '_'.join('%04X' % cp for cp in seq)


def collect_cps(seqs):
  cps = set()
  for seq in seqs:
    cps.update(seq)
  return cps


def get_glyphorder_cps_and_truncate(glyphOrder):
  """This scans glyphOrder for names that correspond to a single codepoint
  using the 'u(ni)XXXXXX' syntax. All names that don't match are moved to
  the front of the glyphOrder list in their original order, and the list is
  truncated. The ones that do match are returned as a set of codepoints."""
  glyph_name_re = re.compile(r'^u(?:ni)?([0-9a-fA-F]{4,6})$')
  cps = set()
  write_ix = 0
  for ix, name in enumerate(glyphOrder):
    m = glyph_name_re.match(name)
    if m:
      cps.add(int(m.group(1), 16))
    else:
      glyphOrder[write_ix] = name
      write_ix += 1
  del glyphOrder[write_ix:]
  return cps


def get_all_seqs(font, seq_to_advance):
  """Copies the sequences from seq_to_advance and extends it with single-
  codepoint sequences from the GlyphOrder table as well as those internal
  to sequences in seq_to_advance. Reduces the GlyphOrder table."""

  all_seqs = set(seq_to_advance.keys())
  # using collect_cps includes cps internal to a seq
  cps = collect_cps(all_seqs)
  glyphOrder = font.getGlyphOrder()
  # extract cps in glyphOrder and reduce glyphOrder to only those that remain
  glyphOrder_cps = get_glyphorder_cps_and_truncate(glyphOrder)
  cps.update(glyphOrder_cps)
  # add new single codepoint sequences from glyphOrder and sequences
  all_seqs.update((cp,) for cp in cps)
  return all_seqs


def get_font_cmap(font):
  """Return the first cmap in the font; we assume it exists and is a unicode
  cmap."""
  return font['cmap'].tables[0].cmap


def add_glyph_data(font, seqs, seq_to_advance, vadvance):
  """Add hmtx and GlyphOrder data for all sequences in seqs, and ensure
  there's a cmap entry for each single-codepoint sequence. Seqs not in
  seq_to_advance will get a zero advance."""

  # We allow the template cmap to omit mappings for single-codepoint glyphs
  # defined in the template's GlyphOrder table. Similarly, the hmtx table can
  # omit advances. We assume glyphs named 'uniXXXX' or 'uXXXXX(X)' in the
  # GlyphOrder table correspond to codepoints based on the name; we don't
  # attempt to handle other types of names and these must occur in the cmap
  # and hmtx tables in the template.
  #
  # seq_to_advance maps sequences (including single codepoints) to advances.
  # All codepoints in these sequences will be added to the cmap. Some cps
  # in these sequences have no corresponding single-codepoint sequence; they
  # will also get added.
  #
  # The added codepoints have no advance information, so will get a zero
  # advance.

  cmap = get_font_cmap(font)
  hmtx = font['hmtx'].metrics
  vmtx = font['vmtx'].metrics

  # We don't expect sequences to be in the glyphOrder, since we removed all
  # the single-cp sequences from it and don't expect it to already contain
  # names corresponding to multiple-cp sequences. But just in case, we use
  # reverseGlyphMap to avoid duplicating names accidentally.

  updatedGlyphOrder = False
  reverseGlyphMap = font.getReverseGlyphMap()

  # Order the glyphs by grouping all the single-codepoint sequences first,
  # then order by sequence so that related sequences are together. We group
  # by single-codepoint sequence first in order to keep these glyphs
  # together--they're used in the coverage tables for some of the
  # substitutions, and those tables can be more compact this way.
  for seq in sorted(seqs, key=lambda s: (0 if len(s) == 1 else 1, s)):
    name = seq_name(seq)
    if len(seq) == 1:
      cmap[seq[0]] = name
    advance = seq_to_advance.get(seq, 0)
    hmtx[name] = [advance, 0]
    vmtx[name] = [vadvance, 0]
    if name not in reverseGlyphMap:
      font.glyphOrder.append(name)
      updatedGlyphOrder = True

  if updatedGlyphOrder:
    delattr(font, '_reverseGlyphOrderDict')


def add_aliases_to_cmap(font, aliases):
  """Some aliases might map a single codepoint to some other sequence. These
  should map directly to the glyph for that sequence in the cmap. (Others
  will map via GSUB.)
  """
  if not aliases:
    return

  cp_aliases = [seq for seq in aliases if len(seq) == 1]
  if not cp_aliases:
    return

  cmap = get_font_cmap(font)
  for src_seq in cp_aliases:
    cp = src_seq[0]
    name = seq_name(aliases[src_seq])
    cmap[cp] = name


def get_rtl_seq(seq):
  """Return the rtl variant of the sequence, if it has one, else the empty
  sequence.
  """
  # Sequences with ZWJ or TAG_END in them will reflect. Fitzpatrick
  # modifiers however do not, so if we reflect we make a pass to swap them
  # back into their logical order.

  ZWJ = 0x200d
  TAG_END = 0xe007f
  def is_fitzpatrick(cp):
    return 0x1f3fb <= cp <= 0x1f3ff

  if not (ZWJ in seq or TAG_END in seq):
    return ()

  rev_seq = list(seq)
  rev_seq.reverse()
  for i in range(len(rev_seq) - 1, 0, -1):
    if is_fitzpatrick(rev_seq[i-1]):
      rev_seq[i-1], rev_seq[i] = rev_seq[i], rev_seq[i-1]
  return tuple(rev_seq)


def get_gsub_ligature_lookup(font):
  """If the font does not have a GSUB table, create one with a ligature
  substitution lookup. If it does, ensure the first lookup is a properly
  initialized ligature substitution lookup. Return the lookup."""

  # The template might include more lookups after lookup 0, if it has a
  # GSUB table.
  if 'GSUB' not in font:
    ligature_subst = otTables.LigatureSubst()
    ligature_subst.ligatures = {}

    lookup = otTables.Lookup()
    lookup.LookupType = 4
    lookup.LookupFlag = 0
    lookup.SubTableCount = 1
    lookup.SubTable = [ligature_subst]

    font['GSUB'] = add_emoji_gsub.create_simple_gsub([lookup])
  else:
    lookup = font['GSUB'].table.LookupList.Lookup[0]
    assert lookup.LookupFlag == 0

    # importXML doesn't fully init GSUB structures, so help it out
    st = lookup.SubTable[0]
    if not hasattr(lookup, 'LookupType'):
      assert st.LookupType == 4
      setattr(lookup, 'LookupType', 4)

    if not hasattr(st, 'ligatures'):
      setattr(st, 'ligatures', {})

  return lookup


def add_ligature_sequences(font, seqs, aliases):
  """Add ligature sequences."""

  seq_to_target_name = {
      seq: seq_name(seq) for seq in seqs if len(seq) > 1}
  if aliases:
    seq_to_target_name.update({
        seq: seq_name(aliases[seq]) for seq in aliases if len(seq) > 1})
  if not seq_to_target_name:
    return

  rtl_seq_to_target_name = {
      get_rtl_seq(seq): name for seq, name in seq_to_target_name.items()}
  seq_to_target_name.update(rtl_seq_to_target_name)
  # sequences that don't have rtl variants get mapped to the empty sequence;
  # delete it.
  if () in seq_to_target_name:
    del seq_to_target_name[()]

  # organize by first codepoint in sequence
  keyed_ligatures = collections.defaultdict(list)
  for t in seq_to_target_name.items():
    first_cp = t[0][0]
    keyed_ligatures[first_cp].append(t)

  def add_ligature(lookup, cmap, seq, name):
    # The sequences consist of codepoints, but the entries in the ligature
    # table are glyph names. Aliasing can give single codepoints names based
    # on sequences (e.g. 'guardsman' with 'male guardsman') so we map the
    # codepoints through the cmap to get the glyph names.
    glyph_names = [cmap[cp] for cp in seq]

    lig = otTables.Ligature()
    lig.CompCount = len(seq)
    lig.Component = glyph_names[1:]
    lig.LigGlyph = name

    ligatures = lookup.SubTable[0].ligatures
    first_name = glyph_names[0]
    try:
      ligatures[first_name].append(lig)
    except KeyError:
      ligatures[first_name] = [lig]

  lookup = get_gsub_ligature_lookup(font)
  cmap = get_font_cmap(font)
  for first_cp in sorted(keyed_ligatures):
    pairs = keyed_ligatures[first_cp]

    # Sort longest first; this ensures longer sequences with common prefixes
    # are handled before shorter ones. The secondary sort is a standard
    # sort on the codepoints in the sequence.
    pairs.sort(key=lambda pair: (-len(pair[0]), pair[0]))
    for seq, name in pairs:
      add_ligature(lookup, cmap, seq, name)


def update_font_data(font, seq_to_advance, vadvance, aliases):
  """Update the font's cmap, hmtx, GSUB, and GlyphOrder tables."""
  seqs = get_all_seqs(font, seq_to_advance)
  add_glyph_data(font, seqs, seq_to_advance, vadvance)
  add_aliases_to_cmap(font, aliases)
  add_ligature_sequences(font, seqs, aliases)


def apply_aliases(seq_dict, aliases):
  """Aliases is a mapping from sequence to replacement sequence. We can use
  an alias if the target is a key in the dictionary. Furthermore, if the
  source is a key in the dictionary, we can delete it. This updates the
  dictionary and returns the usable aliases."""
  usable_aliases = {}
  for k, v in aliases.items():
    if v in seq_dict:
      usable_aliases[k] = v
      if k in seq_dict:
        del seq_dict[k]
  return usable_aliases


def update_ttx(in_file, out_file, image_dirs, prefix, ext, aliases_file):
  if ext != '.png':
    raise Exception('extension "%s" not supported' % ext)

  seq_to_file = collect_seq_to_file(image_dirs, prefix, ext)
  if not seq_to_file:
    raise ValueError(
        'no sequences with prefix "%s" and extension "%s" in %s' % (
            prefix, ext, ', '.join(image_dirs)))

  aliases = None
  if aliases_file:
    aliases = add_aliases.read_emoji_aliases(aliases_file)
    aliases = apply_aliases(seq_to_file, aliases)

  font = ttx.TTFont()
  font.importXML(in_file)

  lineheight = font['hhea'].ascent - font['hhea'].descent
  map_fn = get_png_file_to_advance_mapper(lineheight)
  seq_to_advance = remap_values(seq_to_file, map_fn)

  vadvance = font['vhea'].advanceHeightMax if 'vhea' in font else lineheight

  update_font_data(font, seq_to_advance, vadvance, aliases)

  font.saveXML(out_file)


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-f', '--in_file', help='ttx input file', metavar='file', required=True)
  parser.add_argument(
      '-o', '--out_file', help='ttx output file', metavar='file',
      required=True)
  parser.add_argument(
      '-d', '--image_dirs', help='directories containing image files',
      nargs='+', metavar='dir', required=True)
  parser.add_argument(
      '-p', '--prefix', help='file prefix (default "emoji_u")',
      metavar='pfx', default='emoji_u')
  parser.add_argument(
      '-e', '--ext', help='file extension (default ".png", currently only '
      '".png" is supported)', metavar='ext', default='.png')
  parser.add_argument(
      '-a', '--aliases', help='process alias table', const='emoji_aliases.txt',
      nargs='?', metavar='file')
  args = parser.parse_args()

  update_ttx(
      args.in_file, args.out_file, args.image_dirs, args.prefix, args.ext,
      args.aliases)


if __name__ == '__main__':
  main()
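A short sketch of the naming and rtl-reflection helpers above, with
hand-checked values; the sequences are illustrative:

    assert cp_name(0x1F441) == 'u1F441'    # supplementary plane: 'u' prefix
    assert cp_name(0x2695) == 'uni2695'    # BMP: 'uni' prefix
    assert seq_name((0x1F468, 0x200D, 0x2695)) == 'u1F468_200D_2695'
    # Reflection reverses a ZWJ sequence but keeps a Fitzpatrick modifier
    # after its base:
    assert get_rtl_seq((0x1F469, 0x1F3FD, 0x200D, 0x2695)) == (
        0x2695, 0x200D, 0x1F469, 0x1F3FD)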
@@ -1,295 +0,0 @@
#!/usr/bin/env python3
# Copyright 2015 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Google Author(s): Doug Felt

"""Tool to update GSUB, hmtx, cmap, glyf tables with svg image glyphs."""
from __future__ import print_function

import argparse
import glob
import logging
import os
import re
import sys

from fontTools.ttLib.tables import otTables
from fontTools.ttLib.tables import _g_l_y_f
from fontTools.ttLib.tables import S_V_G_ as SVG
from fontTools import ttx

from nototools import tool_utils

import add_emoji_gsub
import svg_builder


class FontBuilder(object):
  """A utility for mutating a ttx font. This maintains glyph_order, cmap,
  and hmtx tables, and optionally GSUB, glyf, and SVG tables as well."""

  def __init__(self, font):
    self.font = font
    self.glyph_order = font.getGlyphOrder()
    self.cmap = font['cmap'].tables[0].cmap
    self.hmtx = font['hmtx'].metrics

  def init_gsub(self):
    """Call this if you are going to add ligatures to the font. Creates a
    GSUB table if there isn't one already."""

    if hasattr(self, 'ligatures'):
      return
    font = self.font
    if 'GSUB' not in font:
      ligature_subst = otTables.LigatureSubst()
      ligature_subst.ligatures = {}

      lookup = otTables.Lookup()
      lookup.LookupType = 4
      lookup.LookupFlag = 0
      lookup.SubTableCount = 1
      lookup.SubTable = [ligature_subst]

      font['GSUB'] = add_emoji_gsub.create_simple_gsub([lookup])
    else:
      lookup = font['GSUB'].table.LookupList.Lookup[0]
      assert lookup.LookupType == 4
      assert lookup.LookupFlag == 0
    self.ligatures = lookup.SubTable[0].ligatures

  def init_glyf(self):
    """Call this if you need to create empty glyf entries in the font when
    you add a new glyph."""

    if hasattr(self, 'glyphs'):
      return
    font = self.font
    if 'glyf' not in font:
      glyf_table = _g_l_y_f.table__g_l_y_f()
      glyf_table.glyphs = {}
      glyf_table.glyphOrder = self.glyph_order
      font['glyf'] = glyf_table
    self.glyphs = font['glyf'].glyphs

  def init_svg(self):
    """Call this if you expect to add SVG images in the font. This calls
    init_glyf since SVG support currently requires fallback glyf records for
    each SVG image."""

    if hasattr(self, 'svgs'):
      return

    # svg requires glyf
    self.init_glyf()

    font = self.font
    if 'SVG ' not in font:
      svg_table = SVG.table_S_V_G_()
      svg_table.docList = []
      svg_table.colorPalettes = None
      font['SVG '] = svg_table
    self.svgs = font['SVG '].docList

  def glyph_name(self, string):
    return "_".join(["u%04X" % ord(char) for char in string])

  def glyph_name_to_index(self, name):
    return self.glyph_order.index(name) if name in self.glyph_order else -1

  def glyph_index_to_name(self, glyph_index):
    if glyph_index < len(self.glyph_order):
      return self.glyph_order[glyph_index]
    return ''

  def have_glyph(self, name):
    return self.glyph_name_to_index(name) >= 0

  def _add_ligature(self, glyphstr):
    lig = otTables.Ligature()
    lig.CompCount = len(glyphstr)
    lig.Component = [self.glyph_name(ch) for ch in glyphstr[1:]]
    lig.LigGlyph = self.glyph_name(glyphstr)

    first = self.glyph_name(glyphstr[0])
    try:
      self.ligatures[first].append(lig)
    except KeyError:
      self.ligatures[first] = [lig]

  def _add_empty_glyph(self, glyphstr, name):
    """Create an empty glyph. If glyphstr is not a ligature, add a cmap
    entry for it."""
    if len(glyphstr) == 1:
      self.cmap[ord(glyphstr)] = name
    self.hmtx[name] = [0, 0]
    self.glyph_order.append(name)
    if hasattr(self, 'glyphs'):
      self.glyphs[name] = _g_l_y_f.Glyph()

  def add_components_and_ligature(self, glyphstr):
    """Convert glyphstr to a name and check if it already exists. If not,
    check if it is a ligature (longer than one codepoint), and if it is,
    generate empty glyphs with cmap entries for any missing ligature
    components and add a ligature record. Then generate an empty glyph for
    the name. Return a tuple with the name, index, and a bool indicating
    whether the glyph already existed."""

    name = self.glyph_name(glyphstr)
    index = self.glyph_name_to_index(name)
    exists = index >= 0
    if not exists:
      if len(glyphstr) > 1:
        for char in glyphstr:
          if ord(char) not in self.cmap:
            char_name = self.glyph_name(char)
            self._add_empty_glyph(char, char_name)
        self._add_ligature(glyphstr)
      index = len(self.glyph_order)
      self._add_empty_glyph(glyphstr, name)
    return name, index, exists

  def add_svg(self, doc, hmetrics, name, index):
    """Add an svg table entry. If hmetrics is not None, update the hmtx
    table. This expects the glyph has already been added."""
    # sanity check to make sure name and index correspond.
    assert name == self.glyph_index_to_name(index)
    if hmetrics:
      self.hmtx[name] = hmetrics
    svg_record = (doc, index, index)  # startGlyphId, endGlyphId are the same
    self.svgs.append(svg_record)


def collect_glyphstr_file_pairs(prefix, ext, include=None, exclude=None,
                                verbosity=1):
  """Scan files with the given prefix and extension, and return a list of
  (glyphstr, filename) where glyphstr is the character or ligature, and
  filename is the image file associated with it. The glyphstr is formed by
  decoding the filename (exclusive of the prefix) as a sequence of hex
  codepoints separated by underscore. Include, if defined, is a regex string
  to include only matched filenames. Exclude, if defined, is a regex string
  to exclude matched filenames, and is applied after include."""

  image_files = {}
  glob_pat = "%s*.%s" % (prefix, ext)
  leading = len(prefix)
  trailing = len(ext) + 1  # include dot
  logging.info("Looking for images matching '%s'.", glob_pat)
  ex_count = 0
  ex = re.compile(exclude) if exclude else None
  inc = re.compile(include) if include else None
  if inc:
    logging.info("Including images matching '%s'.", include)
  if ex:
    logging.info("Excluding images matching '%s'.", exclude)

  for image_file in glob.glob(glob_pat):
    if inc and not inc.search(image_file):
      continue

    if ex and ex.search(image_file):
      if verbosity > 1:
        print("Exclude %s" % image_file)
      ex_count += 1
      continue

    codes = image_file[leading:-trailing]
    if "_" in codes:
      pieces = codes.split("_")
      u = "".join([chr(int(code, 16)) for code in pieces])
    else:
      u = chr(int(codes, 16))
    image_files[u] = image_file

  if ex_count:
    logging.info("Excluded %d files.", ex_count)
  if not image_files:
    raise Exception("No image files matching '%s'." % glob_pat)
  logging.info("Matched %d files.", len(image_files))
  return list(image_files.items())


def sort_glyphstr_tuples(glyphstr_tuples):
  """The list contains tuples whose first element is a string representing
  a character or ligature. It is sorted with shorter glyphstrs first, then
  alphabetically. This ensures that ligature components are added to the
  font before any ligatures that contain them."""
  glyphstr_tuples.sort(key=lambda t: (len(t[0]), t[0]))


def add_image_glyphs(in_file, out_file, pairs):
  """Add images from pairs (glyphstr, filename) to .ttx file in_file and
  write to .ttx file out_file."""

  font = ttx.TTFont()
  font.importXML(in_file)

  sort_glyphstr_tuples(pairs)

  font_builder = FontBuilder(font)
  # we've already sorted by length, so the longest glyphstrs are at the end.
  # To see if we have ligatures, we just need to check the last one.
  if len(pairs[-1][0]) > 1:
    font_builder.init_gsub()

  img_builder = svg_builder.SvgBuilder(font_builder)
  for glyphstr, filename in pairs:
    logging.debug("Adding glyph for U+%s", ",".join(
        ["%04X" % ord(char) for char in glyphstr]))
    img_builder.add_from_filename(glyphstr, filename)

  font.saveXML(out_file)
  logging.info("Added %s images to %s", len(pairs), out_file)


def main(argv):
  usage = """This will search for files that have image_prefix followed by
  one or more hex numbers (separated by underscore if more than one), and
  end in ".svg". For example, if image_prefix is "icons/u", then files with
  names like "icons/u1F4A9.svg" or "icons/u1F1EF_1F1F5.svg" will be loaded.
  The script then adds cmap, hmtx, and potentially GSUB entries for the
  Unicode characters found. The advance width will be chosen based on image
  aspect ratio. If Unicode values outside the BMP are desired, the existing
  cmap table should be of the appropriate (format 12) type. Only the first
  cmap table and the first GSUB lookup (if existing) are modified."""

  parser = argparse.ArgumentParser(
      description='Update cmap, glyf, GSUB, and hmtx tables from image '
      'glyphs.', epilog=usage)
  parser.add_argument(
      'in_file', help='Input ttx file name.', metavar='fname')
  parser.add_argument(
      'out_file', help='Output ttx file name.', metavar='fname')
  parser.add_argument(
      'image_prefix', help='Location and prefix of image files.',
      metavar='path')
  parser.add_argument(
      '-i', '--include', help='include files whose name matches this regex',
      metavar='regex')
  parser.add_argument(
      '-e', '--exclude', help='exclude files whose name matches this regex',
      metavar='regex')
  parser.add_argument(
      '-l', '--loglevel', help='log level name', default='warning')
  args = parser.parse_args(argv)

  tool_utils.setup_logging(args.loglevel)

  pairs = collect_glyphstr_file_pairs(
      args.image_prefix, 'svg', include=args.include, exclude=args.exclude)
  add_image_glyphs(args.in_file, args.out_file, pairs)


if __name__ == '__main__':
  main(sys.argv[1:])
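FontBuilder.glyph_name encodes each character of a glyph string as uXXXX
joined by underscores, so ligature glyph names line up with the image file
naming. A hedged sketch (assumes 'font' is an already-imported ttx font;
the glyph string is illustrative):

    builder = FontBuilder(font)
    builder.init_gsub()  # needed before adding ligatures
    assert builder.glyph_name('\U0001F1EF\U0001F1F5') == 'u1F1EF_1F1F5'
    name, index, existed = builder.add_components_and_ligature(
        '\U0001F1EF\U0001F1F5')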
@ -1,463 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright 2016 Google Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Compare emoji image file namings against unicode property data."""
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import collections
|
||||
import glob
|
||||
import os
|
||||
from os import path
|
||||
import re
|
||||
import sys
|
||||
|
||||
from nototools import unicode_data
|
||||
import add_aliases
|
||||
|
||||
ZWJ = 0x200d
|
||||
EMOJI_VS = 0xfe0f
|
||||
|
||||
END_TAG = 0xe007f
|
||||
|
||||
def _make_tag_set():
|
||||
tag_set = set()
|
||||
tag_set |= set(range(0xe0030, 0xe003a)) # 0-9
|
||||
tag_set |= set(range(0xe0061, 0xe007b)) # a-z
|
||||
tag_set.add(END_TAG)
|
||||
return tag_set
|
||||
|
||||
TAG_SET = _make_tag_set()
|
||||
|
||||
_namedata = None
|
||||
|
||||
def seq_name(seq):
|
||||
global _namedata
|
||||
|
||||
if not _namedata:
|
||||
def strip_vs_map(seq_map):
|
||||
return {
|
||||
unicode_data.strip_emoji_vs(k): v
|
||||
for k, v in seq_map.iteritems()}
|
||||
_namedata = [
|
||||
strip_vs_map(unicode_data.get_emoji_combining_sequences()),
|
||||
strip_vs_map(unicode_data.get_emoji_flag_sequences()),
|
||||
strip_vs_map(unicode_data.get_emoji_modifier_sequences()),
|
||||
strip_vs_map(unicode_data.get_emoji_zwj_sequences()),
|
||||
]
|
||||
|
||||
if len(seq) == 1:
|
||||
return unicode_data.name(seq[0], None)
|
||||
|
||||
for data in _namedata:
|
||||
if seq in data:
|
||||
return data[seq]
|
||||
if EMOJI_VS in seq:
|
||||
non_vs_seq = unicode_data.strip_emoji_vs(seq)
|
||||
for data in _namedata:
|
||||
if non_vs_seq in data:
|
||||
return data[non_vs_seq]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _check_no_vs(sorted_seq_to_filepath):
|
||||
"""Our image data does not use emoji presentation variation selectors."""
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
if EMOJI_VS in seq:
|
||||
print('check no VS: FE0F in path: %s' % fp)
|
||||
|
||||
|
||||
def _check_valid_emoji_cps(sorted_seq_to_filepath, unicode_version):
|
||||
"""Ensure all cps in these sequences are valid emoji cps or specific cps
|
||||
used in forming emoji sequences. This is a 'pre-check' that reports
|
||||
this specific problem."""
|
||||
|
||||
valid_cps = set(unicode_data.get_emoji())
|
||||
if unicode_version is None or unicode_version >= unicode_data.PROPOSED_EMOJI_AGE:
|
||||
valid_cps |= unicode_data.proposed_emoji_cps()
|
||||
else:
|
||||
valid_cps = set(
|
||||
cp for cp in valid_cps if unicode_data.age(cp) <= unicode_version)
|
||||
valid_cps.add(0x200d) # ZWJ
|
||||
valid_cps.add(0x20e3) # combining enclosing keycap
|
||||
valid_cps.add(0xfe0f) # variation selector (emoji presentation)
|
||||
valid_cps.add(0xfe82b) # PUA value for unknown flag
|
||||
valid_cps |= TAG_SET # used in subregion tag sequences
|
||||
|
||||
not_emoji = {}
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
for cp in seq:
|
||||
if cp not in valid_cps:
|
||||
if cp not in not_emoji:
|
||||
not_emoji[cp] = []
|
||||
not_emoji[cp].append(fp)
|
||||
|
||||
if len(not_emoji):
|
||||
print(
|
||||
'check valid emoji cps: %d non-emoji cp found' % len(not_emoji),
|
||||
file=sys.stderr)
|
||||
for cp in sorted(not_emoji):
|
||||
fps = not_emoji[cp]
|
||||
print(
|
||||
'check valid emoji cps: %04x (in %d sequences)' % (cp, len(fps)),
|
||||
file=sys.stderr)
|
||||
|
||||
|
||||
def _check_zwj(sorted_seq_to_filepath):
|
||||
"""Ensure zwj is only between two appropriate emoji. This is a 'pre-check'
|
||||
that reports this specific problem."""
|
||||
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
if ZWJ not in seq:
|
||||
continue
|
||||
if seq[0] == ZWJ:
|
||||
print('check zwj: zwj at head of sequence in %s' % fp, file=sys.stderr)
|
||||
if len(seq) == 1:
|
||||
continue
|
||||
if seq[-1] == ZWJ:
|
||||
print('check zwj: zwj at end of sequence in %s' % fp, file=sys.stderr)
|
||||
for i, cp in enumerate(seq):
|
||||
if cp == ZWJ:
|
||||
if i > 0:
|
||||
pcp = seq[i-1]
|
||||
if pcp != EMOJI_VS and not unicode_data.is_emoji(pcp):
|
||||
print(
|
||||
'check zwj: non-emoji %04x preceeds ZWJ in %s' % (pcp, fp),
|
||||
file=sys.stderr)
|
||||
if i < len(seq) - 1:
|
||||
fcp = seq[i+1]
|
||||
if not unicode_data.is_emoji(fcp):
|
||||
print(
|
||||
'check zwj: non-emoji %04x follows ZWJ in %s' % (fcp, fp),
|
||||
file=sys.stderr)
|
||||
|
||||
|
||||
def _check_flags(sorted_seq_to_filepath):
|
||||
"""Ensure regional indicators are only in sequences of one or two, and
|
||||
never mixed."""
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
have_reg = None
|
||||
for cp in seq:
|
||||
is_reg = unicode_data.is_regional_indicator(cp)
|
||||
if have_reg == None:
|
||||
have_reg = is_reg
|
||||
elif have_reg != is_reg:
|
||||
print(
|
||||
'check flags: mix of regional and non-regional in %s' % fp,
|
||||
file=sys.stderr)
|
||||
if have_reg and len(seq) > 2:
|
||||
# We provide dummy glyphs for regional indicators, so there are sequences
|
||||
# with single regional indicator symbols, the len check handles this.
|
||||
print(
|
||||
'check flags: regional indicator sequence length != 2 in %s' % fp,
|
||||
file=sys.stderr)
|
||||
|
||||
def _check_tags(sorted_seq_to_filepath):
|
||||
"""Ensure tag sequences (for subregion flags) conform to the spec. We don't
|
||||
validate against CLDR, just that there's a sequence of 2 or more tags starting
|
||||
and ending with the appropriate codepoints."""
|
||||
|
||||
BLACK_FLAG = 0x1f3f4
|
||||
BLACK_FLAG_SET = set([BLACK_FLAG])
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
seq_set = set(cp for cp in seq)
|
||||
overlap_set = seq_set & TAG_SET
|
||||
if not overlap_set:
|
||||
continue
|
||||
if seq[0] != BLACK_FLAG:
|
||||
print('check tags: bad start tag in %s' % fp)
|
||||
elif seq[-1] != END_TAG:
|
||||
print('check tags: bad end tag in %s' % fp)
|
||||
elif len(seq) < 4:
|
||||
print('check tags: sequence too short in %s' % fp)
|
||||
elif seq_set - TAG_SET != BLACK_FLAG_SET:
|
||||
print('check tags: non-tag items in %s' % fp)
|
||||
|
||||
|
||||
def _check_skintone(sorted_seq_to_filepath):
|
||||
"""Ensure skin tone modifiers are not applied to emoji that are not defined
|
||||
to take them. May appear standalone, though. Also check that emoji that take
|
||||
skin tone modifiers have a complete set."""
|
||||
base_to_modifiers = collections.defaultdict(set)
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
for i, cp in enumerate(seq):
|
||||
if unicode_data.is_skintone_modifier(cp):
|
||||
if i == 0:
|
||||
if len(seq) > 1:
|
||||
print(
|
||||
'check skintone: skin color selector first in sequence %s' % fp,
|
||||
file=sys.stderr)
|
||||
# standalone are ok
|
||||
continue
|
||||
pcp = seq[i-1]
|
||||
if not unicode_data.is_emoji_modifier_base(pcp):
|
||||
print(
|
||||
'check skintone: emoji skintone modifier applied to non-base ' +
|
||||
'at %d: %s' % (i, fp), file=sys.stderr)
|
||||
else:
|
||||
if pcp not in base_to_modifiers:
|
||||
base_to_modifiers[pcp] = set()
|
||||
base_to_modifiers[pcp].add(cp)
|
||||
|
||||
for cp, modifiers in sorted(base_to_modifiers.iteritems()):
|
||||
if len(modifiers) != 5:
|
||||
print(
|
||||
'check skintone: base %04x has %d modifiers defined (%s) in %s' % (
|
||||
cp, len(modifiers),
|
||||
', '.join('%04x' % cp for cp in sorted(modifiers)), fp),
|
||||
file=sys.stderr)
|
||||
|
||||
|
||||
def _check_zwj_sequences(sorted_seq_to_filepath, unicode_version):
|
||||
"""Verify that zwj sequences are valid for the given unicode version."""
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
if ZWJ not in seq:
|
||||
continue
|
||||
age = unicode_data.get_emoji_sequence_age(seq)
|
||||
if age is None or unicode_version is not None and age > unicode_version:
|
||||
print('check zwj sequences: undefined sequence %s' % fp)
|
||||
|
||||
|
||||
def _check_no_alias_sources(sorted_seq_to_filepath):
|
||||
"""Check that we don't have sequences that we expect to be aliased to
|
||||
some other sequence."""
|
||||
aliases = add_aliases.read_default_emoji_aliases()
|
||||
for seq, fp in sorted_seq_to_filepath.iteritems():
|
||||
if seq in aliases:
|
||||
print('check no alias sources: aliased sequence %s' % fp)
|
||||
|
||||
|
||||
def _check_coverage(seq_to_filepath, unicode_version):
|
||||
"""Ensure we have all and only the cps and sequences that we need for the
|
||||
font as of this version."""
|
||||
|
||||
age = unicode_version
|
||||
|
||||
non_vs_to_canonical = {}
|
||||
for k in seq_to_filepath:
|
||||
if EMOJI_VS in k:
|
||||
non_vs = unicode_data.strip_emoji_vs(k)
|
||||
non_vs_to_canonical[non_vs] = k
|
||||
|
||||
aliases = add_aliases.read_default_emoji_aliases()
|
||||
for k, v in sorted(aliases.items()):
|
||||
if v not in seq_to_filepath and v not in non_vs_to_canonical:
|
||||
alias_str = unicode_data.seq_to_string(k)
|
||||
target_str = unicode_data.seq_to_string(v)
|
||||
print('coverage: alias %s missing target %s' % (alias_str, target_str))
|
||||
continue
|
||||
if k in seq_to_filepath or k in non_vs_to_canonical:
|
||||
alias_str = unicode_data.seq_to_string(k)
|
||||
target_str = unicode_data.seq_to_string(v)
|
||||
print('coverage: alias %s already exists as %s (%s)' % (
|
||||
alias_str, target_str, seq_name(v)))
|
||||
continue
|
||||
filename = seq_to_filepath.get(v) or seq_to_filepath[non_vs_to_canonical[v]]
|
||||
seq_to_filepath[k] = 'alias:' + filename
|
||||
|
||||
# check single emoji, this includes most of the special chars
|
||||
emoji = sorted(unicode_data.get_emoji(age=age))
|
||||
for cp in emoji:
|
||||
if tuple([cp]) not in seq_to_filepath:
|
||||
print(
|
||||
'coverage: missing single %04x (%s)' % (
|
||||
cp, unicode_data.name(cp, '<no name>')))
|
||||
|
||||
# special characters
|
||||
# all but combining enclosing keycap are currently marked as emoji
|
||||
for cp in [ord('*'), ord('#'), ord(u'\u20e3')] + range(0x30, 0x3a):
|
||||
if cp not in emoji and tuple([cp]) not in seq_to_filepath:
|
||||
print('coverage: missing special %04x (%s)' % (cp, unicode_data.name(cp)))
|
||||
|
||||
# combining sequences
|
||||
comb_seq_to_name = sorted(
|
||||
unicode_data.get_emoji_combining_sequences(age=age).iteritems())
|
||||
for seq, name in comb_seq_to_name:
|
||||
if seq not in seq_to_filepath:
|
||||
# strip vs and try again
|
||||
non_vs_seq = unicode_data.strip_emoji_vs(seq)
|
||||
if non_vs_seq not in seq_to_filepath:
|
||||
print('coverage: missing combining sequence %s (%s)' %
|
||||
(unicode_data.seq_to_string(seq), name))
|
||||
|
||||
# flag sequences
|
||||
flag_seq_to_name = sorted(
|
||||
unicode_data.get_emoji_flag_sequences(age=age).iteritems())
|
||||
for seq, name in flag_seq_to_name:
|
||||
if seq not in seq_to_filepath:
|
||||
print('coverage: missing flag sequence %s (%s)' %
|
||||
(unicode_data.seq_to_string(seq), name))
|
||||
|
||||
# skin tone modifier sequences
|
||||
mod_seq_to_name = sorted(
|
||||
unicode_data.get_emoji_modifier_sequences(age=age).iteritems())
|
||||
for seq, name in mod_seq_to_name:
|
||||
if seq not in seq_to_filepath:
|
||||
print('coverage: missing modifier sequence %s (%s)' % (
|
||||
unicode_data.seq_to_string(seq), name))
|
||||
|
||||
# zwj sequences
|
||||
# some of ours include the emoji presentation variation selector and some
|
||||
# don't, and the same is true for the canonical sequences. normalize all
|
||||
# of them to omit it to test coverage, but report the canonical sequence.
|
||||
zwj_seq_without_vs = set()
|
||||
for seq in seq_to_filepath:
|
||||
if ZWJ not in seq:
|
||||
continue
|
||||
if EMOJI_VS in seq:
|
||||
seq = tuple(cp for cp in seq if cp != EMOJI_VS)
|
||||
zwj_seq_without_vs.add(seq)
|
||||
|
||||
for seq, name in sorted(
|
||||
unicode_data.get_emoji_zwj_sequences(age=age).iteritems()):
|
||||
if EMOJI_VS in seq:
|
||||
test_seq = tuple(s for s in seq if s != EMOJI_VS)
|
||||
else:
|
||||
test_seq = seq
|
||||
if test_seq not in zwj_seq_without_vs:
|
||||
print('coverage: missing (canonical) zwj sequence %s (%s)' % (
|
||||
unicode_data.seq_to_string(seq), name))
|
||||
|
||||
# check for 'unknown flag'
|
||||
# this is either emoji_ufe82b or 'unknown_flag', but we filter out things that
|
||||
# don't start with our prefix so 'unknown_flag' would be excluded by default.
|
||||
if tuple([0xfe82b]) not in seq_to_filepath:
|
||||
print('coverage: missing unknown flag PUA fe82b')
|
||||
|
||||
|
||||
def check_sequence_to_filepath(seq_to_filepath, unicode_version, coverage):
|
||||
sorted_seq_to_filepath = collections.OrderedDict(
|
||||
sorted(seq_to_filepath.items()))
|
||||
_check_no_vs(sorted_seq_to_filepath)
|
||||
_check_valid_emoji_cps(sorted_seq_to_filepath, unicode_version)
|
||||
_check_zwj(sorted_seq_to_filepath)
|
||||
_check_flags(sorted_seq_to_filepath)
|
||||
_check_tags(sorted_seq_to_filepath)
|
||||
_check_skintone(sorted_seq_to_filepath)
|
||||
_check_zwj_sequences(sorted_seq_to_filepath, unicode_version)
|
||||
_check_no_alias_sources(sorted_seq_to_filepath)
|
||||
if coverage:
|
||||
_check_coverage(sorted_seq_to_filepath, unicode_version)
|
||||
|
||||
|
||||
def create_sequence_to_filepath(name_to_dirpath, prefix, suffix):
|
||||
"""Check names, and convert name to sequences for names that are ok,
|
||||
returning a sequence to file path mapping. Reports bad segments
|
||||
of a name to stderr."""
|
||||
segment_re = re.compile(r'^[0-9a-f]{4,6}$')
|
||||
result = {}
|
||||
for name, dirname in name_to_dirpath.iteritems():
|
||||
if not name.startswith(prefix):
|
||||
print('expected prefix "%s" for "%s"' % (prefix, name))
|
||||
continue
|
||||
|
||||
segments = name[len(prefix): -len(suffix)].split('_')
|
||||
segfail = False
|
||||
seq = []
|
||||
for s in segments:
|
||||
if not segment_re.match(s):
|
||||
print('bad codepoint name "%s" in %s/%s' % (s, dirname, name))
|
||||
segfail = True
|
||||
continue
|
||||
n = int(s, 16)
|
||||
if n > 0x10ffff:
|
||||
print('codepoint "%s" out of range in %s/%s' % (s, dirname, name))
|
||||
segfail = True
|
||||
continue
|
||||
seq.append(n)
|
||||
if not segfail:
|
||||
result[tuple(seq)] = path.join(dirname, name)
|
||||
return result


def collect_name_to_dirpath(directory, prefix, suffix, exclude=None):
  """Return a mapping from filename to path rooted at directory, ignoring files
  that don't match suffix, and subtrees with names in exclude. Report when a
  filename appears in more than one subdir; the first path found is kept."""
  result = {}
  for dirname, dirs, files in os.walk(directory, topdown=True):
    if exclude:
      dirs[:] = [d for d in dirs if d not in exclude]

    if directory != '.':
      dirname = path.join(directory, dirname)
    for f in files:
      if not f.endswith(suffix):
        continue
      if f in result:
        print('duplicate file "%s" in %s and %s ' % (
            f, dirname, result[f]), file=sys.stderr)
        continue
      result[f] = dirname
  return result


def collect_name_to_dirpath_with_override(dirs, prefix, suffix, exclude=None):
  """Return a mapping from filename to a directory path rooted at a directory
  in dirs, using collect_name_to_dirpath. The last directory found is retained,
  so later root directories override earlier ones; no error is reported when a
  file appears under more than one root directory. Use 'exclude' to name
  subdirectories (of any root) whose subtree you wish to skip."""
  result = {}
  for d in dirs:
    result.update(collect_name_to_dirpath(d, prefix, suffix, exclude))
  return result
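
# Sketch of the override behavior, not in the original file ('base' and
# 'extras' are hypothetical roots): because each root simply updates the
# dict, a file present under both roots resolves to the later one:
#
#   collect_name_to_dirpath_with_override(
#       ['base', 'extras'], prefix='emoji_u', suffix='.png')
#   # 'emoji_u0023.png' maps into 'extras' if both roots contain it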


def run_check(dirs, prefix, suffix, exclude, unicode_version, coverage):
  msg = ''
  if unicode_version:
    msg = ' (%3.1f)' % unicode_version
  print('Checking files with prefix "%s" and suffix "%s"%s in:\n %s' % (
      prefix, suffix, msg, '\n '.join(dirs)))
  name_to_dirpath = collect_name_to_dirpath_with_override(
      dirs, prefix=prefix, suffix=suffix, exclude=exclude)
  print('checking %d names' % len(name_to_dirpath))
  seq_to_filepath = create_sequence_to_filepath(name_to_dirpath, prefix, suffix)
  print('checking %d sequences' % len(seq_to_filepath))
  check_sequence_to_filepath(seq_to_filepath, unicode_version, coverage)
  print('done.')


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-d', '--dirs', help='directory roots containing emoji images',
      metavar='dir', nargs='+', required=True)
  parser.add_argument(
      '-e', '--exclude', help='names of source subdirs to exclude',
      metavar='dir', nargs='+')
  parser.add_argument(
      '-c', '--coverage', help='test for complete coverage',
      action='store_true')
  parser.add_argument(
      '-p', '--prefix', help='prefix to match, default "emoji_u"',
      metavar='pfx', default='emoji_u')
  parser.add_argument(
      '-s', '--suffix', help='suffix to match, default ".png"', metavar='sfx',
      default='.png')
  parser.add_argument(
      '-u', '--unicode_version', help='limit to this unicode version or before',
      metavar='version', type=float)
  args = parser.parse_args()
  run_check(
      args.dirs, args.prefix, args.suffix, args.exclude, args.unicode_version,
      args.coverage)


if __name__ == '__main__':
  main()
@ -1,150 +0,0 @@
#!/usr/bin/env python3
# Copyright 2015 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Google Author(s): Doug Felt

"""Tool to collect emoji svg glyphs into one directory for processing
by add_svg_glyphs. There are two sources, noto/color_emoji/svg and
noto/third_party/region-flags/svg. The add_svg_glyphs file expects
the file names to contain the character sequence they represent,
encoded as hex codepoints separated by underscore. The files in
noto/color_emoji/svg do this, and have the prefix 'emoji_u', but the
files in region-flags/svg just have the two-letter code.

We create a directory and copy the files into it with the required
naming convention. First we do this for region-flags/svg, converting
the names, and then we do this for color_emoji/svg, so any duplicates
will be overwritten by what we assume are the preferred svg. We use
copies instead of symlinks so we can continue to optimize or modify
the files without messing with the originals."""

import argparse
import glob
import logging
import os
import os.path
import re
import shutil
import sys

from nototools import tool_utils


def _is_svg(f):
  return f.endswith('.svg')


def _is_svg_and_startswith_emoji(f):
  return f.endswith('.svg') and f.startswith('emoji_u')


def _flag_rename(f):
  """Converts a file name from two-letter upper-case ASCII to our expected
  'emoji_uXXXXX_XXXXX' form, mapping each character to the corresponding
  regional indicator symbol."""

  cp_strs = []
  name, ext = os.path.splitext(f)
  if len(name) != 2:
    raise ValueError('illegal flag name "%s"' % f)
  for cp in name:
    if not ('A' <= cp <= 'Z'):
      raise ValueError('illegal flag name "%s"' % f)
    ncp = 0x1f1e6 - 0x41 + ord(cp)
    cp_strs.append("%04x" % ncp)
  return 'emoji_u%s%s' % ('_'.join(cp_strs), ext)
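
# Example of the renaming above, not in the original file: each ASCII letter
# maps to its regional indicator symbol (0x1f1e6 for 'A' through 0x1f1ff for
# 'Z'), so:
#
#   _flag_rename('US.svg')  # -> 'emoji_u1f1fa_1f1f8.svg'
#   _flag_rename('DE.svg')  # -> 'emoji_u1f1e9_1f1ea.svg'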


def copy_with_rename(src_dir, dst_dir, accept_pred=None, rename=None):
  """Copy files from src_dir to dst_dir that match accept_pred (all if None)
  and rename using rename (if not None), replacing existing files. accept_pred
  takes the filename and returns True if the file should be copied, rename
  takes the filename and returns a new file name."""

  count = 0
  replace_count = 0
  for src_filename in os.listdir(src_dir):
    if accept_pred and not accept_pred(src_filename):
      continue
    dst_filename = rename(src_filename) if rename else src_filename
    src = os.path.join(src_dir, src_filename)
    dst = os.path.join(dst_dir, dst_filename)
    if os.path.exists(dst):
      logging.debug('Replacing existing file %s', dst)
      os.unlink(dst)
      replace_count += 1
    shutil.copy2(src, dst)
    logging.debug('cp -p %s %s', src, dst)
    count += 1
  if logging.getLogger().getEffectiveLevel() <= logging.INFO:
    src_short = tool_utils.short_path(src_dir)
    dst_short = tool_utils.short_path(dst_dir)
    logging.info('Copied %d files (replacing %d) from %s to %s',
                 count, replace_count, src_short, dst_short)


def build_svg_dir(dst_dir, clean=False, emoji_dir='', flags_dir=''):
  """Copies/renames files from emoji_dir and then flags_dir, giving them the
  standard format and prefix ('emoji_u' followed by codepoints expressed in hex
  separated by underscore). If clean, removes the target dir before proceeding.
  If either emoji_dir or flags_dir are empty, skips them."""

  dst_dir = tool_utils.ensure_dir_exists(dst_dir, clean=clean)

  if not emoji_dir and not flags_dir:
    logging.warning('Nothing to do.')
    return

  if emoji_dir:
    copy_with_rename(
        emoji_dir, dst_dir, accept_pred=_is_svg_and_startswith_emoji)

  if flags_dir:
    copy_with_rename(
        flags_dir, dst_dir, accept_pred=_is_svg, rename=_flag_rename)


def main(argv):
  DEFAULT_EMOJI_DIR = '[emoji]/svg'
  DEFAULT_FLAGS_DIR = '[emoji]/third_party/region-flags/svg'

  parser = argparse.ArgumentParser(
      description='Collect svg files into target directory with prefix.')
  parser.add_argument(
      'dst_dir', help='Directory to hold copied files.', metavar='dir')
  parser.add_argument(
      '--clean', '-c', help='Replace target directory', action='store_true')
  parser.add_argument(
      '--flags_dir', '-f', metavar='dir', help='directory containing flag svg, '
      'default %s' % DEFAULT_FLAGS_DIR, default=DEFAULT_FLAGS_DIR)
  parser.add_argument(
      '--emoji_dir', '-e', metavar='dir',
      help='directory containing emoji svg, default %s' % DEFAULT_EMOJI_DIR,
      default=DEFAULT_EMOJI_DIR)
  parser.add_argument(
      '-l', '--loglevel', help='log level name/value', default='warning')
  args = parser.parse_args(argv)

  tool_utils.setup_logging(args.loglevel)

  args.flags_dir = tool_utils.resolve_path(args.flags_dir)
  args.emoji_dir = tool_utils.resolve_path(args.emoji_dir)
  build_svg_dir(
      args.dst_dir, clean=args.clean, emoji_dir=args.emoji_dir,
      flags_dir=args.flags_dir)


if __name__ == '__main__':
  main(sys.argv[1:])
@ -1,458 +0,0 @@
# annotations
###
### aliases
###
annotation: ok
1f3c3 # RUNNER -> man running
1f3c3 1f3fb # light skin tone
1f3c3 1f3fc # medium-light skin tone
1f3c3 1f3fd # medium skin tone
1f3c3 1f3fe # medium-dark skin tone
1f3c3 1f3ff # dark skin tone
1f3c4 # SURFER -> man surfing
1f3c4 1f3fb # light skin tone
1f3c4 1f3fc # medium-light skin tone
1f3c4 1f3fd # medium skin tone
1f3c4 1f3fe # medium-dark skin tone
1f3c4 1f3ff # dark skin tone
1f3ca # SWIMMER -> man swimming
1f3ca 1f3fb # light skin tone
1f3ca 1f3fc # medium-light skin tone
1f3ca 1f3fd # medium skin tone
1f3ca 1f3fe # medium-dark skin tone
1f3ca 1f3ff # dark skin tone
1f3cb # WEIGHT LIFTER -> man lifting weights
1f3cb 1f3fb # light skin tone
1f3cb 1f3fc # medium-light skin tone
1f3cb 1f3fd # medium skin tone
1f3cb 1f3fe # medium-dark skin tone
1f3cb 1f3ff # dark skin tone
1f3cc # GOLFER -> man golfing
1f3cc 1f3fb # light skin tone
1f3cc 1f3fc # medium-light skin tone
1f3cc 1f3fd # medium skin tone
1f3cc 1f3fe # medium-dark skin tone
1f3cc 1f3ff # dark skin tone
1f46a # FAMILY -> family: man, woman, boy
1f46e # POLICE OFFICER -> man police officer
1f46e 1f3fb # light skin tone
1f46e 1f3fc # medium-light skin tone
1f46e 1f3fd # medium skin tone
1f46e 1f3fe # medium-dark skin tone
1f46e 1f3ff # dark skin tone
1f46f # WOMAN WITH BUNNY EARS -> women with bunny ears partying
1f471 # PERSON WITH BLOND HAIR -> blond-haired man
1f471 1f3fb # light skin tone
1f471 1f3fc # medium-light skin tone
1f471 1f3fd # medium skin tone
1f471 1f3fe # medium-dark skin tone
1f471 1f3ff # dark skin tone
1f473 # MAN WITH TURBAN -> man wearing turban
1f473 1f3fb # light skin tone
1f473 1f3fc # medium-light skin tone
1f473 1f3fd # medium skin tone
1f473 1f3fe # medium-dark skin tone
1f473 1f3ff # dark skin tone
1f477 # CONSTRUCTION WORKER -> man construction worker
1f477 1f3fb # light skin tone
1f477 1f3fc # medium-light skin tone
1f477 1f3fd # medium skin tone
1f477 1f3fe # medium-dark skin tone
1f477 1f3ff # dark skin tone
1f481 # INFORMATION DESK PERSON -> woman tipping hand
1f481 1f3fb # light skin tone
1f481 1f3fc # medium-light skin tone
1f481 1f3fd # medium skin tone
1f481 1f3fe # medium-dark skin tone
1f481 1f3ff # dark skin tone
1f482 # GUARDSMAN -> man guard
1f482 1f3fb # light skin tone
1f482 1f3fc # medium-light skin tone
1f482 1f3fd # medium skin tone
1f482 1f3fe # medium-dark skin tone
1f482 1f3ff # dark skin tone
1f486 # FACE MASSAGE -> woman getting massage
1f486 1f3fb # light skin tone
1f486 1f3fc # medium-light skin tone
1f486 1f3fd # medium skin tone
1f486 1f3fe # medium-dark skin tone
1f486 1f3ff # dark skin tone
1f487 # HAIRCUT -> woman getting haircut
1f487 1f3fb # light skin tone
1f487 1f3fc # medium-light skin tone
1f487 1f3fd # medium skin tone
1f487 1f3fe # medium-dark skin tone
1f487 1f3ff # dark skin tone
1f48f # KISS -> kiss: woman, man
1f491 # COUPLE WITH HEART -> couple with heart: woman, man
1f575 # SLEUTH OR SPY -> man detective
1f575 1f3fb # light skin tone
1f575 1f3fc # medium-light skin tone
1f575 1f3fd # medium skin tone
1f575 1f3fe # medium-dark skin tone
1f575 1f3ff # dark skin tone
1f645 # FACE WITH NO GOOD GESTURE -> woman gesturing NO
1f645 1f3fb # light skin tone
1f645 1f3fc # medium-light skin tone
1f645 1f3fd # medium skin tone
1f645 1f3fe # medium-dark skin tone
1f645 1f3ff # dark skin tone
1f646 # FACE WITH OK GESTURE -> woman gesturing OK
1f646 1f3fb # light skin tone
1f646 1f3fc # medium-light skin tone
1f646 1f3fd # medium skin tone
1f646 1f3fe # medium-dark skin tone
1f646 1f3ff # dark skin tone
1f647 # PERSON BOWING DEEPLY -> man bowing
1f647 1f3fb # light skin tone
1f647 1f3fc # medium-light skin tone
1f647 1f3fd # medium skin tone
1f647 1f3fe # medium-dark skin tone
1f647 1f3ff # dark skin tone
1f64b # HAPPY PERSON RAISING ONE HAND -> woman raising hand
1f64b 1f3fb # light skin tone
1f64b 1f3fc # medium-light skin tone
1f64b 1f3fd # medium skin tone
1f64b 1f3fe # medium-dark skin tone
1f64b 1f3ff # dark skin tone
1f64d # PERSON FROWNING -> woman frowning
1f64d 1f3fb # light skin tone
1f64d 1f3fc # medium-light skin tone
1f64d 1f3fd # medium skin tone
1f64d 1f3fe # medium-dark skin tone
1f64d 1f3ff # dark skin tone
1f64e # PERSON WITH POUTING FACE -> woman pouting
1f64e 1f3fb # light skin tone
1f64e 1f3fc # medium-light skin tone
1f64e 1f3fd # medium skin tone
1f64e 1f3fe # medium-dark skin tone
1f64e 1f3ff # dark skin tone
1f6a3 # ROWBOAT -> man rowing boat
1f6a3 1f3fb # light skin tone
1f6a3 1f3fc # medium-light skin tone
1f6a3 1f3fd # medium skin tone
1f6a3 1f3fe # medium-dark skin tone
1f6a3 1f3ff # dark skin tone
1f6b4 # BICYCLIST -> man biking
1f6b4 1f3fb # light skin tone
1f6b4 1f3fc # medium-light skin tone
1f6b4 1f3fd # medium skin tone
1f6b4 1f3fe # medium-dark skin tone
1f6b4 1f3ff # dark skin tone
1f6b5 # MOUNTAIN BICYCLIST -> man mountain biking
1f6b5 1f3fb # light skin tone
1f6b5 1f3fc # medium-light skin tone
1f6b5 1f3fd # medium skin tone
1f6b5 1f3fe # medium-dark skin tone
1f6b5 1f3ff # dark skin tone
1f6b6 # PEDESTRIAN -> man walking
1f6b6 1f3fb # light skin tone
1f6b6 1f3fc # medium-light skin tone
1f6b6 1f3fd # medium skin tone
1f6b6 1f3fe # medium-dark skin tone
1f6b6 1f3ff # dark skin tone
1f926 # FACE PALM -> woman facepalming
1f926 1f3fb # light skin tone
1f926 1f3fc # medium-light skin tone
1f926 1f3fd # medium skin tone
1f926 1f3fe # medium-dark skin tone
1f926 1f3ff # dark skin tone
1f937 # SHRUG -> woman shrugging
1f937 1f3fb # light skin tone
1f937 1f3fc # medium-light skin tone
1f937 1f3fd # medium skin tone
1f937 1f3fe # medium-dark skin tone
1f937 1f3ff # dark skin tone
1f938 # PERSON DOING CARTWHEEL -> man cartwheeling
1f938 1f3fb # light skin tone
1f938 1f3fc # medium-light skin tone
1f938 1f3fd # medium skin tone
1f938 1f3fe # medium-dark skin tone
1f938 1f3ff # dark skin tone
1f939 # JUGGLING -> man juggling
1f939 1f3fb # light skin tone
1f939 1f3fc # medium-light skin tone
1f939 1f3fd # medium skin tone
1f939 1f3fe # medium-dark skin tone
1f939 1f3ff # dark skin tone
1f93c # WRESTLERS -> men wrestling
1f93d # WATER POLO -> man playing water polo
1f93d 1f3fb # light skin tone
1f93d 1f3fc # medium-light skin tone
1f93d 1f3fd # medium skin tone
1f93d 1f3fe # medium-dark skin tone
1f93d 1f3ff # dark skin tone
1f93e # HANDBALL -> man playing handball
1f93e 1f3fb # light skin tone
1f93e 1f3fc # medium-light skin tone
1f93e 1f3fd # medium skin tone
1f93e 1f3fe # medium-dark skin tone
1f93e 1f3ff # dark skin tone
26f9 # PERSON WITH BALL -> man bouncing ball
26f9 1f3fb # light skin tone
26f9 1f3fc # medium-light skin tone
26f9 1f3fd # medium skin tone
26f9 1f3fe # medium-dark skin tone
26f9 1f3ff # dark skin tone
fe82b # no name -> no name

# flag aliases
1f1e7 1f1fb # BV -> NO
1f1e8 1f1f5 # CP -> FR
1f1ed 1f1f2 # HM -> AU
1f1f8 1f1ef # SJ -> NO
1f1fa 1f1f2 # UM -> US

###
### unwanted flags
###
annotation: error
1f1e7 1f1f1
1f1e7 1f1f6
1f1e9 1f1ec
1f1ea 1f1e6
1f1ea 1f1ed
1f1eb 1f1f0
1f1ec 1f1eb
1f1ec 1f1f5
1f1ec 1f1f8
1f1f2 1f1eb
1f1f2 1f1f6
1f1f3 1f1e8
1f1f5 1f1f2
1f1f7 1f1ea
1f1f9 1f1eb
1f1fc 1f1eb
1f1fd 1f1f0
1f1fe 1f1f9

###
### new emoji
###
annotation: warning
1f6f7
1f6f8
1f91f
1f91f 1f3fb
1f91f 1f3fc
1f91f 1f3fd
1f91f 1f3fe
1f91f 1f3ff
1f928
1f929
1f92a
1f92b
1f92c
1f92d
1f92e
1f92f
1f931
1f931 1f3fb
1f931 1f3fc
1f931 1f3fd
1f931 1f3fe
1f931 1f3ff
1f932
1f932 1f3fb
1f932 1f3fc
1f932 1f3fd
1f932 1f3fe
1f932 1f3ff
1f94c
1f961
1f962
1f964
1f965
1f966
1f995
1f996
1f997
1f9d0
1f9d1
1f9d1 1f3fb
1f9d1 1f3fc
1f9d1 1f3fd
1f9d1 1f3fe
1f9d1 1f3ff
1f9d2
1f9d2 1f3fb
1f9d2 1f3fc
1f9d2 1f3fd
1f9d2 1f3fe
1f9d2 1f3ff
1f9d3
1f9d3 1f3fb
1f9d3 1f3fc
1f9d3 1f3fd
1f9d3 1f3fe
1f9d3 1f3ff
1f9d4
1f9d4 1f3fb
1f9d4 1f3fc
1f9d4 1f3fd
1f9d4 1f3fe
1f9d4 1f3ff
1f9d5
1f9d5 1f3fb
1f9d5 1f3fc
1f9d5 1f3fd
1f9d5 1f3fe
1f9d5 1f3ff
1f9d6
1f9d6 1f3fb
1f9d6 1f3fc
1f9d6 1f3fd
1f9d6 1f3fe
1f9d6 1f3ff
1f9d6 200d 2640
1f9d6 1f3fb 200d 2640
1f9d6 1f3fc 200d 2640
1f9d6 1f3fd 200d 2640
1f9d6 1f3fe 200d 2640
1f9d6 1f3ff 200d 2640
1f9d6 200d 2642
1f9d6 1f3fb 200d 2642
1f9d6 1f3fc 200d 2642
1f9d6 1f3fd 200d 2642
1f9d6 1f3fe 200d 2642
1f9d6 1f3ff 200d 2642
1f9d7
1f9d7 1f3fb
1f9d7 1f3fc
1f9d7 1f3fd
1f9d7 1f3fe
1f9d7 1f3ff
1f9d7 200d 2640
1f9d7 1f3fb 200d 2640
1f9d7 1f3fc 200d 2640
1f9d7 1f3fd 200d 2640
1f9d7 1f3fe 200d 2640
1f9d7 1f3ff 200d 2640
1f9d7 200d 2642
1f9d7 1f3fb 200d 2642
1f9d7 1f3fc 200d 2642
1f9d7 1f3fd 200d 2642
1f9d7 1f3fe 200d 2642
1f9d7 1f3ff 200d 2642
1f9d8
1f9d8 1f3fb
1f9d8 1f3fc
1f9d8 1f3fd
1f9d8 1f3fe
1f9d8 1f3ff
1f9d8 200d 2640
1f9d8 1f3fb 200d 2640
1f9d8 1f3fc 200d 2640
1f9d8 1f3fd 200d 2640
1f9d8 1f3fe 200d 2640
1f9d8 1f3ff 200d 2640
1f9d8 200d 2642
1f9d8 1f3fb 200d 2642
1f9d8 1f3fc 200d 2642
1f9d8 1f3fd 200d 2642
1f9d8 1f3fe 200d 2642
1f9d8 1f3ff 200d 2642
1f9d9
1f9d9 1f3fb
1f9d9 1f3fc
1f9d9 1f3fd
1f9d9 1f3fe
1f9d9 1f3ff
1f9d9 200d 2640
1f9d9 1f3fb 200d 2640
1f9d9 1f3fc 200d 2640
1f9d9 1f3fd 200d 2640
1f9d9 1f3fe 200d 2640
1f9d9 1f3ff 200d 2640
1f9d9 200d 2642
1f9d9 1f3fb 200d 2642
1f9d9 1f3fc 200d 2642
1f9d9 1f3fd 200d 2642
1f9d9 1f3fe 200d 2642
1f9d9 1f3ff 200d 2642
1f9da
1f9da 1f3fb
1f9da 1f3fc
1f9da 1f3fd
1f9da 1f3fe
1f9da 1f3ff
1f9da 200d 2640
1f9da 1f3fb 200d 2640
1f9da 1f3fc 200d 2640
1f9da 1f3fd 200d 2640
1f9da 1f3fe 200d 2640
1f9da 1f3ff 200d 2640
1f9da 200d 2642
1f9da 1f3fb 200d 2642
1f9da 1f3fc 200d 2642
1f9da 1f3fd 200d 2642
1f9da 1f3fe 200d 2642
1f9da 1f3ff 200d 2642
1f9db
1f9db 1f3fb
1f9db 1f3fc
1f9db 1f3fd
1f9db 1f3fe
1f9db 1f3ff
1f9db 200d 2640
1f9db 1f3fb 200d 2640
1f9db 1f3fc 200d 2640
1f9db 1f3fd 200d 2640
1f9db 1f3fe 200d 2640
1f9db 1f3ff 200d 2640
1f9db 200d 2642
1f9db 1f3fb 200d 2642
1f9db 1f3fc 200d 2642
1f9db 1f3fd 200d 2642
1f9db 1f3fe 200d 2642
1f9db 1f3ff 200d 2642
1f9dc
1f9dc 1f3fb
1f9dc 1f3fc
1f9dc 1f3fd
1f9dc 1f3fe
1f9dc 1f3ff
1f9dc 200d 2640
1f9dc 1f3fb 200d 2640
1f9dc 1f3fc 200d 2640
1f9dc 1f3fd 200d 2640
1f9dc 1f3fe 200d 2640
1f9dc 1f3ff 200d 2640
1f9dc 200d 2642
1f9dc 1f3fb 200d 2642
1f9dc 1f3fc 200d 2642
1f9dc 1f3fd 200d 2642
1f9dc 1f3fe 200d 2642
1f9dc 1f3ff 200d 2642
1f9dd
1f9dd 1f3fb
1f9dd 1f3fc
1f9dd 1f3fd
1f9dd 1f3fe
1f9dd 1f3ff
1f9dd 200d 2640
1f9dd 1f3fb 200d 2640
1f9dd 1f3fc 200d 2640
1f9dd 1f3fd 200d 2640
1f9dd 1f3fe 200d 2640
1f9dd 1f3ff 200d 2640
1f9dd 200d 2642
1f9dd 1f3fb 200d 2642
1f9dd 1f3fc 200d 2642
1f9dd 1f3fd 200d 2642
1f9dd 1f3fe 200d 2642
1f9dd 1f3ff 200d 2642
1f9de
1f9de 200d 2640
1f9de 200d 2642
1f9df
1f9df 200d 2640
1f9df 200d 2642
1f9e0
1f9e1
1f9e2
1f9e3
1f9e4
1f9e5
1f9e6
@ -1,56 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generate a glyph name for flag emojis."""
from __future__ import print_function

__author__ = 'roozbeh@google.com (Roozbeh Pournader)'

import re
import sys

import add_emoji_gsub


def two_letter_code_to_glyph_name(region_code):
  return 'u%04x_%04x' % (
      add_emoji_gsub.reg_indicator(region_code[0]),
      add_emoji_gsub.reg_indicator(region_code[1]))


subcode_re = re.compile(r'[0-9a-z]{2}-[0-9a-z]+$')
def hyphenated_code_to_glyph_name(sub_code):
  # Hyphenated codes use tag sequences, not regional indicator symbol pairs.
  sub_code = sub_code.lower()
  if not subcode_re.match(sub_code):
    raise Exception('%s is not a valid flag subcode' % sub_code)
  cps = ['u1f3f4']
  cps.extend('e00%02x' % ord(cp) for cp in sub_code if cp != '-')
  cps.append('e007f')
  return '_'.join(cps)
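
# Worked examples, not in the original file (assuming reg_indicator maps a
# letter to its regional indicator symbol codepoint):
#
#   two_letter_code_to_glyph_name('US')
#   # -> 'u1f1fa_1f1f8'
#   hyphenated_code_to_glyph_name('gb-wls')
#   # -> 'u1f3f4_e0067_e0062_e0077_e006c_e0073_e007f'
#
# The hyphenated form is BLACK FLAG, the tag characters for 'gbwls', then
# CANCEL TAG (e007f), i.e. the emoji tag sequence for the flag of Wales.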


def flag_code_to_glyph_name(flag_code):
  if '-' in flag_code:
    return hyphenated_code_to_glyph_name(flag_code)
  return two_letter_code_to_glyph_name(flag_code)


def main():
  print(' '.join([
      flag_code_to_glyph_name(flag_code) for flag_code in sys.argv[1:]]))


if __name__ == '__main__':
  main()
85
flag_info.py
@ -1,85 +0,0 @@
#!/usr/bin/python3
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quick tool to display count/ids of flag images in a directory named
either using ASCII upper case pairs or the emoji_u+codepoint_sequence
names."""
from __future__ import print_function

import argparse
import re
import glob
import os
from os import path


def _flag_names_from_emoji_file_names(src):
  def _flag_char(char_str):
    return chr(ord('A') + int(char_str, 16) - 0x1f1e6)
  flag_re = re.compile(r'emoji_u(1f1[0-9a-f]{2})_(1f1[0-9a-f]{2})\.png')
  flags = set()
  for f in glob.glob(path.join(src, 'emoji_u*.png')):
    m = flag_re.match(path.basename(f))
    if not m:
      continue
    flag_short_name = _flag_char(m.group(1)) + _flag_char(m.group(2))
    flags.add(flag_short_name)
  return flags
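
# Sketch, not in the original file: 'emoji_u1f1fa_1f1f8.png' matches flag_re,
# and _flag_char maps each regional indicator back to ASCII, e.g.
#
#   _flag_char('1f1fa')  # -> 'U'
#
# so the file contributes the short name 'US' to the returned set.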


def _flag_names_from_file_names(src):
  flag_re = re.compile(r'([A-Z]{2})\.png')
  flags = set()
  for f in glob.glob(path.join(src, '*.png')):
    m = flag_re.match(path.basename(f))
    if not m:
      print('no match')
      continue
    flags.add(m.group(1))
  return flags


def _dump_flag_info(names):
  prev = None
  print('%d flags' % len(names))
  for n in sorted(names):
    if n[0] != prev:
      if prev:
        print()
      prev = n[0]
    print(n, end=' ')
  print()


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--srcdir', help='location of files', metavar='dir',
      required=True)
  parser.add_argument(
      '-n', '--name_type', help='type of names', metavar='type',
      choices=['ascii', 'codepoint'], required=True)
  args = parser.parse_args()

  if args.name_type == 'ascii':
    names = _flag_names_from_file_names(args.srcdir)
  else:
    names = _flag_names_from_emoji_file_names(args.srcdir)
  print(args.srcdir)
  _dump_flag_info(names)


if __name__ == '__main__':
  main()
@ -1,405 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generate name data for emoji resources. Currently in json format."""
from __future__ import print_function

import argparse
import collections
import glob
import json
import os
from os import path
import re
import sys

import generate_emoji_html

from nototools import tool_utils
from nototools import unicode_data


def _create_custom_gendered_seq_names():
  """The names have detail that is adequately represented by the image."""

  BOY = 0x1f466
  GIRL = 0x1f467
  MAN = 0x1f468
  WOMAN = 0x1f469
  HEART = 0x2764  # Heavy Black Heart
  KISS_MARK = 0x1f48b
  return {
      (MAN, HEART, KISS_MARK, MAN): 'Kiss',
      (WOMAN, HEART, KISS_MARK, WOMAN): 'Kiss',
      (WOMAN, HEART, KISS_MARK, MAN): 'Kiss',
      (WOMAN, HEART, MAN): 'Couple with Heart',
      (MAN, HEART, MAN): 'Couple with Heart',
      (WOMAN, HEART, WOMAN): 'Couple with Heart',
      (MAN, GIRL): 'Family',
      (MAN, GIRL, GIRL): 'Family',
      (MAN, GIRL, BOY): 'Family',
      (MAN, BOY): 'Family',
      (MAN, BOY, BOY): 'Family',
      (MAN, WOMAN, GIRL): 'Family',
      (MAN, WOMAN, GIRL, GIRL): 'Family',
      (MAN, WOMAN, GIRL, BOY): 'Family',
      (MAN, WOMAN, BOY): 'Family',
      (MAN, WOMAN, BOY, BOY): 'Family',
      (MAN, MAN, GIRL): 'Family',
      (MAN, MAN, GIRL, GIRL): 'Family',
      (MAN, MAN, GIRL, BOY): 'Family',
      (MAN, MAN, BOY): 'Family',
      (MAN, MAN, BOY, BOY): 'Family',
      (WOMAN, GIRL): 'Family',
      (WOMAN, GIRL, GIRL): 'Family',
      (WOMAN, GIRL, BOY): 'Family',
      (WOMAN, BOY): 'Family',
      (WOMAN, BOY, BOY): 'Family',
      (WOMAN, WOMAN, GIRL): 'Family',
      (WOMAN, WOMAN, GIRL, GIRL): 'Family',
      (WOMAN, WOMAN, GIRL, BOY): 'Family',
      (WOMAN, WOMAN, BOY): 'Family',
      (WOMAN, WOMAN, BOY, BOY): 'Family'}


def _create_custom_seq_names():
  """These have names that often are of the form 'Person xyz-ing' or 'Man Xyz.'
  We opt to simplify the former to an activity name or action, and the latter to
  drop the gender. This also generally makes the names shorter."""

  EYE = 0x1f441
  SPEECH = 0x1f5e8
  WHITE_FLAG = 0x1f3f3
  RAINBOW = 0x1f308
  return {
      (EYE, SPEECH): 'I Witness',
      (WHITE_FLAG, RAINBOW): 'Rainbow Flag',
      (0x2695,): 'Health Worker',
      (0x2696,): 'Judge',
      (0x26f7,): 'Skiing',
      (0x26f9,): 'Bouncing a Ball',
      (0x2708,): 'Pilot',
      (0x1f33e,): 'Farmer',
      (0x1f373,): 'Cook',
      (0x1f393,): 'Student',
      (0x1f3a4,): 'Singer',
      (0x1f3a8,): 'Artist',
      (0x1f3c2,): 'Snowboarding',
      (0x1f3c3,): 'Running',
      (0x1f3c4,): 'Surfing',
      (0x1f3ca,): 'Swimming',
      (0x1f3cb,): 'Weight Lifting',
      (0x1f3cc,): 'Golfing',
      (0x1f3eb,): 'Teacher',
      (0x1f3ed,): 'Factory Worker',
      (0x1f46e,): 'Police Officer',
      (0x1f46f,): 'Partying',
      (0x1f471,): 'Person with Blond Hair',
      (0x1f473,): 'Person Wearing Turban',
      (0x1f477,): 'Construction Worker',
      (0x1f481,): 'Tipping Hand',
      (0x1f482,): 'Guard',
      (0x1f486,): 'Face Massage',
      (0x1f487,): 'Haircut',
      (0x1f4bb,): 'Technologist',
      (0x1f4bc,): 'Office Worker',
      (0x1f527,): 'Mechanic',
      (0x1f52c,): 'Scientist',
      (0x1f575,): 'Detective',
      (0x1f645,): 'No Good Gesture',
      (0x1f646,): 'OK Gesture',
      (0x1f647,): 'Bowing Deeply',
      (0x1f64b,): 'Raising Hand',
      (0x1f64d,): 'Frowning',
      (0x1f64e,): 'Pouting',
      (0x1f680,): 'Astronaut',
      (0x1f692,): 'Firefighter',
      (0x1f6a3,): 'Rowing',
      (0x1f6b4,): 'Bicycling',
      (0x1f6b5,): 'Mountain Biking',
      (0x1f6b6,): 'Walking',
      (0x1f926,): 'Face Palm',
      (0x1f937,): 'Shrug',
      (0x1f938,): 'Doing a Cartwheel',
      (0x1f939,): 'Juggling',
      (0x1f93c,): 'Wrestling',
      (0x1f93d,): 'Water Polo',
      (0x1f93e,): 'Playing Handball',
      (0x1f9d6,): 'Person in Steamy Room',
      (0x1f9d7,): 'Climbing',
      (0x1f9d8,): 'Person in Lotus Position',
      (0x1f9d9,): 'Mage',
      (0x1f9da,): 'Fairy',
      (0x1f9db,): 'Vampire',
      (0x1f9dd,): 'Elf',
      (0x1f9de,): 'Genie',
      (0x1f9df,): 'Zombie',
  }


_CUSTOM_GENDERED_SEQ_NAMES = _create_custom_gendered_seq_names()
_CUSTOM_SEQ_NAMES = _create_custom_seq_names()

# Fixes for unusual capitalization or cases we don't care to handle in code.
# Also prevents titlecasing 'S' after apostrophe in possessives. Note we _do_
# want titlecasing after apostrophe in some cases, e.g. O'Clock.
_CUSTOM_CAPS_NAMES = {
    (0x26d1,): 'Rescue Worker’s Helmet',
    (0x1f170,): 'A Button (blood type)',  # a Button (Blood Type)
    (0x1f171,): 'B Button (blood type)',  # B Button (Blood Type)
    (0x1f17e,): 'O Button (blood type)',  # O Button (Blood Type)
    (0x1f18e,): 'AB Button (blood type)',  # Ab Button (Blood Type)
    (0x1f191,): 'CL Button',  # Cl Button
    (0x1f192,): 'COOL Button',  # Cool Button
    (0x1f193,): 'FREE Button',  # Free Button
    (0x1f194,): 'ID Button',  # Id Button
    (0x1f195,): 'NEW Button',  # New Button
    (0x1f196,): 'NG Button',  # Ng Button
    (0x1f197,): 'OK Button',  # Ok Button
    (0x1f198,): 'SOS Button',  # Sos Button
    (0x1f199,): 'UP! Button',  # Up! Button
    (0x1f19a,): 'VS Button',  # Vs Button
    (0x1f3e7,): 'ATM Sign',  # Atm Sign
    (0x1f44c,): 'OK Hand',  # Ok Hand
    (0x1f452,): 'Woman’s Hat',
    (0x1f45a,): 'Woman’s Clothes',
    (0x1f45e,): 'Man’s Shoe',
    (0x1f461,): 'Woman’s Sandal',
    (0x1f462,): 'Woman’s Boot',
    (0x1f519,): 'BACK Arrow',  # Back Arrow
    (0x1f51a,): 'END Arrow',  # End Arrow
    (0x1f51b,): 'ON! Arrow',  # On! Arrow
    (0x1f51c,): 'SOON Arrow',  # Soon Arrow
    (0x1f51d,): 'TOP Arrow',  # Top Arrow
    (0x1f6b9,): 'Men’s Room',
    (0x1f6ba,): 'Women’s Room',
}

# For the custom sequences we ignore ZWJ, the emoji variation selector
# and skin tone modifiers. We can't always ignore gender because
# the gendered sequences match against them, but we ignore gender in other
# cases so we define a separate set of gendered emoji to remove.

_NON_GENDER_CPS_TO_STRIP = frozenset(
    [0xfe0f, 0x200d] +
    list(range(unicode_data._FITZ_START, unicode_data._FITZ_END + 1)))

_GENDER_CPS_TO_STRIP = frozenset([0x2640, 0x2642, 0x1f468, 0x1f469])


def _custom_name(seq):
  """Apply three kinds of custom names, based on the sequence."""

  seq = tuple([cp for cp in seq if cp not in _NON_GENDER_CPS_TO_STRIP])
  name = _CUSTOM_CAPS_NAMES.get(seq)
  if name:
    return name

  # Single characters that participate in sequences (e.g. fire truck in the
  # firefighter sequences) should not get converted. Single characters
  # are in the custom caps names set but not the other sets.
  if len(seq) == 1:
    return None

  name = _CUSTOM_GENDERED_SEQ_NAMES.get(seq)
  if name:
    return name

  seq = tuple([cp for cp in seq if cp not in _GENDER_CPS_TO_STRIP])
  name = _CUSTOM_SEQ_NAMES.get(seq)

  return name
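
# Illustrative walk-through, not in the original file: for WOMAN, ZWJ,
# HEAVY BLACK HEART, VS-16, ZWJ, MAN the strip step leaves
# (0x1f469, 0x2764, 0x1f468), which _CUSTOM_GENDERED_SEQ_NAMES maps to
# 'Couple with Heart'. A bare single character like (0x1f3c3,) is not in the
# caps table, so the len == 1 check returns None and _standard_name is used.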


def _standard_name(seq):
  """Use the standard emoji name, with some algorithmic modifications.

  We want to ignore skin-tone modifiers (but of course if the sequence _is_
  the skin-tone modifier itself we keep that). So we strip these so we can
  start with the generic name ignoring skin tone.

  Non-emoji that are turned into emoji using the emoji VS have '(emoji) '
  prepended to them, so strip that.

  Regional indicator symbol names are a bit long, so shorten them.

  Regional sequences are assumed to be ok as-is in terms of capitalization and
  punctuation, so no modifications are applied to them.

  After title-casing we make some English articles/prepositions lower-case
  again. We also replace '&' with 'and'; Unicode seems rather fond of
  ampersand."""

  if not unicode_data.is_skintone_modifier(seq[0]):
    seq = tuple([cp for cp in seq if not unicode_data.is_skintone_modifier(cp)])
  name = unicode_data.get_emoji_sequence_name(seq)

  if name.startswith('(emoji) '):
    name = name[8:]

  if len(seq) == 1 and unicode_data.is_regional_indicator(seq[0]):
    return 'Regional Symbol ' + unicode_data.regional_indicator_to_ascii(seq[0])

  if (unicode_data.is_regional_indicator_seq(seq) or
      unicode_data.is_regional_tag_seq(seq)):
    return name

  name = name.title()
  # Require space delimiting just in case...
  name = re.sub(r'\s&\s', ' and ', name)
  name = re.sub(
      # not \b at start because we retain capital at start of phrase
      r'(\s(?:A|And|From|In|Of|With|For))\b', lambda s: s.group(1).lower(),
      name)

  return name
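
# Example of the casing fix-up, not in the original file: a name like
# 'man in business suit levitating' title-cases to
# 'Man In Business Suit Levitating', and the substitution above lowers the
# preposition, giving 'Man in Business Suit Levitating'.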


def _name_data(seq, seq_file):
  name = _custom_name(seq) or _standard_name(seq)
  # we don't need canonical sequences
  sequence = ''.join('&#x%x;' % cp for cp in seq if cp != 0xfe0f)
  fname = path.basename(seq_file)
  return fname, sequence, name


def generate_names(
    src_dir, dst_dir, skip_limit=20, omit_groups=None, pretty_print=False,
    verbose=False):
  srcdir = tool_utils.resolve_path(src_dir)
  if not path.isdir(srcdir):
    print('%s is not a directory' % src_dir, file=sys.stderr)
    return

  if omit_groups:
    unknown_groups = set(omit_groups) - set(unicode_data.get_emoji_groups())
    if unknown_groups:
      print('did not recognize %d group%s: %s' % (
          len(unknown_groups), '' if len(unknown_groups) == 1 else 's',
          ', '.join('"%s"' % g for g in omit_groups if g in unknown_groups)),
          file=sys.stderr)
      print('valid groups are:\n %s' % (
          '\n '.join(g for g in unicode_data.get_emoji_groups())),
          file=sys.stderr)
      return
    print('omitting %d group%s: %s' % (
        len(omit_groups), '' if len(omit_groups) == 1 else 's',
        ', '.join('"%s"' % g for g in omit_groups)))
  else:
    # might be None
    print('keeping all groups')
    omit_groups = []

  # make sure the destination exists
  dstdir = tool_utils.ensure_dir_exists(
      tool_utils.resolve_path(dst_dir))

  # _get_image_data returns canonical cp sequences
  print('src dir:', srcdir)
  seq_to_file = generate_emoji_html._get_image_data(srcdir, 'png', 'emoji_u')
  print('seq to file has %d sequences' % len(seq_to_file))

  # Aliases add non-gendered versions using gendered images for the most part.
  # But when we display the images, we don't distinguish genders in the
  # naming, we rely on the images-- so these look redundant. So we
  # intentionally don't generate images for these.
  # However, the alias file also includes the flag aliases, which we do want,
  # and it also fails to exclude the unknown flag pua (since it doesn't
  # map to anything), so we need to adjust for this.
  canonical_aliases = generate_emoji_html._get_canonical_aliases()

  aliases = set([
      cps for cps in canonical_aliases.keys()
      if not unicode_data.is_regional_indicator_seq(cps)])
  aliases.add((0xfe82b,))  # unknown flag PUA
  excluded = aliases | generate_emoji_html._get_canonical_excluded()

  # The flag aliases have distinct names, so we _do_ want to show them
  # multiple times.
  to_add = {}
  for seq in canonical_aliases:
    if unicode_data.is_regional_indicator_seq(seq):
      replace_seq = canonical_aliases[seq]
      if seq in seq_to_file:
        print('warning, alias %s has file %s' % (
            unicode_data.regional_indicator_seq_to_string(seq),
            seq_to_file[seq]))
        continue
      replace_file = seq_to_file.get(replace_seq)
      if replace_file:
        to_add[seq] = replace_file
  seq_to_file.update(to_add)

  data = []
  last_skipped_group = None
  skipcount = 0
  for group in unicode_data.get_emoji_groups():
    if group in omit_groups:
      continue
    name_data = []
    for seq in unicode_data.get_emoji_in_group(group):
      if seq in excluded:
        continue
      seq_file = seq_to_file.get(seq, None)
      if seq_file is None:
        skipcount += 1
        if verbose:
          if group != last_skipped_group:
            print('group %s' % group)
            last_skipped_group = group
          print('  %s (%s)' % (
              unicode_data.seq_to_string(seq),
              ', '.join(unicode_data.name(cp, 'x') for cp in seq)))
        if skip_limit >= 0 and skipcount > skip_limit:
          raise Exception('skipped too many items')
      else:
        name_data.append(_name_data(seq, seq_file))
    data.append({'category': group, 'emojis': name_data})

  outfile = path.join(dstdir, 'data.json')
  with open(outfile, 'w') as f:
    indent = 2 if pretty_print else None
    separators = None if pretty_print else (',', ':')
    json.dump(data, f, indent=indent, separators=separators)
  print('wrote %s' % outfile)


def main():
  DEFAULT_DSTDIR = '[emoji]/emoji'
  DEFAULT_IMAGEDIR = '[emoji]/build/compressed_pngs'

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--srcdir', help='directory containing images (default %s)' %
      DEFAULT_IMAGEDIR, metavar='dir', default=DEFAULT_IMAGEDIR)
  parser.add_argument(
      '-d', '--dstdir', help='name of destination directory (default %s)' %
      DEFAULT_DSTDIR, metavar='fname', default=DEFAULT_DSTDIR)
  parser.add_argument(
      '-p', '--pretty_print', help='pretty-print json file',
      action='store_true')
  parser.add_argument(
      '-m', '--missing_limit', help='number of missing images before failure '
      '(default 20), use -1 for no limit', metavar='n', default=20, type=int)
  parser.add_argument(
      '--omit_groups', help='names of groups to omit (default "Misc, Flags")',
      metavar='name', default=['Misc', 'Flags'], nargs='*')
  parser.add_argument(
      '-v', '--verbose', help='print progress information to stdout',
      action='store_true')
  args = parser.parse_args()
  generate_names(
      args.srcdir, args.dstdir, args.missing_limit, args.omit_groups,
      pretty_print=args.pretty_print, verbose=args.verbose)


if __name__ == "__main__":
  main()
@ -1,159 +0,0 @@
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generate 72x72 thumbnails including aliases.

Takes a source directory of images named using our emoji filename
conventions and writes thumbnails of them into the destination
directory. If a file is a target of one or more aliases, creates
copies named for the aliases."""


import argparse
import collections
import logging
import os
from os import path
import shutil
import subprocess

import add_aliases

from nototools import tool_utils
from nototools import unicode_data

logger = logging.getLogger('emoji_thumbnails')


def create_thumbnail(src_path, dst_path, crop):
  # Uses ImageMagick.
  # We need images exactly 72x72 in size, with transparent background.
  # Remove 4-pixel LR margins from 136x128 source images if we crop.
  if crop:
    cmd = [
        'convert', src_path, '-crop', '128x128+4+0!', '-thumbnail', '72x72',
        'PNG32:' + dst_path]
  else:
    cmd = [
        'convert', '-thumbnail', '72x72', '-gravity', 'center', '-background',
        'none', '-extent', '72x72', src_path, 'PNG32:' + dst_path]
  subprocess.check_call(cmd)
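
# For reference (not in the original file; file names are illustrative), the
# crop branch is equivalent to running:
#
#   convert emoji_u1f600.png -crop '128x128+4+0!' -thumbnail 72x72 \
#       PNG32:android_1f600.png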


def get_inv_aliases():
  """Return a mapping from target to list of sources for all alias
  targets in either the default alias table or the unknown_flag alias
  table."""

  inv_aliases = collections.defaultdict(list)

  standard_aliases = add_aliases.read_default_emoji_aliases()
  for k, v in standard_aliases.items():
    inv_aliases[v].append(k)

  unknown_flag_aliases = add_aliases.read_emoji_aliases(
      'unknown_flag_aliases.txt')
  for k, v in unknown_flag_aliases.items():
    inv_aliases[v].append(k)

  return inv_aliases


def filename_to_sequence(filename, prefix, suffix):
  if not (filename.startswith(prefix) and filename.endswith(suffix)):
    raise ValueError('bad prefix or suffix: "%s"' % filename)
  seq_str = filename[len(prefix): -len(suffix)]
  seq = unicode_data.string_to_seq(seq_str)
  if not unicode_data.is_cp_seq(seq):
    raise ValueError('sequence includes non-codepoint: "%s"' % filename)
  return seq


def sequence_to_filename(seq, prefix, suffix):
  return ''.join((prefix, unicode_data.seq_to_string(seq), suffix))
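
# Round-trip sketch, not in the original file, assuming string_to_seq and
# seq_to_string invert each other on underscore-joined hex:
#
#   filename_to_sequence('emoji_u1f1fa_1f1f8.png', 'emoji_u', '.png')
#   # -> (0x1f1fa, 0x1f1f8)
#   sequence_to_filename((0x1f1fa, 0x1f1f8), 'android_', '.png')
#   # -> 'android_1f1fa_1f1f8.png'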


def create_thumbnails_and_aliases(src_dir, dst_dir, crop, dst_prefix):
  """Creates thumbnails in dst_dir based on sources in src_dir, using
  dst_prefix. Assumes the source prefix is 'emoji_u' and the common suffix
  is '.png'."""

  src_dir = tool_utils.resolve_path(src_dir)
  if not path.isdir(src_dir):
    raise ValueError('"%s" is not a directory' % src_dir)

  dst_dir = tool_utils.ensure_dir_exists(tool_utils.resolve_path(dst_dir))

  src_prefix = 'emoji_u'
  suffix = '.png'

  inv_aliases = get_inv_aliases()

  for src_file in os.listdir(src_dir):
    try:
      seq = unicode_data.strip_emoji_vs(
          filename_to_sequence(src_file, src_prefix, suffix))
    except ValueError as ve:
      logger.warning('Error (%s), skipping' % ve)
      continue

    src_path = path.join(src_dir, src_file)

    dst_file = sequence_to_filename(seq, dst_prefix, suffix)
    dst_path = path.join(dst_dir, dst_file)

    create_thumbnail(src_path, dst_path, crop)
    logger.info('wrote thumbnail%s: %s' % (
        ' with crop' if crop else '', dst_file))

    for alias_seq in inv_aliases.get(seq, ()):
      alias_file = sequence_to_filename(alias_seq, dst_prefix, suffix)
      alias_path = path.join(dst_dir, alias_file)
      shutil.copy2(dst_path, alias_path)
      logger.info('wrote alias: %s' % alias_file)


def main():
  SRC_DEFAULT = '[emoji]/build/compressed_pngs'
  PREFIX_DEFAULT = 'android_'

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--src_dir', help='source images (default \'%s\')' % SRC_DEFAULT,
      default=SRC_DEFAULT, metavar='dir')
  parser.add_argument(
      '-d', '--dst_dir', help='destination directory', metavar='dir',
      required=True)
  parser.add_argument(
      '-p', '--prefix', help='prefix for thumbnail (default \'%s\')' %
      PREFIX_DEFAULT, default=PREFIX_DEFAULT, metavar='str')
  parser.add_argument(
      '-c', '--crop', help='crop images (will automatically crop if '
      'src dir is the default)', action='store_true')
  parser.add_argument(
      '-v', '--verbose', help='write log output', metavar='level',
      choices='warning info debug'.split(), const='info',
      nargs='?')
  args = parser.parse_args()

  if args.verbose is not None:
    logging.basicConfig(level=getattr(logging, args.verbose.upper()))

  crop = args.crop or (args.src_dir == SRC_DEFAULT)
  create_thumbnails_and_aliases(
      args.src_dir, args.dst_dir, crop, args.prefix)


if __name__ == '__main__':
  main()
@ -1,74 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Modify an emoji font to map legacy PUA characters to standard ligatures."""

__author__ = 'roozbeh@google.com (Roozbeh Pournader)'

import sys
import itertools

from fontTools import ttLib

from nototools import font_data

import add_emoji_gsub


def get_glyph_name_from_gsub(char_seq, font):
  """Find the glyph name for the ligature of a given character sequence
  from GSUB."""
  cmap = font_data.get_cmap(font)
  # FIXME: So many assumptions are made here.
  try:
    first_glyph = cmap[char_seq[0]]
    rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
  except KeyError:
    return None

  for lookup in font['GSUB'].table.LookupList.Lookup:
    ligatures = lookup.SubTable[0].ligatures
    try:
      for ligature in ligatures[first_glyph]:
        if ligature.Component == rest_of_glyphs:
          return ligature.LigGlyph
    except KeyError:
      continue
  return None
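
# Usage sketch, not in the original file: looking up the ligature glyph for a
# flag pair; the glyph name returned (if any) is entirely font-specific:
#
#   glyph = get_glyph_name_from_gsub([0x1f1fa, 0x1f1f8], font)
#   # e.g. 'u1f1fa_1f1f8', or None if the font has no such ligature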


def add_pua_cmap(source_file, target_file):
  """Add PUA characters to the cmap of the first font and save as second."""
  font = ttLib.TTFont(source_file)
  cmap = font_data.get_cmap(font)
  for pua, (ch1, ch2) in itertools.chain(
      add_emoji_gsub.EMOJI_KEYCAPS.items(), add_emoji_gsub.EMOJI_FLAGS.items()):
    if pua not in cmap:
      glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
      if glyph_name is not None:
        cmap[pua] = glyph_name
  font.save(target_file)


def main(argv):
  """Save the first font given to the second font."""
  add_pua_cmap(argv[1], argv[2])


if __name__ == '__main__':
  main(sys.argv)
@ -1,126 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create a copy of the emoji images that instantiates aliases, etc. as
symlinks."""
from __future__ import print_function

import argparse
import glob
import os
from os import path
import re
import shutil
import sys

from nototools import tool_utils

# copied from third_party/color_emoji/add_glyphs.py

EXTRA_SEQUENCES = {
    'u1F46A': '1F468_200D_1F469_200D_1F466',  # MWB
    'u1F491': '1F469_200D_2764_FE0F_200D_1F468',  # WHM
    'u1F48F': '1F469_200D_2764_FE0F_200D_1F48B_200D_1F468',  # WHKM
}

# Flag aliases - from: to
FLAG_ALIASES = {
    'BV': 'NO',
    'CP': 'FR',
    'HM': 'AU',
    'SJ': 'NO',
    'UM': 'US',
}

OMITTED_FLAGS = set(
    'BL BQ DG EA EH FK GF GP GS MF MQ NC PM RE TF WF XK YT'.split())


def _flag_str(ris_pair):
  return '_'.join('%04x' % (ord(cp) - ord('A') + 0x1f1e6)
                  for cp in ris_pair)
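
# Example, not in the original file: _flag_str('BV') -> '1f1e7_1f1fb', the
# underscore-joined regional indicator pair used in the image file names.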
|
||||
|
||||
def _copy_files(src, dst):
|
||||
"""Copies files named 'emoji_u*.png' from dst to src, and return a set of
|
||||
the names with 'emoji_u' and the extension stripped."""
|
||||
code_strings = set()
|
||||
tool_utils.check_dir_exists(src)
|
||||
dst = tool_utils.ensure_dir_exists(dst, clean=True)
|
||||
for f in glob.glob(path.join(src, 'emoji_u*.png')):
|
||||
shutil.copy(f, dst)
|
||||
code_strings.add(path.splitext(path.basename(f))[0][7:])
|
||||
return code_strings
|
||||
|
||||
|
||||
def _alias_people(code_strings, dst):
|
||||
"""Create aliases for people in dst, based on code_strings."""
|
||||
for src, ali in sorted(EXTRA_SEQUENCES.items()):
|
||||
if src[1:].lower() in code_strings:
|
||||
src_name = 'emoji_%s.png' % src.lower()
|
||||
ali_name = 'emoji_u%s.png' % ali.lower()
|
||||
print('creating symlink %s -> %s' % (ali_name, src_name))
|
||||
os.symlink(path.join(dst, src_name), path.join(dst, ali_name))
|
||||
else:
|
||||
print('people image %s not found' % src, file=os.stderr)
|
||||
|
||||
|
||||
def _alias_flags(code_strings, dst):
|
||||
for ali, src in sorted(FLAG_ALIASES.items()):
|
||||
src_str = _flag_str(src)
|
||||
if src_str in code_strings:
|
||||
src_name = 'emoji_u%s.png' % src_str
|
||||
ali_name = 'emoji_u%s.png' % _flag_str(ali)
|
||||
print('creating symlink %s (%s) -> %s (%s)' % (ali_name, ali, src_name, src))
|
||||
os.symlink(path.join(dst, src_name), path.join(dst, ali_name))
|
||||
else:
|
||||
print('flag image %s (%s) not found' % (src_name, src), file=os.stderr)
|
||||
|
||||
|
||||
def _alias_omitted_flags(code_strings, dst):
|
||||
UNKNOWN_FLAG = 'fe82b'
|
||||
if UNKNOWN_FLAG not in code_strings:
|
||||
print('unknown flag missing', file=os.stderr)
|
||||
return
|
||||
dst_name = 'emoji_u%s.png' % UNKNOWN_FLAG
|
||||
dst_path = path.join(dst, dst_name)
|
||||
for ali in sorted(OMITTED_FLAGS):
|
||||
ali_str = _flag_str(ali)
|
||||
if ali_str in code_strings:
|
||||
print('omitted flag %s has image %s' % (ali, ali_str), file=os.stderr)
|
||||
continue
|
||||
ali_name = 'emoji_u%s.png' % ali_str
|
||||
print('creating symlink %s (%s) -> unknown_flag (%s)' % (
|
||||
ali_str, ali, dst_name))
|
||||
os.symlink(dst_path, path.join(dst, ali_name))
|
||||
|
||||
|
||||
def materialize_images(src, dst):
|
||||
code_strings = _copy_files(src, dst)
|
||||
_alias_people(code_strings, dst)
|
||||
_alias_flags(code_strings, dst)
|
||||
_alias_omitted_flags(code_strings, dst)
|
||||
|


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--srcdir', help='path to input sources', metavar='dir',
      default='build/compressed_pngs')
  parser.add_argument(
      '-d', '--dstdir', help='destination for output images', metavar='dir',
      required=True)  # no sensible default; ensure_dir_exists needs a path
  args = parser.parse_args()
  materialize_images(args.srcdir, args.dstdir)


if __name__ == '__main__':
  main()
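
# Example invocation (illustrative; the script file name here is assumed):
#   python materialize_emoji_images.py -s build/compressed_pngs -d build/images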
[Image-diff residue removed: 87 deleted emoji PNGs, ranging from 708 B to 11 KiB each.]