"""
A class representing a Type 1 font.

This version reads pfa and pfb files and splits them for embedding in
pdf files. It also supports SlantFont and ExtendFont transformations,
similarly to pdfTeX and friends. There is no support yet for subsetting.

Usage::

    >>> font = Type1Font(filename)
    >>> clear_part, encrypted_part, finale = font.parts
    >>> slanted_font = font.transform({'slant': 0.167})
    >>> extended_font = font.transform({'extend': 1.2})

Sources:

* Adobe Technical Note #5040, Supporting Downloadable PostScript
  Language Fonts.

* Adobe Type 1 Font Format, Adobe Systems Incorporated, third printing,
  v1.1, 1993. ISBN 0-201-57044-0.
"""
  17. import binascii
  18. import enum
  19. import itertools
  20. import re
  21. import struct
  22. import numpy as np
  23. # token types
  24. _TokenType = enum.Enum('_TokenType',
  25. 'whitespace name string delimiter number')
  26. class Type1Font:
  27. """
  28. A class representing a Type-1 font, for use by backends.
  29. Attributes
  30. ----------
  31. parts : tuple
  32. A 3-tuple of the cleartext part, the encrypted part, and the finale of
  33. zeros.
  34. prop : Dict[str, Any]
  35. A dictionary of font properties.
  36. """
  37. __slots__ = ('parts', 'prop')
  38. def __init__(self, input):
  39. """
  40. Initialize a Type-1 font.
  41. Parameters
  42. ----------
  43. input : str or 3-tuple
  44. Either a pfb file name, or a 3-tuple of already-decoded Type-1
  45. font `~.Type1Font.parts`.
  46. """
  47. if isinstance(input, tuple) and len(input) == 3:
  48. self.parts = input
  49. else:
  50. with open(input, 'rb') as file:
  51. data = self._read(file)
  52. self.parts = self._split(data)
  53. self._parse()
  54. def _read(self, file):
  55. """Read the font from a file, decoding into usable parts."""
  56. rawdata = file.read()
  57. if not rawdata.startswith(b'\x80'):
  58. return rawdata
  59. data = b''
  60. while rawdata:
  61. if not rawdata.startswith(b'\x80'):
  62. raise RuntimeError('Broken pfb file (expected byte 128, '
  63. 'got %d)' % rawdata[0])
  64. type = rawdata[1]
  65. if type in (1, 2):
  66. length, = struct.unpack('<i', rawdata[2:6])
  67. segment = rawdata[6:6 + length]
  68. rawdata = rawdata[6 + length:]
  69. if type == 1: # ASCII text: include verbatim
  70. data += segment
  71. elif type == 2: # binary data: encode in hexadecimal
  72. data += binascii.hexlify(segment)
  73. elif type == 3: # end of file
  74. break
  75. else:
  76. raise RuntimeError('Unknown segment type %d in pfb file' %
  77. type)
  78. return data
  79. def _split(self, data):
  80. """
  81. Split the Type 1 font into its three main parts.
  82. The three parts are: (1) the cleartext part, which ends in a
  83. eexec operator; (2) the encrypted part; (3) the fixed part,
  84. which contains 512 ASCII zeros possibly divided on various
  85. lines, a cleartomark operator, and possibly something else.
  86. """
  87. # Cleartext part: just find the eexec and skip whitespace
  88. idx = data.index(b'eexec')
  89. idx += len(b'eexec')
  90. while data[idx] in b' \t\r\n':
  91. idx += 1
  92. len1 = idx
  93. # Encrypted part: find the cleartomark operator and count
  94. # zeros backward
  95. idx = data.rindex(b'cleartomark') - 1
  96. zeros = 512
  97. while zeros and data[idx] in b'0' or data[idx] in b'\r\n':
  98. if data[idx] in b'0':
  99. zeros -= 1
  100. idx -= 1
  101. if zeros:
  102. raise RuntimeError('Insufficiently many zeros in Type 1 font')
  103. # Convert encrypted part to binary (if we read a pfb file, we may end
  104. # up converting binary to hexadecimal to binary again; but if we read
  105. # a pfa file, this part is already in hex, and I am not quite sure if
  106. # even the pfb format guarantees that it will be in binary).
  107. binary = binascii.unhexlify(data[len1:idx+1])
  108. return data[:len1], binary, data[idx+1:]
  109. _whitespace_re = re.compile(br'[\0\t\r\014\n ]+')
  110. _token_re = re.compile(br'/{0,2}[^]\0\t\r\v\n ()<>{}/%[]+')
  111. _comment_re = re.compile(br'%[^\r\n\v]*')
  112. _instring_re = re.compile(br'[()\\]')
  113. @classmethod
  114. def _tokens(cls, text):
  115. """
  116. A PostScript tokenizer. Yield (token, value) pairs such as
  117. (_TokenType.whitespace, ' ') or (_TokenType.name, '/Foobar').
  118. """
  119. pos = 0
  120. while pos < len(text):
  121. match = (cls._comment_re.match(text[pos:]) or
  122. cls._whitespace_re.match(text[pos:]))
  123. if match:
  124. yield (_TokenType.whitespace, match.group())
  125. pos += match.end()
  126. elif text[pos] == b'(':
  127. start = pos
  128. pos += 1
  129. depth = 1
  130. while depth:
  131. match = cls._instring_re.search(text[pos:])
  132. if match is None:
  133. return
  134. pos += match.end()
  135. if match.group() == b'(':
  136. depth += 1
  137. elif match.group() == b')':
  138. depth -= 1
  139. else: # a backslash - skip the next character
  140. pos += 1
  141. yield (_TokenType.string, text[start:pos])
  142. elif text[pos:pos + 2] in (b'<<', b'>>'):
  143. yield (_TokenType.delimiter, text[pos:pos + 2])
  144. pos += 2
  145. elif text[pos] == b'<':
  146. start = pos
  147. pos += text[pos:].index(b'>')
  148. yield (_TokenType.string, text[start:pos])
  149. else:
  150. match = cls._token_re.match(text[pos:])
  151. if match:
  152. try:
  153. float(match.group())
  154. yield (_TokenType.number, match.group())
  155. except ValueError:
  156. yield (_TokenType.name, match.group())
  157. pos += match.end()
  158. else:
  159. yield (_TokenType.delimiter, text[pos:pos + 1])
  160. pos += 1
  161. def _parse(self):
  162. """
  163. Find the values of various font properties. This limited kind
  164. of parsing is described in Chapter 10 "Adobe Type Manager
  165. Compatibility" of the Type-1 spec.
  166. """
  167. # Start with reasonable defaults
  168. prop = {'weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False,
  169. 'UnderlinePosition': -100, 'UnderlineThickness': 50}
  170. filtered = ((token, value)
  171. for token, value in self._tokens(self.parts[0])
  172. if token is not _TokenType.whitespace)
  173. # The spec calls this an ASCII format; in Python 2.x we could
  174. # just treat the strings and names as opaque bytes but let's
  175. # turn them into proper Unicode, and be lenient in case of high bytes.
  176. def convert(x): return x.decode('ascii', 'replace')
  177. for token, value in filtered:
  178. if token is _TokenType.name and value.startswith(b'/'):
  179. key = convert(value[1:])
  180. token, value = next(filtered)
  181. if token is _TokenType.name:
  182. if value in (b'true', b'false'):
  183. value = value == b'true'
  184. else:
  185. value = convert(value.lstrip(b'/'))
  186. elif token is _TokenType.string:
  187. value = convert(value.lstrip(b'(').rstrip(b')'))
  188. elif token is _TokenType.number:
  189. if b'.' in value:
  190. value = float(value)
  191. else:
  192. value = int(value)
  193. else: # more complicated value such as an array
  194. value = None
  195. if key != 'FontInfo' and value is not None:
  196. prop[key] = value
  197. # Fill in the various *Name properties
  198. if 'FontName' not in prop:
  199. prop['FontName'] = (prop.get('FullName') or
  200. prop.get('FamilyName') or
  201. 'Unknown')
  202. if 'FullName' not in prop:
  203. prop['FullName'] = prop['FontName']
  204. if 'FamilyName' not in prop:
  205. extras = ('(?i)([ -](regular|plain|italic|oblique|(semi)?bold|'
  206. '(ultra)?light|extra|condensed))+$')
  207. prop['FamilyName'] = re.sub(extras, '', prop['FullName'])
  208. self.prop = prop
  209. @classmethod
  210. def _transformer(cls, tokens, slant, extend):
  211. def fontname(name):
  212. result = name
  213. if slant:
  214. result += b'_Slant_%d' % int(1000 * slant)
  215. if extend != 1.0:
  216. result += b'_Extend_%d' % int(1000 * extend)
  217. return result
  218. def italicangle(angle):
  219. return b'%a' % (float(angle) - np.arctan(slant) / np.pi * 180)
  220. def fontmatrix(array):
  221. array = array.lstrip(b'[').rstrip(b']').split()
  222. array = [float(x) for x in array]
  223. oldmatrix = np.eye(3, 3)
  224. oldmatrix[0:3, 0] = array[::2]
  225. oldmatrix[0:3, 1] = array[1::2]
  226. modifier = np.array([[extend, 0, 0],
  227. [slant, 1, 0],
  228. [0, 0, 1]])
  229. newmatrix = np.dot(modifier, oldmatrix)
  230. array[::2] = newmatrix[0:3, 0]
  231. array[1::2] = newmatrix[0:3, 1]
  232. # Not directly using `b'%a' % x for x in array` for now as that
  233. # produces longer reprs on numpy<1.14, causing test failures.
  234. as_string = '[' + ' '.join(str(x) for x in array) + ']'
  235. return as_string.encode('latin-1')
  236. def replace(fun):
  237. def replacer(tokens):
  238. token, value = next(tokens) # name, e.g., /FontMatrix
  239. yield value
  240. token, value = next(tokens) # possible whitespace
  241. while token is _TokenType.whitespace:
  242. yield value
  243. token, value = next(tokens)
  244. if value != b'[': # name/number/etc.
  245. yield fun(value)
  246. else: # array, e.g., [1 2 3]
  247. result = b''
  248. while value != b']':
  249. result += value
  250. token, value = next(tokens)
  251. result += value
  252. yield fun(result)
  253. return replacer
  254. def suppress(tokens):
  255. for _ in itertools.takewhile(lambda x: x[1] != b'def', tokens):
  256. pass
  257. yield b''
  258. table = {b'/FontName': replace(fontname),
  259. b'/ItalicAngle': replace(italicangle),
  260. b'/FontMatrix': replace(fontmatrix),
  261. b'/UniqueID': suppress}
  262. for token, value in tokens:
  263. if token is _TokenType.name and value in table:
  264. yield from table[value](
  265. itertools.chain([(token, value)], tokens))
  266. else:
  267. yield value
  268. def transform(self, effects):
  269. """
  270. Return a new font that is slanted and/or extended.
  271. Parameters
  272. ----------
  273. effects : dict
  274. A dict with optional entries:
  275. - 'slant' : float, default: 0
  276. Tangent of the angle that the font is to be slanted to the
  277. right. Negative values slant to the left.
  278. - 'extend' : float, default: 1
  279. Scaling factor for the font width. Values less than 1 condense
  280. the glyphs.
  281. Returns
  282. -------
  283. `Type1Font`
  284. """
  285. tokenizer = self._tokens(self.parts[0])
  286. transformed = self._transformer(tokenizer,
  287. slant=effects.get('slant', 0.0),
  288. extend=effects.get('extend', 1.0))
  289. return Type1Font((b"".join(transformed), self.parts[1], self.parts[2]))