play around with Scintilla and Lexilla
This commit is contained in:
230
3rdparty/scintilla550/scintilla/scripts/CheckMentioned.py
vendored
Normal file
230
3rdparty/scintilla550/scintilla/scripts/CheckMentioned.py
vendored
Normal file
@ -0,0 +1,230 @@
|
||||
#!/usr/bin/env python3
|
||||
# CheckMentioned.py
|
||||
# Find all the symbols in scintilla/include/Scintilla.h and check if they
|
||||
# are mentioned in scintilla/doc/ScintillaDoc.html.
|
||||
# Requires Python 3.6 or later
|
||||
|
||||
import re, string, sys
|
||||
|
||||
# Root of the Scintilla source tree, relative to this script's directory.
srcRoot = "../.."

# Make the sibling helper scripts (Face.py) importable.
sys.path.append(srcRoot + "/scintilla/scripts")

import Face

# Symbols from Scintilla.h that need not be mentioned in the documentation.
uninteresting = {
    "SCINTILLA_H", "SCI_START", "SCI_LEXER_START", "SCI_OPTIONAL_START",
    # These archaic names are #defined to the Sci_ prefixed modern equivalents.
    # They are not documented so they are not used in new code.
    "CharacterRange", "TextRange", "TextToFind", "RangeToFormat", "NotifyHeader",
}

# Input files: the public header and the documentation that is checked against it.
incFileName = srcRoot + "/scintilla/include/Scintilla.h"
docFileName = srcRoot + "/scintilla/doc/ScintillaDoc.html"
# Characters that may appear in a C identifier.
identCharacters = "_" + string.ascii_letters + string.digits
|
||||
|
||||
# Convert all punctuation characters except '_' into spaces.
|
||||
# Convert all punctuation characters except '_' into spaces.
def depunctuate(s):
    """Return s with every character that cannot appear in a C identifier
    replaced by a space.

    Uses a single join instead of the original quadratic string
    concatenation loop; behaviour is identical.
    """
    # Same character set as the module-level identCharacters constant,
    # computed locally so the function is self-contained.
    keep = "_" + string.ascii_letters + string.digits
    return "".join(ch if ch in keep else " " for ch in s)
|
||||
|
||||
# Map each #defined symbol from Scintilla.h to a flag recording whether it
# has been seen in the documentation (0 = not yet mentioned).
symbols = {}
with open(incFileName, "rt") as incFile:
    for line in incFile.readlines():
        if line.startswith("#define"):
            identifier = line.split()[1]
            symbols[identifier] = 0

# Mark every symbol that is mentioned anywhere in the documentation.
with open(docFileName, "rt") as docFile:
    for line in docFile.readlines():
        for word in depunctuate(line).split():
            if word in symbols.keys():
                symbols[word] = 1
|
||||
|
||||
def convertIFaceTypeToC(t):
    """Return the C type text used in the documentation for iface type t."""
    cTypes = {
        "keymod": "int ",
        "string": "const char *",
        "stringresult": "char *",
        "cells": "cell *",
        "textrange": "Sci_TextRange *",
        "findtext": "Sci_TextToFind *",
        "formatrange": "Sci_RangeToFormat *",
        "textrangefull": "Sci_TextRangeFull *",
        "findtextfull": "Sci_TextToFindFull *",
        "formatrangefull": "Sci_RangeToFormatFull *",
    }
    if t in cTypes:
        return cTypes[t]
    if Face.IsEnumeration(t):
        # Enumerations are plain ints in the C API.
        return "int "
    # Any other type is spelled as-is, with a separating space.
    return t + " "
|
||||
|
||||
def makeParm(t, n, v):
    """Format one parameter as 'Ctype name'; v (the default value) is unused."""
    declaration = convertIFaceTypeToC(t) + n
    return declaration.rstrip()
|
||||
|
||||
def makeSig(params):
    """Build the '(param1, param2) → return' signature text for a feature."""
    first = makeParm(params["Param1Type"], params["Param1Name"], params["Param1Value"])
    second = makeParm(params["Param2Type"], params["Param2Name"], params["Param2Value"])

    returnText = params["ReturnType"]
    if returnText in ["void", "string", "stringresult"]:
        returnText = ""
    elif Face.IsEnumeration(returnText):
        returnText = "int"
    if returnText:
        returnText = " → " + returnText

    if not first and not second:
        # No parameters at all: just the (possibly empty) return annotation.
        return returnText

    if not first:
        # Second parameter alone: mark the first slot explicitly.
        first = "<unused>"
    separator = ", " if second else ""
    return "(" + first + separator + second + ")" + returnText
|
||||
|
||||
# Path to the interface definition used to cross-check documented signatures.
pathIface = srcRoot + "/scintilla/include/Scintilla.iface"
|
||||
|
||||
def retrieveFeatures():
    """Read Scintilla.iface and return (face, SCI_* name map, define value map)."""
    face = Face.Face()
    face.ReadFromFile(pathIface)
    messageToName = {}
    # Seed with values that appear in the documentation but not in the iface.
    defineToValue = { "true":"1", "false":"0", "EN_SETFOCUS":"256", "EN_KILLFOCUS":"512"}
    for featureName in face.order:
        attributes = face.features[featureName]
        kind = attributes["FeatureType"]
        if kind in ["fun", "get", "set"]:
            # Messages are documented under their SCI_ define name.
            messageToName["SCI_" + featureName.upper()] = featureName
        elif kind in ["val"]:
            defineToValue[featureName.upper()] = attributes["Value"]
        elif kind in ["evt"]:
            defineToValue["SCN_" + featureName.upper()] = attributes["Value"]
    return (face, messageToName, defineToValue)
|
||||
|
||||
def flattenSpaces(s):
    """Replace newlines with spaces and collapse short runs of spaces to one.

    Three halving passes reproduce the original's bounded collapse: runs of
    up to 8 spaces become one space; longer runs may leave more than one.
    """
    flat = s.replace("\n", " ")
    for _ in range(3):
        flat = flat.replace("  ", " ")
    return flat.strip()
|
||||
|
||||
def printCtag(ident, path):
    """Emit a ctags line so editors can jump to ident within path."""
    searchPattern = "/^" + ident + "$/"
    print("\t".join([ident.strip(), path, searchPattern]))
|
||||
|
||||
# When True, emit ctags lines for mismatched signatures so they are easy to visit.
showCTags = True
|
||||
|
||||
def checkDocumentation():
    """Cross-check ScintillaDoc.html against Scintilla.iface.

    Reports: header links whose signature text disagrees with the iface,
    definition anchors with wrong signatures, documented constants whose
    values differ from the iface, 'seealso' links whose text and target
    disagree, iface defines absent from the documentation, and header
    symbols never mentioned.  Also writes a copy of the documentation to
    a 'Dox' file (presumably for later processing — the copy is unmodified).
    """
    with open(docFileName, "rt") as docFile:
        docs = docFile.read()

    face, sciToFeature, sccToValue = retrieveFeatures()

    headers = {}
    definitions = {}

    # Examine header sections which point to definitions
    #<a class="message" href="#SCI_SETLAYOUTCACHE">SCI_SETLAYOUTCACHE(int cacheMode)</a><br />
    dirPattern = re.compile(r'<a class="message" href="#([A-Z0-9_]+)">([A-Z][A-Za-z0-9_() *&;,\n]+)</a>')
    for api, sig in re.findall(dirPattern, docs):
        sigApi = re.split(r'\W+', sig)[0]
        sigFlat = flattenSpaces(sig)
        sigFlat = sigFlat.replace('colouralpha ', 'xxxx ') # Temporary to avoid next line
        sigFlat = sigFlat.replace('alpha ', 'int ')
        sigFlat = sigFlat.replace('xxxx ', 'colouralpha ')

        # Documents spell document pointers as ints.
        sigFlat = sigFlat.replace("document *", "int ")
        sigFlat = sigFlat.rstrip()
        if '(' in sigFlat or api.startswith("SCI_"):
            name = sciToFeature[api]
            sigFromFace = api + makeSig(face.features[name])
            if sigFlat != sigFromFace:
                print(sigFlat, "|", sigFromFace)
                if showCTags:
                    printCtag(api, docFileName)
                    #~ printCtag(" " + name, pathIface)
        if api != sigApi:
            # Anchor target and link text disagree.
            print(sigApi, ";;", sig, ";;", api)
        headers[api] = 1
    # Warns for most keyboard commands so not enabled
    #~ for api in sorted(sciToFeature.keys()):
        #~ if api not in headers:
            #~ print("No header for ", api)

    # Examine definitions
    #<b id="SCI_SETLAYOUTCACHE">SCI_SETLAYOUTCACHE(int cacheMode)</b>
    defPattern = re.compile(r'<b id="([A-Z_0-9]+)">([A-Z][A-Za-z0-9_() *#\"=<>/&;,\n-]+?)</b>')
    for api, sig in re.findall(defPattern, docs):
        sigFlat = flattenSpaces(sig)
        if '<a' in sigFlat : # Remove anchors
            sigFlat = re.sub('<a.*>(.+)</a>', '\\1', sigFlat)
        sigFlat = sigFlat.replace('colouralpha ', 'xxxx ') # Temporary to avoid next line
        sigFlat = sigFlat.replace('alpha ', 'int ')
        sigFlat = sigFlat.replace('xxxx ', 'colouralpha ')
        sigFlat = sigFlat.replace("document *", "int ")

        sigFlat = sigFlat.replace(' NUL-terminated', '')
        sigFlat = sigFlat.rstrip()
        #~ sigFlat = sigFlat.replace(' NUL-terminated', '')
        sigApi = re.split(r'\W+', sigFlat)[0]
        #~ print(sigFlat, ";;", sig, ";;", api)
        if '(' in sigFlat or api.startswith("SCI_"):
            try:
                name = sciToFeature[api]
                sigFromFace = api + makeSig(face.features[name])
                if sigFlat != sigFromFace:
                    print(sigFlat, "|", sigFromFace)
                    if showCTags:
                        printCtag('="' + api, docFileName)
                        #~ printCtag(" " + name, pathIface)
            except KeyError:
                pass # Feature removed but still has documentation
        if api != sigApi:
            print(sigApi, ";;", sig, ";;", api)
        definitions[api] = 1
    # Warns for most keyboard commands so not enabled
    #~ for api in sorted(sciToFeature.keys()):
        #~ if api not in definitions:
            #~ print("No definition for ", api)

    # Write the (unmodified) documentation out under a Dox name.
    outName = docFileName.replace("Doc", "Dox")
    with open(outName, "wt") as docFile:
        docFile.write(docs)

    # Examine constant definitions
    #<code>SC_CARETSTICKY_WHITESPACE</code> (2)
    constPattern = re.compile(r'<code>(\w+)</code> *\((\w+)\)')
    for name, val in re.findall(constPattern, docs):
        try:
            valOfName = sccToValue[name]
            if val != valOfName:
                print(val, "<-", name, ";;", valOfName)
        except KeyError:
            # Constant documented but not present in the iface.
            print("***", val, "<-", name)

    # Examine 'seealso' definitions
    #<a class="seealso" href="#SCI_CREATEDOCUMENT">SCI_CREATEDOCUMENT</a>
    seealsoPattern = re.compile(r'"seealso" href="#(\w+)">(\w+)[<(]')
    for ref, text in re.findall(seealsoPattern, docs):
        if ref != text:
            print(f"seealso {text} -> {ref}")

    # Any define from the iface that never appears in the documentation.
    for name in sccToValue.keys():
        if name not in ["SCI_OPTIONAL_START", "SCI_LEXER_START"] and name not in docs:
            print(f"Unknown {name}")

    # Header symbols never mentioned in the documentation.
    for identifier in sorted(symbols.keys()):
        if not symbols[identifier] and identifier not in uninteresting:
            print(identifier)
|
||||
|
||||
# Entry point: run all documentation consistency checks.
checkDocumentation()
|
152
3rdparty/scintilla550/scintilla/scripts/Dependencies.py
vendored
Normal file
152
3rdparty/scintilla550/scintilla/scripts/Dependencies.py
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
#!/usr/bin/env python3
|
||||
# Dependencies.py - discover, read, and write dependencies file for make.
|
||||
# The format like the output from "g++ -MM" which produces a
|
||||
# list of header (.h) files used by source files (.cxx).
|
||||
# As a module, provides
|
||||
# FindPathToHeader(header, includePath) -> path
|
||||
# FindHeadersInFile(filePath) -> [headers]
|
||||
# FindHeadersInFileRecursive(filePath, includePath, renames) -> [paths]
|
||||
# FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames) -> [dependencies]
|
||||
# ExtractDependencies(input) -> [dependencies]
|
||||
# TextFromDependencies(dependencies)
|
||||
# WriteDependencies(output, dependencies)
|
||||
# UpdateDependencies(filepath, dependencies)
|
||||
# PathStem(p) -> stem
|
||||
# InsertSynonym(dependencies, current, additional) -> [dependencies]
|
||||
# If run as a script reads from stdin and writes to stdout.
|
||||
# Only tested with ASCII file names.
|
||||
# Copyright 2019 by Neil Hodgson <neilh@scintilla.org>
|
||||
# The License.txt file describes the conditions under which this software may be distributed.
|
||||
# Requires Python 2.7 or later
|
||||
|
||||
import codecs, glob, os, sys
|
||||
|
||||
# FileGenerator lives alongside this script: import it directly when run as a
# script, or relative to the package when imported as a module.
if __name__ == "__main__":
    import FileGenerator
else:
    from . import FileGenerator

# Marker appended to each dependency line that continues on the next line.
continuationLineEnd = " \\"
|
||||
|
||||
def FindPathToHeader(header, includePath):
    """Return the first path under the include directories where header
    exists, or "" when it is not found."""
    for directory in includePath:
        candidate = os.path.join(directory, header)
        if os.path.exists(candidate):
            return candidate
    return ""
|
||||
|
||||
fhifCache = {} # Remember the includes in each file. ~5x speed up.
def FindHeadersInFile(filePath):
    """Return the list of non-system headers #included by filePath (cached)."""
    cached = fhifCache.get(filePath)
    if cached is None:
        found = []
        with codecs.open(filePath, "r", "utf-8") as src:
            for line in src:
                if not line.strip().startswith("#include"):
                    continue
                tokens = line.split()
                if len(tokens) > 1:
                    header = tokens[1]
                    # Skip <...> system headers; keep only "..." includes.
                    if header[0] != '<':
                        found.append(header.strip('"'))
        fhifCache[filePath] = found
        cached = found
    return cached
|
||||
|
||||
def FindHeadersInFileRecursive(filePath, includePath, renames, _seen=None):
    """Return the transitive list of header paths included by filePath.

    filePath: source or header file to scan.
    includePath: directories searched for headers.
    renames: {header name: replacement name} applied before resolution.
    _seen: internal set of already-visited paths; callers omit it.

    The original recursed without tracking visited files, so mutually
    including headers (a.h <-> b.h) caused unbounded recursion.  The shared
    _seen set guards against that; for acyclic include graphs the returned
    first-encounter depth-first order is unchanged.
    """
    if _seen is None:
        _seen = {filePath}
    headerPaths = []
    for header in FindHeadersInFile(filePath):
        if header in renames:
            header = renames[header]
        relPath = FindPathToHeader(header, includePath)
        if relPath and relPath not in _seen:
            _seen.add(relPath)
            headerPaths.append(relPath)
            subHeaders = FindHeadersInFileRecursive(relPath, includePath, renames, _seen)
            headerPaths.extend(sh for sh in subHeaders if sh not in headerPaths)
    return headerPaths
|
||||
|
||||
def RemoveStart(relPath, start):
    """Strip the prefix start from relPath when present."""
    return relPath[len(start):] if relPath.startswith(start) else relPath
|
||||
|
||||
def ciKey(f):
    # Case-insensitive sort key for file names.
    return f.lower()
|
||||
|
||||
def FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames=None):
    """Return [[objName, [dependency paths]]] for every source matching sourceGlobs.

    sourceGlobs: glob patterns selecting the source files.
    includePath: directories searched for headers.
    objExt: extension appended to each source's stem to form the object name.
    startDirectory: prefix removed from each dependency path.
    renames: optional {header: replacement} map; defaults to no renames.
    (The original used a mutable default argument `renames={}`.)
    """
    if renames is None:
        renames = {}
    deps = []
    for sourceGlob in sourceGlobs:
        sourceFiles = glob.glob(sourceGlob)
        # Sorting the files minimizes deltas as order returned by OS may be arbitrary
        sourceFiles.sort(key=ciKey)
        for sourceName in sourceFiles:
            objName = os.path.splitext(os.path.basename(sourceName))[0]+objExt
            headerPaths = FindHeadersInFileRecursive(sourceName, includePath, renames)
            depsForSource = [sourceName] + headerPaths
            # Normalise to forward slashes and make paths relative.
            depsToAppend = [RemoveStart(fn.replace("\\", "/"), startDirectory) for
                fn in depsForSource]
            deps.append([objName, depsToAppend])
    return deps
|
||||
|
||||
def PathStem(p):
    """ Return the stem of a filename: "CallTip.o" -> "CallTip" """
    baseName = os.path.basename(p)
    stem, _extension = os.path.splitext(baseName)
    return stem
|
||||
|
||||
def InsertSynonym(dependencies, current, additional):
    """ Insert a copy of one object file with dependencies under a different name.
    Used when one source file is used to create two object files with different
    preprocessor definitions. """
    expanded = []
    for entry in dependencies:
        expanded.append(entry)
        if entry[0] == current:
            # Duplicate the dependency list under the synonym's object name.
            expanded.append([additional, entry[1]])
    return expanded
|
||||
|
||||
def ExtractDependencies(input):
    """ Create a list of dependencies from input list of lines
    Each element contains the name of the object and a list of
    files that it depends on.
    Dependencies that contain "/usr/" are removed as they are system headers. """

    deps = []
    for line in input:
        # Continuation lines (headers only) are indented.
        isContinuation = line.startswith(" ") or line.startswith("\t")
        text = line.strip().rstrip("\\ ")
        names = text.strip().split(" ")
        if not isContinuation:
            # A new object line: first token is "target:"; the rest are headers.
            target = names[0].rstrip(":")
            names = names[1:]
            deps.append([target, []])
        deps[-1][1].extend(name for name in names if "/usr/" not in name)
    return deps
|
||||
|
||||
def TextFromDependencies(dependencies):
    """ Convert a list of dependencies to text. """
    indentHeaders = "\t"
    joinHeaders = continuationLineEnd + os.linesep + indentHeaders
    pieces = []
    for objName, headers in dependencies:
        pieces.append(objName + ":")
        for header in headers:
            # Each header continues the previous line.
            pieces.append(joinHeaders)
            pieces.append(header)
        if headers:
            pieces.append(os.linesep)
    return "".join(pieces)
|
||||
|
||||
def UpdateDependencies(filepath, dependencies, comment=""):
    """ Write a dependencies file if different from dependencies. """
    # Prefix the generated text with the (stripped) comment header; the file is
    # only rewritten when its contents actually change.
    FileGenerator.UpdateFile(os.path.abspath(filepath), comment.rstrip() + os.linesep +
        TextFromDependencies(dependencies))
|
||||
|
||||
def WriteDependencies(output, dependencies):
    """ Write a list of dependencies out to a stream. """
    output.write(TextFromDependencies(dependencies))
|
||||
|
||||
# Script mode: read make-style dependencies from stdin, normalise, write stdout.
if __name__ == "__main__":
    """ Act as a filter that reformats input dependencies to one per line. """
    inputLines = sys.stdin.readlines()
    deps = ExtractDependencies(inputLines)
    WriteDependencies(sys.stdout, deps)
|
147
3rdparty/scintilla550/scintilla/scripts/Face.py
vendored
Normal file
147
3rdparty/scintilla550/scintilla/scripts/Face.py
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env python3
|
||||
# Face.py - module for reading and parsing Scintilla.iface file
|
||||
# Implemented 2000 by Neil Hodgson neilh@scintilla.org
|
||||
# Released to the public domain.
|
||||
# Requires Python 2.7 or later
|
||||
|
||||
def sanitiseLine(line):
    """Strip any '##' comment and surrounding whitespace (including the
    trailing newline) from an iface line."""
    return line.partition("##")[0].strip()
|
||||
|
||||
def decodeFunction(featureVal):
    """Decode 'retType Name=Value(param1,param2)' into its five parts.

    Raises ValueError on malformed input (callers rely on this).
    """
    retType, remainder = featureVal.split(" ", 1)
    nameIdent, paramText = remainder.split("(")
    name, value = nameIdent.split("=")
    inside, _after = paramText.split(")")
    param1, param2 = inside.split(",")
    return retType, name, value, param1, param2
|
||||
|
||||
def decodeEvent(featureVal):
    """Decode 'retType Name=Value(...)' for an event into (retType, name, value)."""
    retType, remainder = featureVal.split(" ", 1)
    nameIdent, _params = remainder.split("(")
    name, value = nameIdent.split("=")
    return retType, name, value
|
||||
|
||||
def decodeParam(p):
    """Decode a 'type name=value' parameter into (type, name, value).

    NOTE: a bare token without a space (no type) yields ("", "", "") —
    matching the original behaviour.
    """
    declaration = p.strip()
    paramType = ""
    paramName = ""
    paramValue = ""
    if " " in declaration:
        paramType, nameValue = declaration.split(" ")
        if "=" in nameValue:
            paramName, paramValue = nameValue.split("=")
        else:
            paramName = nameValue
    return paramType, paramName, paramValue
|
||||
|
||||
def IsEnumeration(t):
    """Return True when iface type t names an enumeration (leading capital)."""
    firstCharacter = t[:1]
    return firstCharacter.isupper()
|
||||
|
||||
def PascalCase(s):
    """Convert an underscore-separated name to PascalCase.

    Underscores are removed except when they separate two digits
    ("gap_4_2" -> "Gap4_2").  The empty string is returned unchanged
    (the original indexed capitalized[-1] and raised IndexError on "").
    """
    if not s:
        return ""
    capitalized = s.title()
    # Remove '_' except between digits
    pascalCase = ""
    characterPrevious = " "
    # Loop until penultimate character
    for i in range(len(capitalized)-1):
        character = capitalized[i]
        characterNext = capitalized[i+1]
        if character != "_" or (
            characterPrevious.isnumeric() and characterNext.isnumeric()):
            pascalCase += character
            # Only update the previous character when one was emitted.
            characterPrevious = character
    # Add last character - not between digits so no special treatment
    pascalCase += capitalized[-1]
    return pascalCase
|
||||
|
||||
class Face:
    """Parsed representation of a Scintilla.iface interface definition file."""

    def __init__(self):
        # Feature names in file order.
        self.order = []
        # name -> attribute dictionary (FeatureType, Value, params, ...).
        self.features = {}
        # Message values seen so far, used to detect duplicates.
        self.values = {}
        # Event values seen so far, used to detect duplicates.
        self.events = {}
        # Enumeration alias name -> value.
        self.aliases = {}

    def ReadFromFile(self, name):
        """Parse the iface file 'name', populating order/features/values/
        events/aliases.

        Raises on duplicate message or event values and on lines that fail
        to decode.  The file is now opened with a context manager so the
        handle is closed even when parsing raises (the original leaked it).
        """
        currentCategory = ""
        currentComment = []
        currentCommentFinished = 0
        with open(name) as file:
            for line in file.readlines():
                line = sanitiseLine(line)
                if line:
                    if line[0] == "#":
                        # '# ' starts or continues a documentation comment block.
                        if line[1] == " ":
                            if currentCommentFinished:
                                currentComment = []
                                currentCommentFinished = 0
                            currentComment.append(line[2:])
                    else:
                        currentCommentFinished = 1
                        featureType, featureVal = line.split(" ", 1)
                        if featureType in ["fun", "get", "set"]:
                            try:
                                retType, name, value, param1, param2 = decodeFunction(featureVal)
                            except ValueError:
                                print("Failed to decode %s" % line)
                                raise
                            p1 = decodeParam(param1)
                            p2 = decodeParam(param2)
                            self.features[name] = {
                                "FeatureType": featureType,
                                "ReturnType": retType,
                                "Value": value,
                                "Param1Type": p1[0], "Param1Name": p1[1], "Param1Value": p1[2],
                                "Param2Type": p2[0], "Param2Name": p2[1], "Param2Value": p2[2],
                                "Category": currentCategory, "Comment": currentComment
                            }
                            if value in self.values:
                                raise Exception("Duplicate value " + value + " " + name)
                            self.values[value] = 1
                            self.order.append(name)
                            currentComment = []
                        elif featureType == "evt":
                            retType, name, value = decodeEvent(featureVal)
                            self.features[name] = {
                                "FeatureType": featureType,
                                "ReturnType": retType,
                                "Value": value,
                                "Category": currentCategory, "Comment": currentComment
                            }
                            if value in self.events:
                                raise Exception("Duplicate event " + value + " " + name)
                            self.events[value] = 1
                            self.order.append(name)
                        elif featureType == "cat":
                            currentCategory = featureVal
                        elif featureType == "val":
                            try:
                                name, value = featureVal.split("=", 1)
                            except ValueError:
                                print("Failure %s" % featureVal)
                                raise Exception()
                            self.features[name] = {
                                "FeatureType": featureType,
                                "Category": currentCategory,
                                "Value": value }
                            self.order.append(name)
                        elif featureType == "enu" or featureType == "lex":
                            name, value = featureVal.split("=", 1)
                            self.features[name] = {
                                "FeatureType": featureType,
                                "Category": currentCategory,
                                "Value": value,
                                "Comment": currentComment }
                            self.order.append(name)
                            currentComment = []
                        elif featureType == "ali":
                            # Enumeration alias
                            name, value = featureVal.split("=", 1)
                            self.aliases[name] = value
                            currentComment = []
|
185
3rdparty/scintilla550/scintilla/scripts/FileGenerator.py
vendored
Normal file
185
3rdparty/scintilla550/scintilla/scripts/FileGenerator.py
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
#!/usr/bin/env python3
|
||||
# FileGenerator.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
|
||||
# Released to the public domain.
|
||||
|
||||
# Generate or regenerate source files based on comments in those files.
|
||||
# May be modified in-place or a template may be generated into a complete file.
|
||||
# Requires Python 2.7 or later
|
||||
# The files are copied to a string apart from sections between a
|
||||
# ++Autogenerated comment and a --Autogenerated comment which is
|
||||
# generated by the CopyWithInsertion function. After the whole string is
|
||||
# instantiated, it is compared with the target file and if different the file
|
||||
# is rewritten.
|
||||
|
||||
from __future__ import with_statement
|
||||
|
||||
import codecs, os, re, string, sys
|
||||
|
||||
# Native line ending used for generated files on this platform.
lineEnd = "\r\n" if sys.platform == "win32" else "\n"
|
||||
|
||||
def UpdateFile(filename, updated):
    """ If the file contents are different to updated then copy updated into the
    file else leave alone so Mercurial and make don't treat it as modified. """
    newOrChanged = "Changed"
    try:
        with codecs.open(filename, "r", "utf-8") as infile:
            original = infile.read()
        if updated == original:
            # Same as before so don't write
            return
        # Remove before rewriting so the write below creates a fresh file.
        os.unlink(filename)
    except IOError: # File is not there yet
        newOrChanged = "New"
    with codecs.open(filename, "w", "utf-8") as outfile:
        outfile.write(updated)
    # Compiler-style message so editors can jump to the changed file.
    print("%s:0: %s" % (filename, newOrChanged))
|
||||
|
||||
# Automatically generated sections contain start and end comments,
|
||||
# a definition line and the results.
|
||||
# The results are replaced by regenerating based on the definition line.
|
||||
# The definition line is a comment prefix followed by "**".
|
||||
# If there is a digit after the ** then this indicates which list to use
|
||||
# and the digit and next character are not part of the definition
|
||||
# Backslash is used as an escape within the definition line.
|
||||
# The part between \( and \) is repeated for each item in the list.
|
||||
# \* is replaced by each list item. \t, and \n are tab and newline.
|
||||
# If there is no definition line than the first list is copied verbatim.
|
||||
# If retainDefs then the comments controlling generation are copied.
|
||||
def CopyWithInsertion(input, commentPrefix, retainDefs, lists):
    """Copy the text of input, regenerating each ++Autogenerated section.

    input: whole file contents as one string.
    commentPrefix: comment lead-in for the file's language (e.g. "//", "<!--").
    retainDefs: when True the Autogenerated markers and definition lines are
        kept in the output (used when regenerating a file in place).
    lists: sequence of string lists; a digit after '**' selects which list a
        definition expands, otherwise list 0 is used.

    Returns the new file contents joined with the platform lineEnd.
    """
    copying = 1
    generated = False
    listid = 0
    output = []
    for line in input.splitlines(0):
        isStartGenerated = line.lstrip().startswith(commentPrefix + "++Autogenerated")
        if copying and not isStartGenerated:
            output.append(line)
        if isStartGenerated:
            if retainDefs:
                output.append(line)
            copying = 0
            generated = False
        elif not copying and not generated:
            # Generating
            if line.startswith(commentPrefix + "**"):
                # Pattern to transform input data
                if retainDefs:
                    output.append(line)
                definition = line[len(commentPrefix + "**"):]
                # HTML comments close with " -->"; drop that from the definition.
                if (commentPrefix == "<!--") and (" -->" in definition):
                    definition = definition.replace(" -->", "")
                listid = 0
                if definition[0] in string.digits:
                    # Leading digit selects the list; skip digit and separator.
                    listid = int(definition[:1])
                    definition = definition[2:]
                # Hide double slashes as a control character
                definition = definition.replace("\\\\", "\001")
                # Do some normal C style transforms
                definition = definition.replace("\\n", "\n")
                definition = definition.replace("\\t", "\t")
                # Get the doubled backslashes back as single backslashes
                definition = definition.replace("\001", "\\")
                startRepeat = definition.find("\\(")
                endRepeat = definition.find("\\)")
                intro = definition[:startRepeat]
                out = ""
                # pos tracks the current output column for line wrapping.
                if intro.endswith("\n"):
                    pos = 0
                else:
                    pos = len(intro)
                out += intro
                middle = definition[startRepeat+2:endRepeat]
                for i in lists[listid]:
                    item = middle.replace("\\*", i)
                    # Wrap with a continuation backslash near 80 columns.
                    if pos and (pos + len(item) >= 80):
                        out += "\\\n"
                        pos = 0
                    out += item
                    pos += len(item)
                    if item.endswith("\n"):
                        pos = 0
                outro = definition[endRepeat+2:]
                out += outro
                out = out.replace("\n", lineEnd) # correct EOLs in generated content
                output.append(out)
            else:
                # Simple form with no rule to transform input
                output.extend(lists[0])
            generated = True
        if line.lstrip().startswith(commentPrefix + "--Autogenerated") or \
            line.lstrip().startswith(commentPrefix + "~~Autogenerated"):
            copying = 1
            if retainDefs:
                output.append(line)
    output = [line.rstrip(" \t") for line in output] # trim trailing whitespace
    return lineEnd.join(output) + lineEnd
|
||||
|
||||
def GenerateFile(inpath, outpath, commentPrefix, retainDefs, *lists):
    """Generate 'outpath' from 'inpath'.

    Reads inpath, regenerates its Autogenerated sections from lists, and
    writes outpath only when the result differs.  A missing or unreadable
    input is reported rather than raised.
    """
    try:
        with codecs.open(inpath, "r", "UTF-8") as infile:
            original = infile.read()
        updated = CopyWithInsertion(original, commentPrefix,
            retainDefs, lists)
        UpdateFile(outpath, updated)
    except IOError:
        print("Can not open %s" % inpath)
|
||||
|
||||
def Generate(inpath, outpath, commentPrefix, *lists):
    """Generate 'outpath' from 'inpath'.
    """
    # Retain the generation definitions only when regenerating in place.
    GenerateFile(inpath, outpath, commentPrefix, inpath == outpath, *lists)
|
||||
|
||||
def Regenerate(filename, commentPrefix, *lists):
    """Regenerate the given file.
    """
    # In-place generation: source and destination are the same file.
    Generate(filename, filename, commentPrefix, *lists)
|
||||
|
||||
def UpdateLineInPlistFile(path, key, value):
    """Replace a single string value preceded by 'key' in an XML plist file.
    """
    lines = []
    # Most recently seen <key> name; the next <string> belongs to it.
    keyCurrent = ""
    with codecs.open(path, "rb", "utf-8") as f:
        for line in f.readlines():
            ls = line.strip()
            if ls.startswith("<key>"):
                keyCurrent = ls.replace("<key>", "").replace("</key>", "")
            elif ls.startswith("<string>"):
                if keyCurrent == key:
                    # Replace only the text between the string tags, keeping
                    # the original indentation and line ending.
                    start, tag, rest = line.partition("<string>")
                    _val, etag, end = rest.partition("</string>")
                    line = start + tag + value + etag + end
            lines.append(line)
    contents = "".join(lines)
    UpdateFile(path, contents)
|
||||
|
||||
def UpdateLineInFile(path, linePrefix, lineReplace):
    """Replace the first line starting with linePrefix by lineReplace,
    rewriting the file only when the contents change."""
    lines = []
    updated = False
    with codecs.open(path, "r", "utf-8") as f:
        for line in f.readlines():
            line = line.rstrip()
            if not updated and line.startswith(linePrefix):
                lines.append(lineReplace)
                updated = True
            else:
                lines.append(line)
    if not updated:
        print(f"{path}:0: Can't find '{linePrefix}'")
    contents = lineEnd.join(lines) + lineEnd
    UpdateFile(path, contents)
|
||||
|
||||
def UpdateFileFromLines(path, lines, lineEndToUse):
    """Join the lines with the lineEndToUse then update file if the result is different.
    """
    contents = lineEndToUse.join(lines) + lineEndToUse
    UpdateFile(path, contents)
|
||||
|
||||
def ReplaceREInFile(path, match, replace, count=1):
    """Replace up to count occurrences of the regex match in the file at path.

    The file is rewritten (via UpdateFile) only when the contents change.
    """
    with codecs.open(path, "r", "utf-8") as f:
        contents = f.read()
    # Pass count by keyword: positional count to re.sub is deprecated (3.13+).
    contents = re.sub(match, replace, contents, count=count)
    UpdateFile(path, contents)
|
127
3rdparty/scintilla550/scintilla/scripts/GenerateCaseConvert.py
vendored
Normal file
127
3rdparty/scintilla550/scintilla/scripts/GenerateCaseConvert.py
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
|
||||
# Script to generate CaseConvert.cxx from Python's Unicode data
|
||||
# Should be run rarely when a Python with a new version of Unicode data is available.
|
||||
# Requires Python 3.3 or later
|
||||
# Should not be run with old versions of Python.
|
||||
|
||||
# Current best approach divides case conversions into two cases:
|
||||
# simple symmetric and complex.
|
||||
# Simple symmetric is where a lower and upper case pair convert to each
|
||||
# other and the folded form is the same as the lower case.
|
||||
# There are 1006 symmetric pairs.
|
||||
# These are further divided into ranges (stored as lower, upper, range length,
|
||||
# range pitch and singletons (stored as lower, upper).
|
||||
# Complex is for cases that don't fit the above: where there are multiple
|
||||
# characters in one of the forms or fold is different to lower or
|
||||
# lower(upper(x)) or upper(lower(x)) are not x. These are represented as UTF-8
|
||||
# strings with original, folded, upper, and lower separated by '|'.
|
||||
# There are 126 complex cases.
|
||||
|
||||
import itertools, string, sys
|
||||
|
||||
from FileGenerator import Regenerate
|
||||
|
||||
def contiguousRanges(ll, diff):
    """Partition the list-of-lists ll into runs where each entry's first
    element exceeds the previous entry's first element by exactly diff."""
    runs = [[ll[0]]]
    for entry in ll[1:]:
        previous = runs[-1][-1]
        if entry[0] != previous[0] + diff:
            # Break in the sequence: start a new run.
            runs.append([])
        runs[-1].append(entry)
    return runs
|
||||
|
||||
def flatten(listOfLists):
    "Flatten one level of nesting"
    # Unpacking into chain yields the same lazy iterator as chain.from_iterable
    # for the finite sequences used here.
    return itertools.chain(*listOfLists)
|
||||
|
||||
def conversionSets():
    # For all Unicode characters, see whether they have case conversions
    # Return 2 sets: one of simple symmetric conversion cases and another
    # with complex cases.
    complexes = []
    symmetrics = []
    for ch in range(sys.maxunicode + 1):
        # Skip the UTF-16 surrogate ranges: not real characters.
        if ch >= 0xd800 and ch <= 0xDBFF:
            continue
        if ch >= 0xdc00 and ch <= 0xDFFF:
            continue
        uch = chr(ch)

        fold = uch.casefold()
        upper = uch.upper()
        lower = uch.lower()
        symmetric = False
        # Lower-case character with a single-character upper form that maps
        # straight back: a simple symmetric pair.
        if uch != upper and len(upper) == 1 and uch == lower and uch == fold:
            lowerUpper = upper.lower()
            foldUpper = upper.casefold()
            if lowerUpper == foldUpper and lowerUpper == uch:
                symmetric = True
                symmetrics.append((ch, ord(upper), ch - ord(upper)))
        # Upper-case side of a symmetric pair: recognised but not stored,
        # since the pair was already recorded from the lower-case side.
        if uch != lower and len(lower) == 1 and uch == upper and lower == fold:
            upperLower = lower.upper()
            if upperLower == uch:
                symmetric = True

        # Blank out forms identical to the character itself so the test
        # below only sees genuine conversions.
        if fold == uch:
            fold = ""
        if upper == uch:
            upper = ""
        if lower == uch:
            lower = ""

        # Anything with a conversion that is not simple-symmetric is complex.
        if (fold or upper or lower) and not symmetric:
            complexes.append((uch, fold, upper, lower))

    return symmetrics, complexes
|
||||
|
||||
def groupRanges(symmetrics):
    # Group the symmetrics into groups where possible, returning a list
    # of ranges and a list of symmetrics that didn't fit into a range

    # Sort key / grouping key: the lower-upper distance of a pair.
    def distance(s):
        return s[2]

    groups = []
    uniquekeys = []
    for k, g in itertools.groupby(symmetrics, distance):
        groups.append(list(g)) # Store group iterator as a list
        uniquekeys.append(k)

    # Ranges whose lower-case code points are consecutive (pitch 1).
    contiguousGroups = flatten([contiguousRanges(g, 1) for g in groups])
    longGroups = [(x[0][0], x[0][1], len(x), 1) for x in contiguousGroups if len(x) > 4]

    # Pairs at distance 1 form alternating lower/upper runs (pitch 2).
    oneDiffs = [s for s in symmetrics if s[2] == 1]
    contiguousOnes = flatten([contiguousRanges(g, 2) for g in [oneDiffs]])
    longOneGroups = [(x[0][0], x[0][1], len(x), 2) for x in contiguousOnes if len(x) > 4]

    # Ranges stored as (lower, upper, length, pitch), sorted by lower.
    rangeGroups = sorted(longGroups+longOneGroups, key=lambda s: s[0])

    # Every code point already covered by a range.
    rangeCoverage = list(flatten([range(r[0], r[0]+r[2]*r[3], r[3]) for r in rangeGroups]))

    # Singletons: pairs not absorbed into any range.
    nonRanges = [(x, u) for x, u, _d in symmetrics if x not in rangeCoverage]

    return rangeGroups, nonRanges
|
||||
|
||||
def escape(s):
    """Encode *s* as UTF-8 and render each byte for a C string literal.

    ASCII letters are kept as themselves; every other byte becomes a
    lower-case hexadecimal escape such as ``\\x2c``.
    """
    pieces = []
    for byte in s.encode('utf-8'):
        ch = chr(byte)
        if ch in string.ascii_letters:
            pieces.append(ch)
        else:
            pieces.append("\\x%x" % byte)
    return "".join(pieces)
|
||||
|
||||
def updateCaseConvert():
    """Regenerate ../src/CaseConvert.cxx from the Unicode case data.

    Emits three generated sections: packed range groups, individual
    symmetric pairs, and pipe-separated complex conversion strings.
    """
    symmetrics, complexes = conversionSets()

    rangeGroups, nonRanges = groupRanges(symmetrics)

    print(len(rangeGroups), "ranges")
    # (first lower, first upper, length, stride) flattened to CSV text.
    rangeLines = ["%d,%d,%d,%d," % x for x in rangeGroups]

    print(len(nonRanges), "non ranges")
    # (lower, upper) pairs that did not fit any range.
    nonRangeLines = ["%d,%d," % x for x in nonRanges]

    print(len(symmetrics), "symmetric")

    # Each complex entry is character|fold|upper|lower| with empty fields
    # where the conversion equals the character itself.
    complexLines = ['"%s|%s|%s|%s|"' % tuple(escape(t) for t in x) for x in complexes]
    print(len(complexLines), "complex")

    Regenerate("../src/CaseConvert.cxx", "//", rangeLines, nonRangeLines, complexLines)
|
||||
|
||||
updateCaseConvert()
|
53
3rdparty/scintilla550/scintilla/scripts/GenerateCharacterCategory.py
vendored
Normal file
53
3rdparty/scintilla550/scintilla/scripts/GenerateCharacterCategory.py
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
#!/usr/bin/env python3
|
||||
# Script to generate scintilla/src/CharacterCategoryMap.cxx and lexilla/lexlib/CharacterCategory.cxx
|
||||
# from Python's Unicode data
|
||||
# Should be run rarely when a Python with a new version of Unicode data is available.
|
||||
# Requires Python 3.3 or later
|
||||
# Should not be run with old versions of Python.
|
||||
|
||||
import pathlib, platform, sys, unicodedata
|
||||
|
||||
from FileGenerator import Regenerate
|
||||
|
||||
def findCategories(filename):
    """Parse the ccXX category enumerators out of a category header.

    *filename* is a pathlib.Path to a header whose enumeration lines
    contain a tab followed by "cc" (e.g. "\tccLu, ccLl,").  Returns the
    category names with the "cc" prefix removed, e.g. ["Lu", "Ll", ...].
    """
    with filename.open(encoding="UTF-8") as infile:
        # Keep only the enumerator lines of the category enumeration.
        lines = [x.strip() for x in infile.readlines() if "\tcc" in x]
    values = "".join(lines).replace(" ", "").split(",")
    # Fixed typo in the progress message: "Categrories" -> "Categories".
    print("Categories:", values)
    return [v[2:] for v in values]
|
||||
|
||||
def updateCharacterCategory(filename):
    """Regenerate *filename* with a run-length table of Unicode categories.

    Each table entry packs a start code point and a category index as
    startRange * 32 + index, so category indices must stay below 32.
    Uses the module-level ``categories`` list from CharacterCategoryMap.h.
    """
    values = ["// Created with Python %s, Unicode %s" % (
        platform.python_version(), unicodedata.unidata_version)]

    startRange = 0
    category = unicodedata.category(chr(startRange))
    table = []
    # Walk the whole code-point space, emitting an entry whenever the
    # general category changes.
    for ch in range(sys.maxunicode):
        uch = chr(ch)
        current = unicodedata.category(uch)
        if current != category:
            value = startRange * 32 + categories.index(category)
            table.append(value)
            category = current
            startRange = ch
    # Close the final run.
    value = startRange * 32 + categories.index(category)
    table.append(value)

    # the sentinel value is used to simplify CharacterCategoryMap::Optimize()
    category = 'Cn'
    value = (sys.maxunicode + 1)*32 + categories.index(category)
    table.append(value)

    values.extend(["%d," % value for value in table])

    Regenerate(filename, "//", values)
|
||||
|
||||
|
||||
# Script entry: locate the scintilla tree relative to this file, read the
# category names from the header, then regenerate both the Scintilla and
# Lexilla category tables.
scintillaDirectory = pathlib.Path(__file__).resolve().parent.parent

categories = findCategories(scintillaDirectory / "src" / "CharacterCategoryMap.h")

updateCharacterCategory(scintillaDirectory / "src" / "CharacterCategoryMap.cxx")

# assumes a sibling "lexilla" checkout next to scintilla — TODO confirm layout
updateCharacterCategory(scintillaDirectory.parent / "lexilla" / "lexlib" / "CharacterCategory.cxx")
|
59
3rdparty/scintilla550/scintilla/scripts/HFacer.py
vendored
Normal file
59
3rdparty/scintilla550/scintilla/scripts/HFacer.py
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env python3
|
||||
# HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface
|
||||
# definition file.
|
||||
# Implemented 2000 by Neil Hodgson neilh@scintilla.org
|
||||
# Requires Python 3.6 or later
|
||||
|
||||
import pathlib
|
||||
import Face
|
||||
import FileGenerator
|
||||
|
||||
def printHFile(f):
    """Build the #define lines for Scintilla.h from the interface Face *f*.

    Messages (fun/get/set) get an SCI_ prefix, notifications (evt) an
    SCN_ prefix, and plain values keep their name.  Provisional features
    are preceded by a single #ifndef SCI_DISABLE_PROVISIONAL guard that
    is closed at the end; deprecated features are omitted.
    """
    lines = []
    lastCategory = ""
    sawProvisional = False
    for featureName in f.order:
        feature = f.features[featureName]
        category = feature["Category"]
        if category == "Deprecated":
            continue
        if category == "Provisional" and lastCategory != "Provisional":
            lines.append("#ifndef SCI_DISABLE_PROVISIONAL")
            sawProvisional = True
        lastCategory = category
        featureType = feature["FeatureType"]
        if featureType in ("fun", "get", "set"):
            lines.append("#define " + "SCI_" + featureName.upper() + " " + feature["Value"])
        elif featureType == "evt":
            lines.append("#define " + "SCN_" + featureName.upper() + " " + feature["Value"])
        elif featureType == "val":
            lines.append("#define " + featureName + " " + feature["Value"])
    if sawProvisional:
        lines.append("#endif")
    return lines
|
||||
|
||||
showUnused = False
|
||||
|
||||
def RegenerateAll(root, showMaxID):
    """Regenerate include/Scintilla.h from include/Scintilla.iface.

    When *showMaxID* is true, also print the highest interface value
    below 3000 and (when the module-level showUnused flag is set) list
    unallocated values in the 2001..maximum range with the name of the
    feature at the preceding value as a hint.
    """
    f = Face.Face()
    f.ReadFromFile(root / "include/Scintilla.iface")
    FileGenerator.Regenerate(root / "include/Scintilla.h", "/* ", printHFile(f))
    if showMaxID:
        # Values >= 3000 are ignored here; presumably a separate
        # allocation band — TODO confirm against Scintilla.iface.
        valueSet = set(int(x) for x in f.values if int(x) < 3000)
        maximumID = max(valueSet)
        print("Maximum ID is %d" % maximumID)
        if showUnused:
            valuesUnused = sorted(x for x in range(2001,maximumID) if x not in valueSet)
            print("\nUnused values")
            # Map each numeric value back to the feature bearing it.
            valueToName = {}
            for name, feature in f.features.items():
                try:
                    value = int(feature["Value"])
                    valueToName[value] = name
                except ValueError:
                    # Non-numeric values (e.g. prefixes) are skipped.
                    pass
            for v in valuesUnused:
                prev = valueToName.get(v-1, "")
                print(v, prev)
|
||||
|
||||
if __name__ == "__main__":
|
||||
RegenerateAll(pathlib.Path(__file__).resolve().parent.parent, True)
|
116
3rdparty/scintilla550/scintilla/scripts/HeaderCheck.py
vendored
Normal file
116
3rdparty/scintilla550/scintilla/scripts/HeaderCheck.py
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
#!/usr/bin/env python3
|
||||
# Script to check that headers are in a consistent order
|
||||
# Canonical header order is defined in a file, normally scripts/HeaderOrder.txt
|
||||
# Requires Python 3.6 or later
|
||||
|
||||
import pathlib, sys
|
||||
|
||||
def IsHeader(x):
    """Return True when line *x* is an #include/#import directive.

    Lines mentioning "dllimport" are rejected so MSVC declspec lines do
    not count as headers.
    """
    stripped = x.strip()
    if not stripped.startswith("#"):
        return False
    if "dllimport" in x:
        return False
    return "include" in x or "import" in x
|
||||
|
||||
def HeaderFromIncludeLine(s):
    """Extract the header token from an include line.

    Equivalent to matching \\s*#\\s*(include|import)\\s+(\\S+): drop the
    leading '#', skip the 7 characters of the keyword ("include" or
    "import "), and strip surrounding whitespace, leaving e.g.
    '<stdio.h>' or '"Platform.h"'.
    """
    afterHash = s.strip()[1:].strip()
    return afterHash[7:].strip()
|
||||
|
||||
def ExtractHeaders(file):
    """Return the header name from every include line of pathlib Path *file*.

    Reads as iso-8859-1 so arbitrary 8-bit bytes never raise decode errors.
    """
    with file.open(encoding="iso-8859-1") as infile:
        return [HeaderFromIncludeLine(h) for h in infile if IsHeader(h)]
|
||||
|
||||
def ExtractWithPrefix(file, prefix):
    """Return the remainder of every line of *file* that starts with *prefix*.

    Lines are stripped of surrounding whitespace before the prefix length
    is removed, so trailing newlines never appear in the result.
    """
    extracted = []
    with file.open(encoding="iso-8859-1") as infile:
        for line in infile:
            if line.startswith(prefix):
                extracted.append(line.strip()[len(prefix):])
    return extracted
|
||||
|
||||
def ExcludeName(name, excludes):
    """True when any substring in *excludes* occurs in *name*."""
    for exclude in excludes:
        if exclude in name:
            return True
    return False
|
||||
|
||||
def SortLike(incs, order):
    """Return *incs* sorted into the relative order given by *order*.

    Every element of *incs* must appear in *order* (list.index raises
    ValueError otherwise).
    """
    return sorted(incs, key=order.index)
|
||||
|
||||
basePrefix = "//base:"
|
||||
sourcePrefix = "//source:"
|
||||
excludePrefix = "//exclude:"
|
||||
|
||||
def CheckFiles(headerOrderTxt):
    """Check that every source file includes headers in the canonical order.

    *headerOrderTxt* names a file containing //base:, //source: and
    //exclude: directives plus the master #include list.  For each source
    file matched by the patterns: report headers missing from the master
    list, and write a <file>.ordered sibling when the file's includes are
    out of order.  If new headers were discovered, a NewOrder.txt with the
    extended master list is written next to the order file.  All results
    are reported via print; nothing is returned.
    """
    headerOrderFile = pathlib.Path(headerOrderTxt).resolve()
    bases = ExtractWithPrefix(headerOrderFile, basePrefix)
    base = bases[0] if len(bases) > 0 else ".."
    orderDirectory = headerOrderFile.parent
    root = (orderDirectory / base).resolve()

    # Find all the source code files
    patterns = ExtractWithPrefix(headerOrderFile, sourcePrefix)
    excludes = ExtractWithPrefix(headerOrderFile, excludePrefix)

    filePaths = []
    for p in patterns:
        filePaths += root.glob(p)
    headerOrder = ExtractHeaders(headerOrderFile)
    originalOrder = headerOrder[:]
    orderedPaths = [p for p in sorted(filePaths) if not ExcludeName(str(p), excludes)]
    allIncs = set()
    for f in orderedPaths:
        #~ print(" File ", f.relative_to(root))
        incs = ExtractHeaders(f)
        allIncs = allIncs.union(set(incs))

        # m indexes the master list, i the current file's includes.
        m = 0
        i = 0
        # Detect headers not in header order list and insert at OK position
        needs = []
        while i < len(incs):
            if m == len(headerOrder):
                # Master list exhausted: everything left is new.
                #~ print("**** extend", incs[i:])
                headerOrder.extend(incs[i:])
                needs.extend(incs[i:])
                break
            if headerOrder[m] == incs[i]:
                #~ print("equal", headerOrder[m])
                i += 1
                m += 1
            else:
                if headerOrder[m] not in incs:
                    # Master entry unused by this file: skip past it.
                    #~ print("skip", headerOrder[m])
                    m += 1
                elif incs[i] not in headerOrder:
                    # Unknown header: insert into the master list here.
                    #~ print(str(f) + ":1: Add master", incs[i])
                    headerOrder.insert(m, incs[i])
                    needs.append(incs[i])
                    i += 1
                    m += 1
                else:
                    # Known header appearing out of order in this file.
                    i += 1
        if needs:
            print(f"{f}:1: needs these headers:")
            for header in needs:
                print("#include " + header)

        # Detect out of order
        ordered = SortLike(incs, headerOrder)
        if incs != ordered:
            print(f"{f}:1: is out of order")
            # Write the corrected include list beside the offending file.
            fOrdered = pathlib.Path(str(f) + ".ordered")
            with fOrdered.open("w") as headerOut:
                for header in ordered:
                    headerOut.write("#include " + header + "\n")
            print(f"{fOrdered}:1: is ordered")

    if headerOrder != originalOrder:
        # Newly discovered headers were merged in: publish the new master.
        newIncludes = set(headerOrder) - set(originalOrder)
        headerOrderNew = orderDirectory / "NewOrder.txt"
        print(f"{headerOrderFile}:1: changed to {headerOrderNew}")
        print(f" Added {', '.join(newIncludes)}.")
        with headerOrderNew.open("w") as headerOut:
            for header in headerOrder:
                headerOut.write("#include " + header + "\n")

    # Report master-list entries no scanned file actually includes.
    unused = sorted(set(headerOrder) - allIncs)
    if unused:
        print("In HeaderOrder.txt but not used")
        print("\n".join(unused))
|
||||
|
||||
if len(sys.argv) > 1:
|
||||
CheckFiles(sys.argv[1])
|
||||
else:
|
||||
CheckFiles("HeaderOrder.txt")
|
180
3rdparty/scintilla550/scintilla/scripts/HeaderOrder.txt
vendored
Normal file
180
3rdparty/scintilla550/scintilla/scripts/HeaderOrder.txt
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
// Define the standard order in which to include header files
|
||||
// All platform headers should be included before Scintilla headers
|
||||
// and each of these groups are then divided into directory groups.
|
||||
|
||||
// Base of the repository relative to this file
|
||||
|
||||
//base:..
|
||||
|
||||
// File patterns to check:
|
||||
//source:include/*.h
|
||||
//source:src/*.cxx
|
||||
//source:lexlib/*.cxx
|
||||
//source:lexers/*.cxx
|
||||
//source:win32/*.cxx
|
||||
//source:gtk/*.cxx
|
||||
//source:cocoa/*.mm
|
||||
//source:cocoa/*.h
|
||||
//source:test/unit/*.cxx
|
||||
//source:lexilla/src/*.cxx
|
||||
//source:lexilla/test/*.cxx
|
||||
|
||||
// C standard library
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// C++ wrappers of C standard library
|
||||
#include <cstddef>
|
||||
#include <cstdlib>
|
||||
#include <cstdint>
|
||||
#include <cassert>
|
||||
#include <cstring>
|
||||
#include <cstdio>
|
||||
#include <cstdarg>
|
||||
#include <ctime>
|
||||
#include <cmath>
|
||||
#include <climits>
|
||||
|
||||
// C++ standard library
|
||||
#include <stdexcept>
|
||||
#include <new>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <vector>
|
||||
#include <array>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <forward_list>
|
||||
#include <optional>
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <chrono>
|
||||
#include <regex>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <future>
|
||||
|
||||
// GTK headers
|
||||
#include <glib.h>
|
||||
#include <gmodule.h>
|
||||
#include <gdk/gdk.h>
|
||||
#include <gtk/gtk.h>
|
||||
#include <gdk/gdkkeysyms.h>
|
||||
#include <gdk/gdkwayland.h>
|
||||
#include <gtk/gtk-a11y.h>
|
||||
|
||||
// Windows headers
|
||||
#include <windows.h>
|
||||
#include <commctrl.h>
|
||||
#include <richedit.h>
|
||||
#include <windowsx.h>
|
||||
#include <shellscalingapi.h>
|
||||
#include <zmouse.h>
|
||||
#include <ole2.h>
|
||||
#include <d2d1.h>
|
||||
#include <dwrite.h>
|
||||
|
||||
// Cocoa headers
|
||||
#include <Cocoa/Cocoa.h>
|
||||
#import <Foundation/NSGeometry.h>
|
||||
#import <QuartzCore/CAGradientLayer.h>
|
||||
#import <QuartzCore/CAAnimation.h>
|
||||
#import <QuartzCore/CATransaction.h>
|
||||
|
||||
// Scintilla headers
|
||||
|
||||
// Non-platform-specific headers
|
||||
|
||||
// Exported headers
|
||||
|
||||
#include "Sci_Position.h"
|
||||
#include "ScintillaTypes.h"
|
||||
#include "ScintillaMessages.h"
|
||||
#include "ScintillaStructures.h"
|
||||
#include "ILoader.h"
|
||||
#include "ILexer.h"
|
||||
|
||||
// src platform interface
|
||||
#include "Debugging.h"
|
||||
#include "Geometry.h"
|
||||
#include "Platform.h"
|
||||
|
||||
#include "Scintilla.h"
|
||||
#include "ScintillaWidget.h"
|
||||
|
||||
// src
|
||||
#include "CharacterType.h"
|
||||
#include "CharacterCategoryMap.h"
|
||||
#include "Position.h"
|
||||
#include "UniqueString.h"
|
||||
#include "SplitVector.h"
|
||||
#include "Partitioning.h"
|
||||
#include "RunStyles.h"
|
||||
#include "SparseVector.h"
|
||||
#include "ContractionState.h"
|
||||
#include "ChangeHistory.h"
|
||||
#include "CellBuffer.h"
|
||||
#include "UndoHistory.h"
|
||||
#include "PerLine.h"
|
||||
#include "CallTip.h"
|
||||
#include "KeyMap.h"
|
||||
#include "Indicator.h"
|
||||
#include "XPM.h"
|
||||
#include "LineMarker.h"
|
||||
#include "Style.h"
|
||||
#include "ViewStyle.h"
|
||||
#include "CharClassify.h"
|
||||
#include "Decoration.h"
|
||||
#include "CaseFolder.h"
|
||||
#include "Document.h"
|
||||
#include "RESearch.h"
|
||||
#include "CaseConvert.h"
|
||||
#include "UniConversion.h"
|
||||
#include "DBCS.h"
|
||||
#include "Selection.h"
|
||||
#include "PositionCache.h"
|
||||
#include "EditModel.h"
|
||||
#include "MarginView.h"
|
||||
#include "EditView.h"
|
||||
#include "Editor.h"
|
||||
#include "ElapsedPeriod.h"
|
||||
|
||||
#include "AutoComplete.h"
|
||||
#include "ScintillaBase.h"
|
||||
|
||||
// Platform-specific headers
|
||||
|
||||
// win32
|
||||
#include "WinTypes.h"
|
||||
#include "PlatWin.h"
|
||||
#include "HanjaDic.h"
|
||||
#include "ScintillaWin.h"
|
||||
|
||||
// gtk
|
||||
#include "Wrappers.h"
|
||||
#include "ScintillaGTK.h"
|
||||
#include "scintilla-marshal.h"
|
||||
#include "ScintillaGTKAccessible.h"
|
||||
#include "Converter.h"
|
||||
|
||||
// cocoa
|
||||
#include "QuartzTextStyle.h"
|
||||
#include "QuartzTextStyleAttribute.h"
|
||||
#include "QuartzTextLayout.h"
|
||||
#import "InfoBarCommunicator.h"
|
||||
#include "InfoBar.h"
|
||||
#import "ScintillaView.h"
|
||||
#import "ScintillaCocoa.h"
|
||||
#import "PlatCocoa.h"
|
||||
|
||||
// Catch testing framework
|
||||
#include "catch.hpp"
|
||||
|
80
3rdparty/scintilla550/scintilla/scripts/LexGen.py
vendored
Normal file
80
3rdparty/scintilla550/scintilla/scripts/LexGen.py
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env python3
|
||||
# LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
|
||||
# Released to the public domain.
|
||||
|
||||
# Update Scintilla files.
|
||||
# Update version numbers and modification dates in documentation and header files.
|
||||
# Update make dependencies.
|
||||
# Requires Python 3.6 or later
|
||||
|
||||
from FileGenerator import UpdateLineInFile, ReplaceREInFile, UpdateLineInPlistFile
|
||||
import ScintillaData
|
||||
import HFacer
|
||||
import os
|
||||
import pathlib
|
||||
import sys
|
||||
|
||||
baseDirectory = os.path.dirname(os.path.dirname(ScintillaData.__file__))
|
||||
sys.path.append(baseDirectory)
|
||||
|
||||
import win32.DepGen
|
||||
import gtk.DepGen
|
||||
|
||||
def UpdateVersionNumbers(sci, root):
    """Write the version and dates from *sci* (a ScintillaData) into the
    resource, Qt project, Cocoa plist/xcodeproj and documentation files
    under *root*.

    NOTE(review): the leading whitespace in several match-prefix strings
    below may have been lost in this copy of the file — verify each
    prefix against the target document before relying on it.
    """
    UpdateLineInFile(root / "win32/ScintRes.rc", "#define VERSION_SCINTILLA",
        "#define VERSION_SCINTILLA \"" + sci.versionDotted + "\"")
    UpdateLineInFile(root / "win32/ScintRes.rc", "#define VERSION_WORDS",
        "#define VERSION_WORDS " + sci.versionCommad)
    UpdateLineInFile(root / "qt/ScintillaEditBase/ScintillaEditBase.pro",
        "VERSION =",
        "VERSION = " + sci.versionDotted)
    UpdateLineInFile(root / "qt/ScintillaEdit/ScintillaEdit.pro",
        "VERSION =",
        "VERSION = " + sci.versionDotted)
    UpdateLineInFile(root / "doc/ScintillaDownload.html", " Release",
        " Release " + sci.versionDotted)
    # Rewrite download links such as /www.scintilla.org/scintilla550 to
    # carry the current version number.
    ReplaceREInFile(root / "doc/ScintillaDownload.html",
        r"/www.scintilla.org/([a-zA-Z]+)\d{3,5}",
        r"/www.scintilla.org/\g<1>" + sci.version,
        0)
    UpdateLineInFile(root / "doc/index.html",
        ' <font color="#FFCC99" size="3"> Release version',
        ' <font color="#FFCC99" size="3"> Release version ' +\
        sci.versionDotted + '<br />')
    UpdateLineInFile(root / "doc/index.html",
        ' Site last modified',
        ' Site last modified ' + sci.mdyModified + '</font>')
    UpdateLineInFile(root / "doc/ScintillaHistory.html",
        ' Released ',
        ' Released ' + sci.dmyModified + '.')

    cocoa = root / "cocoa"

    UpdateLineInPlistFile(cocoa / "Scintilla" / "Info.plist",
        "CFBundleShortVersionString", sci.versionDotted)
    ReplaceREInFile(cocoa / "Scintilla"/ "Scintilla.xcodeproj" / "project.pbxproj",
        "CURRENT_PROJECT_VERSION = [0-9.]+;",
        f'CURRENT_PROJECT_VERSION = {sci.versionDotted};',
        0)
|
||||
|
||||
def RegenerateAll(rootDirectory):
    """Update all generated Scintilla files under *rootDirectory*.

    Regenerates the win32 and gtk make dependencies (DepGen must run from
    its own directory, hence the chdir dance), refreshes version numbers
    and dates in the docs, then regenerates the headers via HFacer.
    """
    root = pathlib.Path(rootDirectory)

    scintillaBase = root.resolve()

    sci = ScintillaData.ScintillaData(scintillaBase)

    # DepGen scripts assume the current directory is their platform dir;
    # restore the caller's directory afterwards.
    startDir = os.getcwd()
    os.chdir(os.path.join(scintillaBase, "win32"))
    win32.DepGen.Generate()
    os.chdir(os.path.join(scintillaBase, "gtk"))
    gtk.DepGen.Generate()
    os.chdir(startDir)

    UpdateVersionNumbers(sci, root)

    # False: do not print the maximum interface ID here.
    HFacer.RegenerateAll(root, False)
|
||||
|
||||
if __name__=="__main__":
|
||||
RegenerateAll(pathlib.Path(__file__).resolve().parent.parent)
|
290
3rdparty/scintilla550/scintilla/scripts/ScintillaAPIFacer.py
vendored
Normal file
290
3rdparty/scintilla550/scintilla/scripts/ScintillaAPIFacer.py
vendored
Normal file
@ -0,0 +1,290 @@
|
||||
#!/usr/bin/env python3
|
||||
# ScintillaAPIFacer.py - regenerate the ScintillaTypes.h, and ScintillaMessages.h
|
||||
# from the Scintilla.iface interface definition file.
|
||||
# Implemented 2019 by Neil Hodgson neilh@scintilla.org
|
||||
# Requires Python 3.6 or later
|
||||
|
||||
import pathlib
|
||||
|
||||
import Face
|
||||
import FileGenerator
|
||||
import HFacer
|
||||
|
||||
namespace = "Scintilla::"
|
||||
|
||||
typeAliases = {
|
||||
# Convert iface types to C++ types
|
||||
# bool and void are OK as is
|
||||
"cells": "const char *",
|
||||
"colour": "Colour",
|
||||
"colouralpha": "ColourAlpha",
|
||||
"findtext": "void *",
|
||||
"findtextfull": "TextToFindFull *",
|
||||
"formatrange": "void *",
|
||||
"formatrangefull": "RangeToFormatFull *",
|
||||
"int": "int",
|
||||
"keymod": "int",
|
||||
"line": "Line",
|
||||
"pointer": "void *",
|
||||
"position": "Position",
|
||||
"string": "const char *",
|
||||
"stringresult": "char *",
|
||||
"textrange": "void *",
|
||||
"textrangefull": "TextRangeFull *",
|
||||
}
|
||||
|
||||
basicTypes = [
|
||||
"bool",
|
||||
"char *",
|
||||
"Colour",
|
||||
"ColourAlpha",
|
||||
"const char *",
|
||||
"int",
|
||||
"intptr_t",
|
||||
"Line",
|
||||
"Position",
|
||||
"void",
|
||||
"void *",
|
||||
]
|
||||
|
||||
deadValues = [
|
||||
"INDIC_CONTAINER",
|
||||
"INDIC_IME",
|
||||
"INDIC_IME_MAX",
|
||||
"INDIC_MAX",
|
||||
]
|
||||
|
||||
def ActualTypeName(type, identifier=None):
    """Map an iface type name to its C++ spelling.

    The generic "pointer" type becomes IDocumentEditable * for the
    document-handling features (doc / DocPointer / CreateDocument);
    other names are looked up in the module-level typeAliases table and
    unknown names pass through unchanged.
    """
    if type == "pointer" and identifier in ["doc", "DocPointer", "CreateDocument"]:
        return "IDocumentEditable *"
    elif type in typeAliases:
        return typeAliases[type]
    else:
        return type
|
||||
|
||||
def IsEnumeration(s):
    """True when type name *s* denotes a Scintilla enumeration class.

    Pointer types and the basic value aliases (Position, Line, Colour,
    ColourAlpha) are not enumerations; otherwise any name starting with
    an upper-case letter is taken to be one.
    """
    nonEnumerations = ("Position", "Line", "Colour", "ColourAlpha")
    if s in nonEnumerations or s.endswith("*"):
        return False
    return s[:1].isupper()
|
||||
|
||||
def JoinTypeAndIdentifier(type, identifier):
    """Concatenate a type and an identifier for a C++ declaration.

    A space separates them unless the type already ends with '*', in
    which case the pointer star binds directly to the identifier.
    """
    separator = "" if type.endswith("*") else " "
    return type + separator + identifier
|
||||
|
||||
def ParametersArgsCallname(v):
    """Build the pieces of a ScintillaCall wrapper for feature *v*.

    Returns (parameters, args, callName):
      parameters - C++ parameter list text for the wrapper signature,
      args       - argument text (with leading ", ") forwarding the
                   parameters to the Call helper, casts applied,
      callName   - "Call", "CallString" or "CallPointer" depending on
                   the second parameter's type.
    """
    parameters = ""
    args = ""
    callName = "Call"

    param1TypeBase = v["Param1Type"]
    param1Name = v["Param1Name"]
    param1Type = ActualTypeName(param1TypeBase, param1Name)
    param1Arg = ""
    if param1Type:
        castName = param1Name
        # First argument travels as uintptr_t: pointers reinterpret,
        # non-basic (enum) types static_cast, basic types pass as-is.
        if param1Type.endswith("*"):
            castName = "reinterpret_cast<uintptr_t>(" + param1Name + ")"
        elif param1Type not in basicTypes:
            castName = "static_cast<uintptr_t>(" + param1Name + ")"
        if IsEnumeration(param1TypeBase):
            # Qualify enumeration types with the Scintilla:: namespace.
            param1Type = namespace + param1Type
        param1Arg = JoinTypeAndIdentifier(param1Type, param1Name)
        parameters = param1Arg
        args = castName

    param2TypeBase = v["Param2Type"]
    param2Name = v["Param2Name"]
    param2Type = ActualTypeName(param2TypeBase, param2Name)
    param2Arg = ""
    if param2Type:
        castName = param2Name
        if param2Type.endswith("*"):
            # Pointer second arguments are handled by dedicated helpers
            # rather than a cast at the call site.
            if param2Type == "const char *":
                callName = "CallString"
            else:
                callName = "CallPointer"
        elif param2Type not in basicTypes:
            castName = "static_cast<intptr_t>(" + param2Name + ")"
        if IsEnumeration(param2TypeBase):
            param2Type = namespace + param2Type
        param2Arg = JoinTypeAndIdentifier(param2Type, param2Name)
        if param1Arg:
            parameters = parameters + ", "
        parameters = parameters + param2Arg
        if not args:
            # Second argument present without a first: pad with 0.
            args = args + "0"
        if args:
            args = args + ", "
        args = args + castName

    if args:
        # Leading separator so callers can append directly after the
        # message name.
        args = ", " + args
    return (parameters, args, callName)
|
||||
|
||||
def ParametersExceptLast(parameters):
    """Drop the final comma-separated parameter from *parameters*.

    Returns '' when there are fewer than two parameters (no comma).
    """
    before, comma, _last = parameters.rpartition(",")
    return before if comma else ""
|
||||
|
||||
def HMessages(f):
    """Produce the C++ 'enum class Message' lines for ScintillaMessages.h.

    Every non-deprecated fun/get/set feature of Face *f* becomes one
    enumerator named after the feature with its message number as value.
    """
    out = ["enum class Message {"]
    for featureName in f.order:
        feature = f.features[featureName]
        if feature["Category"] == "Deprecated":
            continue
        if feature["FeatureType"] not in ("fun", "get", "set"):
            continue
        out.append("\t" + featureName + " = " + feature["Value"] + ",")
    out.append("};")
    return out
|
||||
|
||||
def HEnumerations(f):
    """Produce the C++ 'enum class' definitions for ScintillaTypes.h.

    For each non-deprecated enumeration in Face *f* (except Lexer, which
    is not part of the Scintilla API), emits an enum class whose members
    are the values matching the enumeration's declared prefixes, renamed
    to PascalCase with the prefix removed.  A final Notification enum is
    built from the evt features.
    """
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            # Only want non-deprecated enumerations and lexers are not part of Scintilla API
            if v["FeatureType"] in ["enu"] and name != "Lexer":
                out.append("")
                # The enu Value field lists the constant-name prefixes
                # belonging to this enumeration.
                prefixes = v["Value"].split()
                #out.append("enum class " + name + " {" + " // " + ",".join(prefixes))
                out.append("enum class " + name + " {")
                for valueName in f.order:
                    prefixMatched = ""
                    for p in prefixes:
                        if valueName.startswith(p) and valueName not in deadValues:
                            prefixMatched = p
                    if prefixMatched:
                        vEnum = f.features[valueName]
                        valueNameNoPrefix = ""
                        if valueName in f.aliases:
                            # Explicit alias overrides prefix stripping.
                            valueNameNoPrefix = f.aliases[valueName]
                        else:
                            valueNameNoPrefix = valueName[len(prefixMatched):]
                        if not valueNameNoPrefix: # Removed whole name
                            valueNameNoPrefix = valueName
                        if valueNameNoPrefix.startswith("SC_"):
                            valueNameNoPrefix = valueNameNoPrefix[len("SC_"):]
                        pascalName = Face.PascalCase(valueNameNoPrefix)
                        out.append("\t" + pascalName + " = " + vEnum["Value"] + ",")
                out.append("};")

    # Notifications form their own enumeration from the evt features.
    out.append("")
    out.append("enum class Notification {")
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            if v["FeatureType"] in ["evt"]:
                out.append("\t" + name + " = " + v["Value"] + ",")
    out.append("};")

    return out
|
||||
|
||||
def HConstants(f):
    # Constants not in an enumeration
    """Produce constexpr definitions for values outside any enumeration.

    Collects every enumeration prefix (plus SCE_/SCI_/SCEN_, which are
    allocated elsewhere) and emits a 'constexpr int Name = value;' for
    each non-deprecated val feature whose name matches none of them.
    INVALID_POSITION is typed as Position; an SC_ prefix is stripped.
    """
    out = []
    allEnumPrefixes = [
        "SCE_", # Lexical styles
        "SCI_", # Message number allocation
        "SCEN_", # Notifications sent with WM_COMMAND
    ]
    for _n, v in f.features.items():
        if v["Category"] != "Deprecated":
            # Only want non-deprecated enumerations and lexers are not part of Scintilla API
            if v["FeatureType"] in ["enu"]:
                allEnumPrefixes.extend(v["Value"].split())
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            # Only want non-deprecated enumerations and lexers are not part of Scintilla API
            if v["FeatureType"] in ["val"]:
                hasPrefix = False
                for prefix in allEnumPrefixes:
                    if name.startswith(prefix):
                        hasPrefix = True
                if not hasPrefix:
                    if name.startswith("SC_"):
                        name = name[3:]
                    type = "int"
                    if name == "INVALID_POSITION":
                        type = "Position"
                    out.append("constexpr " + type + " " + Face.PascalCase(name) + " = " + v["Value"] + ";")
    return out
|
||||
|
||||
def HMethods(f):
    """Produce the method declarations for ScintillaCall.h.

    One declaration per non-deprecated fun/get/set feature; getters drop
    their Get prefix.  stringresult features gain a second overload that
    returns std::string and omits the output-buffer parameter.
    """
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            if v["FeatureType"] in ["fun", "get", "set"]:
                if v["FeatureType"] == "get" and name.startswith("Get"):
                    # Property getters are exposed without the Get prefix.
                    name = name[len("Get"):]
                retType = ActualTypeName(v["ReturnType"], name)
                if IsEnumeration(retType):
                    retType = namespace + retType
                parameters, args, callName = ParametersArgsCallname(v)

                out.append("\t" + JoinTypeAndIdentifier(retType, name) + "(" + parameters + ");")

                # Extra method for stringresult that returns std::string
                if v["Param2Type"] == "stringresult":
                    out.append("\t" + JoinTypeAndIdentifier("std::string", name) + \
                        "(" + ParametersExceptLast(parameters) + ");")
    return out
|
||||
|
||||
def CXXMethods(f):
    """Produce the method definitions for ScintillaCall.cxx.

    Mirrors HMethods: each non-deprecated fun/get/set feature becomes a
    ScintillaCall::Name body that forwards to Call/CallString/CallPointer
    with the matching Message enumerator, casting the return value back
    to its declared type where needed.  stringresult features gain a
    std::string overload built on CallReturnString.
    """
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            if v["FeatureType"] in ["fun", "get", "set"]:
                # Capture the message name before any Get-prefix trim.
                msgName = "Message::" + name
                if v["FeatureType"] == "get" and name.startswith("Get"):
                    name = name[len("Get"):]
                retType = ActualTypeName(v["ReturnType"], name)
                parameters, args, callName = ParametersArgsCallname(v)
                returnIfNeeded = "return " if retType != "void" else ""

                out.append(JoinTypeAndIdentifier(retType, "ScintillaCall::" + name) + "(" + parameters + ")" + " {")
                retCast = ""
                retCastEnd = ""
                if retType.endswith("*"):
                    # Call helpers return an integer; pointers need a
                    # reinterpret_cast back.
                    retCast = "reinterpret_cast<" + retType + ">("
                    retCastEnd = ")"
                elif retType not in basicTypes or retType in ["int", "Colour", "ColourAlpha"]:
                    if IsEnumeration(retType):
                        retType = namespace + retType
                    retCast = "static_cast<" + retType + ">("
                    retCastEnd = ")"
                out.append("\t" + returnIfNeeded + retCast + callName + "(" + msgName + args + ")" + retCastEnd + ";")
                out.append("}")
                out.append("")

                # Extra method for stringresult that returns std::string
                if v["Param2Type"] == "stringresult":
                    paramList = ParametersExceptLast(parameters)
                    argList = ParametersExceptLast(args)
                    out.append(JoinTypeAndIdentifier("std::string", "ScintillaCall::" + name) + \
                        "(" + paramList + ") {")
                    out.append("\treturn CallReturnString(" + msgName + argList + ");")
                    out.append("}")
                    out.append("")

    return out
|
||||
|
||||
def RegenerateAll(root):
    """Regenerate all Scintilla API facade files under *root*.

    First refreshes Scintilla.h via HFacer, then reads the interface
    definition once and regenerates ScintillaMessages.h,
    ScintillaTypes.h, ScintillaCall.h and call/ScintillaCall.cxx.
    """
    HFacer.RegenerateAll(root, False)
    f = Face.Face()
    include = root / "include"
    f.ReadFromFile(include / "Scintilla.iface")
    FileGenerator.Regenerate(include / "ScintillaMessages.h", "//", HMessages(f))
    FileGenerator.Regenerate(include / "ScintillaTypes.h", "//", HEnumerations(f), HConstants(f))
    FileGenerator.Regenerate(include / "ScintillaCall.h", "//", HMethods(f))
    FileGenerator.Regenerate(root / "call" / "ScintillaCall.cxx", "//", CXXMethods(f))
|
||||
|
||||
if __name__ == "__main__":
|
||||
RegenerateAll(pathlib.Path(__file__).resolve().parent.parent)
|
85
3rdparty/scintilla550/scintilla/scripts/ScintillaData.py
vendored
Normal file
85
3rdparty/scintilla550/scintilla/scripts/ScintillaData.py
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env python3
|
||||
# ScintillaData.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
|
||||
# Released to the public domain.
|
||||
|
||||
# Common code used by Scintilla and SciTE for source file regeneration.
|
||||
# The ScintillaData object exposes information about Scintilla as properties:
|
||||
# Version properties
|
||||
# version
|
||||
# versionDotted
|
||||
# versionCommad
|
||||
#
|
||||
# Date last modified
|
||||
# dateModified
|
||||
# yearModified
|
||||
# mdyModified
|
||||
# dmyModified
|
||||
# myModified
|
||||
#
|
||||
# List of contributors
|
||||
# credits
|
||||
|
||||
# This file can be run to see the data it provides.
|
||||
# Requires Python 3.6 or later
|
||||
|
||||
import datetime, pathlib, sys
|
||||
|
||||
def FindCredits(historyFile, removeLinks=True):
    """Collect the contributor credits from ScintillaHistory.html.

    Scans the first <table>...</table> block of *historyFile* and returns
    the text of each <td> cell.  When *removeLinks* is true, cells
    containing an <a href=...> anchor are flattened to
    "leading text, anchor text, URL" separated by spaces.
    """
    credits = []
    # stage: 0 before the table, 1 inside it, 2 after it.
    stage = 0
    with historyFile.open(encoding="utf-8") as f:
        for line in f.readlines():
            line = line.strip()
            if stage == 0 and line == "<table>":
                stage = 1
            elif stage == 1 and line == "</table>":
                stage = 2
            if stage == 1 and line.startswith("<td>"):
                # Strip the surrounding <td>...</td> tags.
                credit = line[4:-5]
                if removeLinks and "<a" in line:
                    # Split '...<a href="URL">name</a>' into its parts.
                    title, _a, rest = credit.partition("<a href=")
                    urlplus, _bracket, end = rest.partition(">")
                    name = end.split("<")[0]
                    # Remove the quotes around the href value.
                    url = urlplus[1:-1]
                    credit = title.strip()
                    if credit:
                        credit += " "
                    credit += name + " " + url
                credits.append(credit)
    return credits
|
||||
|
||||
class ScintillaData:
    """Expose Scintilla version and modification-date information.

    Reads version.txt, doc/index.html and doc/ScintillaHistory.html under
    the given root and provides: version, versionDotted, versionCommad,
    dateModified, yearModified, mdyModified, dmyModified, myModified,
    and credits.
    """
    def __init__(self, scintillaRoot):
        # Discover version information
        self.version = (scintillaRoot / "version.txt").read_text().strip()
        # e.g. "550" -> "5.5.0"
        self.versionDotted = self.version[0:-2] + '.' + self.version[-2] + '.' + \
            self.version[-1]
        # e.g. "5, 5, 0, 0" as used in Windows resource files.
        self.versionCommad = self.versionDotted.replace(".", ", ") + ', 0'

        with (scintillaRoot / "doc" / "index.html").open() as f:
            # The Date.Modified meta tag carries the date as YYYYMMDD.
            self.dateModified = [d for d in f.readlines() if "Date.Modified" in d]\
                [0].split('\"')[3]
            # 20130602
            # index.html, SciTE.html
            dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
            self.yearModified = self.dateModified[0:4]
            monthModified = dtModified.strftime("%B")
            dayModified = "%d" % dtModified.day
            self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
            # May 22 2013
            # index.html, SciTE.html
            self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
            # 22 May 2013
            # ScintillaHistory.html -- only first should change
            self.myModified = monthModified + " " + self.yearModified

        self.credits = FindCredits(scintillaRoot / "doc" / "ScintillaHistory.html")
|
||||
|
||||
if __name__=="__main__":
|
||||
sci = ScintillaData(pathlib.Path(__file__).resolve().parent.parent)
|
||||
print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
|
||||
print("Date last modified %s %s %s %s %s" % (
|
||||
sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
|
||||
print("Credits:")
|
||||
for c in sci.credits:
|
||||
sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
|
0
3rdparty/scintilla550/scintilla/scripts/__init__.py
vendored
Normal file
0
3rdparty/scintilla550/scintilla/scripts/__init__.py
vendored
Normal file
5
3rdparty/scintilla550/scintilla/scripts/archive.sh
vendored
Normal file
5
3rdparty/scintilla550/scintilla/scripts/archive.sh
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
# Requires a Mercurial (hg) checkout; run from scintilla/scripts.

# Up to parent directory of scintilla
cd ../..

# Archive Scintilla to scintilla.tgz
hg archive --repository scintilla scintilla.tgz
|
Reference in New Issue
Block a user